author    Ali Ijaz Sheikh <ofrobots@google.com>  2016-04-07 14:06:55 -0700
committer Ali Ijaz Sheikh <ofrobots@google.com>  2016-04-14 10:03:39 -0700
commit    52af5c4eebf4de8638aef0338bd826656312a02a (patch)
tree      628dc9fb0b558c3a73a2160706fef368876fe548 /deps
parent    6e3e8acc7cc7ebd3d67db5ade1247b8b558efe09 (diff)
download  node-new-52af5c4eebf4de8638aef0338bd826656312a02a.tar.gz
deps: upgrade V8 to 5.0.71.32
* Pick up the branch head for V8 5.0 stable [1]
* Edit v8 gitignore to allow trace_event copy
* Update V8 DEP trace_event as per deps/v8/DEPS [2]

[1] https://chromium.googlesource.com/v8/v8.git/+/3c67831
[2] https://chromium.googlesource.com/chromium/src/base/trace_event/common/+/4b09207e447ae5bd34643b4c6321bee7b76d35f9

Ref: https://github.com/nodejs/node/pull/5945
PR-URL: https://github.com/nodejs/node/pull/6111
Reviewed-By: Michaël Zasso <mic.besace@gmail.com>
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Fedor Indutny <fedor.indutny@gmail.com>
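For reference, [2] is the revision at which deps/v8/DEPS pins the shared
trace_event code that this commit copies into the tree. A minimal sketch of
what such a pin looks like (DEPS files are Python; the entry below is
illustrative only — the key name and dict shape are assumed from the common
Chromium DEPS layout, and only the repository URL and revision hash come from
the commit message above):

    # Sketch of a DEPS-style pin for trace_event (illustrative, not the
    # verbatim contents of deps/v8/DEPS). URL and revision from [2].
    deps = {
        "v8/base/trace_event/common":
            "https://chromium.googlesource.com/chromium/src/base/trace_event/common.git"
            + "@" + "4b09207e447ae5bd34643b4c6321bee7b76d35f9",
    }

Updating the pinned revision and re-syncing is what brings the new copy of
trace_event_common.h shown in the diffstat below.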
Diffstat (limited to 'deps')
-rw-r--r--deps/v8/.clang-format2
-rw-r--r--deps/v8/.gitignore9
-rw-r--r--deps/v8/AUTHORS3
-rw-r--r--deps/v8/BUILD.gn133
-rw-r--r--deps/v8/ChangeLog1775
-rw-r--r--deps/v8/DEPS64
-rw-r--r--deps/v8/Makefile2
-rw-r--r--deps/v8/WATCHLISTS24
-rw-r--r--deps/v8/base/trace_event/common/trace_event_common.h63
-rw-r--r--deps/v8/build/all.gyp6
-rwxr-xr-xdeps/v8/build/has_valgrind.py21
-rw-r--r--deps/v8/build/isolate.gypi3
-rw-r--r--deps/v8/build/standalone.gypi73
-rw-r--r--deps/v8/build/toolchain.gypi3
-rw-r--r--deps/v8/build/vs_toolchain.py4
-rw-r--r--deps/v8/include/v8-experimental.h1
-rw-r--r--deps/v8/include/v8-platform.h10
-rw-r--r--deps/v8/include/v8-profiler.h134
-rw-r--r--deps/v8/include/v8-version.h8
-rw-r--r--deps/v8/include/v8.h106
-rw-r--r--deps/v8/infra/config/cq.cfg5
-rw-r--r--deps/v8/snapshot_toolchain.gni2
-rw-r--r--deps/v8/src/DEPS8
-rw-r--r--deps/v8/src/accessors.cc520
-rw-r--r--deps/v8/src/accessors.h24
-rw-r--r--deps/v8/src/address-map.cc9
-rw-r--r--deps/v8/src/api-experimental.cc5
-rw-r--r--deps/v8/src/api-natives.cc296
-rw-r--r--deps/v8/src/api-natives.h4
-rw-r--r--deps/v8/src/api.cc232
-rw-r--r--deps/v8/src/arguments.cc18
-rw-r--r--deps/v8/src/arguments.h55
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h27
-rw-r--r--deps/v8/src/arm/assembler-arm.cc47
-rw-r--r--deps/v8/src/arm/assembler-arm.h23
-rw-r--r--deps/v8/src/arm/builtins-arm.cc659
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc1196
-rw-r--r--deps/v8/src/arm/codegen-arm.cc10
-rw-r--r--deps/v8/src/arm/constants-arm.h16
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc24
-rw-r--r--deps/v8/src/arm/disasm-arm.cc32
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc61
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc109
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h24
-rw-r--r--deps/v8/src/arm/simulator-arm.cc50
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h22
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h4
-rw-r--r--deps/v8/src/arm64/builtins-arm64.cc638
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc1414
-rw-r--r--deps/v8/src/arm64/cpu-arm64.cc5
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc25
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc64
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc133
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h28
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc61
-rw-r--r--deps/v8/src/arm64/utils-arm64.h13
-rw-r--r--deps/v8/src/assembler.cc132
-rw-r--r--deps/v8/src/assembler.h46
-rw-r--r--deps/v8/src/ast/OWNERS1
-rw-r--r--deps/v8/src/ast/ast-expression-rewriter.cc6
-rw-r--r--deps/v8/src/ast/ast-expression-rewriter.h2
-rw-r--r--deps/v8/src/ast/ast-expression-visitor.cc5
-rw-r--r--deps/v8/src/ast/ast-expression-visitor.h2
-rw-r--r--deps/v8/src/ast/ast-literal-reindexer.cc9
-rw-r--r--deps/v8/src/ast/ast-numbering.cc17
-rw-r--r--deps/v8/src/ast/ast-value-factory.cc2
-rw-r--r--deps/v8/src/ast/ast-value-factory.h3
-rw-r--r--deps/v8/src/ast/ast.cc43
-rw-r--r--deps/v8/src/ast/ast.h358
-rw-r--r--deps/v8/src/ast/modules.cc1
-rw-r--r--deps/v8/src/ast/modules.h19
-rw-r--r--deps/v8/src/ast/prettyprinter.cc80
-rw-r--r--deps/v8/src/ast/prettyprinter.h3
-rw-r--r--deps/v8/src/ast/scopeinfo.cc76
-rw-r--r--deps/v8/src/ast/scopes.cc170
-rw-r--r--deps/v8/src/ast/scopes.h57
-rw-r--r--deps/v8/src/ast/variables.cc3
-rw-r--r--deps/v8/src/ast/variables.h56
-rw-r--r--deps/v8/src/bailout-reason.h14
-rw-r--r--deps/v8/src/base.isolate7
-rw-r--r--deps/v8/src/base/atomicops.h2
-rw-r--r--deps/v8/src/base/atomicops_internals_s390_gcc.h152
-rw-r--r--deps/v8/src/base/bits.h14
-rw-r--r--deps/v8/src/base/cpu.cc15
-rw-r--r--deps/v8/src/base/cpu.h5
-rw-r--r--deps/v8/src/bootstrapper.cc355
-rw-r--r--deps/v8/src/bootstrapper.h6
-rw-r--r--deps/v8/src/builtins.cc1014
-rw-r--r--deps/v8/src/builtins.h133
-rw-r--r--deps/v8/src/code-factory.cc93
-rw-r--r--deps/v8/src/code-factory.h33
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc84
-rw-r--r--deps/v8/src/code-stubs.cc106
-rw-r--r--deps/v8/src/code-stubs.h184
-rw-r--r--deps/v8/src/codegen.cc11
-rw-r--r--deps/v8/src/compiler.cc179
-rw-r--r--deps/v8/src/compiler.h115
-rw-r--r--deps/v8/src/compiler/access-builder.cc14
-rw-r--r--deps/v8/src/compiler/access-info.cc15
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc130
-rw-r--r--deps/v8/src/compiler/arm/instruction-codes-arm.h7
-rw-r--r--deps/v8/src/compiler/arm/instruction-scheduler-arm.cc6
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc166
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc133
-rw-r--r--deps/v8/src/compiler/arm64/instruction-codes-arm64.h15
-rw-r--r--deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc12
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc90
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.cc360
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.h12
-rw-r--r--deps/v8/src/compiler/ast-loop-assignment-analyzer.cc6
-rw-r--r--deps/v8/src/compiler/bytecode-branch-analysis.cc98
-rw-r--r--deps/v8/src/compiler/bytecode-branch-analysis.h34
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc1574
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h233
-rw-r--r--deps/v8/src/compiler/c-linkage.cc20
-rw-r--r--deps/v8/src/compiler/change-lowering.cc76
-rw-r--r--deps/v8/src/compiler/change-lowering.h8
-rw-r--r--deps/v8/src/compiler/code-generator.cc19
-rw-r--r--deps/v8/src/compiler/code-stub-assembler.cc485
-rw-r--r--deps/v8/src/compiler/code-stub-assembler.h210
-rw-r--r--deps/v8/src/compiler/common-operator.cc11
-rw-r--r--deps/v8/src/compiler/common-operator.h10
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.cc280
-rw-r--r--deps/v8/src/compiler/escape-analysis-reducer.h13
-rw-r--r--deps/v8/src/compiler/escape-analysis.cc1145
-rw-r--r--deps/v8/src/compiler/escape-analysis.h104
-rw-r--r--deps/v8/src/compiler/fast-accessor-assembler.cc44
-rw-r--r--deps/v8/src/compiler/fast-accessor-assembler.h9
-rw-r--r--deps/v8/src/compiler/frame-states.h16
-rw-r--r--deps/v8/src/compiler/frame.h33
-rw-r--r--deps/v8/src/compiler/graph-trimmer.cc3
-rw-r--r--deps/v8/src/compiler/graph-trimmer.h8
-rw-r--r--deps/v8/src/compiler/graph.cc6
-rw-r--r--deps/v8/src/compiler/graph.h8
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc99
-rw-r--r--deps/v8/src/compiler/ia32/instruction-codes-ia32.h4
-rw-r--r--deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc4
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc130
-rw-r--r--deps/v8/src/compiler/instruction-codes.h5
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.cc101
-rw-r--r--deps/v8/src/compiler/instruction-scheduler.h64
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc108
-rw-r--r--deps/v8/src/compiler/instruction-selector.h10
-rw-r--r--deps/v8/src/compiler/instruction.cc126
-rw-r--r--deps/v8/src/compiler/instruction.h41
-rw-r--r--deps/v8/src/compiler/int64-lowering.cc299
-rw-r--r--deps/v8/src/compiler/int64-lowering.h63
-rw-r--r--deps/v8/src/compiler/interpreter-assembler.cc751
-rw-r--r--deps/v8/src/compiler/interpreter-assembler.h224
-rw-r--r--deps/v8/src/compiler/ir-operations.txt0
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc35
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.h8
-rw-r--r--deps/v8/src/compiler/js-call-reducer.cc32
-rw-r--r--deps/v8/src/compiler/js-call-reducer.h9
-rw-r--r--deps/v8/src/compiler/js-context-relaxation.cc67
-rw-r--r--deps/v8/src/compiler/js-context-relaxation.h32
-rw-r--r--deps/v8/src/compiler/js-create-lowering.cc1096
-rw-r--r--deps/v8/src/compiler/js-create-lowering.h99
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc506
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.h2
-rw-r--r--deps/v8/src/compiler/js-global-object-specialization.cc92
-rw-r--r--deps/v8/src/compiler/js-global-object-specialization.h14
-rw-r--r--deps/v8/src/compiler/js-inlining.cc71
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc174
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h3
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.cc96
-rw-r--r--deps/v8/src/compiler/js-native-context-specialization.h10
-rw-r--r--deps/v8/src/compiler/js-operator.cc389
-rw-r--r--deps/v8/src/compiler/js-operator.h158
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc1137
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h27
-rw-r--r--deps/v8/src/compiler/jump-threading.cc13
-rw-r--r--deps/v8/src/compiler/jump-threading.h2
-rw-r--r--deps/v8/src/compiler/linkage.cc110
-rw-r--r--deps/v8/src/compiler/linkage.h40
-rw-r--r--deps/v8/src/compiler/live-range-separator.cc9
-rw-r--r--deps/v8/src/compiler/liveness-analyzer.h4
-rw-r--r--deps/v8/src/compiler/machine-operator.cc40
-rw-r--r--deps/v8/src/compiler/machine-operator.h15
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc134
-rw-r--r--deps/v8/src/compiler/mips/instruction-codes-mips.h5
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc58
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc207
-rw-r--r--deps/v8/src/compiler/mips64/instruction-codes-mips64.h7
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc58
-rw-r--r--deps/v8/src/compiler/move-optimizer.cc340
-rw-r--r--deps/v8/src/compiler/move-optimizer.h21
-rw-r--r--deps/v8/src/compiler/node-properties.cc14
-rw-r--r--deps/v8/src/compiler/opcodes.h10
-rw-r--r--deps/v8/src/compiler/operator-properties.cc1
-rw-r--r--deps/v8/src/compiler/pipeline.cc100
-rw-r--r--deps/v8/src/compiler/pipeline.h21
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc106
-rw-r--r--deps/v8/src/compiler/ppc/instruction-codes-ppc.h3
-rw-r--r--deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc2
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc49
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc95
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h89
-rw-r--r--deps/v8/src/compiler/register-allocator-verifier.cc21
-rw-r--r--deps/v8/src/compiler/register-allocator.cc521
-rw-r--r--deps/v8/src/compiler/register-allocator.h34
-rw-r--r--deps/v8/src/compiler/representation-change.cc4
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc49
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h2
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc4
-rw-r--r--deps/v8/src/compiler/simplified-operator.h6
-rw-r--r--deps/v8/src/compiler/typer.cc318
-rw-r--r--deps/v8/src/compiler/typer.h6
-rw-r--r--deps/v8/src/compiler/verifier.cc20
-rw-r--r--deps/v8/src/compiler/wasm-compiler.cc587
-rw-r--r--deps/v8/src/compiler/wasm-compiler.h18
-rw-r--r--deps/v8/src/compiler/wasm-linkage.cc124
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc104
-rw-r--r--deps/v8/src/compiler/x64/instruction-codes-x64.h4
-rw-r--r--deps/v8/src/compiler/x64/instruction-scheduler-x64.cc4
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc124
-rw-r--r--deps/v8/src/compiler/x87/code-generator-x87.cc167
-rw-r--r--deps/v8/src/compiler/x87/instruction-codes-x87.h5
-rw-r--r--deps/v8/src/compiler/x87/instruction-selector-x87.cc133
-rw-r--r--deps/v8/src/context-measure.cc2
-rw-r--r--deps/v8/src/contexts-inl.h3
-rw-r--r--deps/v8/src/contexts.cc19
-rw-r--r--deps/v8/src/contexts.h25
-rw-r--r--deps/v8/src/conversions-inl.h4
-rw-r--r--deps/v8/src/conversions.h2
-rw-r--r--deps/v8/src/counters.cc138
-rw-r--r--deps/v8/src/counters.h194
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.cc38
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-arm.h66
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc158
-rw-r--r--deps/v8/src/crankshaft/arm/lithium-codegen-arm.h14
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.cc40
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-arm64.h66
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc143
-rw-r--r--deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h14
-rw-r--r--deps/v8/src/crankshaft/hydrogen-alias-analysis.h1
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.cc128
-rw-r--r--deps/v8/src/crankshaft/hydrogen-instructions.h283
-rw-r--r--deps/v8/src/crankshaft/hydrogen-load-elimination.cc2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-range-analysis.cc6
-rw-r--r--deps/v8/src/crankshaft/hydrogen-store-elimination.cc2
-rw-r--r--deps/v8/src/crankshaft/hydrogen-types.cc36
-rw-r--r--deps/v8/src/crankshaft/hydrogen-types.h7
-rw-r--r--deps/v8/src/crankshaft/hydrogen.cc945
-rw-r--r--deps/v8/src/crankshaft/hydrogen.h80
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc145
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h14
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.cc57
-rw-r--r--deps/v8/src/crankshaft/ia32/lithium-ia32.h66
-rw-r--r--deps/v8/src/crankshaft/lithium-codegen.cc4
-rw-r--r--deps/v8/src/crankshaft/lithium.cc22
-rw-r--r--deps/v8/src/crankshaft/lithium.h13
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc196
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-codegen-mips.h14
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.cc38
-rw-r--r--deps/v8/src/crankshaft/mips/lithium-mips.h66
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc193
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h14
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.cc38
-rw-r--r--deps/v8/src/crankshaft/mips64/lithium-mips64.h66
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc212
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h10
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.cc37
-rw-r--r--deps/v8/src/crankshaft/ppc/lithium-ppc.h62
-rw-r--r--deps/v8/src/crankshaft/typing.cc63
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc148
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-codegen-x64.h14
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.cc54
-rw-r--r--deps/v8/src/crankshaft/x64/lithium-x64.h64
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc143
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-codegen-x87.h14
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.cc56
-rw-r--r--deps/v8/src/crankshaft/x87/lithium-x87.h62
-rw-r--r--deps/v8/src/d8.cc8
-rw-r--r--deps/v8/src/debug/arm/debug-arm.cc4
-rw-r--r--deps/v8/src/debug/arm64/debug-arm64.cc4
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc7
-rw-r--r--deps/v8/src/debug/debug-frames.cc51
-rw-r--r--deps/v8/src/debug/debug-frames.h9
-rw-r--r--deps/v8/src/debug/debug-scopes.cc26
-rw-r--r--deps/v8/src/debug/debug-scopes.h5
-rw-r--r--deps/v8/src/debug/debug.cc519
-rw-r--r--deps/v8/src/debug/debug.h147
-rw-r--r--deps/v8/src/debug/debug.js81
-rw-r--r--deps/v8/src/debug/ia32/debug-ia32.cc3
-rw-r--r--deps/v8/src/debug/liveedit.cc4
-rw-r--r--deps/v8/src/debug/mips/debug-mips.cc4
-rw-r--r--deps/v8/src/debug/mips64/debug-mips64.cc4
-rw-r--r--deps/v8/src/debug/mirrors.js2
-rw-r--r--deps/v8/src/debug/ppc/debug-ppc.cc4
-rw-r--r--deps/v8/src/debug/x64/debug-x64.cc3
-rw-r--r--deps/v8/src/debug/x87/debug-x87.cc3
-rw-r--r--deps/v8/src/deoptimizer.cc727
-rw-r--r--deps/v8/src/deoptimizer.h100
-rw-r--r--deps/v8/src/elements-kind.cc5
-rw-r--r--deps/v8/src/elements-kind.h20
-rw-r--r--deps/v8/src/elements.cc475
-rw-r--r--deps/v8/src/elements.h59
-rw-r--r--deps/v8/src/execution.cc18
-rw-r--r--deps/v8/src/execution.h4
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.cc4
-rw-r--r--deps/v8/src/factory.cc65
-rw-r--r--deps/v8/src/factory.h34
-rw-r--r--deps/v8/src/field-type.cc91
-rw-r--r--deps/v8/src/field-type.h49
-rw-r--r--deps/v8/src/flag-definitions.h93
-rw-r--r--deps/v8/src/frames.cc241
-rw-r--r--deps/v8/src/frames.h132
-rw-r--r--deps/v8/src/full-codegen/arm/full-codegen-arm.cc1187
-rw-r--r--deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc1164
-rw-r--r--deps/v8/src/full-codegen/full-codegen.cc407
-rw-r--r--deps/v8/src/full-codegen/full-codegen.h136
-rw-r--r--deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc1220
-rw-r--r--deps/v8/src/full-codegen/mips/full-codegen-mips.cc1198
-rw-r--r--deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc1195
-rw-r--r--deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc1215
-rw-r--r--deps/v8/src/full-codegen/x64/full-codegen-x64.cc1238
-rw-r--r--deps/v8/src/full-codegen/x87/full-codegen-x87.cc1220
-rw-r--r--deps/v8/src/global-handles.cc14
-rw-r--r--deps/v8/src/globals.h91
-rw-r--r--deps/v8/src/heap-symbols.h203
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.cc3
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.h2
-rw-r--r--deps/v8/src/heap/heap-inl.h145
-rw-r--r--deps/v8/src/heap/heap.cc416
-rw-r--r--deps/v8/src/heap/heap.h362
-rw-r--r--deps/v8/src/heap/incremental-marking.cc73
-rw-r--r--deps/v8/src/heap/incremental-marking.h8
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h15
-rw-r--r--deps/v8/src/heap/mark-compact.cc709
-rw-r--r--deps/v8/src/heap/mark-compact.h43
-rw-r--r--deps/v8/src/heap/memory-reducer.cc11
-rw-r--r--deps/v8/src/heap/memory-reducer.h4
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h4
-rw-r--r--deps/v8/src/heap/objects-visiting.cc1
-rw-r--r--deps/v8/src/heap/remembered-set.cc69
-rw-r--r--deps/v8/src/heap/remembered-set.h157
-rw-r--r--deps/v8/src/heap/scavenger-inl.h2
-rw-r--r--deps/v8/src/heap/slot-set.h219
-rw-r--r--deps/v8/src/heap/slots-buffer.cc5
-rw-r--r--deps/v8/src/heap/spaces-inl.h133
-rw-r--r--deps/v8/src/heap/spaces.cc607
-rw-r--r--deps/v8/src/heap/spaces.h901
-rw-r--r--deps/v8/src/heap/store-buffer-inl.h44
-rw-r--r--deps/v8/src/heap/store-buffer.cc556
-rw-r--r--deps/v8/src/heap/store-buffer.h201
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h16
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc14
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h7
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc690
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc1327
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.h26
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc27
-rw-r--r--deps/v8/src/ia32/disasm-ia32.cc58
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc60
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc218
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h34
-rw-r--r--deps/v8/src/ic/arm/handler-compiler-arm.cc97
-rw-r--r--deps/v8/src/ic/arm/ic-arm.cc57
-rw-r--r--deps/v8/src/ic/arm64/handler-compiler-arm64.cc88
-rw-r--r--deps/v8/src/ic/arm64/ic-arm64.cc62
-rw-r--r--deps/v8/src/ic/call-optimization.cc32
-rw-r--r--deps/v8/src/ic/call-optimization.h3
-rw-r--r--deps/v8/src/ic/handler-compiler.cc69
-rw-r--r--deps/v8/src/ic/handler-compiler.h15
-rw-r--r--deps/v8/src/ic/ia32/handler-compiler-ia32.cc76
-rw-r--r--deps/v8/src/ic/ia32/ic-ia32.cc53
-rw-r--r--deps/v8/src/ic/ic-compiler.cc5
-rw-r--r--deps/v8/src/ic/ic-inl.h3
-rw-r--r--deps/v8/src/ic/ic-state.cc28
-rw-r--r--deps/v8/src/ic/ic-state.h48
-rw-r--r--deps/v8/src/ic/ic.cc303
-rw-r--r--deps/v8/src/ic/ic.h46
-rw-r--r--deps/v8/src/ic/mips/handler-compiler-mips.cc109
-rw-r--r--deps/v8/src/ic/mips/ic-mips.cc72
-rw-r--r--deps/v8/src/ic/mips/stub-cache-mips.cc6
-rw-r--r--deps/v8/src/ic/mips64/handler-compiler-mips64.cc109
-rw-r--r--deps/v8/src/ic/mips64/ic-mips64.cc56
-rw-r--r--deps/v8/src/ic/mips64/stub-cache-mips64.cc6
-rw-r--r--deps/v8/src/ic/ppc/handler-compiler-ppc.cc99
-rw-r--r--deps/v8/src/ic/ppc/ic-ppc.cc57
-rw-r--r--deps/v8/src/ic/x64/handler-compiler-x64.cc83
-rw-r--r--deps/v8/src/ic/x64/ic-x64.cc53
-rw-r--r--deps/v8/src/ic/x87/handler-compiler-x87.cc76
-rw-r--r--deps/v8/src/ic/x87/ic-x87.cc53
-rw-r--r--deps/v8/src/identity-map.cc18
-rw-r--r--deps/v8/src/identity-map.h6
-rw-r--r--deps/v8/src/interface-descriptors.cc243
-rw-r--r--deps/v8/src/interface-descriptors.h118
-rw-r--r--deps/v8/src/interpreter/DEPS3
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc766
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h206
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc71
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h4
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc1464
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h79
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.cc175
-rw-r--r--deps/v8/src/interpreter/bytecode-register-allocator.h67
-rw-r--r--deps/v8/src/interpreter/bytecode-traits.h69
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc303
-rw-r--r--deps/v8/src/interpreter/bytecodes.h258
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.cc14
-rw-r--r--deps/v8/src/interpreter/constant-array-builder.h12
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.cc51
-rw-r--r--deps/v8/src/interpreter/control-flow-builders.h47
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.cc73
-rw-r--r--deps/v8/src/interpreter/handler-table-builder.h61
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.cc546
-rw-r--r--deps/v8/src/interpreter/interpreter-assembler.h205
-rw-r--r--deps/v8/src/interpreter/interpreter.cc1038
-rw-r--r--deps/v8/src/interpreter/interpreter.h78
-rw-r--r--deps/v8/src/interpreter/register-translator.cc173
-rw-r--r--deps/v8/src/interpreter/register-translator.h119
-rw-r--r--deps/v8/src/interpreter/source-position-table.cc84
-rw-r--r--deps/v8/src/interpreter/source-position-table.h82
-rw-r--r--deps/v8/src/isolate.cc159
-rw-r--r--deps/v8/src/isolate.h19
-rw-r--r--deps/v8/src/js/array.js123
-rw-r--r--deps/v8/src/js/generator.js45
-rw-r--r--deps/v8/src/js/harmony-reflect.js37
-rw-r--r--deps/v8/src/js/i18n.js50
-rw-r--r--deps/v8/src/js/json.js5
-rw-r--r--deps/v8/src/js/macros.py8
-rw-r--r--deps/v8/src/js/math.js71
-rw-r--r--deps/v8/src/js/prologue.js3
-rw-r--r--deps/v8/src/js/proxy.js39
-rw-r--r--deps/v8/src/js/regexp.js209
-rw-r--r--deps/v8/src/js/runtime.js53
-rw-r--r--deps/v8/src/js/spread.js4
-rw-r--r--deps/v8/src/js/string.js326
-rw-r--r--deps/v8/src/js/symbol.js22
-rw-r--r--deps/v8/src/js/typedarray.js78
-rw-r--r--deps/v8/src/js/v8natives.js94
-rw-r--r--deps/v8/src/json-parser.h (renamed from deps/v8/src/parsing/json-parser.h)30
-rw-r--r--deps/v8/src/json-stringifier.h3
-rw-r--r--deps/v8/src/key-accumulator.cc11
-rw-r--r--deps/v8/src/key-accumulator.h17
-rw-r--r--deps/v8/src/libplatform/default-platform.cc5
-rw-r--r--deps/v8/src/libplatform/default-platform.h1
-rw-r--r--deps/v8/src/list.h6
-rw-r--r--deps/v8/src/log-inl.h13
-rw-r--r--deps/v8/src/log.cc21
-rw-r--r--deps/v8/src/log.h4
-rw-r--r--deps/v8/src/lookup.cc353
-rw-r--r--deps/v8/src/lookup.h77
-rw-r--r--deps/v8/src/machine-type.cc2
-rw-r--r--deps/v8/src/machine-type.h9
-rw-r--r--deps/v8/src/messages.cc11
-rw-r--r--deps/v8/src/messages.h27
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h29
-rw-r--r--deps/v8/src/mips/assembler-mips.cc64
-rw-r--r--deps/v8/src/mips/assembler-mips.h23
-rw-r--r--deps/v8/src/mips/builtins-mips.cc726
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc1348
-rw-r--r--deps/v8/src/mips/codegen-mips.cc12
-rw-r--r--deps/v8/src/mips/constants-mips.cc2
-rw-r--r--deps/v8/src/mips/constants-mips.h21
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc27
-rw-r--r--deps/v8/src/mips/disasm-mips.cc2
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc61
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc194
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h62
-rw-r--r--deps/v8/src/mips/simulator-mips.cc254
-rw-r--r--deps/v8/src/mips/simulator-mips.h6
-rw-r--r--deps/v8/src/mips64/assembler-mips64-inl.h32
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc3
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h5
-rw-r--r--deps/v8/src/mips64/builtins-mips64.cc713
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc1386
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc6
-rw-r--r--deps/v8/src/mips64/constants-mips64.h25
-rw-r--r--deps/v8/src/mips64/deoptimizer-mips64.cc39
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc61
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc322
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h74
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc270
-rw-r--r--deps/v8/src/mips64/simulator-mips64.h28
-rw-r--r--deps/v8/src/objects-body-descriptors-inl.h8
-rw-r--r--deps/v8/src/objects-debug.cc47
-rw-r--r--deps/v8/src/objects-inl.h779
-rw-r--r--deps/v8/src/objects-printer.cc113
-rw-r--r--deps/v8/src/objects.cc1949
-rw-r--r--deps/v8/src/objects.h878
-rw-r--r--deps/v8/src/optimizing-compile-dispatcher.cc6
-rw-r--r--deps/v8/src/ostreams.cc16
-rw-r--r--deps/v8/src/ostreams.h10
-rw-r--r--deps/v8/src/parsing/OWNERS1
-rw-r--r--deps/v8/src/parsing/expression-classifier.h71
-rw-r--r--deps/v8/src/parsing/parser-base.h552
-rw-r--r--deps/v8/src/parsing/parser.cc2461
-rw-r--r--deps/v8/src/parsing/parser.h86
-rw-r--r--deps/v8/src/parsing/pattern-rewriter.cc18
-rw-r--r--deps/v8/src/parsing/preparser.cc77
-rw-r--r--deps/v8/src/parsing/preparser.h77
-rw-r--r--deps/v8/src/parsing/rewriter.cc14
-rw-r--r--deps/v8/src/parsing/scanner.cc12
-rw-r--r--deps/v8/src/parsing/scanner.h6
-rw-r--r--deps/v8/src/parsing/token.h16
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h42
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc21
-rw-r--r--deps/v8/src/ppc/assembler-ppc.h42
-rw-r--r--deps/v8/src/ppc/builtins-ppc.cc648
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc1388
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc8
-rw-r--r--deps/v8/src/ppc/cpu-ppc.cc2
-rw-r--r--deps/v8/src/ppc/deoptimizer-ppc.cc28
-rw-r--r--deps/v8/src/ppc/disasm-ppc.cc8
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc58
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc243
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h55
-rw-r--r--deps/v8/src/ppc/simulator-ppc.cc175
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc13
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h6
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc26
-rw-r--r--deps/v8/src/profiler/heap-profiler.h6
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc25
-rw-r--r--deps/v8/src/profiler/profile-generator.cc38
-rw-r--r--deps/v8/src/profiler/profile-generator.h8
-rw-r--r--deps/v8/src/profiler/sampler.cc29
-rw-r--r--deps/v8/src/profiler/sampler.h23
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.cc260
-rw-r--r--deps/v8/src/profiler/sampling-heap-profiler.h166
-rw-r--r--deps/v8/src/property-descriptor.cc38
-rw-r--r--deps/v8/src/property-descriptor.h10
-rw-r--r--deps/v8/src/property-details.h5
-rw-r--r--deps/v8/src/property.cc6
-rw-r--r--deps/v8/src/property.h35
-rw-r--r--deps/v8/src/prototype.h106
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc16
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h2
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc16
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h2
-rw-r--r--deps/v8/src/regexp/bytecodes-irregexp.h106
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc19
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h2
-rw-r--r--deps/v8/src/regexp/interpreter-irregexp.cc60
-rw-r--r--deps/v8/src/regexp/interpreter-irregexp.h5
-rw-r--r--deps/v8/src/regexp/jsregexp-inl.h5
-rw-r--r--deps/v8/src/regexp/jsregexp.cc847
-rw-r--r--deps/v8/src/regexp/jsregexp.h83
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc16
-rw-r--r--deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h2
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc21
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h42
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc17
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h2
-rw-r--r--deps/v8/src/regexp/regexp-ast.cc4
-rw-r--r--deps/v8/src/regexp/regexp-ast.h62
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h8
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc17
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-irregexp.h10
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-tracer.cc8
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler-tracer.h2
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc112
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.h37
-rw-r--r--deps/v8/src/regexp/regexp-parser.cc523
-rw-r--r--deps/v8/src/regexp/regexp-parser.h44
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc29
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h2
-rw-r--r--deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc20
-rw-r--r--deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h2
-rw-r--r--deps/v8/src/runtime-profiler.cc263
-rw-r--r--deps/v8/src/runtime-profiler.h5
-rw-r--r--deps/v8/src/runtime/runtime-array.cc22
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc80
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc20
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc54
-rw-r--r--deps/v8/src/runtime/runtime-forin.cc108
-rw-r--r--deps/v8/src/runtime/runtime-function.cc31
-rw-r--r--deps/v8/src/runtime/runtime-generator.cc65
-rw-r--r--deps/v8/src/runtime/runtime-i18n.cc3
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc22
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc162
-rw-r--r--deps/v8/src/runtime/runtime-json.cc2
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc54
-rw-r--r--deps/v8/src/runtime/runtime-maths.cc41
-rw-r--r--deps/v8/src/runtime/runtime-object.cc258
-rw-r--r--deps/v8/src/runtime/runtime-operators.cc133
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc6
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc6
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc456
-rw-r--r--deps/v8/src/runtime/runtime-simd.cc40
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc2
-rw-r--r--deps/v8/src/runtime/runtime-test.cc52
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc36
-rw-r--r--deps/v8/src/runtime/runtime-utils.h16
-rw-r--r--deps/v8/src/runtime/runtime.cc7
-rw-r--r--deps/v8/src/runtime/runtime.h807
-rw-r--r--deps/v8/src/snapshot/serialize.cc135
-rw-r--r--deps/v8/src/snapshot/serialize.h24
-rw-r--r--deps/v8/src/source-position.h87
-rw-r--r--deps/v8/src/startup-data-util.cc5
-rw-r--r--deps/v8/src/string-stream.cc19
-rw-r--r--deps/v8/src/tracing/trace-event.h6
-rw-r--r--deps/v8/src/transitions-inl.h4
-rw-r--r--deps/v8/src/transitions.cc14
-rw-r--r--deps/v8/src/type-cache.cc1
-rw-r--r--deps/v8/src/type-cache.h4
-rw-r--r--deps/v8/src/type-feedback-vector.cc9
-rw-r--r--deps/v8/src/type-feedback-vector.h1
-rw-r--r--deps/v8/src/type-info.cc6
-rw-r--r--deps/v8/src/types-inl.h487
-rw-r--r--deps/v8/src/types.cc579
-rw-r--r--deps/v8/src/types.h1262
-rw-r--r--deps/v8/src/typing-asm.cc296
-rw-r--r--deps/v8/src/typing-asm.h5
-rw-r--r--deps/v8/src/utils-inl.h37
-rw-r--r--deps/v8/src/utils.h97
-rw-r--r--deps/v8/src/vm-state-inl.h26
-rw-r--r--deps/v8/src/vm-state.h2
-rw-r--r--deps/v8/src/wasm/asm-wasm-builder.cc440
-rw-r--r--deps/v8/src/wasm/asm-wasm-builder.h5
-rw-r--r--deps/v8/src/wasm/ast-decoder.cc979
-rw-r--r--deps/v8/src/wasm/ast-decoder.h213
-rw-r--r--deps/v8/src/wasm/decoder.h213
-rw-r--r--deps/v8/src/wasm/encoder.cc82
-rw-r--r--deps/v8/src/wasm/encoder.h19
-rw-r--r--deps/v8/src/wasm/module-decoder.cc195
-rw-r--r--deps/v8/src/wasm/wasm-js.cc172
-rw-r--r--deps/v8/src/wasm/wasm-macro-gen.h9
-rw-r--r--deps/v8/src/wasm/wasm-module.cc420
-rw-r--r--deps/v8/src/wasm/wasm-module.h95
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.cc14
-rw-r--r--deps/v8/src/wasm/wasm-opcodes.h8
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h23
-rw-r--r--deps/v8/src/x64/assembler-x64.cc11
-rw-r--r--deps/v8/src/x64/assembler-x64.h20
-rw-r--r--deps/v8/src/x64/builtins-x64.cc683
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc1326
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc23
-rw-r--r--deps/v8/src/x64/disasm-x64.cc12
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc59
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc205
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h30
-rw-r--r--deps/v8/src/x87/assembler-x87-inl.h16
-rw-r--r--deps/v8/src/x87/assembler-x87.cc5
-rw-r--r--deps/v8/src/x87/assembler-x87.h6
-rw-r--r--deps/v8/src/x87/builtins-x87.cc705
-rw-r--r--deps/v8/src/x87/code-stubs-x87.cc1384
-rw-r--r--deps/v8/src/x87/code-stubs-x87.h16
-rw-r--r--deps/v8/src/x87/deoptimizer-x87.cc27
-rw-r--r--deps/v8/src/x87/disasm-x87.cc48
-rw-r--r--deps/v8/src/x87/interface-descriptors-x87.cc57
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.cc191
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.h31
-rw-r--r--deps/v8/test/bot_default.gyp1
-rw-r--r--deps/v8/test/bot_default.isolate6
-rw-r--r--deps/v8/test/cctest/cctest.gyp40
-rw-r--r--deps/v8/test/cctest/cctest.isolate5
-rw-r--r--deps/v8/test/cctest/cctest.status227
-rw-r--r--deps/v8/test/cctest/cctest_exe.isolate13
-rw-r--r--deps/v8/test/cctest/compiler/c-signature.h8
-rw-r--r--deps/v8/test/cctest/compiler/call-tester.h18
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.h53
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.h37
-rw-r--r--deps/v8/test/cctest/compiler/test-code-stub-assembler.cc129
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc4
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc224
-rw-r--r--deps/v8/test/cctest/compiler/test-jump-threading.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-linkage.cc28
-rw-r--r--deps/v8/test/cctest/compiler/test-pipeline.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc829
-rw-r--r--deps/v8/test/cctest/compiler/test-run-deopt.cc3
-rw-r--r--deps/v8/test/cctest/compiler/test-run-intrinsics.cc34
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jscalls.cc36
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsexceptions.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc208
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc93
-rw-r--r--deps/v8/test/cctest/compiler/test-run-stubs.cc9
-rw-r--r--deps/v8/test/cctest/compiler/value-helper.h17
-rw-r--r--deps/v8/test/cctest/heap/heap-tester.h21
-rw-r--r--deps/v8/test/cctest/heap/test-heap.cc132
-rw-r--r--deps/v8/test/cctest/heap/test-spaces.cc441
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc301
-rw-r--r--deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h119
-rw-r--r--deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc469
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc4458
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc1574
-rw-r--r--deps/v8/test/cctest/profiler-extension.cc36
-rw-r--r--deps/v8/test/cctest/profiler-extension.h7
-rw-r--r--deps/v8/test/cctest/test-accessors.cc4
-rw-r--r--deps/v8/test/cctest/test-api-fast-accessor-builder.cc82
-rw-r--r--deps/v8/test/cctest/test-api-interceptors.cc8
-rw-r--r--deps/v8/test/cctest/test-api.cc751
-rw-r--r--deps/v8/test/cctest/test-asm-validator.cc352
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc49
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc154
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc154
-rw-r--r--deps/v8/test/cctest/test-ast-expression-visitor.cc13
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc744
-rw-r--r--deps/v8/test/cctest/test-debug.cc159
-rw-r--r--deps/v8/test/cctest/test-disasm-arm.cc106
-rw-r--r--deps/v8/test/cctest/test-extra.js9
-rw-r--r--deps/v8/test/cctest/test-field-type-tracking.cc261
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc160
-rw-r--r--deps/v8/test/cctest/test-inobject-slack-tracking.cc4
-rw-r--r--deps/v8/test/cctest/test-log-stack-tracer.cc11
-rw-r--r--deps/v8/test/cctest/test-log.cc24
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips.cc60
-rw-r--r--deps/v8/test/cctest/test-macro-assembler-mips64.cc66
-rw-r--r--deps/v8/test/cctest/test-object-observe.cc4
-rw-r--r--deps/v8/test/cctest/test-parsing.cc298
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc3
-rw-r--r--deps/v8/test/cctest/test-regexp.cc175
-rw-r--r--deps/v8/test/cctest/test-transitions.cc55
-rw-r--r--deps/v8/test/cctest/test-types.cc974
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc76
-rw-r--r--deps/v8/test/cctest/test-weakmaps.cc4
-rw-r--r--deps/v8/test/cctest/test-weaksets.cc4
-rw-r--r--deps/v8/test/cctest/trace-extension.cc2
-rw-r--r--deps/v8/test/cctest/types-fuzz.h229
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-js.cc337
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm-module.cc9
-rw-r--r--deps/v8/test/cctest/wasm/test-run-wasm.cc698
-rw-r--r--deps/v8/test/cctest/wasm/test-signatures.h9
-rw-r--r--deps/v8/test/cctest/wasm/wasm-run-utils.h435
-rw-r--r--deps/v8/test/default.gyp1
-rw-r--r--deps/v8/test/default.isolate6
-rw-r--r--deps/v8/test/fuzzer/DEPS3
-rw-r--r--deps/v8/test/fuzzer/fuzzer-support.cc100
-rw-r--r--deps/v8/test/fuzzer/fuzzer-support.h37
-rw-r--r--deps/v8/test/fuzzer/fuzzer.cc56
-rw-r--r--deps/v8/test/fuzzer/fuzzer.gyp134
-rw-r--r--deps/v8/test/fuzzer/fuzzer.isolate22
-rw-r--r--deps/v8/test/fuzzer/fuzzer.status7
-rw-r--r--deps/v8/test/fuzzer/json.cc31
-rw-r--r--deps/v8/test/fuzzer/json/json1
-rw-r--r--deps/v8/test/fuzzer/json/not-json1
-rw-r--r--deps/v8/test/fuzzer/parser.cc42
-rw-r--r--deps/v8/test/fuzzer/parser/hello-world1
-rw-r--r--deps/v8/test/fuzzer/regexp.cc75
-rw-r--r--deps/v8/test/fuzzer/regexp/test001
-rw-r--r--deps/v8/test/fuzzer/regexp/test011
-rw-r--r--deps/v8/test/fuzzer/regexp/test021
-rw-r--r--deps/v8/test/fuzzer/regexp/test031
-rw-r--r--deps/v8/test/fuzzer/regexp/test041
-rw-r--r--deps/v8/test/fuzzer/regexp/test051
-rw-r--r--deps/v8/test/fuzzer/regexp/test061
-rw-r--r--deps/v8/test/fuzzer/regexp/test071
-rw-r--r--deps/v8/test/fuzzer/regexp/test081
-rw-r--r--deps/v8/test/fuzzer/regexp/test091
-rw-r--r--deps/v8/test/fuzzer/regexp/test101
-rw-r--r--deps/v8/test/fuzzer/regexp/test111
-rw-r--r--deps/v8/test/fuzzer/regexp/test121
-rw-r--r--deps/v8/test/fuzzer/regexp/test131
-rw-r--r--deps/v8/test/fuzzer/regexp/test141
-rw-r--r--deps/v8/test/fuzzer/regexp/test151
-rw-r--r--deps/v8/test/fuzzer/regexp/test161
-rw-r--r--deps/v8/test/fuzzer/regexp/test171
-rw-r--r--deps/v8/test/fuzzer/regexp/test181
-rw-r--r--deps/v8/test/fuzzer/regexp/test191
-rw-r--r--deps/v8/test/fuzzer/regexp/test201
-rw-r--r--deps/v8/test/fuzzer/regexp/test211
-rw-r--r--deps/v8/test/fuzzer/regexp/test221
-rw-r--r--deps/v8/test/fuzzer/regexp/test231
-rw-r--r--deps/v8/test/fuzzer/regexp/test241
-rw-r--r--deps/v8/test/fuzzer/testcfg.py48
-rw-r--r--deps/v8/test/ignition.isolate5
-rw-r--r--deps/v8/test/message/arrow-invalid-rest-2.js8
-rw-r--r--deps/v8/test/message/arrow-invalid-rest-2.out4
-rw-r--r--deps/v8/test/message/arrow-invalid-rest.js8
-rw-r--r--deps/v8/test/message/arrow-invalid-rest.out4
-rw-r--r--deps/v8/test/message/for-loop-invalid-lhs.out1
-rw-r--r--deps/v8/test/message/function-sent-escaped.js10
-rw-r--r--deps/v8/test/message/function-sent-escaped.out4
-rw-r--r--deps/v8/test/message/let-lexical-name-in-array-prohibited.out1
-rw-r--r--deps/v8/test/message/let-lexical-name-in-object-prohibited.out1
-rw-r--r--deps/v8/test/message/let-lexical-name-prohibited.out1
-rw-r--r--deps/v8/test/message/new-target-escaped.js10
-rw-r--r--deps/v8/test/message/new-target-escaped.out4
-rw-r--r--deps/v8/test/message/try-catch-lexical-conflict.out2
-rw-r--r--deps/v8/test/message/try-catch-variable-conflict.out2
-rw-r--r--deps/v8/test/mjsunit/apply.js4
-rw-r--r--deps/v8/test/mjsunit/arguments-opt.js132
-rw-r--r--deps/v8/test/mjsunit/array-sort.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/debug-catch-prediction.js143
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js (renamed from deps/v8/test/mjsunit/regress/setvalueof-deopt.js)23
-rw-r--r--deps/v8/test/mjsunit/compiler/double-array-to-global.js17
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-arguments.js26
-rw-r--r--deps/v8/test/mjsunit/compiler/minus-zero.js28
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-for-in.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-1085.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-dead-throw-inlining.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-max.js1
-rw-r--r--deps/v8/test/mjsunit/compiler/try-catch-deopt.js225
-rw-r--r--deps/v8/test/mjsunit/compiler/try-context.js89
-rw-r--r--deps/v8/test/mjsunit/compiler/try-finally-deopt.js249
-rw-r--r--deps/v8/test/mjsunit/constant-fold-control-instructions.js7
-rw-r--r--deps/v8/test/mjsunit/debug-changebreakpoint.js6
-rw-r--r--deps/v8/test/mjsunit/debug-conditional-breakpoints.js17
-rw-r--r--deps/v8/test/mjsunit/debug-ignore-breakpoints.js88
-rw-r--r--deps/v8/test/mjsunit/debug-negative-break-points.js99
-rw-r--r--deps/v8/test/mjsunit/debug-script.js4
-rw-r--r--deps/v8/test/mjsunit/debug-setbreakpoint.js2
-rw-r--r--deps/v8/test/mjsunit/deopt-with-outer-context.js22
-rw-r--r--deps/v8/test/mjsunit/es6/array-concat.js (renamed from deps/v8/test/mjsunit/harmony/array-concat.js)34
-rw-r--r--deps/v8/test/mjsunit/es6/array-length.js2
-rw-r--r--deps/v8/test/mjsunit/es6/block-for.js2
-rw-r--r--deps/v8/test/mjsunit/es6/classes-super.js15
-rw-r--r--deps/v8/test/mjsunit/es6/completion.js (renamed from deps/v8/test/mjsunit/harmony/completion.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/generators-iteration.js71
-rw-r--r--deps/v8/test/mjsunit/es6/generators-objects.js12
-rw-r--r--deps/v8/test/mjsunit/es6/generators-runtime.js2
-rw-r--r--deps/v8/test/mjsunit/es6/generators-states.js27
-rw-r--r--deps/v8/test/mjsunit/es6/hasinstance-symbol.js12
-rw-r--r--deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js2
-rw-r--r--deps/v8/test/mjsunit/es6/object-assign.js33
-rw-r--r--deps/v8/test/mjsunit/es6/object-literals-method.js6
-rw-r--r--deps/v8/test/mjsunit/es6/regexp-tolength.js2
-rw-r--r--deps/v8/test/mjsunit/es6/regexp-tostring.js46
-rw-r--r--deps/v8/test/mjsunit/es6/symbols.js3
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-megatest.js292
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-proxies.js97
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call-simple.js107
-rw-r--r--deps/v8/test/mjsunit/es6/tail-call.js386
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray.js16
-rw-r--r--deps/v8/test/mjsunit/for-in-opt.js16
-rw-r--r--deps/v8/test/mjsunit/function-caller.js3
-rw-r--r--deps/v8/test/mjsunit/function-names.js3
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species-constructor-delete.js28
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species-constructor.js27
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species-delete.js28
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species-modified.js27
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species-parent-constructor.js27
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species-proto.js27
-rw-r--r--deps/v8/test/mjsunit/harmony/array-species.js25
-rw-r--r--deps/v8/test/mjsunit/harmony/block-for-sloppy.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/debug-stepin-proxies.js17
-rw-r--r--deps/v8/test/mjsunit/harmony/destructuring.js4
-rw-r--r--deps/v8/test/mjsunit/harmony/do-expressions.js1
-rw-r--r--deps/v8/test/mjsunit/harmony/function-name.js234
-rw-r--r--deps/v8/test/mjsunit/harmony/function-sent.js90
-rw-r--r--deps/v8/test/mjsunit/harmony/generators.js252
-rw-r--r--deps/v8/test/mjsunit/harmony/instanceof-es6.js50
-rw-r--r--deps/v8/test/mjsunit/harmony/iterator-close.js364
-rw-r--r--deps/v8/test/mjsunit/harmony/object-entries.js249
-rw-r--r--deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js206
-rw-r--r--deps/v8/test/mjsunit/harmony/object-values.js229
-rw-r--r--deps/v8/test/mjsunit/harmony/private-symbols.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-apply.js33
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-enumerate.js109
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-for.js47
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-ownkeys.js12
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-set-prototype-of.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/reflect-construct.js5
-rw-r--r--deps/v8/test/mjsunit/harmony/reflect-enumerate-delete.js53
-rw-r--r--deps/v8/test/mjsunit/harmony/reflect-enumerate-opt.js78
-rw-r--r--deps/v8/test/mjsunit/harmony/reflect-enumerate-special-cases.js88
-rw-r--r--deps/v8/test/mjsunit/harmony/reflect-enumerate.js101
-rw-r--r--deps/v8/test/mjsunit/harmony/reflect.js21
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4696.js29
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4755.js45
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-crbug-578038.js16
-rw-r--r--deps/v8/test/mjsunit/harmony/simd.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/string-replace.js19
-rw-r--r--deps/v8/test/mjsunit/harmony/unicode-character-ranges.js158
-rw-r--r--deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js32
-rw-r--r--deps/v8/test/mjsunit/harmony/unicode-regexp-backrefs.js53
-rw-r--r--deps/v8/test/mjsunit/harmony/unicode-regexp-ignore-case-noi18n.js59
-rw-r--r--deps/v8/test/mjsunit/harmony/unicode-regexp-ignore-case.js64
-rw-r--r--deps/v8/test/mjsunit/harmony/unicode-regexp-last-index.js104
-rw-r--r--deps/v8/test/mjsunit/harmony/unicode-regexp-property-class.js64
-rw-r--r--deps/v8/test/mjsunit/harmony/unicode-regexp-restricted-syntax.js44
-rw-r--r--deps/v8/test/mjsunit/harmony/unicode-regexp-unanchored-advance.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/unicode-regexp-zero-length.js58
-rw-r--r--deps/v8/test/mjsunit/ignition/dead-code-source-position.js9
-rw-r--r--deps/v8/test/mjsunit/ignition/debug-break-on-stack.js48
-rw-r--r--deps/v8/test/mjsunit/ignition/debug-break.js46
-rw-r--r--deps/v8/test/mjsunit/ignition/debugger-statement.js31
-rw-r--r--deps/v8/test/mjsunit/ignition/stack-trace-source-position.js21
-rw-r--r--deps/v8/test/mjsunit/messages.js45
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status525
-rw-r--r--deps/v8/test/mjsunit/random-bit-correlations.js2
-rw-r--r--deps/v8/test/mjsunit/regexp-compile.js2
-rw-r--r--deps/v8/test/mjsunit/regexp.js3
-rw-r--r--deps/v8/test/mjsunit/regress-587004.js31
-rw-r--r--deps/v8/test/mjsunit/regress/math-min.js66
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3650-1.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3650-2.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3650-3.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4267.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4509-Class-constructor-typeerror-realm.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4654.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4659.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4665-2.js33
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4693.js53
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4715.js48
-rw-r--r--deps/v8/test/mjsunit/regress/regress-575364.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-516775.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-577112.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-580506.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-580584.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-581577.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-582051.js44
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-582703.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-583257.js27
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-584188.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-590989-1.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-590989-2.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-592343.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-593282.js38
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-595657.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-599003.js39
-rw-r--r--deps/v8/test/mjsunit/regress/regress-integer-indexed-element.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js6
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-4839.js62
-rw-r--r--deps/v8/test/mjsunit/samevalue.js4
-rw-r--r--deps/v8/test/mjsunit/strict-mode.js12
-rw-r--r--deps/v8/test/mjsunit/strong/declaration-after-use.js255
-rw-r--r--deps/v8/test/mjsunit/strong/for-in.js6
-rw-r--r--deps/v8/test/mjsunit/strong/mutually-recursive-classes.js229
-rw-r--r--deps/v8/test/mjsunit/to_number_order.js7
-rw-r--r--deps/v8/test/mjsunit/typeof.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm.js663
-rw-r--r--deps/v8/test/mjsunit/wasm/import-table.js387
-rw-r--r--deps/v8/test/mjsunit/wasm/instantiate-run-basic.js (renamed from deps/v8/test/mjsunit/wasm/compile-run-basic.js)2
-rw-r--r--deps/v8/test/mjsunit/wasm/stack.js69
-rw-r--r--deps/v8/test/mjsunit/wasm/start-function.js172
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-constants.js3
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-object-api.js3
-rw-r--r--deps/v8/test/optimize_for_size.isolate5
-rw-r--r--deps/v8/test/perf.gyp27
-rw-r--r--deps/v8/test/perf.isolate23
-rwxr-xr-xdeps/v8/test/test262/archive.py18
-rwxr-xr-xdeps/v8/test/test262/list.py15
-rw-r--r--deps/v8/test/test262/test262.gyp8
-rw-r--r--deps/v8/test/test262/test262.isolate7
-rw-r--r--deps/v8/test/test262/test262.status430
-rw-r--r--deps/v8/test/test262/testcfg.py7
-rw-r--r--deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc99
-rw-r--r--deps/v8/test/unittests/compiler/escape-analysis-unittest.cc164
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.h45
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc299
-rw-r--r--deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h57
-rw-r--r--deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc109
-rw-r--r--deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc285
-rw-r--r--deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc236
-rw-r--r--deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc133
-rw-r--r--deps/v8/test/unittests/compiler/js-operator-unittest.cc150
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc515
-rw-r--r--deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/move-optimizer-unittest.cc97
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc42
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h10
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc533
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-unittest.cc546
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc111
-rw-r--r--deps/v8/test/unittests/heap/memory-reducer-unittest.cc11
-rw-r--r--deps/v8/test/unittests/heap/slot-set-unittest.cc143
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc289
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc17
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc218
-rw-r--r--deps/v8/test/unittests/interpreter/bytecodes-unittest.cc135
-rw-r--r--deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc11
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc (renamed from deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc)385
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h57
-rw-r--r--deps/v8/test/unittests/interpreter/register-translator-unittest.cc260
-rw-r--r--deps/v8/test/unittests/unittests.gyp11
-rw-r--r--deps/v8/test/unittests/wasm/ast-decoder-unittest.cc319
-rw-r--r--deps/v8/test/unittests/wasm/encoder-unittest.cc20
-rw-r--r--deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc211
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc287
-rw-r--r--deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc14
-rw-r--r--deps/v8/test/webkit/fast/regex/toString-expected.txt1
-rw-r--r--deps/v8/test/webkit/function-declarations-in-switch-statement-expected.txt2
-rw-r--r--deps/v8/test/webkit/webkit.status5
-rwxr-xr-xdeps/v8/tools/android-sync.sh2
-rwxr-xr-xdeps/v8/tools/eval_gc_time.sh3
-rwxr-xr-xdeps/v8/tools/fuzz-harness.sh46
-rwxr-xr-xdeps/v8/tools/gcmole/download_gcmole_tools.py22
-rw-r--r--deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha11
-rw-r--r--deps/v8/tools/gcmole/run-gcmole.isolate34
-rwxr-xr-xdeps/v8/tools/gcmole/run-gcmole.py23
-rw-r--r--deps/v8/tools/gcmole/run_gcmole.gyp23
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py2
-rw-r--r--deps/v8/tools/gyp/v8.gyp130
-rw-r--r--deps/v8/tools/ic-explorer.html338
-rw-r--r--deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py22
-rwxr-xr-xdeps/v8/tools/jsfunfuzz/fuzz-harness.sh84
-rw-r--r--deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp26
-rw-r--r--deps/v8/tools/jsfunfuzz/jsfunfuzz.isolate18
-rw-r--r--deps/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha11
-rwxr-xr-xdeps/v8/tools/ll_prof.py16
-rw-r--r--deps/v8/tools/luci-go/linux64/isolate.sha12
-rw-r--r--deps/v8/tools/luci-go/mac64/isolate.sha12
-rw-r--r--deps/v8/tools/luci-go/win64/isolate.exe.sha12
-rw-r--r--deps/v8/tools/perf/statistics-for-json.R113
-rwxr-xr-xdeps/v8/tools/presubmit.py24
-rwxr-xr-xdeps/v8/tools/release/auto_roll.py1
-rw-r--r--deps/v8/tools/release/common_includes.py3
-rwxr-xr-xdeps/v8/tools/release/create_release.py16
-rw-r--r--deps/v8/tools/release/test_scripts.py31
-rw-r--r--deps/v8/tools/run-deopt-fuzzer.gyp26
-rw-r--r--deps/v8/tools/run-deopt-fuzzer.isolate19
-rwxr-xr-xdeps/v8/tools/run-deopt-fuzzer.py13
-rwxr-xr-xdeps/v8/tools/run-tests.py6
-rw-r--r--deps/v8/tools/run-valgrind.gyp26
-rw-r--r--deps/v8/tools/run-valgrind.isolate29
-rwxr-xr-xdeps/v8/tools/run_perf.py34
-rw-r--r--deps/v8/tools/testrunner/local/execution.py25
-rw-r--r--deps/v8/tools/testrunner/local/pool.py7
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py6
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py19
-rwxr-xr-xdeps/v8/tools/try_perf.py5
-rw-r--r--deps/v8/tools/v8heapconst.py235
-rw-r--r--deps/v8/tools/whitespace.txt2
1008 files changed, 75193 insertions, 52788 deletions
diff --git a/deps/v8/.clang-format b/deps/v8/.clang-format
index d9bbf504a6..ae160a0bcc 100644
--- a/deps/v8/.clang-format
+++ b/deps/v8/.clang-format
@@ -1,4 +1,4 @@
# Defines the Google C++ style for automatic reformatting.
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
BasedOnStyle: Google
-MaxEmptyLinesToKeep: 2
+MaxEmptyLinesToKeep: 1
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index e77e072c28..805f349a6e 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -42,7 +42,7 @@ shell_g
/_*
/build/Debug
/build/gyp
-/build/ipch/
+/build/ipch
/build/Release
/build/win_toolchain.json
/buildtools
@@ -58,6 +58,7 @@ shell_g
/test/promises-aplus/sinon
/test/simdjs/data
/test/test262/data
+/test/test262/data.tar
/testing/gmock
/testing/gtest
/third_party
@@ -65,8 +66,10 @@ shell_g
/third_party/llvm
/third_party/llvm-build
/tools/clang
-/tools/jsfunfuzz
-/tools/jsfunfuzz.zip
+/tools/gcmole/gcmole-tools
+/tools/gcmole/gcmole-tools.tar.gz
+/tools/jsfunfuzz/jsfunfuzz
+/tools/jsfunfuzz/jsfunfuzz.tar.gz
/tools/luci-go/linux64/isolate
/tools/luci-go/mac64/isolate
/tools/luci-go/win64/isolate.exe
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index c9be8bbcda..ceffb49a99 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -40,7 +40,9 @@ Alexis Campailla <alexis@janeasystems.com>
Andreas Anyuru <andreas.anyuru@gmail.com>
Andrew Paprocki <andrew@ishiboo.com>
Andrei Kashcha <anvaka@gmail.com>
+Bangfu Tao <bangfu.tao@samsung.com>
Ben Noordhuis <info@bnoordhuis.nl>
+Benjamin Tan <demoneaux@gmail.com>
Bert Belder <bertbelder@gmail.com>
Burcu Dogan <burcujdogan@gmail.com>
Caitlin Potter <caitpotter88@gmail.com>
@@ -107,5 +109,6 @@ Vlad Burlik <vladbph@gmail.com>
Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
Yu Yin <xwafish@gmail.com>
+Zac Hansen <xaxxon@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>
ęŸ³č£äø€ <admin@web-tinker.com> \ No newline at end of file
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 5279a4a783..5f3baf23c9 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -28,7 +28,7 @@ declare_args() {
v8_deprecation_warnings = false
v8_enable_disassembler = false
v8_enable_gdbjit = false
-v8_enable_handle_zapping = true
+v8_enable_handle_zapping = is_debug
v8_enable_i18n_support = true
v8_enable_verify_heap = false
v8_interpreted_regexp = false
@@ -46,6 +46,15 @@ if (is_msan) {
v8_target_arch = target_cpu
}
+if (v8_use_snapshot && v8_use_external_startup_data) {
+ snapshot_target = ":v8_external_snapshot"
+} else if (v8_use_snapshot) {
+ snapshot_target = ":v8_snapshot"
+} else {
+ assert(!v8_use_external_startup_data)
+ snapshot_target = ":v8_nosnapshot"
+}
+
###############################################################################
# Configurations
#
@@ -173,7 +182,7 @@ config("toolchain") {
}
if (v8_target_arch == "s390") {
defines += [ "V8_TARGET_ARCH_S390" ]
- }
+ }
if (v8_target_arch == "s390x") {
defines += [
"V8_TARGET_ARCH_S390",
@@ -186,7 +195,7 @@ config("toolchain") {
if (v8_target_arch == "x64") {
defines += [ "V8_TARGET_ARCH_X64" ]
}
-
+
if (is_win) {
defines += [ "WIN32" ]
# TODO(jochen): Support v8_enable_prof.
@@ -291,7 +300,6 @@ action("js2c_experimental") {
"src/js/generator.js",
"src/js/harmony-atomics.js",
"src/js/harmony-regexp.js",
- "src/js/harmony-reflect.js",
"src/js/harmony-object-observe.js",
"src/js/harmony-sharedarraybuffer.js",
"src/js/harmony-simd.js",
@@ -774,16 +782,16 @@ source_set("v8_base") {
"src/compiler/instruction-selector.h",
"src/compiler/instruction.cc",
"src/compiler/instruction.h",
- "src/compiler/interpreter-assembler.cc",
- "src/compiler/interpreter-assembler.h",
+ "src/compiler/int64-lowering.cc",
+ "src/compiler/int64-lowering.h",
"src/compiler/js-builtin-reducer.cc",
"src/compiler/js-builtin-reducer.h",
"src/compiler/js-call-reducer.cc",
"src/compiler/js-call-reducer.h",
- "src/compiler/js-context-relaxation.cc",
- "src/compiler/js-context-relaxation.h",
"src/compiler/js-context-specialization.cc",
"src/compiler/js-context-specialization.h",
+ "src/compiler/js-create-lowering.cc",
+ "src/compiler/js-create-lowering.h",
"src/compiler/js-frame-specialization.cc",
"src/compiler/js-frame-specialization.h",
"src/compiler/js-generic-lowering.cc",
@@ -1009,6 +1017,8 @@ source_set("v8_base") {
"src/fast-dtoa.h",
"src/field-index.h",
"src/field-index-inl.h",
+ "src/field-type.cc",
+ "src/field-type.h",
"src/fixed-dtoa.cc",
"src/fixed-dtoa.h",
"src/flag-definitions.h",
@@ -1030,6 +1040,7 @@ source_set("v8_base") {
"src/handles.cc",
"src/handles.h",
"src/hashmap.h",
+ "src/heap-symbols.h",
"src/heap/array-buffer-tracker.cc",
"src/heap/array-buffer-tracker.h",
"src/heap/gc-idle-time-handler.cc",
@@ -1053,11 +1064,14 @@ source_set("v8_base") {
"src/heap/objects-visiting-inl.h",
"src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
+ "src/heap/remembered-set.cc",
+ "src/heap/remembered-set.h",
"src/heap/scavenge-job.h",
"src/heap/scavenge-job.cc",
"src/heap/scavenger-inl.h",
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
+ "src/heap/slot-set.h",
"src/heap/slots-buffer.cc",
"src/heap/slots-buffer.h",
"src/heap/spaces-inl.h",
@@ -1104,11 +1118,20 @@ source_set("v8_base") {
"src/interpreter/constant-array-builder.h",
"src/interpreter/control-flow-builders.cc",
"src/interpreter/control-flow-builders.h",
+ "src/interpreter/handler-table-builder.cc",
+ "src/interpreter/handler-table-builder.h",
"src/interpreter/interpreter.cc",
"src/interpreter/interpreter.h",
+ "src/interpreter/interpreter-assembler.cc",
+ "src/interpreter/interpreter-assembler.h",
+ "src/interpreter/register-translator.cc",
+ "src/interpreter/register-translator.h",
+ "src/interpreter/source-position-table.cc",
+ "src/interpreter/source-position-table.h",
"src/isolate-inl.h",
"src/isolate.cc",
"src/isolate.h",
+ "src/json-parser.h",
"src/json-stringifier.h",
"src/key-accumulator.h",
"src/key-accumulator.cc",
@@ -1144,7 +1167,6 @@ source_set("v8_base") {
"src/parsing/expression-classifier.h",
"src/parsing/func-name-inferrer.cc",
"src/parsing/func-name-inferrer.h",
- "src/parsing/json-parser.h",
"src/parsing/parameter-initializer-rewriter.cc",
"src/parsing/parameter-initializer-rewriter.h",
"src/parsing/parser-base.h",
@@ -1183,6 +1205,8 @@ source_set("v8_base") {
"src/profiler/profile-generator.h",
"src/profiler/sampler.cc",
"src/profiler/sampler.h",
+ "src/profiler/sampling-heap-profiler.cc",
+ "src/profiler/sampling-heap-profiler.h",
"src/profiler/strings-storage.cc",
"src/profiler/strings-storage.h",
"src/profiler/unbound-queue-inl.h",
@@ -1262,6 +1286,7 @@ source_set("v8_base") {
"src/snapshot/snapshot-common.cc",
"src/snapshot/snapshot-source-sink.cc",
"src/snapshot/snapshot-source-sink.h",
+ "src/source-position.h",
"src/splay-tree.h",
"src/splay-tree-inl.h",
"src/snapshot/snapshot.h",
@@ -1286,7 +1311,6 @@ source_set("v8_base") {
"src/type-feedback-vector.h",
"src/type-info.cc",
"src/type-info.h",
- "src/types-inl.h",
"src/types.cc",
"src/types.h",
"src/typing-asm.cc",
@@ -1300,6 +1324,7 @@ source_set("v8_base") {
"src/unicode-cache.h",
"src/unicode-decoder.cc",
"src/unicode-decoder.h",
+ "src/utils-inl.h",
"src/utils.cc",
"src/utils.h",
"src/v8.cc",
@@ -1655,6 +1680,7 @@ source_set("v8_libbase") {
"src/base/atomicops_internals_mips_gcc.h",
"src/base/atomicops_internals_mips64_gcc.h",
"src/base/atomicops_internals_portable.h",
+ "src/base/atomicops_internals_s390_gcc.h",
"src/base/atomicops_internals_tsan.h",
"src/base/atomicops_internals_x86_gcc.cc",
"src/base/atomicops_internals_x86_gcc.h",
@@ -1721,8 +1747,6 @@ source_set("v8_libbase") {
libs = [ "dl", "rt" ]
} else if (is_android) {
- defines += [ "CAN_USE_VFP_INSTRUCTIONS" ]
-
if (current_toolchain == host_toolchain) {
libs = [ "dl", "rt" ]
if (host_os == "mac") {
@@ -1782,6 +1806,28 @@ source_set("v8_libplatform") {
]
}
+source_set("fuzzer_support") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ sources = [
+ "test/fuzzer/fuzzer-support.cc",
+ "test/fuzzer/fuzzer-support.h",
+ ]
+
+ configs -= [ "//build/config/compiler:chromium_code" ]
+ configs += [ "//build/config/compiler:no_chromium_code" ]
+ configs += [
+ ":internal_config_base",
+ ":features",
+ ":toolchain",
+ ]
+
+ deps = [
+ ":v8_libplatform",
+ snapshot_target,
+ ]
+}
+
###############################################################################
# Executables
#
@@ -1815,15 +1861,6 @@ if (current_toolchain == snapshot_toolchain) {
# Public targets
#
-if (v8_use_snapshot && v8_use_external_startup_data) {
- snapshot_target = ":v8_external_snapshot"
-} else if (v8_use_snapshot) {
- snapshot_target = ":v8_snapshot"
-} else {
- assert(!v8_use_external_startup_data)
- snapshot_target = ":v8_nosnapshot"
-}
-
if (is_component_build) {
component("v8") {
sources = [
@@ -1905,3 +1942,57 @@ if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
}
}
}
+
+source_set("json_fuzzer") {
+ sources = [
+ "test/fuzzer/json.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ]
+
+ configs -= [ "//build/config/compiler:chromium_code" ]
+ configs += [ "//build/config/compiler:no_chromium_code" ]
+ configs += [
+ ":internal_config",
+ ":features",
+ ":toolchain",
+ ]
+}
+
+source_set("parser_fuzzer") {
+ sources = [
+ "test/fuzzer/parser.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ]
+
+ configs -= [ "//build/config/compiler:chromium_code" ]
+ configs += [ "//build/config/compiler:no_chromium_code" ]
+ configs += [
+ ":internal_config",
+ ":features",
+ ":toolchain",
+ ]
+}
+
+source_set("regexp_fuzzer") {
+ sources = [
+ "test/fuzzer/regexp.cc",
+ ]
+
+ deps = [
+ ":fuzzer_support",
+ ]
+
+ configs -= [ "//build/config/compiler:chromium_code" ]
+ configs += [ "//build/config/compiler:no_chromium_code" ]
+ configs += [
+ ":internal_config",
+ ":features",
+ ":toolchain",
+ ]
+}
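
The new fuzzer targets close out the BUILD.gn changes: json_fuzzer, parser_fuzzer, and regexp_fuzzer each wrap a single test/fuzzer/*.cc driver around the shared fuzzer_support target, which in turn links whichever snapshot flavour snapshot_target selected earlier in the file (the reason that selection block was hoisted above the target definitions). The drivers follow the libFuzzer convention; a minimal sketch of the entry-point shape, with the body left as an assumption:

```cpp
#include <stddef.h>
#include <stdint.h>

// libFuzzer-style entry point. In the real targets, fuzzer-support.cc
// provides the v8::Isolate/platform plumbing; what is done with |data|
// here is illustrative only.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
  // Feed |data|/|size| to the component under test (JSON parser, JS parser,
  // or the regexp engine), swallowing JS exceptions.
  return 0;  // Non-crashing runs return 0; sanitizers flag everything else.
}
```
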
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 54bcbe4275..9e21ba1cd7 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1778 @@
+2016-02-23: Version 5.0.71
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-23: Version 5.0.70
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-23: Version 5.0.69
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-23: Version 5.0.68
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-23: Version 5.0.67
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-23: Version 5.0.66
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-22: Version 5.0.65
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-22: Version 5.0.64
+
+ ES2015 web compat workaround: RegExp.prototype.flags => "" (Chromium
+ issue 581577).
+
+ Remove the Proxy enumerate trap (issue 4768).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-22: Version 5.0.63
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-22: Version 5.0.62
+
+ Remove Reflect.enumerate (issue 4768).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-22: Version 5.0.61
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-22: Version 5.0.60
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-22: Version 5.0.59
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-22: Version 5.0.58
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-22: Version 5.0.57
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-22: Version 5.0.56
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-22: Version 5.0.55
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-22: Version 5.0.54
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-21: Version 5.0.53
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-21: Version 5.0.52
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-21: Version 5.0.51
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-21: Version 5.0.50
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-21: Version 5.0.49
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-21: Version 5.0.48
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-20: Version 5.0.47
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-20: Version 5.0.46
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.45
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.44
+
+ Return undefined from RegExp.prototype.compile (Chromium issue 585775).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.43
+
+ Disable --harmony-object-observe (Chromium issue 552100).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.42
+
+ Introduce BeforeCallEnteredCallback (Chromium issue 585949).
+
+ Performance and stability improvements on all platforms.
+
+
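BeforeCallEnteredCallback is an embedder-facing hook; a hedged sketch, assuming the callback simply receives the v8::Isolate* as declared in include/v8.h of this upgrade:

```cpp
#include "v8.h"

// Assumed signature: void(v8::Isolate*). Runs before V8 enters script via
// Function::Call and friends, mirroring the existing CallCompletedCallback.
void OnBeforeCallEntered(v8::Isolate* isolate) {
  // e.g. assert re-entrancy invariants or arm a watchdog timer here.
}

void InstallHooks(v8::Isolate* isolate) {
  isolate->AddBeforeCallEnteredCallback(OnBeforeCallEntered);
}
```
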
+2016-02-19: Version 5.0.41
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.40
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.39
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.38
+
+ [wasm] Add support for import section (Chromium issue 575167).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.37
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.36
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.35
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.34
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.33
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.32
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.31
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.30
+
+ Mark old SetAccessCheckCallback as deprecated.
+
+ Performance and stability improvements on all platforms.
+
+
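The deprecation nudges embedders from the old SetAccessCheckCallbacks(named, indexed, ...) pair to the single-callback form. A sketch, with the callback signature taken as an assumption from include/v8.h of this upgrade:

```cpp
// Assumed signature: bool(Local<Context>, Local<Object>, Local<Value>).
bool AccessAllowed(v8::Local<v8::Context> accessing_context,
                   v8::Local<v8::Object> accessed_object,
                   v8::Local<v8::Value> data) {
  return true;  // permissive placeholder: allow all cross-context access
}

void ConfigureGlobal(v8::Local<v8::ObjectTemplate> global_template) {
  // One callback replaces the deprecated named/indexed callback pair.
  global_template->SetAccessCheckCallback(AccessAllowed);
}
```
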
+2016-02-19: Version 5.0.29
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.28
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.27
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-19: Version 5.0.26
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.25
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.24
+
+ Make Date.prototype.toGMTString an alias for Date.prototype.toUTCString
+ (issue 4708).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.23
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.22
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.21
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.20
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.19
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.18
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.17
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.16
+
+ [es6] Implement for-of iterator finalization (issue 2214).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.15
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.14
+
+ Use displayName in Error.stack rendering if present (issue 4761).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.13
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.12
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-18: Version 5.0.11
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-17: Version 5.0.10
+
+ [Atomics] Add dmb/dsb/isb instructions to ARM (issue 4614).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-17: Version 5.0.9
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-17: Version 5.0.8
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-17: Version 5.0.7
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-17: Version 5.0.6
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-17: Version 5.0.5
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-17: Version 5.0.4
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-17: Version 5.0.3
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-17: Version 5.0.2
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-17: Version 5.0.1
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-17: Version 4.10.253
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-17: Version 4.10.252
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-17: Version 4.10.251
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-17: Version 4.10.250
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.249
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.248
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.247
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.246
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.245
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.244
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.243
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.242
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.241
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.240
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.239
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.238
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.237
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.236
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.235
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.234
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.233
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.232
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.231
+
+        Make NamedLoadHandlerCompiler::CompileLoadInterceptor behave correctly
+ with lazy accessors (Chromium issue 585764).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.230
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.229
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.228
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.227
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.226
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-16: Version 4.10.225
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.224
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.223
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.222
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.221
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.220
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.219
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.218
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.217
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.216
+
+ [wasm] Add support for a start function (Chromium issue 575167).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.215
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.214
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.213
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.212
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.211
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-15: Version 4.10.210
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-14: Version 4.10.209
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-14: Version 4.10.208
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-14: Version 4.10.207
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-13: Version 4.10.206
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-12: Version 4.10.205
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-12: Version 4.10.204
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-12: Version 4.10.203
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-12: Version 4.10.202
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-12: Version 4.10.201
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-12: Version 4.10.200
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-12: Version 4.10.199
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-12: Version 4.10.198
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-12: Version 4.10.197
+
+ Stage --harmony-function-name (issue 3699).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-12: Version 4.10.196
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-10: Version 4.10.195
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-10: Version 4.10.194
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-10: Version 4.10.193
+
+ Use a for-of loop in Array.from (issue 4739).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-09: Version 4.10.192
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-09: Version 4.10.191
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-09: Version 4.10.190
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-09: Version 4.10.189
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-09: Version 4.10.188
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-09: Version 4.10.187
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-09: Version 4.10.186
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-09: Version 4.10.185
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-09: Version 4.10.184
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-09: Version 4.10.183
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-08: Version 4.10.182
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-08: Version 4.10.181
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-08: Version 4.10.180
+
+ [wasm] Put the condition last in kExprBrIf (Chromium issue 575167).
+
+ Stage --harmony-species flag, enabling Symbol.species (issue 4093).
+
+ Extend subarray web compatibility fix to --harmony-species (issue 4665).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-08: Version 4.10.179
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-08: Version 4.10.178
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-08: Version 4.10.177
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-08: Version 4.10.176
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-08: Version 4.10.175
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-08: Version 4.10.174
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-08: Version 4.10.173
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-08: Version 4.10.172
+
+ android: Use libc++ instead of stlport (issue 4615).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-08: Version 4.10.171
+
+ [api] Make ObjectTemplate::SetNativeDataProperty() work even if the
+ ObjectTemplate does not have a constructor (Chromium issue 579009).
+
+ Performance and stability improvements on all platforms.
+
+
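The fixed case, sketched for illustration: a native data property installed on a bare ObjectTemplate that never had a constructor function attached (names and values here are hypothetical):

```cpp
void AnswerGetter(v8::Local<v8::Name> name,
                  const v8::PropertyCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(42);
}

v8::Local<v8::ObjectTemplate> MakeTemplate(v8::Isolate* isolate) {
  v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
  // Previously this could misbehave when |templ| had no constructor set.
  templ->SetNativeDataProperty(
      v8::String::NewFromUtf8(isolate, "answer"), AnswerGetter);
  return templ;  // note: no FunctionTemplate/constructor involved
}
```
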
+2016-02-05: Version 4.10.170
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-05: Version 4.10.169
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-05: Version 4.10.168
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-05: Version 4.10.167
+
+ [wasm] min/max does not return signaling NaNs anymore (Chromium issue
+ 4733).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-05: Version 4.10.166
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-05: Version 4.10.165
+
+ [wasm] Put the condition last in kExprSelect (issue 4735, Chromium issue
+ 575167).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-05: Version 4.10.164
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-05: Version 4.10.163
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-05: Version 4.10.162
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-05: Version 4.10.161
+
+ PPC: [generators] Implement Generator.prototype.return (issue 3566).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-04: Version 4.10.160
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-04: Version 4.10.159
+
+ [generators] Implement Generator.prototype.return (issue 3566).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-04: Version 4.10.158
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-04: Version 4.10.157
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-04: Version 4.10.156
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-04: Version 4.10.155
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-04: Version 4.10.154
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-04: Version 4.10.153
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-04: Version 4.10.152
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-04: Version 4.10.151
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-03: Version 4.10.150
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-03: Version 4.10.149
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-03: Version 4.10.148
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-03: Version 4.10.147
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-03: Version 4.10.146
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-03: Version 4.10.145
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-02: Version 4.10.144
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-02: Version 4.10.143
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-02: Version 4.10.142
+
+ [wasm] Provide backoff implementations for the Fxx rounding instructions
+ (Chromium issue 575379).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-02: Version 4.10.141
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-02: Version 4.10.140
+
+ [api] Make ObjectTemplate::SetNativeDataProperty() work even if the
+ ObjectTemplate does not have a constructor (Chromium issue 579009).
+
+ Add native callbacks to FastAccessorAssembler (Chromium issue 508898).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-02: Version 4.10.139
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-02: Version 4.10.138
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-01: Version 4.10.137
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-01: Version 4.10.136
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-01: Version 4.10.135
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-01: Version 4.10.134
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-01: Version 4.10.133
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-01: Version 4.10.132
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-01: Version 4.10.131
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-01: Version 4.10.130
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-01: Version 4.10.129
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-01: Version 4.10.128
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-01: Version 4.10.127
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-01: Version 4.10.126
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-01: Version 4.10.125
+
+ Ship RegExp subclassing (issues 4305, 4343, 4344, 4345).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-02-01: Version 4.10.124
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-31: Version 4.10.123
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-31: Version 4.10.122
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-29: Version 4.10.121
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-29: Version 4.10.120
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-29: Version 4.10.119
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-29: Version 4.10.118
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-29: Version 4.10.117
+
+ Fix Unicode string normalization with null bytes (issue 4654).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-29: Version 4.10.116
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-29: Version 4.10.115
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-29: Version 4.10.114
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-29: Version 4.10.113
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-29: Version 4.10.112
+
+ [regexp] stage unicode regexps (issue 2952).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-29: Version 4.10.111
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-28: Version 4.10.110
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-28: Version 4.10.109
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-28: Version 4.10.108
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-28: Version 4.10.107
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-28: Version 4.10.106
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-28: Version 4.10.105
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-28: Version 4.10.104
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-28: Version 4.10.103
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-28: Version 4.10.102
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-28: Version 4.10.101
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-28: Version 4.10.100
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-28: Version 4.10.99
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-28: Version 4.10.98
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.97
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.96
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.95
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.94
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.93
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.92
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.91
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.90
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.89
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.88
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.87
+
+ [api] Default native data property setter to replace the setter if the
+ property is writable (Chromium issue 580584).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.86
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.85
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.84
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.83
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.82
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.81
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-27: Version 4.10.80
+
+ Stage RegExp subclassing (issues 4305, 4343, 4344, 4345).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-26: Version 4.10.79
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-26: Version 4.10.78
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-26: Version 4.10.77
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-26: Version 4.10.76
+
+ Ensure arrow functions can close over lexically-scoped variables (issue
+ 4255, Chromium issue 580934).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-26: Version 4.10.75
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-26: Version 4.10.74
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-26: Version 4.10.73
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-26: Version 4.10.72
+
+ [GN] Remove CAN_USE_VFP_INSTRUCTIONS define to match GYP.
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-26: Version 4.10.71
+
+ PPC: [es6] Tail calls support (issue 4698).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-26: Version 4.10.70
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-26: Version 4.10.69
+
+ [es6] Tail calls support (issue 4698).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-26: Version 4.10.68
+
+ Support @@species in Array.prototype.concat (issue 4093).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.67
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.66
+
+ Restore per-TypedArray-class length accessors as a perf workaround
+ (Chromium issue 579905).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.65
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.64
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.63
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.62
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.61
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.60
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.59
+
+ Don't NULL-check GlobalHandle::Node::object_ (issue 3647, Chromium issue
+ 580651).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.58
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.57
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.56
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.55
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.54
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.53
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.52
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-25: Version 4.10.51
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-22: Version 4.10.50
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-22: Version 4.10.49
+
+ Sloppy mode webcompat: allow conflicting function declarations in blocks
+ (issue 4693, Chromium issue 579395).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-22: Version 4.10.48
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-22: Version 4.10.47
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-22: Version 4.10.46
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-22: Version 4.10.45
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-22: Version 4.10.44
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-22: Version 4.10.43
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-22: Version 4.10.42
+
+ Array length reduction should throw in strict mode if it can't delete an
+ element (issue 4267).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-22: Version 4.10.41
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-22: Version 4.10.40
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-22: Version 4.10.39
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-22: Version 4.10.38
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-22: Version 4.10.37
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-21: Version 4.10.36
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-21: Version 4.10.35
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-21: Version 4.10.34
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-21: Version 4.10.33
+
+ Array length reduction should throw in strict mode if it can't delete an
+ element (issue 4267).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-21: Version 4.10.32
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-21: Version 4.10.31
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-21: Version 4.10.30
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-21: Version 4.10.29
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-21: Version 4.10.28
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-21: Version 4.10.27
+
+ [debugger] negative conditional break points mute breaks and exceptions
+ (Chromium issue 429167).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-21: Version 4.10.26
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-21: Version 4.10.25
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.24
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.23
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.22
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.21
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.20
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.19
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.18
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.17
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.16
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.15
+
+ [wasm] Implemented F32Trunc as a turbofan graph based on int32
+ instructions (Chromium issue 575379).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.14
+
+ [wasm] Verify boundaries of data segments when decoding modules
+ (Chromium issue 575167).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.13
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.12
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.11
+
+ [runtime] Do not use the enum-cache for keys retrieval (issue 705).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.10
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-20: Version 4.10.9
+
+ Separate String.prototype.replace into RegExp.prototype[Symbol.replace]
+ (issue 4343).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-19: Version 4.10.8
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-19: Version 4.10.7
+
+ [wasm] Enable WASM JavaScript API tests (Chromium issue 575167).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-18: Version 4.10.6
+
+ [wasm] Create a wrapper function for WASM.asmCompileRun() (Chromium
+ issue 575372).
+
+ Make generators non-constructable (issues 4163, 4630).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-18: Version 4.10.5
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-18: Version 4.10.4
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-18: Version 4.10.3
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-16: Version 4.10.2
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-16: Version 4.10.1
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-15: Version 4.9.391
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-15: Version 4.9.390
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-15: Version 4.9.389
+
+ Construct instances of base class from TypedArray.prototype.subarray
+ (issue 4665).
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.388
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.387
+
+ Performance and stability improvements on all platforms.
+
+
+2016-01-14: Version 4.9.386
+
+ Performance and stability improvements on all platforms.
+
+
2016-01-14: Version 4.9.385
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 07b11a4ae3..5f26e91ecf 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -8,15 +8,15 @@ vars = {
deps = {
"v8/build/gyp":
- Var("git_url") + "/external/gyp.git" + "@" + "b85ad3e578da830377dbc1843aa4fbc5af17a192",
+ Var("git_url") + "/external/gyp.git" + "@" + "ed163ce233f76a950dce1751ac851dbe4b1c00cc",
"v8/third_party/icu":
- Var("git_url") + "/chromium/deps/icu.git" + "@" + "8d342a405be5ae8aacb1e16f0bc31c3a4fbf26a2",
+ Var("git_url") + "/chromium/deps/icu.git" + "@" + "e466f6ac8f60bb9697af4a91c6911c6fc4aec95f",
"v8/buildtools":
- Var("git_url") + "/chromium/buildtools.git" + "@" + "0f8e6e4b126ee88137930a0ae4776c4741808740",
+ Var("git_url") + "/chromium/buildtools.git" + "@" + "97b5c485707335dd2952c05bf11412ada3f4fb6f",
"v8/base/trace_event/common":
- Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "d83d44b13d07c2fd0a40101a7deef9b93b841732",
+ Var("git_url") + "/chromium/src/base/trace_event/common.git" + "@" + "4b09207e447ae5bd34643b4c6321bee7b76d35f9",
"v8/tools/swarming_client":
- Var('git_url') + '/external/swarming.client.git' + '@' + "9cdd76171e517a430a72dcd7d66ade67e109aa00",
+ Var('git_url') + '/external/swarming.client.git' + '@' + "0b908f18767c8304dc089454bc1c91755d21f1f5",
"v8/testing/gtest":
Var("git_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
"v8/testing/gmock":
@@ -27,9 +27,9 @@ deps = {
Var("git_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
"v8/test/simdjs/data": Var("git_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "c8ef63c728283debc25891123eb00482fee4b8cd",
"v8/test/test262/data":
- Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "67ba34b03a46bac4254223ae25f42c7b959540f0",
+ Var("git_url") + "/external/github.com/tc39/test262.git" + "@" + "738a24b109f3fa71be44d5c3701d73141d494510",
"v8/tools/clang":
- Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "24e8c1c92fe54ef8ed7651b5850c056983354a4a",
+ Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "a8adb78c8eda9bddb2aa9c51f3fee60296de1ad4",
}
deps_os = {
@@ -102,6 +102,22 @@ hooks = [
"-s", "v8/buildtools/linux64/clang-format.sha1",
],
},
+ {
+ 'name': 'gcmole',
+ 'pattern': '.',
+ 'action': [
+ 'python',
+ 'v8/tools/gcmole/download_gcmole_tools.py',
+ ],
+ },
+ {
+ 'name': 'jsfunfuzz',
+ 'pattern': '.',
+ 'action': [
+ 'python',
+ 'v8/tools/jsfunfuzz/download_jsfunfuzz.py',
+ ],
+ },
# Pull luci-go binaries (isolate, swarming) using checked-in hashes.
{
'name': 'luci-go_win',
@@ -136,6 +152,40 @@ hooks = [
'-d', 'v8/tools/luci-go/linux64',
],
},
+ # Pull GN using checked-in hashes.
+ {
+ "name": "gn_win",
+ "pattern": ".",
+ "action": [ "download_from_google_storage",
+ "--no_resume",
+ "--platform=win32",
+ "--no_auth",
+ "--bucket", "chromium-gn",
+ "-s", "v8/buildtools/win/gn.exe.sha1",
+ ],
+ },
+ {
+ "name": "gn_mac",
+ "pattern": ".",
+ "action": [ "download_from_google_storage",
+ "--no_resume",
+ "--platform=darwin",
+ "--no_auth",
+ "--bucket", "chromium-gn",
+ "-s", "v8/buildtools/mac/gn.sha1",
+ ],
+ },
+ {
+ "name": "gn_linux",
+ "pattern": ".",
+ "action": [ "download_from_google_storage",
+ "--no_resume",
+ "--platform=linux*",
+ "--no_auth",
+ "--bucket", "chromium-gn",
+ "-s", "v8/buildtools/linux64/gn.sha1",
+ ],
+ },
{
# Update the Windows toolchain if necessary.
'name': 'win_toolchain',
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 6ae9b24576..4fb6ee0162 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -251,7 +251,7 @@ NACL_ARCHES = nacl_ia32 nacl_x64
GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \
build/shim_headers.gypi build/features.gypi build/standalone.gypi \
build/toolchain.gypi build/all.gyp build/mac/asan.gyp \
- test/cctest/cctest.gyp \
+ test/cctest/cctest.gyp test/fuzzer/fuzzer.gyp \
test/unittests/unittests.gyp tools/gyp/v8.gyp \
tools/parser-shell.gyp testing/gmock.gyp testing/gtest.gyp \
buildtools/third_party/libc++abi/libc++abi.gyp \
diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS
index 81e941f28a..29b957b091 100644
--- a/deps/v8/WATCHLISTS
+++ b/deps/v8/WATCHLISTS
@@ -43,9 +43,11 @@
'filepath': 'src/debug/',
},
'interpreter': {
- 'filepath': 'src/interpreter/',
- 'filepath': 'test/cctest/interpreter/',
- 'filepath': 'test/unittests/interpreter/',
+ 'filepath': 'src/interpreter/' \
+ '|src/compiler/interpreter' \
+ '|src/compiler/bytecode' \
+ '|test/cctest/interpreter/' \
+ '|test/unittests/interpreter/',
},
'feature_shipping_status': {
'filepath': 'src/flag-definitions.h',
@@ -53,9 +55,12 @@
'gc_changes': {
'filepath': 'src/heap/',
},
- 'merges': {
- 'filepath': '.',
+ 'arm': {
+ 'filepath': '/arm/',
},
+ 'ia32': {
+ 'filepath': '/ia32/',
+ }
},
'WATCHLISTS': {
@@ -79,9 +84,12 @@
'hpayer@chromium.org',
'ulan@chromium.org',
],
- 'merges': [
- # Only enabled on branches created with tools/release/create_release.py
- 'v8-merges@googlegroups.com',
+ 'arm': [
+ 'v8-mips-ports@googlegroups.com',
+ 'v8-ppc-ports@googlegroups.com',
+ ],
+ 'ia32': [
+ 'v8-x87-ports@googlegroups.com',
],
},
}
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index 33578bd37b..8d13fc2e8c 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -203,40 +203,26 @@
// - category and name strings must have application lifetime (statics or
// literals). They may not include " chars.
#define TRACE_EVENT0(category_group, name) \
- INTERNAL_TRACE_MEMORY(category_group, name) \
INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
#define TRACE_EVENT_WITH_FLOW0(category_group, name, bind_id, flow_flags) \
- INTERNAL_TRACE_MEMORY(category_group, name) \
INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
flow_flags)
#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_MEMORY(category_group, name) \
INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
#define TRACE_EVENT_WITH_FLOW1(category_group, name, bind_id, flow_flags, \
arg1_name, arg1_val) \
- INTERNAL_TRACE_MEMORY(category_group, name) \
INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
flow_flags, arg1_name, arg1_val)
#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, \
arg2_val) \
- INTERNAL_TRACE_MEMORY(category_group, name) \
INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val, \
arg2_name, arg2_val)
#define TRACE_EVENT_WITH_FLOW2(category_group, name, bind_id, flow_flags, \
arg1_name, arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_MEMORY(category_group, name) \
INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
flow_flags, arg1_name, arg1_val, \
arg2_name, arg2_val)
-// Records events like TRACE_EVENT2 but uses |memory_tag| for memory tracing.
-// Use this where |name| is too generic to accurately aggregate allocations.
-#define TRACE_EVENT_WITH_MEMORY_TAG2(category, name, memory_tag, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- INTERNAL_TRACE_MEMORY(category, memory_tag) \
- INTERNAL_TRACE_EVENT_ADD_SCOPED(category, name, arg1_name, arg1_val, \
- arg2_name, arg2_val)
-
// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
// included in official builds.
@@ -309,6 +295,12 @@
TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val, \
arg2_name, arg2_val)
+#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope, \
+ timestamp) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_INSTANT, category_group, name, 0, 0, timestamp, \
+ TRACE_EVENT_FLAG_NONE | scope)
+
// Syntactic sugars for the sampling tracing in the main thread.
#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
@@ -478,6 +470,20 @@
static_cast<int>(value1_val), value2_name, \
static_cast<int>(value2_val))
+// Similar to TRACE_COUNTERx, but with a custom |timestamp| provided.
+#define TRACE_COUNTER_WITH_TIMESTAMP1(category_group, name, timestamp, value) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_NONE, "value", static_cast<int>(value))
+
+#define TRACE_COUNTER_WITH_TIMESTAMP2(category_group, name, timestamp, \
+ value1_name, value1_val, value2_name, \
+ value2_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_NONE, value1_name, static_cast<int>(value1_val), \
+ value2_name, static_cast<int>(value2_val))
+
// Records the value of a counter called "name" immediately. Value
// must be representable as a 32 bit integer.
// - category and name strings must have application lifetime (statics or
@@ -920,6 +926,17 @@
name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
arg1_val, arg2_name, arg2_val)
+// Records a clock sync event.
+#define TRACE_EVENT_CLOCK_SYNC_RECEIVER(sync_id) \
+ INTERNAL_TRACE_EVENT_ADD( \
+ TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", \
+ TRACE_EVENT_FLAG_NONE, "sync_id", sync_id)
+#define TRACE_EVENT_CLOCK_SYNC_ISSUER(sync_id, issue_ts, issue_end_ts) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", \
+ issue_end_ts.ToInternalValue(), TRACE_EVENT_FLAG_NONE, \
+ "sync_id", sync_id, "issue_ts", issue_ts.ToInternalValue())
+
// Macros to track the life time and value of arbitrary client objects.
// See also TraceTrackableObject.
#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
@@ -945,6 +962,21 @@
TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name, \
TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+// Records entering and leaving trace event contexts. |category_group| and
+// |name| specify the context category and type. |context| is a
+// snapshotted context object id.
+#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
+ TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name, \
+ TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+ INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, \
+ TRACE_ID_DONT_MANGLE(context))
+
// Macro to efficiently determine if a given category group is enabled.
#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
do { \
@@ -1007,6 +1039,9 @@
#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')
#define TRACE_EVENT_PHASE_MEMORY_DUMP ('v')
#define TRACE_EVENT_PHASE_MARK ('R')
+#define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
+#define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
+#define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
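
The trace_event_common.h update drops the INTERNAL_TRACE_MEMORY plumbing and adds timestamped instants/counters, clock-sync events, and context enter/leave tracking. A hedged usage sketch, assuming the usual trace_event.h glue that defines the INTERNAL_TRACE_EVENT_* helpers and TRACE_EVENT_SCOPE_THREAD:

```cpp
void ReportGcTick(int64_t timestamp_us, int live_mb, uint64_t context_id) {
  // Instant event and counter stamped with an externally captured time
  // instead of "now".
  TRACE_EVENT_INSTANT_WITH_TIMESTAMP0("v8.gc", "CycleStart",
                                      TRACE_EVENT_SCOPE_THREAD, timestamp_us);
  TRACE_COUNTER_WITH_TIMESTAMP1("v8.gc", "LiveHeapMB", timestamp_us, live_mb);
  // Scoped context: emits enter/leave events around the rest of this scope.
  TRACE_EVENT_SCOPED_CONTEXT("v8", "IsolateContext", context_id);
}
```
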
diff --git a/deps/v8/build/all.gyp b/deps/v8/build/all.gyp
index 0a05a2f02f..feaf4feccc 100644
--- a/deps/v8/build/all.gyp
+++ b/deps/v8/build/all.gyp
@@ -11,6 +11,7 @@
'../samples/samples.gyp:*',
'../src/d8.gyp:d8',
'../test/cctest/cctest.gyp:*',
+ '../test/fuzzer/fuzzer.gyp:*',
'../test/unittests/unittests.gyp:*',
],
'conditions': [
@@ -30,11 +31,16 @@
'../test/mjsunit/mjsunit.gyp:*',
'../test/mozilla/mozilla.gyp:*',
'../test/optimize_for_size.gyp:*',
+ '../test/perf.gyp:*',
'../test/preparser/preparser.gyp:*',
'../test/simdjs/simdjs.gyp:*',
'../test/test262/test262.gyp:*',
'../test/webkit/webkit.gyp:*',
'../tools/check-static-initializers.gyp:*',
+ '../tools/gcmole/run_gcmole.gyp:*',
+ '../tools/jsfunfuzz/jsfunfuzz.gyp:*',
+ '../tools/run-deopt-fuzzer.gyp:*',
+ '../tools/run-valgrind.gyp:*',
],
}],
]
diff --git a/deps/v8/build/has_valgrind.py b/deps/v8/build/has_valgrind.py
new file mode 100755
index 0000000000..83a848d50b
--- /dev/null
+++ b/deps/v8/build/has_valgrind.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+VALGRIND_DIR = os.path.join(BASE_DIR, 'third_party', 'valgrind')
+LINUX32_DIR = os.path.join(VALGRIND_DIR, 'linux_x86')
+LINUX64_DIR = os.path.join(VALGRIND_DIR, 'linux_x64')
+
+
+def DoMain(_):
+ """Hook to be called from gyp without starting a separate python
+ interpreter."""
+ return int(os.path.exists(LINUX32_DIR) and os.path.exists(LINUX64_DIR))
+
+
+if __name__ == '__main__':
+ print DoMain([])
diff --git a/deps/v8/build/isolate.gypi b/deps/v8/build/isolate.gypi
index 5d30005e74..546870a755 100644
--- a/deps/v8/build/isolate.gypi
+++ b/deps/v8/build/isolate.gypi
@@ -71,12 +71,15 @@
'--config-variable', 'OS=<(OS)',
'--config-variable', 'asan=<(asan)',
'--config-variable', 'cfi_vptr=<(cfi_vptr)',
+ '--config-variable', 'gcmole=<(gcmole)',
+ '--config-variable', 'has_valgrind=<(has_valgrind)',
'--config-variable', 'icu_use_data_file_flag=0',
'--config-variable', 'msan=<(msan)',
'--config-variable', 'tsan=<(tsan)',
'--config-variable', 'component=<(component)',
'--config-variable', 'target_arch=<(target_arch)',
'--config-variable', 'use_custom_libcxx=<(use_custom_libcxx)',
+ '--config-variable', 'v8_separate_ignition_snapshot=<(v8_separate_ignition_snapshot)',
'--config-variable', 'v8_use_external_startup_data=<(v8_use_external_startup_data)',
'--config-variable', 'v8_use_snapshot=<(v8_use_snapshot)',
],
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index 273d72b744..5269b95f64 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -110,6 +110,9 @@
'use_goma%': 0,
'gomadir%': '',
+ # Check if valgrind directories are present.
+ 'has_valgrind%': '<!pymod_do_main(has_valgrind)',
+
'conditions': [
# Set default gomadir.
['OS=="win"', {
@@ -166,6 +169,7 @@
'test_isolation_mode%': '<(test_isolation_mode)',
'fastbuild%': '<(fastbuild)',
'coverage%': '<(coverage)',
+ 'has_valgrind%': '<(has_valgrind)',
# Add a simple extras solely for the purpose of the cctests
'v8_extra_library_files': ['../test/cctest/test-extra.js'],
@@ -195,6 +199,9 @@
# their own default value.
'v8_use_external_startup_data%': 1,
+ # Use a separate ignition snapshot file in standalone builds.
+ 'v8_separate_ignition_snapshot': 1,
+
# Relative path to icu.gyp from this file.
'icu_gyp_path': '../third_party/icu/icu.gyp',
@@ -313,9 +320,8 @@
['android_ndk_root==""', {
'variables': {
'android_sysroot': '<(android_toolchain)/sysroot/',
- 'android_stlport': '<(android_toolchain)/sources/cxx-stl/stlport/',
+ 'android_stl': '<(android_toolchain)/sources/cxx-stl/',
},
- 'android_include': '<(android_sysroot)/usr/include',
'conditions': [
['target_arch=="x64"', {
'android_lib': '<(android_sysroot)/usr/lib64',
@@ -323,14 +329,16 @@
'android_lib': '<(android_sysroot)/usr/lib',
}],
],
- 'android_stlport_include': '<(android_stlport)/stlport',
- 'android_stlport_libs': '<(android_stlport)/libs',
+ 'android_libcpp_include': '<(android_stl)/llvm-libc++/libcxx/include',
+ 'android_libcpp_abi_include': '<(android_stl)/llvm-libc++abi/libcxxabi/include',
+ 'android_libcpp_libs': '<(android_stl)/llvm-libc++/libs',
+ 'android_support_include': '<(android_toolchain)/sources/android/support/include',
+ 'android_sysroot': '<(android_sysroot)',
}, {
'variables': {
'android_sysroot': '<(android_ndk_root)/platforms/android-<(android_target_platform)/arch-<(android_target_arch)',
- 'android_stlport': '<(android_ndk_root)/sources/cxx-stl/stlport/',
+ 'android_stl': '<(android_ndk_root)/sources/cxx-stl/',
},
- 'android_include': '<(android_sysroot)/usr/include',
'conditions': [
['target_arch=="x64"', {
'android_lib': '<(android_sysroot)/usr/lib64',
@@ -338,11 +346,14 @@
'android_lib': '<(android_sysroot)/usr/lib',
}],
],
- 'android_stlport_include': '<(android_stlport)/stlport',
- 'android_stlport_libs': '<(android_stlport)/libs',
+ 'android_libcpp_include': '<(android_stl)/llvm-libc++/libcxx/include',
+ 'android_libcpp_abi_include': '<(android_stl)/llvm-libc++abi/libcxxabi/include',
+ 'android_libcpp_libs': '<(android_stl)/llvm-libc++/libs',
+ 'android_support_include': '<(android_ndk_root)/sources/android/support/include',
+ 'android_sysroot': '<(android_sysroot)',
}],
],
- 'android_stlport_library': 'stlport_static',
+ 'android_libcpp_library': 'c++_static',
}], # OS=="android"
['host_clang==1', {
'host_cc': '<(clang_dir)/bin/clang',
@@ -367,6 +378,9 @@
# fpxx - compatibility mode, it chooses fp32 or fp64 depending on runtime
# detection
'mips_fpu_mode%': 'fp32',
+
+ # Indicates if gcmole tools are downloaded by a hook.
+ 'gcmole%': 0,
},
'target_defaults': {
'variables': {
@@ -720,8 +734,7 @@
'cflags': [ '-fPIC', ],
}],
[ 'coverage==1', {
- 'cflags!': [ '-O3', '-O2', '-O1', ],
- 'cflags': [ '-fprofile-arcs', '-ftest-coverage', '-O0'],
+ 'cflags': [ '-fprofile-arcs', '-ftest-coverage'],
'ldflags': [ '-fprofile-arcs'],
}],
],
@@ -1005,11 +1018,7 @@
}, # configurations
'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter'],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions',
- # Note: Using -std=c++0x will define __STRICT_ANSI__, which
- # in turn will leave out some template stuff for 'long
- # long'. What we want is -std=c++11, but this is not
- # supported by GCC 4.6 or Xcode 4.2
- '-std=gnu++0x' ],
+ '-std=gnu++11' ],
'target_conditions': [
['_toolset=="target"', {
'cflags!': [
@@ -1022,19 +1031,16 @@
'-fno-short-enums',
'-finline-limit=64',
'-Wa,--noexecstack',
- # Note: This include is in cflags to ensure that it comes after
- # all of the includes.
- '-I<(android_include)',
- '-I<(android_stlport_include)',
+ '--sysroot=<(android_sysroot)',
],
'cflags_cc': [
- '-Wno-error=non-virtual-dtor', # TODO(michaelbai): Fix warnings.
+ '-isystem<(android_libcpp_include)',
+ '-isystem<(android_libcpp_abi_include)',
+ '-isystem<(android_support_include)',
],
'defines': [
'ANDROID',
#'__GNU_SOURCE=1', # Necessary for clone()
- 'USE_STLPORT=1',
- '_STLP_USE_PTR_SPECIALIZATIONS=1',
'HAVE_OFF64_T',
'HAVE_SYS_UIO_H',
'ANDROID_BINSIZE_HACK', # Enable temporary hacks to reduce binsize.
@@ -1043,10 +1049,9 @@
'-pthread', # Not supported by Android toolchain.
],
'ldflags': [
- '-nostdlib',
'-Wl,--no-undefined',
- '-Wl,-rpath-link=<(android_lib)',
- '-L<(android_lib)',
+ '--sysroot=<(android_sysroot)',
+ '-nostdlib',
],
'libraries!': [
'-lrt', # librt is built into Bionic.
@@ -1057,12 +1062,12 @@
'-lpthread', '-lnss3', '-lnssutil3', '-lsmime3', '-lplds4', '-lplc4', '-lnspr4',
],
'libraries': [
- '-l<(android_stlport_library)',
+ '-l<(android_libcpp_library)',
+ '-latomic',
# Manually link the libgcc.a that the cross compiler uses.
'<!(<(android_toolchain)/*-gcc -print-libgcc-file-name)',
'-lc',
'-ldl',
- '-lstdc++',
'-lm',
],
'conditions': [
@@ -1079,22 +1084,22 @@
'-mfpu=vfp3',
],
'ldflags': [
- '-L<(android_stlport_libs)/armeabi-v7a',
+ '-L<(android_libcpp_libs)/armeabi-v7a',
],
}],
['target_arch=="arm" and arm_version < 7', {
'ldflags': [
- '-L<(android_stlport_libs)/armeabi',
+ '-L<(android_libcpp_libs)/armeabi',
],
}],
['target_arch=="x64"', {
'ldflags': [
- '-L<(android_stlport_libs)/x86_64',
+ '-L<(android_libcpp_libs)/x86_64',
],
}],
['target_arch=="arm64"', {
'ldflags': [
- '-L<(android_stlport_libs)/arm64-v8a',
+ '-L<(android_libcpp_libs)/arm64-v8a',
],
}],
['target_arch=="ia32" or target_arch=="x87"', {
@@ -1106,7 +1111,7 @@
'-fno-stack-protector',
],
'ldflags': [
- '-L<(android_stlport_libs)/x86',
+ '-L<(android_libcpp_libs)/x86',
],
}],
['target_arch=="mipsel"', {
@@ -1119,7 +1124,7 @@
'-fno-stack-protector',
],
'ldflags': [
- '-L<(android_stlport_libs)/mips',
+ '-L<(android_libcpp_libs)/mips',
],
}],
['(target_arch=="arm" or target_arch=="arm64" or target_arch=="x64" or target_arch=="ia32") and component!="shared_library"', {
diff --git a/deps/v8/build/toolchain.gypi b/deps/v8/build/toolchain.gypi
index e1cd791490..c2974c52bf 100644
--- a/deps/v8/build/toolchain.gypi
+++ b/deps/v8/build/toolchain.gypi
@@ -81,6 +81,9 @@
# The setting is ignored if want_separate_host_toolset is 0.
'v8_toolset_for_d8%': 'target',
+ # Control usage of a separate ignition snapshot file.
+ 'v8_separate_ignition_snapshot%': 0,
+
'host_os%': '<(OS)',
'werror%': '-Werror',
# For a shared library build, results in "libv8-<(soname_version).so".
diff --git a/deps/v8/build/vs_toolchain.py b/deps/v8/build/vs_toolchain.py
index 294ade3818..95fbcf4993 100644
--- a/deps/v8/build/vs_toolchain.py
+++ b/deps/v8/build/vs_toolchain.py
@@ -191,10 +191,10 @@ def _GetDesiredVsToolchainHashes():
"""Load a list of SHA1s corresponding to the toolchains that we want installed
to build with."""
if os.environ.get('GYP_MSVS_VERSION') == '2015':
- return ['49ae4b60d898182fc3f521c2fcda82c453915011']
+ return ['5a85cf1ce842f7cc96b9d17039a445a9dc9cf0dd']
else:
# Default to VS2013.
- return ['ee7d718ec60c2dc5d255bbe325909c2021a7efef']
+ return ['9ff97c632ae1fee0c98bcd53e71770eb3a0d8deb']
def Update(force=False):
diff --git a/deps/v8/include/v8-experimental.h b/deps/v8/include/v8-experimental.h
index f988e14054..3874e91101 100644
--- a/deps/v8/include/v8-experimental.h
+++ b/deps/v8/include/v8-experimental.h
@@ -39,6 +39,7 @@ class V8_EXPORT FastAccessorBuilder {
LabelId MakeLabel();
void SetLabel(LabelId label_id);
void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
+ ValueId Call(v8::FunctionCallback callback, ValueId value_id);
private:
FastAccessorBuilder() = delete;
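The new Call() primitive gives a fast accessor an escape hatch into a full
v8::FunctionCallback when the fast path cannot finish the job itself. A
minimal sketch, assuming the builder's New(), GetReceiver() and ReturnValue()
primitives from the rest of this header; the exact wiring of the ValueId
argument into the callback is assumed here:

    // Fallback that runs as an ordinary callback when reached via Call().
    void SlowPath(const v8::FunctionCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(info.This());
    }

    v8::experimental::FastAccessorBuilder* Build(v8::Isolate* isolate) {
      auto* builder = v8::experimental::FastAccessorBuilder::New(isolate);
      auto receiver = builder->GetReceiver();
      // New in this patch: delegate to |SlowPath|, passing |receiver| along.
      auto result = builder->Call(&SlowPath, receiver);
      builder->ReturnValue(result);
      return builder;
    }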
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index 4fbef0f5d9..11f8d51f02 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -5,6 +5,7 @@
#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_
+#include <stddef.h>
#include <stdint.h>
namespace v8 {
@@ -56,6 +57,15 @@ class Platform {
virtual ~Platform() {}
/**
+   * Gets the number of threads that are used to execute background tasks. It
+   * is used to estimate the number of tasks a work package should be split into.
+ * A return value of 0 means that there are no background threads available.
+ * Note that a value of 0 won't prohibit V8 from posting tasks using
+ * |CallOnBackgroundThread|.
+ */
+ virtual size_t NumberOfAvailableBackgroundThreads() { return 0; }
+
+ /**
* Schedules a task to be invoked on a background thread. |expected_runtime|
* indicates that the task will run a long time. The Platform implementation
* takes ownership of |task|. There is no guarantee about order of execution
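NumberOfAvailableBackgroundThreads() is a sizing hint, not a scheduling
contract. A partial sketch of an embedder platform backed by a fixed-size
pool; MyRunOnPool() is hypothetical, and the remaining v8::Platform virtuals
(foreground/delayed task hooks, time source) are elided:

    void MyRunOnPool(v8::Task* task);  // hypothetical pool entry point

    class PooledPlatform : public v8::Platform {
     public:
      explicit PooledPlatform(size_t workers) : workers_(workers) {}

      // New hook: report the pool size so V8 can split work packages.
      // Returning 0 still would not stop V8 from posting background tasks.
      size_t NumberOfAvailableBackgroundThreads() override { return workers_; }

      void CallOnBackgroundThread(v8::Task* task,
                                  ExpectedRuntime) override {
        MyRunOnPool(task);
      }
      // ... CallOnForegroundThread(), MonotonicallyIncreasingTime(), ...

     private:
      size_t workers_;
    };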
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index e432600290..007ae2eca5 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -207,6 +207,13 @@ class V8_EXPORT CpuProfiler {
CpuProfile* StopProfiling(Local<String> title);
/**
+ * Force collection of a sample. Must be called on the VM thread.
+ * Recording the forced sample does not contribute to the aggregated
+ * profile statistics.
+ */
+ void CollectSample();
+
+ /**
* Tells the profiler whether the embedder is idle.
*/
void SetIdle(bool is_idle);
@@ -419,6 +426,90 @@ class V8_EXPORT ActivityControl { // NOLINT
/**
+ * AllocationProfile is a sampled profile of allocations done by the program.
+ * This is structured as a call-graph.
+ */
+class V8_EXPORT AllocationProfile {
+ public:
+ struct Allocation {
+ /**
+ * Size of the sampled allocation object.
+ */
+ size_t size;
+
+ /**
+ * The number of objects of such size that were sampled.
+ */
+ unsigned int count;
+ };
+
+ /**
+ * Represents a node in the call-graph.
+ */
+ struct Node {
+ /**
+ * Name of the function. May be empty for anonymous functions or if the
+ * script corresponding to this function has been unloaded.
+ */
+ Local<String> name;
+
+ /**
+ * Name of the script containing the function. May be empty if the script
+ * name is not available, or if the script has been unloaded.
+ */
+ Local<String> script_name;
+
+ /**
+ * id of the script where the function is located. May be equal to
+ * v8::UnboundScript::kNoScriptId in cases where the script doesn't exist.
+ */
+ int script_id;
+
+ /**
+ * Start position of the function in the script.
+ */
+ int start_position;
+
+ /**
+ * 1-indexed line number where the function starts. May be
+ * kNoLineNumberInfo if no line number information is available.
+ */
+ int line_number;
+
+ /**
+ * 1-indexed column number where the function starts. May be
+     * kNoColumnNumberInfo if no column number information is available.
+ */
+ int column_number;
+
+ /**
+ * List of callees called from this node for which we have sampled
+ * allocations. The lifetime of the children is scoped to the containing
+ * AllocationProfile.
+ */
+ std::vector<Node*> children;
+
+ /**
+ * List of self allocations done by this node in the call-graph.
+ */
+ std::vector<Allocation> allocations;
+ };
+
+ /**
+ * Returns the root node of the call-graph. The root node corresponds to an
+ * empty JS call-stack. The lifetime of the returned Node* is scoped to the
+ * containing AllocationProfile.
+ */
+ virtual Node* GetRootNode() = 0;
+
+ virtual ~AllocationProfile() {}
+
+ static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
+ static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
+};
+
+
+/**
* Interface for controlling heap profiling. Instance of the
* profiler can be retrieved using v8::Isolate::GetHeapProfiler.
*/
@@ -522,6 +613,49 @@ class V8_EXPORT HeapProfiler {
void StopTrackingHeapObjects();
/**
+ * Starts gathering a sampling heap profile. A sampling heap profile is
+ * similar to tcmalloc's heap profiler and Go's mprof. It samples object
+ * allocations and builds an online 'sampling' heap profile. At any point in
+ * time, this profile is expected to be a representative sample of objects
+ * currently live in the system. Each sampled allocation includes the stack
+ * trace at the time of allocation, which makes this really useful for memory
+ * leak detection.
+ *
+ * This mechanism is intended to be cheap enough that it can be used in
+ * production with minimal performance overhead.
+ *
+ * Allocations are sampled using a randomized Poisson process. On average, one
+ * allocation will be sampled every |sample_interval| bytes allocated. The
+ * |stack_depth| parameter controls the maximum number of stack frames to be
+ * captured on each allocation.
+ *
+ * NOTE: This is a proof-of-concept at this point. Right now we only sample
+ * newspace allocations. Support for paged space allocation (e.g. pre-tenured
+ * objects, large objects, code objects, etc.) and native allocations
+ * doesn't exist yet, but is anticipated in the future.
+ *
+ * Objects allocated before the sampling is started will not be included in
+ * the profile.
+ *
+ * Returns false if a sampling heap profiler is already running.
+ */
+ bool StartSamplingHeapProfiler(uint64_t sample_interval = 512 * 1024,
+ int stack_depth = 16);
+
+ /**
+   * Stops the sampling heap profiler and discards the current profile.
+ */
+ void StopSamplingHeapProfiler();
+
+ /**
+ * Returns the sampled profile of allocations allocated (and still live) since
+   * StartSamplingHeapProfiler was called. Ownership of the pointer is
+   * transferred to the caller. Returns nullptr if the sampling heap profiler
+   * is not active.
+ */
+ AllocationProfile* GetAllocationProfile();
+
+ /**
* Deletes all snapshots taken. All previously returned pointers to
* snapshots and their contents become invalid after this call.
*/
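StartSamplingHeapProfiler(), GetAllocationProfile() and
StopSamplingHeapProfiler() make up the whole new sampling surface. A minimal
usage sketch; |isolate| is assumed to be a live isolate, and the 256 KiB
interval is an arbitrary choice:

    #include <cstdio>
    #include <memory>
    #include "include/v8-profiler.h"

    // Sum sampled self-allocations across the call-graph rooted at |node|.
    void SumAllocations(v8::AllocationProfile::Node* node, size_t* total) {
      for (const auto& a : node->allocations) *total += a.size * a.count;
      for (auto* child : node->children) SumAllocations(child, total);
    }

    void ProfileWorkload(v8::Isolate* isolate) {
      v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
      // Sample roughly one allocation per 256 KiB, up to 16 stack frames.
      if (!profiler->StartSamplingHeapProfiler(256 * 1024, 16)) return;

      // ... run the JavaScript workload to be profiled ...

      // Ownership of the returned profile is transferred to the caller.
      std::unique_ptr<v8::AllocationProfile> profile(
          profiler->GetAllocationProfile());
      if (profile) {
        size_t total = 0;
        SumAllocations(profile->GetRootNode(), &total);
        std::printf("sampled live bytes: %zu\n", total);
      }
      profiler->StopSamplingHeapProfiler();
    }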
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 1213993dad..3d41a2cb3f 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -8,10 +8,10 @@
// These macros define the version number for the current version.
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
-#define V8_MAJOR_VERSION 4
-#define V8_MINOR_VERSION 9
-#define V8_BUILD_NUMBER 385
-#define V8_PATCH_LEVEL 35
+#define V8_MAJOR_VERSION 5
+#define V8_MINOR_VERSION 0
+#define V8_BUILD_NUMBER 71
+#define V8_PATCH_LEVEL 32
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 86d65c33db..9ccbc6eb18 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -329,9 +329,7 @@ class Local {
friend class PersistentValueMapBase;
template<class F1, class F2> friend class PersistentValueVector;
- template <class S>
- V8_INLINE Local(S* that)
- : val_(that) {}
+ explicit V8_INLINE Local(T* that) : val_(that) {}
V8_INLINE static Local<T> New(Isolate* isolate, T* that);
T* val_;
};
@@ -434,7 +432,10 @@ class WeakCallbackInfo {
return internal_fields_[1];
}
- bool IsFirstPass() const { return callback_ != nullptr; }
+ V8_DEPRECATED("Not realiable once SetSecondPassCallback() was used.",
+ bool IsFirstPass() const) {
+ return callback_ != nullptr;
+ }
// When first called, the embedder MUST Reset() the Global which triggered the
// callback. The Global itself is unusable for anything else. No other v8 api
@@ -787,7 +788,7 @@ template <class T, class M> class Persistent : public PersistentBase<T> {
template<class F1, class F2> friend class Persistent;
template<class F> friend class ReturnValue;
- template <class S> V8_INLINE Persistent(S* that) : PersistentBase<T>(that) { }
+ explicit V8_INLINE Persistent(T* that) : PersistentBase<T>(that) {}
V8_INLINE T* operator*() const { return this->val_; }
template<class S, class M2>
V8_INLINE void Copy(const Persistent<S, M2>& that);
@@ -886,7 +887,7 @@ using UniquePersistent = Global<T>;
*/
class V8_EXPORT HandleScope {
public:
- HandleScope(Isolate* isolate);
+ explicit HandleScope(Isolate* isolate);
~HandleScope();
@@ -939,7 +940,7 @@ class V8_EXPORT HandleScope {
*/
class V8_EXPORT EscapableHandleScope : public HandleScope {
public:
- EscapableHandleScope(Isolate* isolate);
+ explicit EscapableHandleScope(Isolate* isolate);
V8_INLINE ~EscapableHandleScope() {}
/**
@@ -3147,7 +3148,8 @@ class FunctionCallbackInfo {
public:
V8_INLINE int Length() const;
V8_INLINE Local<Value> operator[](int i) const;
- V8_INLINE Local<Function> Callee() const;
+ V8_INLINE V8_DEPRECATED("Use Data() to explicitly pass Callee instead",
+ Local<Function> Callee() const);
V8_INLINE Local<Object> This() const;
V8_INLINE Local<Object> Holder() const;
V8_INLINE bool IsConstructCall() const;
@@ -3191,19 +3193,21 @@ class PropertyCallbackInfo {
V8_INLINE Local<Object> This() const;
V8_INLINE Local<Object> Holder() const;
V8_INLINE ReturnValue<T> GetReturnValue() const;
+ V8_INLINE bool ShouldThrowOnError() const;
// This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 6;
+ static const int kArgsLength = 7;
protected:
friend class MacroAssembler;
friend class internal::PropertyCallbackArguments;
friend class internal::CustomArguments<PropertyCallbackInfo>;
- static const int kHolderIndex = 0;
- static const int kIsolateIndex = 1;
- static const int kReturnValueDefaultValueIndex = 2;
- static const int kReturnValueIndex = 3;
- static const int kDataIndex = 4;
- static const int kThisIndex = 5;
+ static const int kShouldThrowOnErrorIndex = 0;
+ static const int kHolderIndex = 1;
+ static const int kIsolateIndex = 2;
+ static const int kReturnValueDefaultValueIndex = 3;
+ static const int kReturnValueIndex = 4;
+ static const int kDataIndex = 5;
+ static const int kThisIndex = 6;
V8_INLINE PropertyCallbackInfo(internal::Object** args) : args_(args) {}
internal::Object** args_;
@@ -4322,8 +4326,10 @@ enum AccessType {
* object.
*/
typedef bool (*AccessCheckCallback)(Local<Context> accessing_context,
- Local<Object> accessed_object);
-
+ Local<Object> accessed_object,
+ Local<Value> data);
+typedef bool (*DeprecatedAccessCheckCallback)(Local<Context> accessing_context,
+ Local<Object> accessed_object);
/**
* Returns true if cross-context access should be allowed to the named
@@ -4753,6 +4759,10 @@ class V8_EXPORT ObjectTemplate : public Template {
*/
void SetAccessCheckCallback(AccessCheckCallback callback,
Local<Value> data = Local<Value>());
+ V8_DEPRECATED(
+ "Use SetAccessCheckCallback with new AccessCheckCallback signature.",
+ void SetAccessCheckCallback(DeprecatedAccessCheckCallback callback,
+ Local<Value> data = Local<Value>()));
V8_DEPRECATED(
"Use SetAccessCheckCallback instead",
@@ -4999,8 +5009,10 @@ typedef void (*MemoryAllocationCallback)(ObjectSpace space,
AllocationAction action,
int size);
-// --- Leave Script Callback ---
-typedef void (*CallCompletedCallback)();
+// --- Enter/Leave Script Callback ---
+typedef void (*BeforeCallEnteredCallback)(Isolate*);
+typedef void (*CallCompletedCallback)(Isolate*);
+typedef void (*DeprecatedCallCompletedCallback)();
// --- Promise Reject Callback ---
enum PromiseRejectEvent {
@@ -5069,11 +5081,24 @@ enum GCType {
kGCTypeIncrementalMarking | kGCTypeProcessWeakCallbacks
};
+/**
+ * GCCallbackFlags is used to notify additional information about the GC
+ * callback.
+ * - kGCCallbackFlagConstructRetainedObjectInfos: The GC callback is for
+ * constructing retained object infos.
+ * - kGCCallbackFlagForced: The GC callback is for a forced GC for testing.
+ * - kGCCallbackFlagSynchronousPhantomCallbackProcessing: The GC callback
+ * is called synchronously without getting posted to an idle task.
+ * - kGCCallbackFlagCollectAllAvailableGarbage: The GC callback is called
+ * in a phase where V8 is trying to collect all available garbage
+ * (e.g., handling a low memory notification).
+ */
enum GCCallbackFlags {
kNoGCCallbackFlags = 0,
kGCCallbackFlagConstructRetainedObjectInfos = 1 << 1,
kGCCallbackFlagForced = 1 << 2,
- kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3
+ kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3,
+ kGCCallbackFlagCollectAllAvailableGarbage = 1 << 4,
};
typedef void (*GCCallback)(GCType type, GCCallbackFlags flags);
@@ -5455,6 +5480,18 @@ class V8_EXPORT Isolate {
kPromiseChain = 17,
kPromiseAccept = 18,
kPromiseDefer = 19,
+ kHtmlCommentInExternalScript = 20,
+ kHtmlComment = 21,
+ kSloppyModeBlockScopedFunctionRedefinition = 22,
+ kForInInitializer = 23,
+ kArrayProtectorDirtied = 24,
+ kArraySpeciesModified = 25,
+ kArrayPrototypeConstructorModified = 26,
+ kArrayInstanceProtoModified = 27,
+ kArrayInstanceConstructorModified = 28,
+
+ // If you add new values here, you'll also need to update V8Initializer.cpp
+ // in Chromium.
kUseCounterFeatureCount // This enum value must be last.
};
@@ -5796,6 +5833,19 @@ class V8_EXPORT Isolate {
void SetEventLogger(LogEventCallback that);
/**
+ * Adds a callback to notify the host application right before a script
+   * is about to run. If a script re-enters the runtime during execution, the
+ * BeforeCallEnteredCallback is invoked for each re-entrance.
+ * Executing scripts inside the callback will re-trigger the callback.
+ */
+ void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
+
+ /**
+ * Removes callback that was installed by AddBeforeCallEnteredCallback.
+ */
+ void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
+
+ /**
* Adds a callback to notify the host application when a script finished
 * running. If a script re-enters the runtime during execution, the
* CallCompletedCallback is only invoked when the outer-most script
@@ -5803,12 +5853,18 @@ class V8_EXPORT Isolate {
* further callbacks.
*/
void AddCallCompletedCallback(CallCompletedCallback callback);
+ V8_DEPRECATE_SOON(
+ "Use callback with parameter",
+ void AddCallCompletedCallback(DeprecatedCallCompletedCallback callback));
/**
* Removes callback that was installed by AddCallCompletedCallback.
*/
void RemoveCallCompletedCallback(CallCompletedCallback callback);
-
+ V8_DEPRECATE_SOON(
+ "Use callback with parameter",
+ void RemoveCallCompletedCallback(
+ DeprecatedCallCompletedCallback callback));
/**
* Set callback to notify about promise reject with no handler, or
@@ -7132,7 +7188,7 @@ class Internals {
static const int kNodeIsPartiallyDependentShift = 4;
static const int kNodeIsActiveShift = 4;
- static const int kJSObjectType = 0xb7;
+ static const int kJSObjectType = 0xb5;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
@@ -8262,6 +8318,12 @@ ReturnValue<T> PropertyCallbackInfo<T>::GetReturnValue() const {
return ReturnValue<T>(&args_[kReturnValueIndex]);
}
+template <typename T>
+bool PropertyCallbackInfo<T>::ShouldThrowOnError() const {
+ typedef internal::Internals I;
+ return args_[kShouldThrowOnErrorIndex] != I::IntToSmi(0);
+}
+
Local<Primitive> Undefined(Isolate* isolate) {
typedef internal::Object* S;
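The ShouldThrowOnError() bit added above is what lets embedder setters honor
strict-mode semantics; the accessors.cc changes further down in this patch
(ArrayLengthSetter, ModuleSetExport) use it exactly this way. A hedged sketch
of an AccessorNameSetterCallback for a non-writable property; the message
text is illustrative:

    void ReadOnlySetter(v8::Local<v8::Name> property,
                        v8::Local<v8::Value> value,
                        const v8::PropertyCallbackInfo<void>& info) {
      // Sloppy-mode writes to a non-writable property fail silently.
      if (!info.ShouldThrowOnError()) return;
      v8::Isolate* isolate = info.GetIsolate();
      isolate->ThrowException(v8::Exception::TypeError(
          v8::String::NewFromUtf8(isolate, "property is read-only",
                                  v8::NewStringType::kNormal)
              .ToLocalChecked()));
    }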
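Likewise, the newly documented kGCCallbackFlagCollectAllAvailableGarbage can
be observed from a plain GCCallback; registering it through
Isolate::AddGCEpilogueCallback() is assumed to match this revision:

    void OnGCEpilogue(v8::GCType type, v8::GCCallbackFlags flags) {
      if (flags & v8::kGCCallbackFlagCollectAllAvailableGarbage) {
        // V8 is collecting everything it can (e.g. after a low-memory
        // notification); a good moment for the embedder to drop caches too.
      }
    }
    // Once per isolate: isolate->AddGCEpilogueCallback(OnGCEpilogue);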
diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg
index 6d3624992c..fbf090bb59 100644
--- a/deps/v8/infra/config/cq.cfg
+++ b/deps/v8/infra/config/cq.cfg
@@ -16,6 +16,7 @@ rietveld {
verifiers {
reviewer_lgtm {
committer_list: "project-v8-committers"
+ dry_run_access_list: "project-v8-tryjob-access"
}
tree_status {
@@ -66,9 +67,9 @@ verifiers {
}
}
buckets {
- name: "tryserver.blink"
+ name: "tryserver.v8"
builders {
- name: "linux_blink_rel"
+ name: "v8_linux_blink_rel"
experiment_percentage: 20
}
}
diff --git a/deps/v8/snapshot_toolchain.gni b/deps/v8/snapshot_toolchain.gni
index 11b73c5804..4932110489 100644
--- a/deps/v8/snapshot_toolchain.gni
+++ b/deps/v8/snapshot_toolchain.gni
@@ -42,3 +42,5 @@ if (host_cpu == "x64" && host_os == "linux") {
} else {
snapshot_toolchain = default_toolchain
}
+
+
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index b54cd04563..b0b703b7cc 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -12,18 +12,12 @@ include_rules = [
"+src/interpreter/bytecode-array-iterator.h",
"+src/interpreter/bytecodes.h",
"+src/interpreter/interpreter.h",
+ "+src/interpreter/source-position-table.h",
"-src/libplatform",
"-include/libplatform"
]
specific_include_rules = {
- ".*\.h": [
- # Note that src/v8.h by now is a regular header file, it doesn't provide
- # any special declarations besides the V8 class. There should be no need
- # for including it in any .h files though. This rule is just a reminder,
- # and can be removed once the dust has settled.
- "-src/v8.h",
- ],
"d8\.cc": [
"+include/libplatform/libplatform.h",
],
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 2094cdb20d..766509e2a5 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -27,13 +27,15 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
AccessorNameSetterCallback setter,
PropertyAttributes attributes) {
Factory* factory = isolate->factory();
- Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
+ Handle<AccessorInfo> info = factory->NewAccessorInfo();
info->set_property_attributes(attributes);
info->set_all_can_read(false);
info->set_all_can_write(false);
info->set_is_special_data_property(true);
+ name = factory->InternalizeName(name);
info->set_name(*name);
Handle<Object> get = v8::FromCData(isolate, getter);
+ if (setter == nullptr) setter = &ReconfigureToDataProperty;
Handle<Object> set = v8::FromCData(isolate, setter);
info->set_getter(*get);
info->set_setter(*set);
@@ -41,21 +43,6 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
}
-Handle<ExecutableAccessorInfo> Accessors::CloneAccessor(
- Isolate* isolate,
- Handle<ExecutableAccessorInfo> accessor) {
- Factory* factory = isolate->factory();
- Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
- info->set_name(accessor->name());
- info->set_flag(accessor->flag());
- info->set_expected_receiver_type(accessor->expected_receiver_type());
- info->set_getter(accessor->getter());
- info->set_setter(accessor->setter());
- info->set_data(accessor->data());
- return info;
-}
-
-
static V8_INLINE bool CheckForName(Handle<Name> name,
Handle<String> property_name,
int offset,
@@ -96,6 +83,7 @@ bool Accessors::IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
bool Accessors::IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
Handle<Name> name,
int* object_offset) {
+ DCHECK(name->IsUniqueName());
Isolate* isolate = name->GetIsolate();
switch (map->instance_type()) {
@@ -113,7 +101,7 @@ bool Accessors::IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
// Check if the property is overridden on the instance.
DescriptorArray* descriptors = map->instance_descriptors();
- int descriptor = descriptors->SearchWithCache(*name, *map);
+ int descriptor = descriptors->SearchWithCache(isolate, *name, *map);
if (descriptor != DescriptorArray::kNotFound) return false;
Handle<Object> proto = Handle<Object>(map->prototype(), isolate);
@@ -140,6 +128,50 @@ bool Accessors::IsJSArrayBufferViewFieldAccessor(Handle<Map> map,
}
}
+MUST_USE_RESULT static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
+ Isolate* isolate, Handle<JSObject> receiver, Handle<JSObject> holder,
+ Handle<Name> name, Handle<Object> value, bool observe) {
+ LookupIterator it(receiver, name, holder,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ // Skip any access checks we might hit. This accessor should never hit in a
+ // situation where the caller does not have access.
+ if (it.state() == LookupIterator::ACCESS_CHECK) {
+ CHECK(it.HasAccess());
+ it.Next();
+ }
+ CHECK_EQ(LookupIterator::ACCESSOR, it.state());
+
+ Handle<Object> old_value;
+ bool is_observed = observe && receiver->map()->is_observed();
+ if (is_observed) {
+ MaybeHandle<Object> maybe_old = Object::GetPropertyWithAccessor(&it);
+ if (!maybe_old.ToHandle(&old_value)) return maybe_old;
+ }
+
+ it.ReconfigureDataProperty(value, it.property_attributes());
+
+ if (is_observed && !old_value->SameValue(*value)) {
+ return JSObject::EnqueueChangeRecord(receiver, "update", name, old_value);
+ }
+
+ return value;
+}
+
+void Accessors::ReconfigureToDataProperty(
+ v8::Local<v8::Name> key, v8::Local<v8::Value> val,
+ const v8::PropertyCallbackInfo<void>& info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<JSObject> receiver =
+ Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
+ Handle<JSObject> holder =
+ Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
+ Handle<Name> name = Utils::OpenHandle(*key);
+ Handle<Object> value = Utils::OpenHandle(*val);
+ MaybeHandle<Object> result = ReplaceAccessorWithDataProperty(
+ isolate, receiver, holder, name, value, false);
+ if (result.is_null()) isolate->OptionalRescheduleException(false);
+}
//
// Accessors::ArgumentsIterator
@@ -156,29 +188,11 @@ void Accessors::ArgumentsIteratorGetter(
}
-void Accessors::ArgumentsIteratorSetter(
- v8::Local<v8::Name> name, v8::Local<v8::Value> val,
- const v8::PropertyCallbackInfo<void>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- HandleScope scope(isolate);
- Handle<JSObject> object_handle =
- Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
- Handle<Object> value_handle = Utils::OpenHandle(*val);
- Handle<Name> name_handle = Utils::OpenHandle(*name);
-
- if (JSObject::DefinePropertyOrElementIgnoreAttributes(
- object_handle, name_handle, value_handle, NONE)
- .is_null()) {
- isolate->OptionalRescheduleException(false);
- }
-}
-
-
Handle<AccessorInfo> Accessors::ArgumentsIteratorInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<Name> name = isolate->factory()->iterator_symbol();
- return MakeAccessor(isolate, name, &ArgumentsIteratorGetter,
- &ArgumentsIteratorSetter, attributes);
+ return MakeAccessor(isolate, name, &ArgumentsIteratorGetter, nullptr,
+ attributes);
}
@@ -219,6 +233,19 @@ void Accessors::ArrayLengthSetter(
if (JSArray::ObservableSetLength(array, length).is_null()) {
isolate->OptionalRescheduleException(false);
}
+
+ if (info.ShouldThrowOnError()) {
+ uint32_t actual_new_len = 0;
+ CHECK(array->length()->ToArrayLength(&actual_new_len));
+ // Throw TypeError if there were non-deletable elements.
+ if (actual_new_len != length) {
+ Factory* factory = isolate->factory();
+ isolate->Throw(*factory->NewTypeError(
+ MessageTemplate::kStrictDeleteProperty,
+ factory->NewNumberFromUint(actual_new_len - 1), array));
+ isolate->OptionalRescheduleException(false);
+ }
+ }
}
@@ -259,21 +286,10 @@ void Accessors::StringLengthGetter(
}
-void Accessors::StringLengthSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::StringLengthInfo(
Isolate* isolate, PropertyAttributes attributes) {
- return MakeAccessor(isolate,
- isolate->factory()->length_string(),
- &StringLengthGetter,
- &StringLengthSetter,
- attributes);
+ return MakeAccessor(isolate, isolate->factory()->length_string(),
+ &StringLengthGetter, nullptr, attributes);
}
@@ -295,22 +311,11 @@ void Accessors::ScriptColumnOffsetGetter(
}
-void Accessors::ScriptColumnOffsetSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptColumnOffsetInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("column_offset")));
- return MakeAccessor(isolate,
- name,
- &ScriptColumnOffsetGetter,
- &ScriptColumnOffsetSetter,
+ return MakeAccessor(isolate, name, &ScriptColumnOffsetGetter, nullptr,
attributes);
}
@@ -332,23 +337,11 @@ void Accessors::ScriptIdGetter(
}
-void Accessors::ScriptIdSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptIdInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(
isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("id")));
- return MakeAccessor(isolate,
- name,
- &ScriptIdGetter,
- &ScriptIdSetter,
- attributes);
+ return MakeAccessor(isolate, name, &ScriptIdGetter, nullptr, attributes);
}
@@ -369,21 +362,10 @@ void Accessors::ScriptNameGetter(
}
-void Accessors::ScriptNameSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptNameInfo(
Isolate* isolate, PropertyAttributes attributes) {
- return MakeAccessor(isolate,
- isolate->factory()->name_string(),
- &ScriptNameGetter,
- &ScriptNameSetter,
- attributes);
+ return MakeAccessor(isolate, isolate->factory()->name_string(),
+ &ScriptNameGetter, nullptr, attributes);
}
@@ -404,21 +386,10 @@ void Accessors::ScriptSourceGetter(
}
-void Accessors::ScriptSourceSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptSourceInfo(
Isolate* isolate, PropertyAttributes attributes) {
- return MakeAccessor(isolate,
- isolate->factory()->source_string(),
- &ScriptSourceGetter,
- &ScriptSourceSetter,
- attributes);
+ return MakeAccessor(isolate, isolate->factory()->source_string(),
+ &ScriptSourceGetter, nullptr, attributes);
}
@@ -440,22 +411,11 @@ void Accessors::ScriptLineOffsetGetter(
}
-void Accessors::ScriptLineOffsetSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptLineOffsetInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("line_offset")));
- return MakeAccessor(isolate,
- name,
- &ScriptLineOffsetGetter,
- &ScriptLineOffsetSetter,
+ return MakeAccessor(isolate, name, &ScriptLineOffsetGetter, nullptr,
attributes);
}
@@ -478,23 +438,11 @@ void Accessors::ScriptTypeGetter(
}
-void Accessors::ScriptTypeSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptTypeInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(
isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("type")));
- return MakeAccessor(isolate,
- name,
- &ScriptTypeGetter,
- &ScriptTypeSetter,
- attributes);
+ return MakeAccessor(isolate, name, &ScriptTypeGetter, nullptr, attributes);
}
@@ -516,22 +464,11 @@ void Accessors::ScriptCompilationTypeGetter(
}
-void Accessors::ScriptCompilationTypeSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptCompilationTypeInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("compilation_type")));
- return MakeAccessor(isolate,
- name,
- &ScriptCompilationTypeGetter,
- &ScriptCompilationTypeSetter,
+ return MakeAccessor(isolate, name, &ScriptCompilationTypeGetter, nullptr,
attributes);
}
@@ -561,22 +498,11 @@ void Accessors::ScriptLineEndsGetter(
}
-void Accessors::ScriptLineEndsSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptLineEndsInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("line_ends")));
- return MakeAccessor(isolate,
- name,
- &ScriptLineEndsGetter,
- &ScriptLineEndsSetter,
+ return MakeAccessor(isolate, name, &ScriptLineEndsGetter, nullptr,
attributes);
}
@@ -598,21 +524,10 @@ void Accessors::ScriptSourceUrlGetter(
}
-void Accessors::ScriptSourceUrlSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptSourceUrlInfo(
Isolate* isolate, PropertyAttributes attributes) {
- return MakeAccessor(isolate,
- isolate->factory()->source_url_string(),
- &ScriptSourceUrlGetter,
- &ScriptSourceUrlSetter,
- attributes);
+ return MakeAccessor(isolate, isolate->factory()->source_url_string(),
+ &ScriptSourceUrlGetter, nullptr, attributes);
}
@@ -634,21 +549,10 @@ void Accessors::ScriptSourceMappingUrlGetter(
}
-void Accessors::ScriptSourceMappingUrlSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptSourceMappingUrlInfo(
Isolate* isolate, PropertyAttributes attributes) {
- return MakeAccessor(isolate,
- isolate->factory()->source_mapping_url_string(),
- &ScriptSourceMappingUrlGetter,
- &ScriptSourceMappingUrlSetter,
- attributes);
+ return MakeAccessor(isolate, isolate->factory()->source_mapping_url_string(),
+ &ScriptSourceMappingUrlGetter, nullptr, attributes);
}
@@ -671,19 +575,12 @@ void Accessors::ScriptIsEmbedderDebugScriptGetter(
}
-void Accessors::ScriptIsEmbedderDebugScriptSetter(
- v8::Local<v8::Name> name, v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptIsEmbedderDebugScriptInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("is_debugger_script")));
return MakeAccessor(isolate, name, &ScriptIsEmbedderDebugScriptGetter,
- &ScriptIsEmbedderDebugScriptSetter, attributes);
+ nullptr, attributes);
}
@@ -704,22 +601,11 @@ void Accessors::ScriptContextDataGetter(
}
-void Accessors::ScriptContextDataSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptContextDataInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("context_data")));
- return MakeAccessor(isolate,
- name,
- &ScriptContextDataGetter,
- &ScriptContextDataSetter,
+ return MakeAccessor(isolate, name, &ScriptContextDataGetter, nullptr,
attributes);
}
@@ -751,22 +637,11 @@ void Accessors::ScriptEvalFromScriptGetter(
}
-void Accessors::ScriptEvalFromScriptSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptEvalFromScriptInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("eval_from_script")));
- return MakeAccessor(isolate,
- name,
- &ScriptEvalFromScriptGetter,
- &ScriptEvalFromScriptSetter,
+ return MakeAccessor(isolate, name, &ScriptEvalFromScriptGetter, nullptr,
attributes);
}
@@ -789,7 +664,6 @@ void Accessors::ScriptEvalFromScriptPositionGetter(
Handle<Code> code(SharedFunctionInfo::cast(
script->eval_from_shared())->code());
result = Handle<Object>(Smi::FromInt(code->SourcePosition(
- code->instruction_start() +
script->eval_from_instructions_offset())),
isolate);
}
@@ -797,23 +671,12 @@ void Accessors::ScriptEvalFromScriptPositionGetter(
}
-void Accessors::ScriptEvalFromScriptPositionSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptEvalFromScriptPositionInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("eval_from_script_position")));
- return MakeAccessor(isolate,
- name,
- &ScriptEvalFromScriptPositionGetter,
- &ScriptEvalFromScriptPositionSetter,
- attributes);
+ return MakeAccessor(isolate, name, &ScriptEvalFromScriptPositionGetter,
+ nullptr, attributes);
}
@@ -843,22 +706,11 @@ void Accessors::ScriptEvalFromFunctionNameGetter(
}
-void Accessors::ScriptEvalFromFunctionNameSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<void>& info) {
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::ScriptEvalFromFunctionNameInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("eval_from_function_name")));
- return MakeAccessor(isolate,
- name,
- &ScriptEvalFromFunctionNameGetter,
- &ScriptEvalFromFunctionNameSetter,
+ return MakeAccessor(isolate, name, &ScriptEvalFromFunctionNameGetter, nullptr,
attributes);
}
@@ -976,59 +828,27 @@ void Accessors::FunctionLengthGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
-
-MUST_USE_RESULT static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
- Isolate* isolate, Handle<JSObject> object, Handle<Name> name,
- Handle<Object> value, bool is_observed, Handle<Object> old_value) {
- LookupIterator it(object, name);
- CHECK_EQ(LookupIterator::ACCESSOR, it.state());
- DCHECK(it.HolderIsReceiverOrHiddenPrototype());
- it.ReconfigureDataProperty(value, it.property_details().attributes());
-
- if (is_observed && !old_value->SameValue(*value)) {
- return JSObject::EnqueueChangeRecord(object, "update", name, old_value);
- }
-
- return value;
-}
-
-
-MUST_USE_RESULT static MaybeHandle<Object> SetFunctionLength(
- Isolate* isolate, Handle<JSFunction> function, Handle<Object> value) {
- Handle<Object> old_value;
- bool is_observed = function->map()->is_observed();
- if (is_observed) {
- old_value = handle(Smi::FromInt(function->shared()->length()), isolate);
- }
-
- return ReplaceAccessorWithDataProperty(isolate, function,
- isolate->factory()->length_string(),
- value, is_observed, old_value);
-}
-
-
-void Accessors::FunctionLengthSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> val,
+void Accessors::ObservedReconfigureToDataProperty(
+ v8::Local<v8::Name> key, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
+ Handle<JSObject> receiver =
+ Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
+ Handle<JSObject> holder =
+ Handle<JSObject>::cast(Utils::OpenHandle(*info.Holder()));
+ Handle<Name> name = Utils::OpenHandle(*key);
Handle<Object> value = Utils::OpenHandle(*val);
-
- Handle<JSFunction> object =
- Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
- if (SetFunctionLength(isolate, object, value).is_null()) {
- isolate->OptionalRescheduleException(false);
- }
+ MaybeHandle<Object> result = ReplaceAccessorWithDataProperty(
+ isolate, receiver, holder, name, value, true);
+ if (result.is_null()) isolate->OptionalRescheduleException(false);
}
Handle<AccessorInfo> Accessors::FunctionLengthInfo(
Isolate* isolate, PropertyAttributes attributes) {
- return MakeAccessor(isolate,
- isolate->factory()->length_string(),
- &FunctionLengthGetter,
- &FunctionLengthSetter,
+ return MakeAccessor(isolate, isolate->factory()->length_string(),
+ &FunctionLengthGetter, &ObservedReconfigureToDataProperty,
attributes);
}
@@ -1054,43 +874,10 @@ void Accessors::FunctionNameGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
-
-MUST_USE_RESULT static MaybeHandle<Object> SetFunctionName(
- Isolate* isolate, Handle<JSFunction> function, Handle<Object> value) {
- Handle<Object> old_value;
- bool is_observed = function->map()->is_observed();
- if (is_observed) {
- old_value = handle(function->shared()->name(), isolate);
- }
-
- return ReplaceAccessorWithDataProperty(isolate, function,
- isolate->factory()->name_string(),
- value, is_observed, old_value);
-}
-
-
-void Accessors::FunctionNameSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> val,
- const v8::PropertyCallbackInfo<void>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- HandleScope scope(isolate);
- Handle<Object> value = Utils::OpenHandle(*val);
-
- Handle<JSFunction> object =
- Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
- if (SetFunctionName(isolate, object, value).is_null()) {
- isolate->OptionalRescheduleException(false);
- }
-}
-
-
Handle<AccessorInfo> Accessors::FunctionNameInfo(
Isolate* isolate, PropertyAttributes attributes) {
- return MakeAccessor(isolate,
- isolate->factory()->name_string(),
- &FunctionNameGetter,
- &FunctionNameSetter,
+ return MakeAccessor(isolate, isolate->factory()->name_string(),
+ &FunctionNameGetter, &ObservedReconfigureToDataProperty,
attributes);
}
@@ -1158,10 +945,10 @@ static int FindFunctionInFrame(JavaScriptFrame* frame,
}
+namespace {
+
Handle<Object> GetFunctionArguments(Isolate* isolate,
Handle<JSFunction> function) {
- if (function->shared()->native()) return isolate->factory()->null_value();
-
// Find the top invocation of the function by traversing frames.
for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
@@ -1200,9 +987,14 @@ Handle<Object> GetFunctionArguments(Isolate* isolate,
return isolate->factory()->null_value();
}
+} // namespace
-Handle<Object> Accessors::FunctionGetArguments(Handle<JSFunction> function) {
- return GetFunctionArguments(function->GetIsolate(), function);
+
+Handle<JSObject> Accessors::FunctionGetArguments(Handle<JSFunction> function) {
+ Handle<Object> arguments =
+ GetFunctionArguments(function->GetIsolate(), function);
+ CHECK(arguments->IsJSObject());
+ return Handle<JSObject>::cast(arguments);
}
@@ -1213,27 +1005,18 @@ void Accessors::FunctionArgumentsGetter(
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
- Handle<Object> result = GetFunctionArguments(isolate, function);
+ Handle<Object> result =
+ function->shared()->native()
+ ? Handle<Object>::cast(isolate->factory()->null_value())
+ : GetFunctionArguments(isolate, function);
info.GetReturnValue().Set(Utils::ToLocal(result));
}
-void Accessors::FunctionArgumentsSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> val,
- const v8::PropertyCallbackInfo<void>& info) {
- // Function arguments is non writable, non configurable.
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::FunctionArgumentsInfo(
Isolate* isolate, PropertyAttributes attributes) {
- return MakeAccessor(isolate,
- isolate->factory()->arguments_string(),
- &FunctionArgumentsGetter,
- &FunctionArgumentsSetter,
- attributes);
+ return MakeAccessor(isolate, isolate->factory()->arguments_string(),
+ &FunctionArgumentsGetter, nullptr, attributes);
}
@@ -1363,22 +1146,10 @@ void Accessors::FunctionCallerGetter(
}
-void Accessors::FunctionCallerSetter(
- v8::Local<v8::Name> name,
- v8::Local<v8::Value> val,
- const v8::PropertyCallbackInfo<void>& info) {
- // Function caller is non writable, non configurable.
- UNREACHABLE();
-}
-
-
Handle<AccessorInfo> Accessors::FunctionCallerInfo(
Isolate* isolate, PropertyAttributes attributes) {
- return MakeAccessor(isolate,
- isolate->factory()->caller_string(),
- &FunctionCallerGetter,
- &FunctionCallerSetter,
- attributes);
+ return MakeAccessor(isolate, isolate->factory()->caller_string(),
+ &FunctionCallerGetter, nullptr, attributes);
}
@@ -1386,9 +1157,8 @@ Handle<AccessorInfo> Accessors::FunctionCallerInfo(
// Accessors::MakeModuleExport
//
-static void ModuleGetExport(
- v8::Local<v8::String> property,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
+static void ModuleGetExport(v8::Local<v8::Name> property,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
Context* context = Context::cast(instance->context());
DCHECK(context->IsModuleContext());
@@ -1397,7 +1167,7 @@ static void ModuleGetExport(
->Int32Value(info.GetIsolate()->GetCurrentContext())
.FromMaybe(-1);
if (slot < 0 || slot >= context->length()) {
- Handle<String> name = v8::Utils::OpenHandle(*property);
+ Handle<Name> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
MessageTemplate::kNotDefined, name);
@@ -1406,7 +1176,7 @@ static void ModuleGetExport(
}
Object* value = context->get(slot);
if (value->IsTheHole()) {
- Handle<String> name = v8::Utils::OpenHandle(*property);
+ Handle<Name> name = v8::Utils::OpenHandle(*property);
Handle<Object> exception = isolate->factory()->NewReferenceError(
MessageTemplate::kNotDefined, name);
@@ -1417,33 +1187,15 @@ static void ModuleGetExport(
}
-static void ModuleSetExport(
- v8::Local<v8::String> property,
- v8::Local<v8::Value> value,
- const v8::PropertyCallbackInfo<v8::Value>& info) {
- JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
- Context* context = Context::cast(instance->context());
- DCHECK(context->IsModuleContext());
- Isolate* isolate = instance->GetIsolate();
- int slot = info.Data()
- ->Int32Value(info.GetIsolate()->GetCurrentContext())
- .FromMaybe(-1);
- if (slot < 0 || slot >= context->length()) {
- Handle<String> name = v8::Utils::OpenHandle(*property);
- Handle<Object> exception = isolate->factory()->NewReferenceError(
- MessageTemplate::kNotDefined, name);
- isolate->ScheduleThrow(*exception);
- return;
- }
- Object* old_value = context->get(slot);
- if (old_value->IsTheHole()) {
- Handle<String> name = v8::Utils::OpenHandle(*property);
- Handle<Object> exception = isolate->factory()->NewReferenceError(
- MessageTemplate::kNotDefined, name);
- isolate->ScheduleThrow(*exception);
- return;
- }
- context->set(slot, *v8::Utils::OpenHandle(*value));
+static void ModuleSetExport(v8::Local<v8::Name> property,
+ v8::Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<void>& info) {
+ if (!info.ShouldThrowOnError()) return;
+ Handle<Name> name = v8::Utils::OpenHandle(*property);
+ Isolate* isolate = name->GetIsolate();
+ Handle<Object> exception =
+ isolate->factory()->NewTypeError(MessageTemplate::kNotDefined, name);
+ isolate->ScheduleThrow(*exception);
}
@@ -1452,17 +1204,9 @@ Handle<AccessorInfo> Accessors::MakeModuleExport(
int index,
PropertyAttributes attributes) {
Isolate* isolate = name->GetIsolate();
- Factory* factory = isolate->factory();
- Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
- info->set_property_attributes(attributes);
- info->set_all_can_read(true);
- info->set_all_can_write(true);
- info->set_name(*name);
+ Handle<AccessorInfo> info = MakeAccessor(isolate, name, &ModuleGetExport,
+ &ModuleSetExport, attributes);
info->set_data(Smi::FromInt(index));
- Handle<Object> getter = v8::FromCData(isolate, &ModuleGetExport);
- Handle<Object> setter = v8::FromCData(isolate, &ModuleSetExport);
- info->set_getter(*getter);
- if (!(attributes & ReadOnly)) info->set_setter(*setter);
return info;
}
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 6c1765c404..3fe550c25a 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -15,7 +15,7 @@ namespace v8 {
namespace internal {
// Forward declarations.
-class ExecutableAccessorInfo;
+class AccessorInfo;
// The list of accessor descriptors. This is a second-order macro
// taking a macro to be applied to all accessor descriptor names.
@@ -44,6 +44,12 @@ class ExecutableAccessorInfo;
V(ScriptIsEmbedderDebugScript) \
V(StringLength)
+#define ACCESSOR_SETTER_LIST(V) \
+ V(ReconfigureToDataProperty) \
+ V(ObservedReconfigureToDataProperty) \
+ V(ArrayLengthSetter) \
+ V(FunctionPrototypeSetter)
+
// Accessors contains all predefined proxy accessors.
class Accessors : public AllStatic {
@@ -53,16 +59,18 @@ class Accessors : public AllStatic {
static void name##Getter( \
v8::Local<v8::Name> name, \
const v8::PropertyCallbackInfo<v8::Value>& info); \
- static void name##Setter( \
- v8::Local<v8::Name> name, \
- v8::Local<v8::Value> value, \
- const v8::PropertyCallbackInfo<void>& info); \
static Handle<AccessorInfo> name##Info( \
Isolate* isolate, \
PropertyAttributes attributes);
ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
#undef ACCESSOR_INFO_DECLARATION
+#define ACCESSOR_SETTER_DECLARATION(name) \
+ static void name(v8::Local<v8::Name> name, v8::Local<v8::Value> value, \
+ const v8::PropertyCallbackInfo<void>& info);
+ ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
+#undef ACCESSOR_SETTER_DECLARATION
+
enum DescriptorId {
#define ACCESSOR_INFO_DECLARATION(name) \
k##name##Getter, \
@@ -75,7 +83,7 @@ class Accessors : public AllStatic {
// Accessor functions called directly from the runtime system.
MUST_USE_RESULT static MaybeHandle<Object> FunctionSetPrototype(
Handle<JSFunction> object, Handle<Object> value);
- static Handle<Object> FunctionGetArguments(Handle<JSFunction> object);
+ static Handle<JSObject> FunctionGetArguments(Handle<JSFunction> object);
// Accessor infos.
static Handle<AccessorInfo> MakeModuleExport(
@@ -100,10 +108,6 @@ class Accessors : public AllStatic {
AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter,
PropertyAttributes attributes);
-
- static Handle<ExecutableAccessorInfo> CloneAccessor(
- Isolate* isolate,
- Handle<ExecutableAccessorInfo> accessor);
};
} // namespace internal
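ACCESSOR_SETTER_LIST follows the same second-order macro pattern as the
existing ACCESSOR_INFO_LIST: the list applies a caller-supplied macro to each
name, so declarations and definitions stay in sync from one source of truth.
A self-contained toy version of the technique (names are illustrative):

    #include <cstdio>

    #define SETTER_LIST(V) \
      V(Alpha)             \
      V(Beta)

    // Expand once to declare...
    #define DECLARE(name) void name##Setter();
    SETTER_LIST(DECLARE)
    #undef DECLARE

    // ...and once more to define.
    #define DEFINE(name) \
      void name##Setter() { std::puts(#name "Setter called"); }
    SETTER_LIST(DEFINE)
    #undef DEFINE

    int main() {
      AlphaSetter();
      BetaSetter();
    }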
diff --git a/deps/v8/src/address-map.cc b/deps/v8/src/address-map.cc
index 681661af29..86558e094a 100644
--- a/deps/v8/src/address-map.cc
+++ b/deps/v8/src/address-map.cc
@@ -17,10 +17,10 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
Object* root = isolate->heap()->root(root_index);
+ if (!root->IsHeapObject()) continue;
// Omit root entries that can be written after initialization. They must
// not be referenced through the root list in the snapshot.
- if (root->IsHeapObject() &&
- isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
+ if (isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
HeapObject* heap_object = HeapObject::cast(root);
HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
if (entry != NULL) {
@@ -29,6 +29,11 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
} else {
SetValue(LookupEntry(map_, heap_object, true), i);
}
+ } else {
+ // Immortal immovable root objects are constant and allocated on the first
+ // page of old space. Non-constant roots cannot be immortal immovable. The
+      // root index map contains all immortal immovable root objects.
+ CHECK(!Heap::RootIsImmortalImmovable(root_index));
}
}
isolate->set_root_index_map(map_);
diff --git a/deps/v8/src/api-experimental.cc b/deps/v8/src/api-experimental.cc
index 2b49e9723a..98d62e33a2 100644
--- a/deps/v8/src/api-experimental.cc
+++ b/deps/v8/src/api-experimental.cc
@@ -122,5 +122,10 @@ void FastAccessorBuilder::CheckNotZeroOrJump(ValueId value_id,
FromApi(this)->CheckNotZeroOrJump(value_id, label_id);
}
+FastAccessorBuilder::ValueId FastAccessorBuilder::Call(
+ v8::FunctionCallback callback, ValueId value_id) {
+ return FromApi(this)->Call(callback, value_id);
+}
+
} // namespace experimental
} // namespace v8
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index bc71e3ef90..3be2df0bb6 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -16,8 +16,8 @@ namespace internal {
namespace {
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
- Handle<ObjectTemplateInfo> data);
-
+ Handle<ObjectTemplateInfo> data,
+ bool is_hidden_prototype);
MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> data,
@@ -30,32 +30,36 @@ MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
return InstantiateFunction(isolate,
Handle<FunctionTemplateInfo>::cast(data), name);
} else if (data->IsObjectTemplateInfo()) {
- return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data));
+ return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data),
+ false);
} else {
return data;
}
}
-
-MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
- Handle<JSObject> object,
- Handle<Name> name,
- Handle<Object> getter,
- Handle<Object> setter,
- PropertyAttributes attributes) {
- if (!getter->IsUndefined()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, getter,
- InstantiateFunction(isolate,
- Handle<FunctionTemplateInfo>::cast(getter)),
- Object);
- }
- if (!setter->IsUndefined()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, setter,
- InstantiateFunction(isolate,
- Handle<FunctionTemplateInfo>::cast(setter)),
- Object);
+MaybeHandle<Object> DefineAccessorProperty(
+ Isolate* isolate, Handle<JSObject> object, Handle<Name> name,
+ Handle<Object> getter, Handle<Object> setter, PropertyAttributes attributes,
+ bool force_instantiate) {
+ DCHECK(!getter->IsFunctionTemplateInfo() ||
+ !FunctionTemplateInfo::cast(*getter)->do_not_cache());
+ DCHECK(!setter->IsFunctionTemplateInfo() ||
+ !FunctionTemplateInfo::cast(*setter)->do_not_cache());
+ if (force_instantiate) {
+ if (getter->IsFunctionTemplateInfo()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, getter,
+ InstantiateFunction(isolate,
+ Handle<FunctionTemplateInfo>::cast(getter)),
+ Object);
+ }
+ if (setter->IsFunctionTemplateInfo()) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, setter,
+ InstantiateFunction(isolate,
+ Handle<FunctionTemplateInfo>::cast(setter)),
+ Object);
+ }
}
RETURN_ON_EXCEPTION(isolate, JSObject::DefineAccessor(object, name, getter,
setter, attributes),
@@ -148,17 +152,78 @@ Object* GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) {
return nullptr;
}
+// Returns parent function template or null.
+FunctionTemplateInfo* GetParent(FunctionTemplateInfo* data) {
+ Object* parent = data->parent_template();
+ return parent->IsUndefined() ? nullptr : FunctionTemplateInfo::cast(parent);
+}
+
+// Starting from the given object template's constructor, walk up the
+// inheritance chain until a function template that has an instance template
+// is found.
+ObjectTemplateInfo* GetParent(ObjectTemplateInfo* data) {
+ Object* maybe_ctor = data->constructor();
+ if (maybe_ctor->IsUndefined()) return nullptr;
+ FunctionTemplateInfo* ctor = FunctionTemplateInfo::cast(maybe_ctor);
+ while (true) {
+ ctor = GetParent(ctor);
+ if (ctor == nullptr) return nullptr;
+ Object* maybe_obj = ctor->instance_template();
+ if (!maybe_obj->IsUndefined()) return ObjectTemplateInfo::cast(maybe_obj);
+ }
+}
+template <typename TemplateInfoT>
MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
- Handle<TemplateInfo> data) {
+ Handle<TemplateInfoT> data,
+ bool is_hidden_prototype) {
+ HandleScope scope(isolate);
+ // Disable access checks while instantiating the object.
+ AccessCheckDisableScope access_check_scope(isolate, obj);
+
+ // Walk the inheritance chain and copy all accessors to the current object.
+ int max_number_of_properties = 0;
+ TemplateInfoT* info = *data;
+ while (info != nullptr) {
+ if (!info->property_accessors()->IsUndefined()) {
+ Object* props = info->property_accessors();
+ if (!props->IsUndefined()) {
+ Handle<Object> props_handle(props, isolate);
+ NeanderArray props_array(props_handle);
+ max_number_of_properties += props_array.length();
+ }
+ }
+ info = GetParent(info);
+ }
+
+ if (max_number_of_properties > 0) {
+ int valid_descriptors = 0;
+ // Use a temporary FixedArray to accumulate unique accessors.
+ Handle<FixedArray> array =
+ isolate->factory()->NewFixedArray(max_number_of_properties);
+
+ info = *data;
+ while (info != nullptr) {
+ // Accumulate accessors.
+ if (!info->property_accessors()->IsUndefined()) {
+ Handle<Object> props(info->property_accessors(), isolate);
+ valid_descriptors =
+ AccessorInfo::AppendUnique(props, array, valid_descriptors);
+ }
+ info = GetParent(info);
+ }
+
+ // Install accumulated accessors.
+ for (int i = 0; i < valid_descriptors; i++) {
+ Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)));
+ JSObject::SetAccessor(obj, accessor).Assert();
+ }
+ }
+
auto property_list = handle(data->property_list(), isolate);
if (property_list->IsUndefined()) return obj;
// TODO(dcarney): just use a FixedArray here.
NeanderArray properties(property_list);
if (properties.length() == 0) return obj;
- HandleScope scope(isolate);
- // Disable access checks while instantiating the object.
- AccessCheckDisableScope access_check_scope(isolate, obj);
int i = 0;
for (int c = 0; c < data->number_of_properties(); c++) {
@@ -171,17 +236,16 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
if (kind == kData) {
auto prop_data = handle(properties.get(i++), isolate);
-
RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
prop_data, attributes),
JSObject);
} else {
auto getter = handle(properties.get(i++), isolate);
auto setter = handle(properties.get(i++), isolate);
- RETURN_ON_EXCEPTION(isolate,
- DefineAccessorProperty(isolate, obj, name, getter,
- setter, attributes),
- JSObject);
+ RETURN_ON_EXCEPTION(
+ isolate, DefineAccessorProperty(isolate, obj, name, getter, setter,
+ attributes, is_hidden_prototype),
+ JSObject);
}
} else {
// Intrinsic data property --- Get appropriate value from the current
@@ -202,14 +266,28 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
return obj;
}
+void CacheTemplateInstantiation(Isolate* isolate, Handle<Smi> serial_number,
+ Handle<JSObject> object) {
+ auto cache = isolate->template_instantiations_cache();
+ auto new_cache = ObjectHashTable::Put(cache, serial_number, object);
+ isolate->native_context()->set_template_instantiations_cache(*new_cache);
+}
+
+void UncacheTemplateInstantiation(Isolate* isolate, Handle<Smi> serial_number) {
+ auto cache = isolate->template_instantiations_cache();
+ bool was_present = false;
+ auto new_cache = ObjectHashTable::Remove(cache, serial_number, &was_present);
+ DCHECK(was_present);
+ isolate->native_context()->set_template_instantiations_cache(*new_cache);
+}
MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
- Handle<ObjectTemplateInfo> data) {
+ Handle<ObjectTemplateInfo> info,
+ bool is_hidden_prototype) {
// Enter a new scope. Recursion could otherwise create a lot of handles.
HandleScope scope(isolate);
// Fast path.
Handle<JSObject> result;
- auto info = Handle<ObjectTemplateInfo>::cast(data);
auto constructor = handle(info->constructor(), isolate);
Handle<JSFunction> cons;
if (constructor->IsUndefined()) {
@@ -219,29 +297,32 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, cons, InstantiateFunction(isolate, cons_templ), JSFunction);
}
+ auto serial_number = handle(Smi::cast(info->serial_number()), isolate);
+ if (serial_number->value()) {
+ // Probe cache.
+ auto cache = isolate->template_instantiations_cache();
+ Object* boilerplate = cache->Lookup(serial_number);
+ if (boilerplate->IsJSObject()) {
+ result = handle(JSObject::cast(boilerplate), isolate);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, JSObject::DeepCopyApiBoilerplate(result), JSObject);
+ return scope.CloseAndEscape(result);
+ }
+ }
auto object = isolate->factory()->NewJSObject(cons);
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, ConfigureInstance(isolate, object, info), JSFunction);
+ isolate, result,
+ ConfigureInstance(isolate, object, info, is_hidden_prototype),
+ JSFunction);
// TODO(dcarney): is this necessary?
JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
- return scope.CloseAndEscape(result);
-}
-
-
-void CacheFunction(Isolate* isolate, Handle<Smi> serial_number,
- Handle<JSFunction> function) {
- auto cache = isolate->function_cache();
- auto new_cache = ObjectHashTable::Put(cache, serial_number, function);
- isolate->native_context()->set_function_cache(*new_cache);
-}
-
-void UncacheFunction(Isolate* isolate, Handle<Smi> serial_number) {
- auto cache = isolate->function_cache();
- bool was_present = false;
- auto new_cache = ObjectHashTable::Remove(cache, serial_number, &was_present);
- DCHECK(was_present);
- isolate->native_context()->set_function_cache(*new_cache);
+ if (serial_number->value()) {
+ CacheTemplateInstantiation(isolate, serial_number, result);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, JSObject::DeepCopyApiBoilerplate(result), JSObject);
+ }
+ return scope.CloseAndEscape(result);
}
@@ -249,9 +330,9 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> data,
Handle<Name> name) {
auto serial_number = handle(Smi::cast(data->serial_number()), isolate);
- // Probe cache.
- if (!data->do_not_cache()) {
- auto cache = isolate->function_cache();
+ if (serial_number->value()) {
+ // Probe cache.
+ auto cache = isolate->template_instantiations_cache();
Object* element = cache->Lookup(serial_number);
if (element->IsJSFunction()) {
return handle(JSFunction::cast(element), isolate);
@@ -268,7 +349,8 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, prototype,
InstantiateObject(isolate,
- Handle<ObjectTemplateInfo>::cast(prototype_templ)),
+ Handle<ObjectTemplateInfo>::cast(prototype_templ),
+ data->hidden_prototype()),
JSFunction);
}
auto parent = handle(data->parent_template(), isolate);
@@ -296,15 +378,16 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
if (!name.is_null() && name->IsString()) {
function->shared()->set_name(*name);
}
- if (!data->do_not_cache()) {
+ if (serial_number->value()) {
// Cache the function.
- CacheFunction(isolate, serial_number, function);
+ CacheTemplateInstantiation(isolate, serial_number, function);
}
- auto result = ConfigureInstance(isolate, function, data);
+ auto result =
+ ConfigureInstance(isolate, function, data, data->hidden_prototype());
if (result.is_null()) {
// Uncache on error.
- if (!data->do_not_cache()) {
- UncacheFunction(isolate, serial_number);
+ if (serial_number->value()) {
+ UncacheTemplateInstantiation(isolate, serial_number);
}
return MaybeHandle<JSFunction>();
}
@@ -364,23 +447,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateObject(
Handle<ObjectTemplateInfo> data) {
Isolate* isolate = data->GetIsolate();
InvokeScope invoke_scope(isolate);
- return ::v8::internal::InstantiateObject(isolate, data);
-}
-
-
-MaybeHandle<FunctionTemplateInfo> ApiNatives::ConfigureInstance(
- Isolate* isolate, Handle<FunctionTemplateInfo> desc,
- Handle<JSObject> instance) {
- // Configure the instance by adding the properties specified by the
- // instance template.
- if (desc->instance_template()->IsUndefined()) return desc;
- InvokeScope invoke_scope(isolate);
- Handle<ObjectTemplateInfo> instance_template(
- ObjectTemplateInfo::cast(desc->instance_template()), isolate);
- RETURN_ON_EXCEPTION(isolate, ::v8::internal::ConfigureInstance(
- isolate, instance, instance_template),
- FunctionTemplateInfo);
- return desc;
+ return ::v8::internal::InstantiateObject(isolate, data, false);
}
@@ -527,11 +594,6 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
map->set_is_undetectable();
}
- // Mark as hidden for the __proto__ accessor if needed.
- if (obj->hidden_prototype()) {
- map->set_is_hidden_prototype();
- }
-
// Mark as needs_access_check if needed.
if (obj->needs_access_check()) {
map->set_is_access_check_needed(true);
@@ -548,73 +610,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// Mark instance as callable in the map.
if (!obj->instance_call_handler()->IsUndefined()) {
map->set_is_callable();
- map->set_is_constructor();
- }
-
- // Recursively copy parent instance templates' accessors,
- // 'data' may be modified.
- int max_number_of_additional_properties = 0;
- int max_number_of_static_properties = 0;
- FunctionTemplateInfo* info = *obj;
- while (true) {
- if (!info->instance_template()->IsUndefined()) {
- Object* props = ObjectTemplateInfo::cast(info->instance_template())
- ->property_accessors();
- if (!props->IsUndefined()) {
- Handle<Object> props_handle(props, isolate);
- NeanderArray props_array(props_handle);
- max_number_of_additional_properties += props_array.length();
- }
- }
- if (!info->property_accessors()->IsUndefined()) {
- Object* props = info->property_accessors();
- if (!props->IsUndefined()) {
- Handle<Object> props_handle(props, isolate);
- NeanderArray props_array(props_handle);
- max_number_of_static_properties += props_array.length();
- }
- }
- Object* parent = info->parent_template();
- if (parent->IsUndefined()) break;
- info = FunctionTemplateInfo::cast(parent);
- }
-
- Map::EnsureDescriptorSlack(map, max_number_of_additional_properties);
-
- // Use a temporary FixedArray to acculumate static accessors
- int valid_descriptors = 0;
- Handle<FixedArray> array;
- if (max_number_of_static_properties > 0) {
- array = isolate->factory()->NewFixedArray(max_number_of_static_properties);
- }
-
- while (true) {
- // Install instance descriptors
- if (!obj->instance_template()->IsUndefined()) {
- Handle<ObjectTemplateInfo> instance = Handle<ObjectTemplateInfo>(
- ObjectTemplateInfo::cast(obj->instance_template()), isolate);
- Handle<Object> props =
- Handle<Object>(instance->property_accessors(), isolate);
- if (!props->IsUndefined()) {
- Map::AppendCallbackDescriptors(map, props);
- }
- }
- // Accumulate static accessors
- if (!obj->property_accessors()->IsUndefined()) {
- Handle<Object> props = Handle<Object>(obj->property_accessors(), isolate);
- valid_descriptors =
- AccessorInfo::AppendUnique(props, array, valid_descriptors);
- }
- // Climb parent chain
- Handle<Object> parent = Handle<Object>(obj->parent_template(), isolate);
- if (parent->IsUndefined()) break;
- obj = Handle<FunctionTemplateInfo>::cast(parent);
- }
-
- // Install accumulated static accessors
- for (int i = 0; i < valid_descriptors; i++) {
- Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)));
- JSObject::SetAccessor(result, accessor).Assert();
+ map->set_is_constructor(true);
}
DCHECK(result->shared()->IsApiFunction());
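The api-natives.cc hunks above replace the old per-function cache with a single template_instantiations_cache keyed by serial number, so object templates (not only function templates) now get cached boilerplates that are deep-copied on each instantiation. A hedged embedder-side sketch of the pattern this enables, assuming an initialized isolate and entered context (both names are placeholders, and the caching itself is internal to V8):

    // Sketch only: with instantiation caching, repeated NewInstance() calls on
    // the same template can reuse a cached boilerplate via
    // JSObject::DeepCopyApiBoilerplate instead of re-running ConfigureInstance.
    v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
    templ->Set(v8::String::NewFromUtf8(isolate, "answer"),
               v8::Number::New(isolate, 42));
    v8::Local<v8::Object> first = templ->NewInstance(context).ToLocalChecked();
    v8::Local<v8::Object> second = templ->NewInstance(context).ToLocalChecked();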
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h
index fcca4a5a17..91f0b168d9 100644
--- a/deps/v8/src/api-natives.h
+++ b/deps/v8/src/api-natives.h
@@ -25,10 +25,6 @@ class ApiNatives {
MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateObject(
Handle<ObjectTemplateInfo> data);
- MUST_USE_RESULT static MaybeHandle<FunctionTemplateInfo> ConfigureInstance(
- Isolate* isolate, Handle<FunctionTemplateInfo> instance,
- Handle<JSObject> data);
-
enum ApiInstanceType {
JavaScriptObjectType,
GlobalObjectType,
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index c787dd5f1f..a71dcfaec3 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -15,6 +15,7 @@
#include "include/v8-experimental.h"
#include "include/v8-profiler.h"
#include "include/v8-testing.h"
+#include "src/accessors.h"
#include "src/api-experimental.h"
#include "src/api-natives.h"
#include "src/assert-scope.h"
@@ -38,8 +39,8 @@
#include "src/global-handles.h"
#include "src/icu_util.h"
#include "src/isolate-inl.h"
+#include "src/json-parser.h"
#include "src/messages.h"
-#include "src/parsing/json-parser.h"
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/pending-compilation-error-handler.h"
@@ -58,6 +59,7 @@
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "src/startup-data-util.h"
+#include "src/tracing/trace-event.h"
#include "src/unicode-inl.h"
#include "src/v8.h"
#include "src/v8threads.h"
@@ -167,6 +169,7 @@ class CallDepthScope {
isolate_->IncrementJsCallsFromApiCounter();
isolate_->handle_scope_implementer()->IncrementCallDepth();
if (!context_.IsEmpty()) context_->Enter();
+ if (do_callback_) isolate_->FireBeforeCallEnteredCallback();
}
~CallDepthScope() {
if (!context_.IsEmpty()) context_->Exit();
@@ -969,6 +972,9 @@ static void InitializeFunctionTemplate(
info->set_flag(0);
}
+static Local<ObjectTemplate> ObjectTemplateNew(
+ i::Isolate* isolate, v8::Local<FunctionTemplate> constructor,
+ bool do_not_cache);
Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -976,8 +982,9 @@ Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(),
i_isolate);
if (result->IsUndefined()) {
- v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(i_isolate);
- result = Utils::OpenHandle(*ObjectTemplate::New(isolate));
+ // Do not cache prototype objects.
+ result = Utils::OpenHandle(
+ *ObjectTemplateNew(i_isolate, Local<FunctionTemplate>(), true));
Utils::OpenHandle(this)->set_prototype_template(*result);
}
return ToApiHandle<ObjectTemplate>(result);
@@ -1119,21 +1126,23 @@ static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
return obj;
}
-
template <typename Getter, typename Setter>
static i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::Local<Name> name, Getter getter, Setter setter, v8::Local<Value> data,
v8::AccessControl settings, v8::PropertyAttribute attributes,
- v8::Local<AccessorSignature> signature) {
+ v8::Local<AccessorSignature> signature, bool is_special_data_property) {
i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate();
- i::Handle<i::ExecutableAccessorInfo> obj =
- isolate->factory()->NewExecutableAccessorInfo();
+ i::Handle<i::AccessorInfo> obj = isolate->factory()->NewAccessorInfo();
SET_FIELD_WRAPPED(obj, set_getter, getter);
+ if (is_special_data_property && setter == nullptr) {
+ setter = reinterpret_cast<Setter>(&i::Accessors::ReconfigureToDataProperty);
+ }
SET_FIELD_WRAPPED(obj, set_setter, setter);
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
obj->set_data(*Utils::OpenHandle(*data));
+ obj->set_is_special_data_property(is_special_data_property);
return SetAccessorInfoProperties(obj, name, settings, attributes, signature);
}
@@ -1225,9 +1234,9 @@ Local<ObjectTemplate> ObjectTemplate::New() {
return New(i::Isolate::Current(), Local<FunctionTemplate>());
}
-
-Local<ObjectTemplate> ObjectTemplate::New(
- i::Isolate* isolate, v8::Local<FunctionTemplate> constructor) {
+static Local<ObjectTemplate> ObjectTemplateNew(
+ i::Isolate* isolate, v8::Local<FunctionTemplate> constructor,
+ bool do_not_cache) {
// Changes to the environment cannot be captured in the snapshot. Expect no
// object templates when the isolate is created for serialization.
DCHECK(!isolate->serializer_enabled());
@@ -1238,12 +1247,22 @@ Local<ObjectTemplate> ObjectTemplate::New(
i::Handle<i::ObjectTemplateInfo> obj =
i::Handle<i::ObjectTemplateInfo>::cast(struct_obj);
InitializeTemplate(obj, Consts::OBJECT_TEMPLATE);
+ int next_serial_number = 0;
+ if (!do_not_cache) {
+ next_serial_number = isolate->next_serial_number() + 1;
+ isolate->set_next_serial_number(next_serial_number);
+ }
+ obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (!constructor.IsEmpty())
obj->set_constructor(*Utils::OpenHandle(*constructor));
obj->set_internal_field_count(i::Smi::FromInt(0));
return Utils::ToLocal(obj);
}
+Local<ObjectTemplate> ObjectTemplate::New(
+ i::Isolate* isolate, v8::Local<FunctionTemplate> constructor) {
+ return ObjectTemplateNew(isolate, constructor, false);
+}
// Ensure that the object template has a constructor. If no
// constructor is available we create one.
@@ -1264,39 +1283,20 @@ static i::Handle<i::FunctionTemplateInfo> EnsureConstructor(
}
-static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
- i::Isolate* isolate,
- Template* template_obj) {
- return Utils::OpenHandle(template_obj);
-}
-
-
-// TODO(dcarney): remove this with ObjectTemplate::SetAccessor
-static inline i::Handle<i::TemplateInfo> GetTemplateInfo(
- i::Isolate* isolate,
- ObjectTemplate* object_template) {
- EnsureConstructor(isolate, object_template);
- return Utils::OpenHandle(object_template);
-}
-
-
-template<typename Getter, typename Setter, typename Data, typename Template>
-static bool TemplateSetAccessor(
- Template* template_obj,
- v8::Local<Name> name,
- Getter getter,
- Setter setter,
- Data data,
- AccessControl settings,
- PropertyAttribute attribute,
- v8::Local<AccessorSignature> signature) {
- auto isolate = Utils::OpenHandle(template_obj)->GetIsolate();
+template <typename Getter, typename Setter, typename Data, typename Template>
+static bool TemplateSetAccessor(Template* template_obj, v8::Local<Name> name,
+ Getter getter, Setter setter, Data data,
+ AccessControl settings,
+ PropertyAttribute attribute,
+ v8::Local<AccessorSignature> signature,
+ bool is_special_data_property) {
+ auto info = Utils::OpenHandle(template_obj);
+ auto isolate = info->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
auto obj = MakeAccessorInfo(name, getter, setter, data, settings, attribute,
- signature);
+ signature, is_special_data_property);
if (obj.is_null()) return false;
- auto info = GetTemplateInfo(isolate, template_obj);
i::ApiNatives::AddNativeDataProperty(isolate, info, obj);
return true;
}
@@ -1309,8 +1309,8 @@ void Template::SetNativeDataProperty(v8::Local<String> name,
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature,
AccessControl settings) {
- TemplateSetAccessor(
- this, name, getter, setter, data, settings, attribute, signature);
+ TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
+ signature, true);
}
@@ -1321,8 +1321,8 @@ void Template::SetNativeDataProperty(v8::Local<Name> name,
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature,
AccessControl settings) {
- TemplateSetAccessor(
- this, name, getter, setter, data, settings, attribute, signature);
+ TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
+ signature, true);
}
@@ -1344,8 +1344,8 @@ void ObjectTemplate::SetAccessor(v8::Local<String> name,
v8::Local<Value> data, AccessControl settings,
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature) {
- TemplateSetAccessor(
- this, name, getter, setter, data, settings, attribute, signature);
+ TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
+ signature, i::FLAG_disable_old_api_accessors);
}
@@ -1355,8 +1355,8 @@ void ObjectTemplate::SetAccessor(v8::Local<Name> name,
v8::Local<Value> data, AccessControl settings,
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature) {
- TemplateSetAccessor(
- this, name, getter, setter, data, settings, attribute, signature);
+ TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
+ signature, i::FLAG_disable_old_api_accessors);
}
@@ -1452,6 +1452,10 @@ void ObjectTemplate::SetAccessCheckCallback(AccessCheckCallback callback,
cons->set_needs_access_check(true);
}
+void ObjectTemplate::SetAccessCheckCallback(
+ DeprecatedAccessCheckCallback callback, Local<Value> data) {
+ SetAccessCheckCallback(reinterpret_cast<AccessCheckCallback>(callback), data);
+}
void ObjectTemplate::SetAccessCheckCallbacks(
NamedSecurityCallback named_callback,
@@ -1602,32 +1606,7 @@ Local<Script> UnboundScript::BindToCurrentContext() {
function_info(i::SharedFunctionInfo::cast(*obj), obj->GetIsolate());
i::Isolate* isolate = obj->GetIsolate();
- i::ScopeInfo* scope_info = function_info->scope_info();
i::Handle<i::JSReceiver> global(isolate->native_context()->global_object());
- for (int i = 0; i < scope_info->StrongModeFreeVariableCount(); ++i) {
- i::Handle<i::String> name_string(scope_info->StrongModeFreeVariableName(i));
- i::ScriptContextTable::LookupResult result;
- i::Handle<i::ScriptContextTable> script_context_table(
- isolate->native_context()->script_context_table());
- if (!i::ScriptContextTable::Lookup(script_context_table, name_string,
- &result)) {
- i::Handle<i::Name> name(scope_info->StrongModeFreeVariableName(i));
- Maybe<bool> has = i::JSReceiver::HasProperty(global, name);
- if (has.IsJust() && !has.FromJust()) {
- i::PendingCompilationErrorHandler pending_error_handler_;
- pending_error_handler_.ReportMessageAt(
- scope_info->StrongModeFreeVariableStartPosition(i),
- scope_info->StrongModeFreeVariableEndPosition(i),
- i::MessageTemplate::kStrongUnboundGlobal, name_string,
- i::kReferenceError);
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- pending_error_handler_.ThrowPendingError(isolate, script);
- isolate->ReportPendingMessages();
- isolate->OptionalRescheduleException(true);
- return Local<Script>();
- }
- }
- }
i::Handle<i::JSFunction> function =
obj->GetIsolate()->factory()->NewFunctionFromSharedFunctionInfo(
function_info, isolate->native_context());
@@ -1708,6 +1687,7 @@ MaybeLocal<Value> Script::Run(Local<Context> context) {
PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Script::Run()", Value)
i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+ TRACE_EVENT0("v8", "V8.Execute");
auto fun = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(this));
i::Handle<i::Object> receiver(isolate->global_proxy(), isolate);
Local<Value> result;
@@ -1761,6 +1741,7 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
i::Handle<i::SharedFunctionInfo> result;
{
i::HistogramTimerScope total(isolate->counters()->compile_script(), true);
+ TRACE_EVENT0("v8", "V8.CompileScript");
i::Handle<i::Object> name_obj;
i::Handle<i::Object> source_map_url;
int line_offset = 0;
@@ -2930,11 +2911,11 @@ Local<String> Value::ToDetailString(Isolate* isolate) const {
MaybeLocal<Object> Value::ToObject(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
- if (obj->IsJSObject()) return ToApiHandle<Object>(obj);
+ if (obj->IsJSReceiver()) return ToApiHandle<Object>(obj);
PREPARE_FOR_EXECUTION(context, "ToObject", Object);
Local<Object> result;
has_pending_exception =
- !ToLocal<Object>(i::Execution::ToObject(isolate, obj), &result);
+ !ToLocal<Object>(i::Object::ToObject(isolate, obj), &result);
RETURN_ON_FAILED_EXECUTION(Object);
RETURN_ESCAPED(result);
}
@@ -3308,16 +3289,14 @@ double Value::NumberValue() const {
Maybe<int64_t> Value::IntegerValue(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
if (obj->IsNumber()) {
- num = obj;
- } else {
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "IntegerValue", int64_t);
- has_pending_exception = !i::Object::ToInteger(isolate, obj).ToHandle(&num);
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int64_t);
+ return Just(NumberToInt64(*obj));
}
- return Just(num->IsSmi() ? static_cast<int64_t>(i::Smi::cast(*num)->value())
- : static_cast<int64_t>(num->Number()));
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "IntegerValue", int64_t);
+ i::Handle<i::Object> num;
+ has_pending_exception = !i::Object::ToInteger(isolate, obj).ToHandle(&num);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int64_t);
+ return Just(NumberToInt64(*num));
}
@@ -3559,7 +3538,8 @@ static i::MaybeHandle<i::Object> DefineObjectProperty(
isolate, js_object, key, &success, i::LookupIterator::OWN);
if (!success) return i::MaybeHandle<i::Object>();
- return i::JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attrs);
+ return i::JSObject::DefineOwnPropertyIgnoreAttributes(
+ &it, value, attrs, i::JSObject::FORCE_FIELD);
}
@@ -3600,8 +3580,27 @@ bool v8::Object::ForceSet(v8::Local<Value> key, v8::Local<Value> value,
Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
Local<Value> value) {
- return DefineOwnProperty(context, Local<Name>(reinterpret_cast<Name*>(*key)),
- value, DontEnum);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetPrivate()", bool);
+ auto self = Utils::OpenHandle(this);
+ auto key_obj = Utils::OpenHandle(reinterpret_cast<Name*>(*key));
+ auto value_obj = Utils::OpenHandle(*value);
+ if (self->IsJSProxy()) {
+ i::PropertyDescriptor desc;
+ desc.set_writable(true);
+ desc.set_enumerable(false);
+ desc.set_configurable(true);
+ desc.set_value(value_obj);
+ return i::JSProxy::SetPrivateProperty(
+ isolate, i::Handle<i::JSProxy>::cast(self),
+ i::Handle<i::Symbol>::cast(key_obj), &desc, i::Object::DONT_THROW);
+ }
+ auto js_object = i::Handle<i::JSObject>::cast(self);
+ i::LookupIterator it(js_object, key_obj);
+ has_pending_exception = i::JSObject::DefineOwnPropertyIgnoreAttributes(
+ &it, value_obj, i::DONT_ENUM)
+ .is_null();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(true);
}
@@ -3753,8 +3752,7 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context) {
auto self = Utils::OpenHandle(this);
i::Handle<i::FixedArray> value;
has_pending_exception =
- !i::JSReceiver::GetKeys(self, i::JSReceiver::INCLUDE_PROTOS,
- i::ENUMERABLE_STRINGS)
+ !i::JSReceiver::GetKeys(self, i::INCLUDE_PROTOS, i::ENUMERABLE_STRINGS)
.ToHandle(&value);
RETURN_ON_FAILED_EXECUTION(Array);
// Because we use caching to speed up enumeration it is important
@@ -3776,9 +3774,9 @@ MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, "v8::Object::GetOwnPropertyNames()", Array);
auto self = Utils::OpenHandle(this);
i::Handle<i::FixedArray> value;
- has_pending_exception = !i::JSReceiver::GetKeys(self, i::JSReceiver::OWN_ONLY,
- i::ENUMERABLE_STRINGS)
- .ToHandle(&value);
+ has_pending_exception =
+ !i::JSReceiver::GetKeys(self, i::OWN_ONLY, i::ENUMERABLE_STRINGS)
+ .ToHandle(&value);
RETURN_ON_FAILED_EXECUTION(Array);
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
@@ -3921,7 +3919,7 @@ static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
i::Handle<i::JSObject>::cast(Utils::OpenHandle(self));
v8::Local<AccessorSignature> signature;
auto info = MakeAccessorInfo(name, getter, setter, data, settings, attributes,
- signature);
+ signature, i::FLAG_disable_old_api_accessors);
if (info.is_null()) return Nothing<bool>();
bool fast = obj->HasFastProperties();
i::Handle<i::Object> result;
@@ -4282,6 +4280,7 @@ MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Object::CallAsFunction()",
Value);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+ TRACE_EVENT0("v8", "V8.Execute");
auto self = Utils::OpenHandle(this);
auto recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
@@ -4308,6 +4307,7 @@ MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
PREPARE_FOR_EXECUTION_WITH_CALLBACK(context,
"v8::Object::CallAsConstructor()", Value);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+ TRACE_EVENT0("v8", "V8.Execute");
auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -4357,6 +4357,7 @@ MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Function::NewInstance()",
Object);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+ TRACE_EVENT0("v8", "V8.Execute");
auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
@@ -4380,6 +4381,7 @@ MaybeLocal<v8::Value> Function::Call(Local<Context> context,
v8::Local<v8::Value> argv[]) {
PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Function::Call()", Value);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
+ TRACE_EVENT0("v8", "V8.Execute");
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
@@ -7378,6 +7380,20 @@ void Isolate::SetEventLogger(LogEventCallback that) {
}
+void Isolate::AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback) {
+ if (callback == NULL) return;
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->AddBeforeCallEnteredCallback(callback);
+}
+
+
+void Isolate::RemoveBeforeCallEnteredCallback(
+ BeforeCallEnteredCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->RemoveBeforeCallEnteredCallback(callback);
+}
+
+
void Isolate::AddCallCompletedCallback(CallCompletedCallback callback) {
if (callback == NULL) return;
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -7391,6 +7407,19 @@ void Isolate::RemoveCallCompletedCallback(CallCompletedCallback callback) {
}
+void Isolate::AddCallCompletedCallback(
+ DeprecatedCallCompletedCallback callback) {
+ AddCallCompletedCallback(reinterpret_cast<CallCompletedCallback>(callback));
+}
+
+
+void Isolate::RemoveCallCompletedCallback(
+ DeprecatedCallCompletedCallback callback) {
+ RemoveCallCompletedCallback(
+ reinterpret_cast<CallCompletedCallback>(callback));
+}
+
+
void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
if (callback == NULL) return;
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -7483,6 +7512,7 @@ void Isolate::LowMemoryNotification() {
{
i::HistogramTimerScope idle_notification_scope(
isolate->counters()->gc_low_memory_notification());
+ TRACE_EVENT0("v8", "V8.GCLowMemoryNotification");
isolate->heap()->CollectAllAvailableGarbage("low memory notification");
}
}
@@ -8057,6 +8087,9 @@ void CpuProfiler::SetSamplingInterval(int us) {
base::TimeDelta::FromMicroseconds(us));
}
+void CpuProfiler::CollectSample() {
+ reinterpret_cast<i::CpuProfiler*>(this)->CollectSample();
+}
void CpuProfiler::StartProfiling(Local<String> title, bool record_samples) {
reinterpret_cast<i::CpuProfiler*>(this)->StartProfiling(
@@ -8285,6 +8318,23 @@ SnapshotObjectId HeapProfiler::GetHeapStats(OutputStream* stream,
}
+bool HeapProfiler::StartSamplingHeapProfiler(uint64_t sample_interval,
+ int stack_depth) {
+ return reinterpret_cast<i::HeapProfiler*>(this)
+ ->StartSamplingHeapProfiler(sample_interval, stack_depth);
+}
+
+
+void HeapProfiler::StopSamplingHeapProfiler() {
+ reinterpret_cast<i::HeapProfiler*>(this)->StopSamplingHeapProfiler();
+}
+
+
+AllocationProfile* HeapProfiler::GetAllocationProfile() {
+ return reinterpret_cast<i::HeapProfiler*>(this)->GetAllocationProfile();
+}
+
+
void HeapProfiler::DeleteAllHeapSnapshots() {
reinterpret_cast<i::HeapProfiler*>(this)->DeleteAllSnapshots();
}
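The api.cc additions above expose the new sampling heap profiler entry points whose signatures appear in the hunk (StartSamplingHeapProfiler, GetAllocationProfile, StopSamplingHeapProfiler). A minimal usage sketch, assuming an initialized isolate; the sample interval and stack depth are illustrative, and GetRootNode plus caller ownership of the profile are assumptions based on the v8-profiler.h declarations added by this commit:

    v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
    // Sample allocations roughly every 512 KiB, keeping stacks 16 frames deep.
    profiler->StartSamplingHeapProfiler(512 * 1024, 16);
    RunWorkload();  // placeholder for embedder code that allocates
    v8::AllocationProfile* profile = profiler->GetAllocationProfile();
    if (profile != nullptr) {
      // Walk the sampled allocation tree from its root, then release the
      // profile (assumed caller-owned here).
      v8::AllocationProfile::Node* root = profile->GetRootNode();
      (void)root;
      delete profile;
    }
    profiler->StopSamplingHeapProfiler();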
diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/arguments.cc
index a783357896..077991bbee 100644
--- a/deps/v8/src/arguments.cc
+++ b/deps/v8/src/arguments.cc
@@ -70,16 +70,14 @@ v8::Local<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) {
}
-#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
-void PropertyCallbackArguments::Call(Function f, \
- Arg1 arg1, \
- Arg2 arg2) { \
- Isolate* isolate = this->isolate(); \
- VMState<EXTERNAL> state(isolate); \
- ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
- PropertyCallbackInfo<ReturnValue> info(begin()); \
- f(arg1, arg2, info); \
-}
+#define WRITE_CALL_2_VOID(Function, ReturnValue, Arg1, Arg2) \
+ void PropertyCallbackArguments::Call(Function f, Arg1 arg1, Arg2 arg2) { \
+ Isolate* isolate = this->isolate(); \
+ VMState<EXTERNAL> state(isolate); \
+ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
+ PropertyCallbackInfo<ReturnValue> info(begin()); \
+ f(arg1, arg2, info); \
+ }
FOR_EACH_CALLBACK_TABLE_MAPPING_0(WRITE_CALL_0)
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index d11a8cd61e..35096774db 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -152,17 +152,19 @@ class PropertyCallbackArguments
static const int kReturnValueDefaultValueIndex =
T::kReturnValueDefaultValueIndex;
static const int kIsolateIndex = T::kIsolateIndex;
+ static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
- PropertyCallbackArguments(Isolate* isolate,
- Object* data,
- Object* self,
- JSObject* holder)
+ PropertyCallbackArguments(Isolate* isolate, Object* data, Object* self,
+ JSObject* holder, Object::ShouldThrow should_throw)
: Super(isolate) {
Object** values = this->begin();
values[T::kThisIndex] = self;
values[T::kHolderIndex] = holder;
values[T::kDataIndex] = data;
values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
+ values[T::kShouldThrowOnErrorIndex] =
+ Smi::FromInt(should_throw == Object::THROW_ON_ERROR ? 1 : 0);
+
// Here the hole is set as the default value.
// It cannot escape into js as it's removed in Call below.
values[T::kReturnValueDefaultValueIndex] =
@@ -218,17 +220,14 @@ class FunctionCallbackArguments
static const int kCalleeIndex = T::kCalleeIndex;
static const int kContextSaveIndex = T::kContextSaveIndex;
- FunctionCallbackArguments(internal::Isolate* isolate,
- internal::Object* data,
- internal::JSFunction* callee,
- internal::Object* holder,
- internal::Object** argv,
- int argc,
- bool is_construct_call)
- : Super(isolate),
- argv_(argv),
- argc_(argc),
- is_construct_call_(is_construct_call) {
+ FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data,
+ internal::HeapObject* callee,
+ internal::Object* holder, internal::Object** argv,
+ int argc, bool is_construct_call)
+ : Super(isolate),
+ argv_(argv),
+ argc_(argc),
+ is_construct_call_(is_construct_call) {
Object** values = begin();
values[T::kDataIndex] = data;
values[T::kCalleeIndex] = callee;
@@ -240,7 +239,8 @@ class FunctionCallbackArguments
values[T::kReturnValueDefaultValueIndex] =
isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
- DCHECK(values[T::kCalleeIndex]->IsJSFunction());
+ DCHECK(values[T::kCalleeIndex]->IsJSFunction() ||
+ values[T::kCalleeIndex]->IsFunctionTemplateInfo());
DCHECK(values[T::kHolderIndex]->IsHeapObject());
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
@@ -271,20 +271,23 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
#define CLOBBER_DOUBLE_REGISTERS()
#endif
-
-#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
-static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
-Type Name(int args_length, Object** args_object, Isolate* isolate) { \
- CLOBBER_DOUBLE_REGISTERS(); \
- Arguments args(args_length, args_object); \
- return __RT_impl_##Name(args, isolate); \
-} \
-static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
-
+#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
+ static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
+ Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+ CLOBBER_DOUBLE_REGISTERS(); \
+ RuntimeCallStats* stats = isolate->counters()->runtime_call_stats(); \
+ RuntimeCallTimerScope timer(isolate, &stats->Name); \
+ Arguments args(args_length, args_object); \
+ Type value = __RT_impl_##Name(args, isolate); \
+ return value; \
+ } \
+ static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
#define RUNTIME_FUNCTION(Name) RUNTIME_FUNCTION_RETURNS_TYPE(Object*, Name)
#define RUNTIME_FUNCTION_RETURN_PAIR(Name) \
RUNTIME_FUNCTION_RETURNS_TYPE(ObjectPair, Name)
+#define RUNTIME_FUNCTION_RETURN_TRIPLE(Name) \
+ RUNTIME_FUNCTION_RETURNS_TYPE(ObjectTriple, Name)
} // namespace internal
} // namespace v8
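The reworked RUNTIME_FUNCTION_RETURNS_TYPE macro above now opens a RuntimeCallTimerScope around every runtime entry point it generates. For orientation, a typical use of the macro looks like the following hypothetical runtime function: the braced body becomes __RT_impl_Runtime_Example, and the timed Runtime_Example wrapper is generated around it.

    RUNTIME_FUNCTION(Runtime_Example) {
      HandleScope scope(isolate);   // 'isolate' is a macro-provided parameter
      DCHECK_EQ(0, args.length());  // 'args' likewise comes from the macro
      return isolate->heap()->undefined_value();
    }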
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index c9602ea028..0de96428f3 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -138,8 +138,8 @@ void RelocInfo::set_target_object(Object* target,
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target));
}
}
@@ -197,10 +197,8 @@ void RelocInfo::set_target_cell(Cell* cell,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ cell);
}
}
@@ -263,23 +261,6 @@ void RelocInfo::WipeOut() {
}
-bool RelocInfo::IsPatchedReturnSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
- // A patched return sequence is:
- // ldr ip, [pc, #0]
- // blx ip
- return Assembler::IsLdrPcImmediateOffset(current_instr) &&
- Assembler::IsBlxReg(next_instr);
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index d2e3231bb8..b0fa462c9f 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -82,7 +82,7 @@ static unsigned CpuFeaturesImpliedByCompiler() {
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
- cache_line_size_ = 64;
+ dcache_line_size_ = 64;
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
@@ -137,7 +137,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.implementer() == base::CPU::ARM &&
(cpu.part() == base::CPU::ARM_CORTEX_A5 ||
cpu.part() == base::CPU::ARM_CORTEX_A9)) {
- cache_line_size_ = 32;
+ dcache_line_size_ = 32;
}
if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
@@ -1947,6 +1947,16 @@ void Assembler::uxtah(Register dst, Register src1, Register src2, int rotate,
}
+void Assembler::rbit(Register dst, Register src, Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.144.
+ // cond(31-28) | 011011111111(27-16) | Rd(15-12) | 11110011(11-4) | Rm(3-0)
+ DCHECK(IsEnabled(ARMv7));
+ DCHECK(!dst.is(pc));
+ DCHECK(!src.is(pc));
+ emit(cond | 0x6FF * B16 | dst.code() * B12 | 0xF3 * B4 | src.code());
+}
+
+
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
DCHECK(!dst.is(pc));
@@ -2135,6 +2145,21 @@ void Assembler::svc(uint32_t imm24, Condition cond) {
}
+void Assembler::dmb(BarrierOption option) {
+ emit(kSpecialCondition | 0x57ff*B12 | 5*B4 | option);
+}
+
+
+void Assembler::dsb(BarrierOption option) {
+ emit(kSpecialCondition | 0x57ff*B12 | 4*B4 | option);
+}
+
+
+void Assembler::isb(BarrierOption option) {
+ emit(kSpecialCondition | 0x57ff*B12 | 6*B4 | option);
+}
+
+
// Coprocessor instructions.
void Assembler::cdp(Coprocessor coproc,
int opcode_1,
@@ -2923,6 +2948,24 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
}
+void Assembler::vcvt_f32_u32(const SwVfpRegister dst, const SwVfpRegister src,
+ VFPConversionMode mode, const Condition cond) {
+ emit(EncodeVCVT(F32, dst.code(), U32, src.code(), mode, cond));
+}
+
+
+void Assembler::vcvt_s32_f32(const SwVfpRegister dst, const SwVfpRegister src,
+ VFPConversionMode mode, const Condition cond) {
+ emit(EncodeVCVT(S32, dst.code(), F32, src.code(), mode, cond));
+}
+
+
+void Assembler::vcvt_u32_f32(const SwVfpRegister dst, const SwVfpRegister src,
+ VFPConversionMode mode, const Condition cond) {
+ emit(EncodeVCVT(U32, dst.code(), F32, src.code(), mode, cond));
+}
+
+
void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 1abf1ab6a6..d381653bde 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -285,6 +285,7 @@ struct QwNeonRegister {
typedef QwNeonRegister QuadRegister;
+typedef QwNeonRegister Simd128Register;
// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "s(N):s(N+1)" is the same as "d(N/2)".
@@ -950,6 +951,9 @@ class Assembler : public AssemblerBase {
void uxtah(Register dst, Register src1, Register src2, int rotate = 0,
Condition cond = al);
+ // Reverse the bits in a register.
+ void rbit(Register dst, Register src, Condition cond = al);
+
// Status register access instructions
void mrs(Register dst, SRegister s, Condition cond = al);
@@ -986,6 +990,11 @@ class Assembler : public AssemblerBase {
void bkpt(uint32_t imm16); // v5 and above
void svc(uint32_t imm24, Condition cond = al);
+ // Synchronization instructions
+ void dmb(BarrierOption option);
+ void dsb(BarrierOption option);
+ void isb(BarrierOption option);
+
// Coprocessor instructions
void cdp(Coprocessor coproc, int opcode_1,
@@ -1125,6 +1134,18 @@ class Assembler : public AssemblerBase {
const SwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
+ void vcvt_f32_u32(const SwVfpRegister dst,
+ const SwVfpRegister src,
+ VFPConversionMode mode = kDefaultRoundToZero,
+ const Condition cond = al);
+ void vcvt_s32_f32(const SwVfpRegister dst,
+ const SwVfpRegister src,
+ VFPConversionMode mode = kDefaultRoundToZero,
+ const Condition cond = al);
+ void vcvt_u32_f32(const SwVfpRegister dst,
+ const SwVfpRegister src,
+ VFPConversionMode mode = kDefaultRoundToZero,
+ const Condition cond = al);
void vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode = kDefaultRoundToZero,
@@ -1336,7 +1357,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const SourcePosition position);
+ void RecordDeoptReason(const int reason, int raw_position);
// Record the emission of a constant pool.
//
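The assembler-arm hunks above add bit reversal (rbit), the dmb/dsb/isb synchronization barriers, and single-precision vcvt conversions. A hedged sketch of how generated code might emit them, assuming the usual __ masm-> shorthand and a BarrierOption value such as SY from constants-arm.h:

    __ rbit(r0, r1);          // r0 = bit-reversed r1 (ARMv7 only, per the DCHECK)
    __ dmb(SY);               // full-system data memory barrier
    __ vcvt_f32_u32(s0, s0);  // unsigned 32-bit integer -> single-precision float
    __ vcvt_u32_f32(s1, s2);  // single-precision float -> unsigned 32-bit integer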
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 0c83f918ca..a6bfdb128d 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -137,6 +137,108 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- lr : return address
+ // -- sp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- sp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ Condition const cc_done = (kind == MathMaxMinKind::kMin) ? mi : gt;
+ Condition const cc_swap = (kind == MathMaxMinKind::kMin) ? gt : mi;
+ Heap::RootListIndex const root_index =
+ (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+ : Heap::kMinusInfinityValueRootIndex;
+ DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
+
+ // Load the accumulator with the default return value (either -Infinity or
+ // +Infinity), with the tagged value in r1 and the double value in d1.
+ __ LoadRoot(r1, root_index);
+ __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+
+ // Remember how many slots to drop (including the receiver).
+ __ add(r4, r0, Operand(1));
+
+ Label done_loop, loop;
+ __ bind(&loop);
+ {
+ // Check if all parameters are done.
+ __ sub(r0, r0, Operand(1), SetCC);
+ __ b(lt, &done_loop);
+
+ // Load the next parameter tagged value into r2.
+ __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+
+ // Load the double value of the parameter into d2, maybe converting the
+ // parameter to a number first using the ToNumberStub if necessary.
+ Label convert, convert_smi, convert_number, done_convert;
+ __ bind(&convert);
+ __ JumpIfSmi(r2, &convert_smi);
+ __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ JumpIfRoot(r3, Heap::kHeapNumberMapRootIndex, &convert_number);
+ {
+ // Parameter is not a Number, use the ToNumberStub to convert it.
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r0);
+ __ SmiTag(r4);
+ __ Push(r0, r1, r4);
+ __ mov(r0, r2);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(r2, r0);
+ __ Pop(r0, r1, r4);
+ {
+ // Restore the double accumulator value (d1).
+ Label done_restore;
+ __ SmiToDouble(d1, r1);
+ __ JumpIfSmi(r1, &done_restore);
+ __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ bind(&done_restore);
+ }
+ __ SmiUntag(r4);
+ __ SmiUntag(r0);
+ }
+ __ b(&convert);
+ __ bind(&convert_number);
+ __ vldr(d2, FieldMemOperand(r2, HeapNumber::kValueOffset));
+ __ b(&done_convert);
+ __ bind(&convert_smi);
+ __ SmiToDouble(d2, r2);
+ __ bind(&done_convert);
+
+ // Perform the actual comparison with the accumulator value on the left hand
+ // side (d1) and the next parameter value on the right hand side (d2).
+ Label compare_nan, compare_swap;
+ __ VFPCompareAndSetFlags(d1, d2);
+ __ b(cc_done, &loop);
+ __ b(cc_swap, &compare_swap);
+ __ b(vs, &compare_nan);
+
+ // Left and right hand side are equal, check for -0 vs. +0.
+ __ VmovHigh(ip, reg);
+ __ cmp(ip, Operand(0x80000000));
+ __ b(ne, &loop);
+
+ // Result is on the right hand side.
+ __ bind(&compare_swap);
+ __ vmov(d1, d2);
+ __ mov(r1, r2);
+ __ b(&loop);
+
+ // At least one side is NaN, which means that the result will be NaN too.
+ __ bind(&compare_nan);
+ __ LoadRoot(r1, Heap::kNanValueRootIndex);
+ __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ b(&loop);
+ }
+
+ __ bind(&done_loop);
+ __ mov(r0, r1);
+ __ Drop(r4);
+ __ Ret();
+}
+
+// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -227,8 +329,9 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r2, r1, r3); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(r2); // first argument
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(r2);
}
__ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
@@ -352,8 +455,9 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r2, r1, r3); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(r2); // first argument
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(r2);
}
__ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
@@ -361,27 +465,6 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
}
-static void CallRuntimePassFunction(
- MacroAssembler* masm, Runtime::FunctionId function_id) {
- // ----------- S t a t e -------------
- // -- r1 : target function (preserved for callee)
- // -- r3 : new target (preserved for callee)
- // -----------------------------------
-
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the target function and the new target.
- __ push(r1);
- __ push(r3);
- // Push function as parameter to the runtime call.
- __ Push(r1);
-
- __ CallRuntime(function_id, 1);
- // Restore target function and new target.
- __ pop(r3);
- __ pop(r1);
-}
-
-
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCodeOffset));
@@ -389,10 +472,35 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ Jump(r2);
}
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- r0 : argument count (preserved for callee)
+ // -- r1 : target function (preserved for callee)
+ // -- r3 : new target (preserved for callee)
+ // -----------------------------------
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Push the number of arguments to the callee.
+ __ SmiTag(r0);
+ __ push(r0);
+ // Push a copy of the target function and the new target.
+ __ push(r1);
+ __ push(r3);
+ // Push function as parameter to the runtime call.
+ __ Push(r1);
+
+ __ CallRuntime(function_id, 1);
+ __ mov(r2, r0);
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
- __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(r0);
+ // Restore target function and new target.
+ __ pop(r3);
+ __ pop(r1);
+ __ pop(r0);
+ __ SmiUntag(r0, r0);
+ }
+ __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(r2);
}
@@ -407,8 +515,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
@@ -417,7 +524,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool create_implicit_receiver) {
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
@@ -435,155 +543,22 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Preserve the incoming parameters on the stack.
__ AssertUndefinedOrAllocationSite(r2, r4);
- __ push(r2);
__ SmiTag(r0);
- __ push(r0);
+ __ Push(r2, r0);
if (create_implicit_receiver) {
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- // Verify that the new target is a JSFunction.
- __ CompareObjectType(r3, r5, r4, JS_FUNCTION_TYPE);
- __ b(ne, &rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- // r3: new target
- __ ldr(r2,
- FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &rt_call);
- __ CompareObjectType(r2, r5, r4, MAP_TYPE);
- __ b(ne, &rt_call);
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ ldr(r5, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
- __ cmp(r1, r5);
- __ b(ne, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // r1: constructor function
- // r2: initial map
- // r3: new target
- __ CompareInstanceType(r2, r5, JS_FUNCTION_TYPE);
- __ b(eq, &rt_call);
-
- // Now allocate the JSObject on the heap.
- // r1: constructor function
- // r2: initial map
- // r3: new target
- __ ldrb(r9, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-
- __ Allocate(r9, r4, r9, r6, &rt_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // r1: constructor function
- // r2: initial map
- // r3: new target
- // r4: JSObject (not HeapObject tagged - the actual address).
- // r9: start of next object
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(r5, r4);
- STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
- __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
- STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ add(r4, r4, Operand(kHeapObjectTag));
-
- // Fill all the in-object properties with the appropriate filler.
- // r4: JSObject (tagged)
- // r5: First in-object property of JSObject (not tagged)
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // Check if slack tracking is enabled.
- MemOperand bit_field3 = FieldMemOperand(r2, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ ldr(r0, bit_field3);
- __ DecodeField<Map::ConstructionCounter>(ip, r0);
- // ip: slack tracking counter
- __ cmp(ip, Operand(Map::kSlackTrackingCounterEnd));
- __ b(lt, &no_inobject_slack_tracking);
- __ push(ip); // Save allocation count value.
- // Decrease generous allocation count.
- __ sub(r0, r0, Operand(1 << Map::ConstructionCounter::kShift));
- __ str(r0, bit_field3);
-
- // Allocate object with a slack.
- __ ldr(r0, FieldMemOperand(r2, Map::kInstanceAttributesOffset));
- __ Ubfx(r0, r0, Map::kUnusedPropertyFieldsByte * kBitsPerByte,
- kBitsPerByte);
- __ sub(r0, r9, Operand(r0, LSL, kPointerSizeLog2));
- // r0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(r5, r0);
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
- }
- __ InitializeFieldsWithFiller(r5, r0, r6);
-
- // To allow truncation fill the remaining fields with one pointer
- // filler map.
- __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(r5, r9, r6);
-
- __ pop(r0); // Restore allocation count value before decreasing.
- __ cmp(r0, Operand(Map::kSlackTrackingCounterEnd));
- __ b(ne, &allocated);
-
- // Push the constructor, new_target and the object to the stack,
- // and then the initial map as an argument to the runtime call.
- __ Push(r1, r3, r4, r2);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(r1, r3, r4);
-
- // Continue with JSObject being successfully allocated
- // r1: constructor function
- // r3: new target
- // r4: JSObject
- __ jmp(&allocated);
-
- __ bind(&no_inobject_slack_tracking);
- }
-
- __ InitializeFieldsWithFiller(r5, r9, r6);
-
- // Continue with JSObject being successfully allocated
- // r1: constructor function
- // r3: new target
- // r4: JSObject
- __ jmp(&allocated);
- }
-
- // Allocate the new receiver object using the runtime call.
- // r1: constructor function
- // r3: new target
- __ bind(&rt_call);
-
- // Push the constructor and new_target twice, second pair as arguments
- // to the runtime call.
+ // Allocate the new receiver object.
__ Push(r1, r3);
- __ Push(r1, r3); // constructor function, new target
- __ CallRuntime(Runtime::kNewObject);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(r4, r0);
__ Pop(r1, r3);
- // Receiver for constructor call allocated.
- // r1: constructor function
- // r3: new target
- // r4: JSObject
- __ bind(&allocated);
+ // ----------- S t a t e -------------
+ // -- r1: constructor function
+ // -- r3: new target
+ // -- r4: newly allocated object
+ // -----------------------------------
// Retrieve smi-tagged arguments count from the stack.
__ ldr(r0, MemOperand(sp));
@@ -685,6 +660,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Leave construct frame.
}
+  // ES6 9.2.2. Step 13+
+  // If the result is a Smi, the derived-class constructor returned neither
+  // undefined nor an Object, so throw a TypeError.
+ if (check_derived_construct) {
+ Label dont_throw;
+ __ JumpIfNotSmi(r0, &dont_throw);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+ }
+ __ bind(&dont_throw);
+ }
+
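A minimal C++ model of the guard above, assuming only the tagging scheme
asserted elsewhere in this diff (kSmiTag == 0, kHeapObjectTag == 1);
CheckDerivedConstructResult is an illustrative name, not V8 API:

    #include <cstdint>
    #include <stdexcept>

    // Under this tagging scheme a Smi has a clear low bit, so a Smi result
    // can never be an Object (or undefined) and must raise a TypeError.
    void CheckDerivedConstructResult(uintptr_t tagged_result) {
      if ((tagged_result & 1) == 0) {
        throw std::runtime_error(
            "TypeError: derived constructor returned non-object");
      }
    }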
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
if (create_implicit_receiver) {
@@ -695,17 +683,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, true);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, true);
}
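The four stubs above differ only in how they parameterize the shared helper;
a hypothetical summary of the flag wiring (field names are illustrative):

    struct ConstructStubConfig {
      bool is_api_function;
      bool create_implicit_receiver;
      bool check_derived_construct;
    };

    constexpr ConstructStubConfig kGeneric{false, true, false};
    constexpr ConstructStubConfig kApi{true, false, false};
    constexpr ConstructStubConfig kBuiltins{false, false, false};
    constexpr ConstructStubConfig kBuiltinsForDerived{false, false, true};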
@@ -854,10 +848,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// o sp: stack pointer
// o lr: return address
//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-arm.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -865,17 +857,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushFixedFrame(r1);
__ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- __ push(r3);
-
- // Push zero for bytecode array offset.
- __ mov(r0, Operand(0));
- __ push(r0);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeArrayRegister.
__ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ Register debug_info = kInterpreterBytecodeArrayRegister;
+ DCHECK(!debug_info.is(r0));
+ __ ldr(debug_info, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
+ __ cmp(debug_info, Operand(DebugInfo::uninitialized()));
+ // Load original bytecode array or the debug copy.
__ ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
+ FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset), eq);
+ __ ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex), ne);
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -886,6 +880,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
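In rough C++ terms, the conditional ldr pair above selects between the
original bytecode and the debugger's instrumented copy; these structs are
stand-ins for SharedFunctionInfo/DebugInfo, not V8's real layout:

    struct DebugInfo { const void* abstract_code; };
    struct SharedFunctionInfo {
      const void* function_data;    // the original BytecodeArray
      const DebugInfo* debug_info;  // null while uninitialized
    };

    const void* SelectBytecode(const SharedFunctionInfo& sfi) {
      // Mirrors the eq/ne conditional loads above.
      return sfi.debug_info ? sfi.debug_info->abstract_code
                            : sfi.function_data;
    }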
+ // Push new.target, bytecode array and zero for bytecode array offset.
+ __ mov(r0, Operand(0));
+ __ Push(r3, kInterpreterBytecodeArrayRegister, r0);
+
// Allocate the local and temporary register file on the stack.
{
// Load frame size from the BytecodeArray object.
@@ -917,23 +915,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
- // - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Allow simulator stop operations if FLAG_stop_at is set.
// - Code aging of the BytecodeArray object.
- // Perform stack guard check.
- {
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- __ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard);
- __ pop(kInterpreterBytecodeArrayRegister);
- __ bind(&ok);
- }
-
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
@@ -941,10 +925,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ mov(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Dispatch to the first bytecode handler for the function.
__ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
@@ -955,6 +938,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// and header removal.
__ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
+
+ // Even though the first bytecode handler was called, we will never return.
+ __ Abort(kUnexpectedReturnFromBytecodeHandler);
}
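The dispatch sequence this trampoline ends with (load a byte, index the
handler table, call through it) reduces to roughly the following sketch;
types and names are hypothetical, not the real interpreter's:

    #include <cstddef>
    #include <cstdint>

    using BytecodeHandler = void (*)();

    void Dispatch(const uint8_t* bytecode_array, std::size_t offset,
                  BytecodeHandler const* dispatch_table) {
      // ldrb of the bytecode, pointer-scaled table load, then Call(ip).
      dispatch_table[bytecode_array[offset]]();
    }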
@@ -992,7 +978,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
// static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r2 : the address of the first argument to be pushed. Subsequent
@@ -1010,7 +997,9 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
Generate_InterpreterPushArgs(masm, r2, r3, r4);
// Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
}
@@ -1039,47 +1028,24 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
}
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(kInterpreterAccumulatorRegister); // Save accumulator register.
-
- // Pass the deoptimization type to the runtime system.
- __ mov(r1, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(r1);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
-
- __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
- // Tear down internal frame.
- }
-
- // Drop state (we don't use this for interpreter deopts).
- __ Drop(1);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Initialize register file register and dispatch table register.
__ add(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ mov(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Get the context from the frame.
- // TODO(rmcilroy): Update interpreter frame to expect current context at the
- // context slot instead of the function context.
__ ldr(kContextRegister,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
- __ ldr(r1,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
- __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r1, SharedFunctionInfo::kFunctionDataOffset));
+ __ ldr(
+ kInterpreterBytecodeArrayRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1107,6 +1073,29 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
}
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Pass the deoptimization type to the runtime system.
+ __ mov(r1, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(r1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ // Tear down internal frame.
+ }
+
+  // Drop state (we don't use these for interpreter deopts) and pop the
+ // accumulator value into the accumulator register.
+ __ Drop(1);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ // Enter the bytecode dispatch.
+ Generate_EnterBytecodeDispatch(masm);
+}
+
+
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
@@ -1121,22 +1110,30 @@ void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the address of the interpreter entry trampoline as a return address.
+ // This simulates the initial call to bytecode handlers in interpreter entry
+ // trampoline. The return will never actually be taken, but our stack walker
+ // uses this address to determine whether a frame is interpreted.
+ __ Move(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+
+ Generate_EnterBytecodeDispatch(masm);
+}
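A hedged sketch of why lr is faked here: a frame walker can classify a frame
as interpreted by checking whether its saved return address falls inside the
entry trampoline's code range (illustrative, not V8's actual walker):

    #include <cstdint>

    bool IsInterpretedFrame(uintptr_t saved_return_address,
                            uintptr_t trampoline_begin,
                            uintptr_t trampoline_end) {
      return saved_return_address >= trampoline_begin &&
             saved_return_address < trampoline_end;
    }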
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
@@ -1342,14 +1339,11 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
// Load the next prototype.
__ bind(&next_prototype);
- __ ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
- // End if the prototype is null or not hidden.
- __ CompareRoot(receiver, Heap::kNullValueRootIndex);
- __ b(eq, receiver_check_failed);
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ ldr(ip, FieldMemOperand(map, Map::kBitField3Offset));
- __ tst(ip, Operand(Map::IsHiddenPrototype::kMask));
+ __ tst(ip, Operand(Map::HasHiddenPrototype::kMask));
__ b(eq, receiver_check_failed);
+ __ ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Iterate.
__ b(&prototype_loop_start);
@@ -1829,9 +1823,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Try to create the list from an arguments object.
__ bind(&create_arguments);
- __ ldr(r2,
- FieldMemOperand(r0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ ldr(r2, FieldMemOperand(r0, JSArgumentsObject::kLengthOffset));
__ ldr(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
__ ldr(ip, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ cmp(r2, ip);
@@ -1906,10 +1898,136 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it
+// (if present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// | ...
+// | g()'s arg M
+// | ...
+// | g()'s arg 1
+// | g()'s receiver arg
+// | g()'s caller pc
+// ------- g()'s frame: -------
+// | g()'s caller fp <- fp
+// | g()'s context
+// | function pointer: g
+// | -------------------------
+// | ...
+// | ...
+// | f()'s arg N
+// | ...
+// | f()'s arg 1
+// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Comment cmnt(masm, "[ PrepareForTailCall");
+
+ // Prepare for tail call only if the debugger is not active.
+ Label done;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(masm->isolate());
+ __ mov(scratch1, Operand(debug_is_active));
+ __ ldrb(scratch1, MemOperand(scratch1));
+ __ cmp(scratch1, Operand(0));
+ __ b(ne, &done);
+
+ // Drop possible interpreter handler/stub frame.
+ {
+ Label no_interpreter_frame;
+ __ ldr(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
+ __ b(ne, &no_interpreter_frame);
+ __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&no_interpreter_frame);
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(scratch3,
+ MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(ne, &no_arguments_adaptor);
+
+ // Drop arguments adaptor frame and load arguments count.
+ __ mov(fp, scratch2);
+ __ ldr(scratch1,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(scratch1);
+ __ b(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(scratch1,
+ FieldMemOperand(scratch1,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(scratch1);
+
+ __ bind(&formal_parameter_count_loaded);
+
+  // Calculate the end of the destination area where we will put the
+  // arguments after we drop the current frame. We add kPointerSize to count
+  // the receiver argument, which is not included in the formal parameter
+  // count.
+ Register dst_reg = scratch2;
+ __ add(dst_reg, fp, Operand(scratch1, LSL, kPointerSizeLog2));
+ __ add(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = scratch1;
+ __ add(src_reg, sp, Operand(args_reg, LSL, kPointerSizeLog2));
+ // Count receiver argument as well (not included in args_reg).
+ __ add(src_reg, src_reg, Operand(kPointerSize));
+
+ if (FLAG_debug_code) {
+ __ cmp(src_reg, dst_reg);
+ __ Check(lo, kStackAccessBelowStackPointer);
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ __ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch3;
+ Label loop, entry;
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
+ __ str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
+ __ bind(&entry);
+ __ cmp(sp, src_reg);
+ __ b(ne, &loop);
+
+ // Leave current frame.
+ __ mov(sp, dst_reg);
+
+ __ bind(&done);
+}
+} // namespace
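The copying loop is the delicate part: the source and destination ranges can
overlap, so both pointers walk downward. A minimal C++ analogue over word
slots (names are illustrative):

    #include <cstdint>

    // dst_end and src_end point one past the last slot to copy, like
    // dst_reg/src_reg above; sp_like marks where the source range stops.
    void CopyArgsBackwards(uintptr_t* dst_end, uintptr_t* src_end,
                           const uintptr_t* sp_like) {
      while (src_end != sp_like) {
        *--dst_end = *--src_end;  // pre-decrement both, as with PreIndex
      }
    }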
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode) {
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the function to call (checked to be a JSFunction)
@@ -1995,6 +2113,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, r0, r3, r4, r5);
+ }
+
__ ldr(r2,
FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
__ SmiUntag(r2);
@@ -2093,13 +2215,18 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r1);
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, r0, r3, r4, r5);
+ }
+
// Patch the receiver to [[BoundThis]].
__ ldr(ip, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
__ str(ip, MemOperand(sp, r0, LSL, kPointerSizeLog2));
@@ -2117,7 +2244,8 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the target to call (can be any Object).
@@ -2127,14 +2255,25 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ JumpIfSmi(r1, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
RelocInfo::CODE_TARGET, eq);
__ cmp(r5, Operand(JS_BOUND_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
RelocInfo::CODE_TARGET, eq);
+
+ // Check if target has a [[Call]] internal method.
+ __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ tst(r4, Operand(1 << Map::kIsCallable));
+ __ b(eq, &non_callable);
+
__ cmp(r5, Operand(JS_PROXY_TYPE));
__ b(ne, &non_function);
+ // 0. Prepare for tail call if necessary.
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, r0, r3, r4, r5);
+ }
+
// 1. Runtime fallback for Proxy [[Call]].
__ Push(r1);
// Increase the arguments size to include the pushed function and the
@@ -2147,16 +2286,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
__ bind(&non_function);
- // Check if target has a [[Call]] internal method.
- __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
- __ tst(r4, Operand(1 << Map::kIsCallable));
- __ b(eq, &non_callable);
  // Overwrite the original receiver with the (original) target.
__ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 21413335ea..239eddd28e 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -92,9 +92,8 @@ void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
#define __ ACCESS_MASM(masm)
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cond, Strength strength);
+ Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -238,7 +237,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cond, Strength strength) {
+ Condition cond) {
Label not_identical;
Label heap_number, return_equal;
__ cmp(r0, r1);
@@ -258,14 +257,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical SIMD values since we must throw a TypeError.
__ cmp(r4, Operand(SIMD128_VALUE_TYPE));
__ b(eq, slow);
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics, since
- // we need to throw a TypeError. Smis have already been ruled out.
- __ cmp(r4, Operand(HEAP_NUMBER_TYPE));
- __ b(eq, &return_equal);
- __ tst(r4, Operand(kIsNotStringMask));
- __ b(ne, slow);
- }
} else {
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(eq, &heap_number);
@@ -279,13 +270,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical SIMD values since we must throw a TypeError.
__ cmp(r4, Operand(SIMD128_VALUE_TYPE));
__ b(eq, slow);
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics,
- // since we need to throw a TypeError. Smis and heap numbers have
- // already been ruled out.
- __ tst(r4, Operand(kIsNotStringMask));
- __ b(ne, slow);
- }
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -495,44 +479,52 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
+ Register lhs, Register rhs,
Label* possible_strings,
- Label* not_both_strings) {
+ Label* runtime_call) {
DCHECK((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));
// r2 is object type of rhs.
- Label object_test;
+ Label object_test, return_unequal, undetectable;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ tst(r2, Operand(kIsNotStringMask));
__ b(ne, &object_test);
__ tst(r2, Operand(kIsNotInternalizedMask));
__ b(ne, possible_strings);
__ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, not_both_strings);
+ __ b(ge, runtime_call);
__ tst(r3, Operand(kIsNotInternalizedMask));
__ b(ne, possible_strings);
- // Both are internalized. We already checked they weren't the same pointer
- // so they are not equal.
- __ mov(r0, Operand(NOT_EQUAL));
+ // Both are internalized. We already checked they weren't the same pointer so
+ // they are not equal. Return non-equal by returning the non-zero object
+ // pointer in r0.
__ Ret();
__ bind(&object_test);
- __ cmp(r2, Operand(FIRST_JS_RECEIVER_TYPE));
- __ b(lt, not_both_strings);
- __ CompareObjectType(lhs, r2, r3, FIRST_JS_RECEIVER_TYPE);
- __ b(lt, not_both_strings);
- // If both objects are undetectable, they are equal. Otherwise, they
- // are not equal, since they are different objects and an object is not
- // equal to undefined.
+ __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
__ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ and_(r0, r2, Operand(r3));
- __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
- __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
+ __ ldrb(r4, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ ldrb(r5, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ tst(r4, Operand(1 << Map::kIsUndetectable));
+ __ b(ne, &undetectable);
+ __ tst(r5, Operand(1 << Map::kIsUndetectable));
+ __ b(ne, &return_unequal);
+
+ __ CompareInstanceType(r2, r2, FIRST_JS_RECEIVER_TYPE);
+ __ b(lt, runtime_call);
+ __ CompareInstanceType(r3, r3, FIRST_JS_RECEIVER_TYPE);
+ __ b(lt, runtime_call);
+
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in r0.
+ __ Ret();
+
+ __ bind(&undetectable);
+ __ tst(r5, Operand(1 << Map::kIsUndetectable));
+ __ b(eq, &return_unequal);
+ __ mov(r0, Operand(EQUAL));
__ Ret();
}
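Distilled into ordinary C++ (hypothetical flags, not V8 types), the
object_test path above decides equality like this:

    enum class Outcome { kEqual, kUnequal, kRuntimeCall };

    Outcome CompareNonStringObjects(bool lhs_undetectable,
                                    bool rhs_undetectable,
                                    bool lhs_is_receiver,
                                    bool rhs_is_receiver) {
      if (lhs_undetectable) {
        // Two undetectable objects compare equal; one-sided is unequal.
        return rhs_undetectable ? Outcome::kEqual : Outcome::kUnequal;
      }
      if (rhs_undetectable) return Outcome::kUnequal;
      if (!lhs_is_receiver || !rhs_is_receiver) return Outcome::kRuntimeCall;
      return Outcome::kUnequal;  // distinct receivers are never equal
    }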
@@ -583,7 +575,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc, strength());
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@@ -696,8 +688,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
- : Runtime::kCompare);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -895,7 +886,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ vcvt_f64_s32(double_exponent, single_scratch);
// Returning or bailing out.
- Counters* counters = isolate()->counters();
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
@@ -909,7 +899,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
DCHECK(heapnumber.is(r0));
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret(2);
} else {
__ push(lr);
@@ -925,7 +914,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ MovFromFloatResult(double_result);
__ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret();
}
}
@@ -999,11 +987,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// r1: pointer to the first argument (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
- // Result returned in r0 or r0+r1 by default.
-
-#if V8_HOST_ARCH_ARM
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
+#if V8_HOST_ARCH_ARM
if (FLAG_debug_code) {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
@@ -1018,8 +1004,25 @@ void CEntryStub::Generate(MacroAssembler* masm) {
#endif
// Call C built-in.
- // r0 = argc, r1 = argv
- __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+ int result_stack_size;
+ if (result_size() <= 2) {
+ // r0 = argc, r1 = argv, r2 = isolate
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+ result_stack_size = 0;
+ } else {
+ DCHECK_EQ(3, result_size());
+ // Allocate additional space for the result.
+ result_stack_size =
+ ((result_size() * kPointerSize) + frame_alignment_mask) &
+ ~frame_alignment_mask;
+ __ sub(sp, sp, Operand(result_stack_size));
+
+ // r0 = hidden result argument, r1 = argc, r2 = argv, r3 = isolate.
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(r2, Operand(r1));
+ __ mov(r1, Operand(r0));
+ __ mov(r0, Operand(sp));
+ }
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
@@ -1032,11 +1035,19 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Prevent literal pool emission before return address.
Assembler::BlockConstPoolScope block_const_pool(masm);
__ add(lr, pc, Operand(4));
- __ str(lr, MemOperand(sp, 0));
+ __ str(lr, MemOperand(sp, result_stack_size));
__ Call(r5);
}
+ if (result_size() > 2) {
+ DCHECK_EQ(3, result_size());
+ // Read result values stored on stack.
+ __ ldr(r2, MemOperand(r0, 2 * kPointerSize));
+ __ ldr(r1, MemOperand(r0, 1 * kPointerSize));
+ __ ldr(r0, MemOperand(r0, 0 * kPointerSize));
+ }
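For result_size() == 3 the stub switches to a hidden-result-argument
convention: the caller reserves alignment-padded stack space, passes its
address in r0, shifts argc/argv/isolate into r1/r2/r3, and loads the three
result words back after the call. A plain-C++ caricature of the callee side
(names are illustrative only):

    #include <cstdint>

    struct ThreeWordResult { uintptr_t w0, w1, w2; };

    // Stand-in for the C built-in: it fills caller-reserved space instead
    // of returning in registers, matching the register shuffle above.
    void BuiltinWithWideResult(ThreeWordResult* hidden, int argc, void** argv,
                               void* isolate) {
      (void)argc; (void)argv; (void)isolate;
      hidden->w0 = hidden->w1 = hidden->w2 = 0;
    }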
+ // Result returned in r0, r1:r0 or r2:r1:r0 - do not destroy these registers!
- __ VFPEnsureFPSCRState(r2);
+ __ VFPEnsureFPSCRState(r3);
// Check result for exception sentinel.
Label exception_returned;
@@ -1049,9 +1060,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Label okay;
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, isolate());
- __ mov(r2, Operand(pending_exception_address));
- __ ldr(r2, MemOperand(r2));
- __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ mov(r3, Operand(pending_exception_address));
+ __ ldr(r3, MemOperand(r3));
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
// Cannot use check here as it attempts to generate call into runtime.
__ b(eq, &okay);
__ stop("Unexpected pending exception");
@@ -1461,286 +1472,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The displacement is the offset of the last parameter (if any)
- // relative to the frame pointer.
- const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
- DCHECK(r1.is(ArgumentsAccessReadDescriptor::index()));
- DCHECK(r0.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(r1, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register r0. Use unsigned comparison to get negative
- // check for free.
- __ cmp(r1, r0);
- __ b(hs, &slow);
-
- // Read the argument from the stack and return it.
- __ sub(r3, r0, r1);
- __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
- __ ldr(r0, MemOperand(r3, kDisplacement));
- __ Jump(lr);
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(r1, r0);
- __ b(cs, &slow);
-
- // Read the argument from the adaptor frame and return it.
- __ sub(r3, r0, r1);
- __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ ldr(r0, MemOperand(r3, kDisplacement));
- __ Jump(lr);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ push(r1);
- __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // r1 : function
- // r2 : number of parameters (tagged)
- // r3 : parameters pointer
-
- DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(r2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(r3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
- __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &runtime);
-
- // Patch the arguments.length and the parameters pointer in the current frame.
- __ ldr(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ add(r4, r4, Operand(r2, LSL, 1));
- __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
-
- __ bind(&runtime);
- __ Push(r1, r3, r2);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // r1 : function
- // r2 : number of parameters (tagged)
- // r3 : parameters pointer
- // Registers used over whole function:
- // r5 : arguments count (tagged)
- // r6 : mapped parameter count (tagged)
-
- DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(r2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(r3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
- __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor_frame);
-
- // No adaptor, parameter count = argument count.
- __ mov(r5, r2);
- __ mov(r6, r2);
- __ b(&try_allocate);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ add(r4, r4, Operand(r5, LSL, 1));
- __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // r5 = argument count (tagged)
- // r6 = parameter count (tagged)
- // Compute the mapped parameter count = min(r6, r5) in r6.
- __ mov(r6, r2);
- __ cmp(r6, Operand(r5));
- __ mov(r6, Operand(r5), LeaveCC, gt);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- __ cmp(r6, Operand(Smi::FromInt(0)));
- __ mov(r9, Operand::Zero(), LeaveCC, eq);
- __ mov(r9, Operand(r6, LSL, 1), LeaveCC, ne);
- __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
-
- // 2. Backing store.
- __ add(r9, r9, Operand(r5, LSL, 1));
- __ add(r9, r9, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(r9, r0, r9, r4, &runtime, TAG_OBJECT);
-
- // r0 = address of new object(s) (tagged)
- // r2 = argument count (smi-tagged)
- // Get the arguments boilerplate from the current native context into r4.
- const int kNormalOffset =
- Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
- __ ldr(r4, NativeContextMemOperand());
- __ cmp(r6, Operand::Zero());
- __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
- __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
-
- // r0 = address of new object (tagged)
- // r2 = argument count (smi-tagged)
- // r4 = address of arguments map (tagged)
- // r6 = mapped parameter count (tagged)
- __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
- __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
- __ str(r9, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r9, FieldMemOperand(r0, JSObject::kElementsOffset));
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ AssertNotSmi(r1);
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ str(r1, FieldMemOperand(r0, kCalleeOffset));
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(r5);
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- const int kLengthOffset = JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize;
- __ str(r5, FieldMemOperand(r0, kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, r4 will point there, otherwise
- // it will point to the backing store.
- __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize));
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
-
- // r0 = address of new object (tagged)
- // r2 = argument count (tagged)
- // r4 = address of parameter map or backing store (tagged)
- // r6 = mapped parameter count (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ cmp(r6, Operand(Smi::FromInt(0)));
- // Move backing store address to r1, because it is
- // expected there when filling in the unmapped arguments.
- __ mov(r1, r4, LeaveCC, eq);
- __ b(eq, &skip_parameter_map);
-
- __ LoadRoot(r5, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
- __ add(r5, r6, Operand(Smi::FromInt(2)));
- __ str(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ add(r5, r4, Operand(r6, LSL, 1));
- __ add(r5, r5, Operand(kParameterMapHeaderSize));
- __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameter thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ mov(r5, r6);
- __ add(r9, r2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ sub(r9, r9, Operand(r6));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ add(r1, r4, Operand(r5, LSL, 1));
- __ add(r1, r1, Operand(kParameterMapHeaderSize));
-
- // r1 = address of backing store (tagged)
- // r4 = address of parameter map (tagged), which is also the address of new
- // object + Heap::kSloppyArgumentsObjectSize (tagged)
- // r0 = temporary scratch (a.o., for address calculation)
- // r5 = loop variable (tagged)
- // ip = the hole value
- __ jmp(&parameters_test);
-
- __ bind(&parameters_loop);
- __ sub(r5, r5, Operand(Smi::FromInt(1)));
- __ mov(r0, Operand(r5, LSL, 1));
- __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ str(r9, MemOperand(r4, r0));
- __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ str(ip, MemOperand(r1, r0));
- __ add(r9, r9, Operand(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ cmp(r5, Operand(Smi::FromInt(0)));
- __ b(ne, &parameters_loop);
-
- // Restore r0 = new object (tagged) and r5 = argument count (tagged).
- __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
- __ ldr(r5, FieldMemOperand(r0, kLengthOffset));
-
- __ bind(&skip_parameter_map);
- // r0 = address of new object (tagged)
- // r1 = address of backing store (tagged)
- // r5 = argument count (tagged)
- // r6 = mapped parameter count (tagged)
- // r9 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
- __ str(r9, FieldMemOperand(r1, FixedArray::kMapOffset));
- __ str(r5, FieldMemOperand(r1, FixedArray::kLengthOffset));
-
- Label arguments_loop, arguments_test;
- __ sub(r3, r3, Operand(r6, LSL, 1));
- __ jmp(&arguments_test);
-
- __ bind(&arguments_loop);
- __ sub(r3, r3, Operand(kPointerSize));
- __ ldr(r4, MemOperand(r3, 0));
- __ add(r9, r1, Operand(r6, LSL, 1));
- __ str(r4, FieldMemOperand(r9, FixedArray::kHeaderSize));
- __ add(r6, r6, Operand(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ cmp(r6, Operand(r5));
- __ b(lt, &arguments_loop);
-
- // Return.
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- // r0 = address of new object (tagged)
- // r5 = argument count (tagged)
- __ bind(&runtime);
- __ Push(r1, r3, r5);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Return address is in lr.
Label slow;
@@ -1764,117 +1495,6 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // r1 : function
- // r2 : number of parameters (tagged)
- // r3 : parameters pointer
-
- DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(r2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(r3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label try_allocate, runtime;
- __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
- __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ ldr(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ add(r4, r4, Operand::PointerOffsetFromSmiKey(r2));
- __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Try the new space allocation. Start out with computing the size
- // of the arguments object and the elements array in words.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ SmiUntag(r9, r2, SetCC);
- __ b(eq, &add_arguments_object);
- __ add(r9, r9, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ bind(&add_arguments_object);
- __ add(r9, r9, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
-
- // Do the allocation of both objects in one go.
- __ Allocate(r9, r0, r4, r5, &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current native context.
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r4);
-
- __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
- __ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
- __ str(r5, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r5, FieldMemOperand(r0, JSObject::kElementsOffset));
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ AssertSmi(r2);
- __ str(r2,
- FieldMemOperand(r0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
-
- // If there are no actual arguments, we're done.
- Label done;
- __ cmp(r2, Operand::Zero());
- __ b(eq, &done);
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
- __ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
- __ str(r2, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ SmiUntag(r2);
-
- // Copy the fixed array slots.
- Label loop;
- // Set up r4 to point to the first array slot.
- __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- // Pre-decrement r3 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ ldr(r5, MemOperand(r3, kPointerSize, NegPreIndex));
- // Post-increment r4 with kPointerSize on each iteration.
- __ str(r5, MemOperand(r4, kPointerSize, PostIndex));
- __ sub(r2, r2, Operand(1));
- __ cmp(r2, Operand::Zero());
- __ b(ne, &loop);
-
- // Return.
- __ bind(&done);
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ Push(r1, r3, r2);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // r2 : number of parameters (tagged)
- // r3 : parameters pointer
- // r4 : rest parameter index (tagged)
-
- Label runtime;
- __ ldr(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r0, MemOperand(r5, StandardFrameConstants::kContextOffset));
- __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &runtime);
-
- // Patch the arguments.length and the parameters pointer.
- __ ldr(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ add(r3, r5, Operand::PointerOffsetFromSmiKey(r2));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
-
- __ bind(&runtime);
- __ Push(r2, r3, r4);
- __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2506,7 +2126,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&call_function);
__ mov(r0, Operand(argc));
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+ tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
@@ -2545,7 +2166,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&call);
__ mov(r0, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -3096,6 +2717,37 @@ void ToStringStub::Generate(MacroAssembler* masm) {
}
+void ToNameStub::Generate(MacroAssembler* masm) {
+ // The ToName stub takes one argument in r0.
+ Label is_number;
+ __ JumpIfSmi(r0, &is_number);
+
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ CompareObjectType(r0, r1, r1, LAST_NAME_TYPE);
+ // r0: receiver
+ // r1: receiver instance type
+ __ Ret(ls);
+
+ Label not_heap_number;
+ __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
+ __ b(ne, &not_heap_number);
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ cmp(r1, Operand(ODDBALL_TYPE));
+ __ b(ne, &not_oddball);
+ __ ldr(r0, FieldMemOperand(r0, Oddball::kToStringOffset));
+ __ Ret();
+ __ bind(&not_oddball);
+
+ __ push(r0); // Push argument.
+ __ TailCallRuntime(Runtime::kToName);
+}
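The new stub's fast paths read as a small decision table; a hypothetical
restatement (the enumerators are illustrative, not V8 instance types):

    enum class InputKind { kSmi, kName, kHeapNumber, kOddball, kOther };

    const char* ToNamePath(InputKind kind) {
      switch (kind) {
        case InputKind::kName:       return "already a Name: return it";
        case InputKind::kSmi:
        case InputKind::kHeapNumber: return "tail-call NumberToStringStub";
        case InputKind::kOddball:    return "load the cached to_string";
        default:                     return "push the arg, Runtime::kToName";
      }
    }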
+
+
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -3263,18 +2915,14 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(r1, r2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(r0, r3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
- if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
- } else {
- if (!Token::IsEqualityOp(op())) {
- __ ldr(r1, FieldMemOperand(r1, Oddball::kToNumberOffset));
- __ AssertSmi(r1);
- __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
- __ AssertSmi(r0);
- }
- __ sub(r0, r1, r0);
- __ Ret();
+ if (!Token::IsEqualityOp(op())) {
+ __ ldr(r1, FieldMemOperand(r1, Oddball::kToNumberOffset));
+ __ AssertSmi(r1);
+ __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
+ __ AssertSmi(r0);
}
+ __ sub(r0, r1, r0);
+ __ Ret();
__ bind(&miss);
GenerateMiss(masm);
@@ -3354,7 +3002,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -3572,8 +3220,6 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
if (Token::IsEqualityOp(op())) {
__ sub(r0, r0, Operand(r1));
__ Ret();
- } else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ mov(r2, Operand(Smi::FromInt(GREATER)));
@@ -3939,11 +3585,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.scratch0(),
&dont_need_remembered_set);
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- ne,
- &dont_need_remembered_set);
+ __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+ &dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
@@ -4918,6 +4561,584 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r1 : target
+ // -- r3 : new target
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r1);
+ __ AssertReceiver(r3);
+
+ // Verify that the new target is a JSFunction.
+ Label new_object;
+ __ CompareObjectType(r3, r2, r2, JS_FUNCTION_TYPE);
+ __ b(ne, &new_object);
+
+ // Load the initial map and verify that it's in fact a map.
+ __ ldr(r2, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(r2, &new_object);
+ __ CompareObjectType(r2, r0, r0, MAP_TYPE);
+ __ b(ne, &new_object);
+
+ // Fall back to runtime if the target differs from the new target's
+ // initial map constructor.
+ __ ldr(r0, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
+ __ cmp(r0, r1);
+ __ b(ne, &new_object);
+
+ // Allocate the JSObject on the heap.
+ Label allocate, done_allocate;
+ __ ldrb(r4, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+ __ Allocate(r4, r0, r5, r6, &allocate, SIZE_IN_WORDS);
+ __ bind(&done_allocate);
+
+ // Initialize the JSObject fields.
+ __ str(r2, MemOperand(r0, JSObject::kMapOffset));
+ __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
+ __ str(r3, MemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r3, MemOperand(r0, JSObject::kElementsOffset));
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ add(r1, r0, Operand(JSObject::kHeaderSize));
+
+ // ----------- S t a t e -------------
+ // -- r0 : result (untagged)
+ // -- r1 : result fields (untagged)
+ // -- r5 : result end (untagged)
+ // -- r2 : initial map
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+
+ // Perform in-object slack tracking if requested.
+ Label slack_tracking;
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+ __ ldr(r3, FieldMemOperand(r2, Map::kBitField3Offset));
+ __ tst(r3, Operand(Map::ConstructionCounter::kMask));
+ __ b(ne, &slack_tracking);
+ {
+ // Initialize all in-object fields with undefined.
+ __ InitializeFieldsWithFiller(r1, r5, r6);
+
+ // Add the object tag to make the JSObject real.
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ add(r0, r0, Operand(kHeapObjectTag));
+ __ Ret();
+ }
+ __ bind(&slack_tracking);
+ {
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ __ sub(r3, r3, Operand(1 << Map::ConstructionCounter::kShift));
+ __ str(r3, FieldMemOperand(r2, Map::kBitField3Offset));
+
+ // Initialize the in-object fields with undefined.
+ __ ldrb(r4, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
+ __ sub(r4, r5, Operand(r4, LSL, kPointerSizeLog2));
+ __ InitializeFieldsWithFiller(r1, r4, r6);
+
+ // Initialize the remaining (reserved) fields with one pointer filler map.
+ __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(r1, r5, r6);
+
+ // Add the object tag to make the JSObject real.
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ add(r0, r0, Operand(kHeapObjectTag));
+
+ // Check if we can finalize the instance size.
+ STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+ __ tst(r3, Operand(Map::ConstructionCounter::kMask));
+ __ Ret(ne);
+
+ // Finalize the instance size.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r0, r2);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(r0);
+ }
+ __ Ret();
+ }
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ mov(r4, Operand(r4, LSL, kPointerSizeLog2 + 1));
+ __ Push(r2, r4);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(r2);
+ }
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ sub(r0, r0, Operand(kHeapObjectTag));
+ __ ldrb(r5, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+ __ add(r5, r0, Operand(r5, LSL, kPointerSizeLog2));
+ __ b(&done_allocate);
+
+ // Fall back to %NewObject.
+ __ bind(&new_object);
+ __ Push(r1, r3);
+ __ TailCallRuntime(Runtime::kNewObject);
+}
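In-object slack tracking, as wired above, decrements a per-map counter on
each fast-path allocation and pads the unused fields with one-word fillers
so the instance size can later be shrunk. A rough model (not V8's real
bookkeeping):

    struct TrackedMap {
      int construction_counter;  // 0 means tracking is over
    };

    // Returns true when this allocation should finalize the instance size,
    // i.e. when the counter has just run down to the end of tracking.
    bool DecrementSlackTracking(TrackedMap& map) {
      if (map.construction_counter == 0) return false;  // no tracking
      --map.construction_counter;
      return map.construction_counter == 0;
    }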
+
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r1);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make r2 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ mov(r2, fp);
+ __ b(&loop_entry);
+ __ bind(&loop);
+ __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ ldr(ip, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+ __ cmp(ip, r1);
+ __ b(ne, &loop);
+ }
+
+ // Check if we have rest parameters (only possible if we have an
+ // arguments adaptor frame below the function frame).
+ Label no_rest_parameters;
+ __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(ip, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(ne, &no_rest_parameters);
+
+ // Check if the arguments adaptor frame contains more arguments than
+ // specified by the function's internal formal parameter count.
+ Label rest_parameters;
+ __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r1,
+ FieldMemOperand(r1, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ sub(r0, r0, r1, SetCC);
+ __ b(gt, &rest_parameters);
+
+ // Return an empty rest parameter array.
+ __ bind(&no_rest_parameters);
+ {
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate an empty rest parameter array.
+ Label allocate, done_allocate;
+ __ Allocate(JSArray::kSize, r0, r1, r2, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Setup the rest parameter array in r0.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r1);
+ __ str(r1, FieldMemOperand(r0, JSArray::kMapOffset));
+ __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
+ __ str(r1, FieldMemOperand(r0, JSArray::kPropertiesOffset));
+ __ str(r1, FieldMemOperand(r0, JSArray::kElementsOffset));
+ __ mov(r1, Operand(0));
+ __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(Smi::FromInt(JSArray::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ }
+ __ jmp(&done_allocate);
+ }
+
+ __ bind(&rest_parameters);
+ {
+ // Compute the pointer to the first rest parameter (skipping the receiver).
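+ // r0 is a Smi (value << 1), so LSL by kPointerSizeLog2 - 1 scales it to
+ // value * kPointerSize bytes.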
+ __ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
+ __ add(r2, r2,
+ Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- r0 : number of rest parameters (tagged)
+ // -- r2 : pointer to first rest parameters
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate space for the rest parameter array plus the backing store.
+ Label allocate, done_allocate;
+ __ mov(r1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+ __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - 1));
+ __ Allocate(r1, r3, r4, r5, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Set up the elements array in r3.
+ __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
+ __ str(r1, FieldMemOperand(r3, FixedArray::kMapOffset));
+ __ str(r0, FieldMemOperand(r3, FixedArray::kLengthOffset));
+ __ add(r4, r3, Operand(FixedArray::kHeaderSize));
+ {
+ Label loop, done_loop;
+ __ add(r1, r4, Operand(r0, LSL, kPointerSizeLog2 - 1));
+ __ bind(&loop);
+ __ cmp(r4, r1);
+ __ b(eq, &done_loop);
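+ // NegPostIndex reads a slot and then steps r2 down the stack, while r4
+ // advances upwards through the new elements array.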
+ __ ldr(ip, MemOperand(r2, 1 * kPointerSize, NegPostIndex));
+ __ str(ip, FieldMemOperand(r4, 0 * kPointerSize));
+ __ add(r4, r4, Operand(1 * kPointerSize));
+ __ b(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Set up the rest parameter array in r4.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r1);
+ __ str(r1, FieldMemOperand(r4, JSArray::kMapOffset));
+ __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
+ __ str(r1, FieldMemOperand(r4, JSArray::kPropertiesOffset));
+ __ str(r3, FieldMemOperand(r4, JSArray::kElementsOffset));
+ __ str(r0, FieldMemOperand(r4, JSArray::kLengthOffset));
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ mov(r0, r4);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r1);
+ __ Push(r0, r2, r1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ mov(r3, r0);
+ __ Pop(r0, r2);
+ }
+ __ jmp(&done_allocate);
+ }
+}
+
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r1);
+
+ // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2,
+ FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ add(r3, fp, Operand(r2, LSL, kPointerSizeLog2 - 1));
+ __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // r1 : function
+ // r2 : number of parameters (tagged)
+ // r3 : parameters pointer
+ // Registers used over whole function:
+ // r5 : arguments count (tagged)
+ // r6 : mapped parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
+ __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(eq, &adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+ __ mov(r5, r2);
+ __ mov(r6, r2);
+ __ b(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ add(r4, r4, Operand(r5, LSL, 1));
+ __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // r5 = argument count (tagged)
+ // r6 = parameter count (tagged)
+ // Compute the mapped parameter count = min(r6, r5) in r6.
+ __ mov(r6, r2);
+ __ cmp(r6, Operand(r5));
+ __ mov(r6, Operand(r5), LeaveCC, gt);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
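+ // The predicated moves below compute the size without branching:
+ // r9 = 0 when the mapped count is zero, otherwise count * kPointerSize
+ // plus the parameter map header.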
+ __ cmp(r6, Operand(Smi::FromInt(0)));
+ __ mov(r9, Operand::Zero(), LeaveCC, eq);
+ __ mov(r9, Operand(r6, LSL, 1), LeaveCC, ne);
+ __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
+
+ // 2. Backing store.
+ __ add(r9, r9, Operand(r5, LSL, 1));
+ __ add(r9, r9, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ add(r9, r9, Operand(JSSloppyArgumentsObject::kSize));
+
+ // Do the allocation of all three objects in one go.
+ __ Allocate(r9, r0, r9, r4, &runtime, TAG_OBJECT);
+
+ // r0 = address of new object(s) (tagged)
+ // r2 = argument count (smi-tagged)
+ // Get the arguments boilerplate from the current native context into r4.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+
+ __ ldr(r4, NativeContextMemOperand());
+ __ cmp(r6, Operand::Zero());
+ __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
+ __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
+
+ // r0 = address of new object (tagged)
+ // r2 = argument count (smi-tagged)
+ // r4 = address of arguments map (tagged)
+ // r6 = mapped parameter count (tagged)
+ __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
+ __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
+ __ str(r9, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r9, FieldMemOperand(r0, JSObject::kElementsOffset));
+
+ // Set up the callee in-object property.
+ __ AssertNotSmi(r1);
+ __ str(r1, FieldMemOperand(r0, JSSloppyArgumentsObject::kCalleeOffset));
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(r5);
+ __ str(r5, FieldMemOperand(r0, JSSloppyArgumentsObject::kLengthOffset));
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, r4 will point there, otherwise
+ // it will point to the backing store.
+ __ add(r4, r0, Operand(JSSloppyArgumentsObject::kSize));
+ __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+
+ // r0 = address of new object (tagged)
+ // r2 = argument count (tagged)
+ // r4 = address of parameter map or backing store (tagged)
+ // r6 = mapped parameter count (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ cmp(r6, Operand(Smi::FromInt(0)));
+ // Move backing store address to r1, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mov(r1, r4, LeaveCC, eq);
+ __ b(eq, &skip_parameter_map);
+
+ __ LoadRoot(r5, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
+ __ add(r5, r6, Operand(Smi::FromInt(2)));
+ __ str(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ add(r5, r4, Operand(r6, LSL, 1));
+ __ add(r5, r5, Operand(kParameterMapHeaderSize));
+ __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1.
+ // The mapped parameters thus need to get indices
+ //   MIN_CONTEXT_SLOTS + parameter_count - 1 ..
+ //   MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count.
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ mov(r5, r6);
+ __ add(r9, r2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ sub(r9, r9, Operand(r6));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ add(r1, r4, Operand(r5, LSL, 1));
+ __ add(r1, r1, Operand(kParameterMapHeaderSize));
+
+ // r1 = address of backing store (tagged)
+ // r4 = address of parameter map (tagged), which is also the address of new
+ // object + JSSloppyArgumentsObject::kSize (tagged)
+ // r0 = temporary scratch (e.g. for address calculation)
+ // r5 = loop variable (tagged)
+ // ip = the hole value
+ __ jmp(&parameters_test);
+
+ __ bind(&parameters_loop);
+ __ sub(r5, r5, Operand(Smi::FromInt(1)));
+ __ mov(r0, Operand(r5, LSL, 1));
+ __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ str(r9, MemOperand(r4, r0));
+ __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ str(ip, MemOperand(r1, r0));
+ __ add(r9, r9, Operand(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ cmp(r5, Operand(Smi::FromInt(0)));
+ __ b(ne, &parameters_loop);
+
+ // Restore r0 = new object (tagged) and r5 = argument count (tagged).
+ __ sub(r0, r4, Operand(JSSloppyArgumentsObject::kSize));
+ __ ldr(r5, FieldMemOperand(r0, JSSloppyArgumentsObject::kLengthOffset));
+
+ __ bind(&skip_parameter_map);
+ // r0 = address of new object (tagged)
+ // r1 = address of backing store (tagged)
+ // r5 = argument count (tagged)
+ // r6 = mapped parameter count (tagged)
+ // r9 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
+ __ str(r9, FieldMemOperand(r1, FixedArray::kMapOffset));
+ __ str(r5, FieldMemOperand(r1, FixedArray::kLengthOffset));
+
+ Label arguments_loop, arguments_test;
+ __ sub(r3, r3, Operand(r6, LSL, 1));
+ __ jmp(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ sub(r3, r3, Operand(kPointerSize));
+ __ ldr(r4, MemOperand(r3, 0));
+ __ add(r9, r1, Operand(r6, LSL, 1));
+ __ str(r4, FieldMemOperand(r9, FixedArray::kHeaderSize));
+ __ add(r6, r6, Operand(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ cmp(r6, Operand(r5));
+ __ b(lt, &arguments_loop);
+
+ // Return.
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ // r0 = address of new object (tagged)
+ // r5 = argument count (tagged)
+ __ bind(&runtime);
+ __ Push(r1, r3, r5);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r1);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make r2 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ mov(r2, fp);
+ __ b(&loop_entry);
+ __ bind(&loop);
+ __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ ldr(ip, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
+ __ cmp(ip, r1);
+ __ b(ne, &loop);
+ }
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ ldr(r3, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(ip, MemOperand(r3, StandardFrameConstants::kContextOffset));
+ __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(eq, &arguments_adaptor);
+ {
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r0, FieldMemOperand(
+ r1, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
+ __ add(r2, r2,
+ Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+ }
+ __ b(&arguments_done);
+ __ bind(&arguments_adaptor);
+ {
+ __ ldr(r0, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ add(r2, r3, Operand(r0, LSL, kPointerSizeLog2 - 1));
+ __ add(r2, r2,
+ Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+ }
+ __ bind(&arguments_done);
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- r0 : number of rest parameters (tagged)
+ // -- r2 : pointer to first rest parameters
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate space for the strict arguments object plus the backing store.
+ Label allocate, done_allocate;
+ __ mov(r1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - 1));
+ __ Allocate(r1, r3, r4, r5, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Set up the elements array in r3.
+ __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
+ __ str(r1, FieldMemOperand(r3, FixedArray::kMapOffset));
+ __ str(r0, FieldMemOperand(r3, FixedArray::kLengthOffset));
+ __ add(r4, r3, Operand(FixedArray::kHeaderSize));
+ {
+ Label loop, done_loop;
+ __ add(r1, r4, Operand(r0, LSL, kPointerSizeLog2 - 1));
+ __ bind(&loop);
+ __ cmp(r4, r1);
+ __ b(eq, &done_loop);
+ __ ldr(ip, MemOperand(r2, 1 * kPointerSize, NegPostIndex));
+ __ str(ip, FieldMemOperand(r4, 0 * kPointerSize));
+ __ add(r4, r4, Operand(1 * kPointerSize));
+ __ b(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Set up the strict arguments object in r4.
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r1);
+ __ str(r1, FieldMemOperand(r4, JSStrictArgumentsObject::kMapOffset));
+ __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
+ __ str(r1, FieldMemOperand(r4, JSStrictArgumentsObject::kPropertiesOffset));
+ __ str(r3, FieldMemOperand(r4, JSStrictArgumentsObject::kElementsOffset));
+ __ str(r0, FieldMemOperand(r4, JSStrictArgumentsObject::kLengthOffset));
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ __ mov(r0, r4);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r1);
+ __ Push(r0, r2, r1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ mov(r3, r0);
+ __ Pop(r0, r2);
+ }
+ __ b(&done_allocate);
+}
+
+
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context = cp;
Register result = r0;
@@ -5206,11 +5427,10 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ jmp(&leave_exit_frame);
}
-
static void CallApiFunctionStubHelper(MacroAssembler* masm,
const ParameterCount& argc,
bool return_first_arg,
- bool call_data_undefined) {
+ bool call_data_undefined, bool is_lazy) {
// ----------- S t a t e -------------
// -- r0 : callee
// -- r4 : call_data
@@ -5246,8 +5466,10 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// context save
__ push(context);
- // load context from callee
- __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ if (!is_lazy) {
+ // load context from callee
+ __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ }
// callee
__ push(callee);
@@ -5339,7 +5561,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
bool call_data_undefined = this->call_data_undefined();
CallApiFunctionStubHelper(masm, ParameterCount(r3), false,
- call_data_undefined);
+ call_data_undefined, false);
}
@@ -5347,41 +5569,47 @@ void CallApiAccessorStub::Generate(MacroAssembler* masm) {
bool is_store = this->is_store();
int argc = this->argc();
bool call_data_undefined = this->call_data_undefined();
+ bool is_lazy = this->is_lazy();
CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined);
+ call_data_undefined, is_lazy);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- sp[0] : name
+ // -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
// -- ...
- // -- r2 : api_function_address
+ // -- r2 : api_function_address
// -----------------------------------
Register api_function_address = ApiGetterDescriptor::function_address();
DCHECK(api_function_address.is(r2));
- __ mov(r0, sp); // r0 = Handle<Name>
- __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = PCA
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
+ __ mov(r0, sp); // r0 = Handle<Name>
+ __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = v8::PCI::args_
const int kApiStackSpace = 1;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // Create PropertyAccessorInfo instance on the stack above the exit frame with
- // r1 (internal::Object** args_) as the data.
+ // Create a v8::PropertyCallbackInfo object on the stack and initialize
+ // its args_ field.
__ str(r1, MemOperand(sp, 1 * kPointerSize));
- __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
-
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, NULL,
- MemOperand(fp, 6 * kPointerSize), NULL);
+ kStackUnwindSpace, NULL, return_value_operand, NULL);
}
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index c34acd6a5b..2dee363bbf 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -108,23 +108,23 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ b(lt, &size_less_than_8);
__ cmp(chars, Operand(32));
__ b(lt, &less_32);
- if (CpuFeatures::cache_line_size() == 32) {
+ if (CpuFeatures::dcache_line_size() == 32) {
__ pld(MemOperand(src, 32));
}
__ cmp(chars, Operand(64));
__ b(lt, &less_64);
__ pld(MemOperand(src, 64));
- if (CpuFeatures::cache_line_size() == 32) {
+ if (CpuFeatures::dcache_line_size() == 32) {
__ pld(MemOperand(src, 96));
}
__ cmp(chars, Operand(128));
__ b(lt, &less_128);
__ pld(MemOperand(src, 128));
- if (CpuFeatures::cache_line_size() == 32) {
+ if (CpuFeatures::dcache_line_size() == 32) {
__ pld(MemOperand(src, 160));
}
__ pld(MemOperand(src, 192));
- if (CpuFeatures::cache_line_size() == 32) {
+ if (CpuFeatures::dcache_line_size() == 32) {
__ pld(MemOperand(src, 224));
}
__ cmp(chars, Operand(256));
@@ -134,7 +134,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ bind(&loop);
__ pld(MemOperand(src, 256));
__ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
- if (CpuFeatures::cache_line_size() == 32) {
+ if (CpuFeatures::dcache_line_size() == 32) {
__ pld(MemOperand(src, 256));
}
__ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index efc060a82d..b9d4788eb5 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -219,6 +219,22 @@ enum {
};
+enum BarrierOption {
+ OSHLD = 0x1,
+ OSHST = 0x2,
+ OSH = 0x3,
+ NSHLD = 0x5,
+ NSHST = 0x6,
+ NSH = 0x7,
+ ISHLD = 0x9,
+ ISHST = 0xa,
+ ISH = 0xb,
+ LD = 0xd,
+ ST = 0xe,
+ SY = 0xf,
+};
+
+
// -----------------------------------------------------------------------------
// Addressing modes and instruction variants.
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index e4fc2138fc..3e9fac7d12 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -85,27 +85,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers fp and sp are set to the correct values though.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
- }
-}
-
-
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
@@ -124,8 +103,7 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
// There is no dynamic alignment padding on ARM in the input frame.
return false;
}
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 66b7f45849..9258703fbc 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1188,7 +1188,13 @@ void Decoder::DecodeType3(Instruction* instr) {
}
}
} else {
- UNREACHABLE();
+ // PU == 0b01, BW == 0b11, Bits(9, 6) != 0b0001
+ if ((instr->Bits(20, 16) == 0x1f) &&
+ (instr->Bits(11, 4) == 0xf3)) {
+ Format(instr, "rbit'cond 'rd, 'rm");
+ } else {
+ UNREACHABLE();
+ }
}
break;
}
@@ -1689,6 +1695,12 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
}
+static const char* const barrier_option_names[] = {
+ "invalid", "oshld", "oshst", "osh", "invalid", "nshld", "nshst", "nsh",
+ "invalid", "ishld", "ishst", "ish", "invalid", "ld", "st", "sy",
+};
+
+
void Decoder::DecodeSpecialCondition(Instruction* instr) {
switch (instr->SpecialValue()) {
case 5:
@@ -1765,6 +1777,24 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"pld [r%d, #+%d]", Rn, offset);
}
+ } else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
+ int option = instr->Bits(3, 0);
+ switch (instr->Bits(7, 4)) {
+ case 4:
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "dsb %s", barrier_option_names[option]);
+ break;
+ case 5:
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "dmb %s", barrier_option_names[option]);
+ break;
+ case 6:
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "isb %s", barrier_option_names[option]);
+ break;
+ default:
+ Unknown(instr);
+ }
} else {
Unknown(instr);
}
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index b7fad7bee6..1f55c0bb4b 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -56,20 +56,6 @@ const Register StringCompareDescriptor::LeftRegister() { return r1; }
const Register StringCompareDescriptor::RightRegister() { return r0; }
-const Register ArgumentsAccessReadDescriptor::index() { return r1; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return r0; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return r1; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return r2; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return r3; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return r2; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return r3; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return r4; }
-
-
const Register ApiGetterDescriptor::function_address() { return r2; }
@@ -98,6 +84,32 @@ void FastNewContextDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r1, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -115,6 +127,10 @@ const Register ToStringDescriptor::ReceiverRegister() { return r0; }
// static
+const Register ToNameDescriptor::ReceiverRegister() { return r0; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return r0; }
@@ -167,13 +183,6 @@ void CreateWeakCellDescriptor::InitializePlatformSpecific(
}
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
@@ -432,6 +441,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+ kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+ kInterpreterDispatchTableRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -443,7 +460,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -455,7 +471,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 57fa3f5804..80aef0c4ff 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -185,6 +185,9 @@ void MacroAssembler::Drop(int count, Condition cond) {
}
}
+void MacroAssembler::Drop(Register count, Condition cond) {
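+ // count is a number of pointer-sized stack slots; scale it to bytes
+ // before bumping sp.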
+ add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond);
+}
void MacroAssembler::Ret(int drop, Condition cond) {
Drop(drop, cond);
@@ -449,9 +452,9 @@ void MacroAssembler::InNewSpace(Register object,
Condition cond,
Label* branch) {
DCHECK(cond == eq || cond == ne);
- and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
- cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
- b(cond, branch);
+ const int mask =
+ (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+ CheckPageFlag(object, scratch, mask, cond, branch);
}
@@ -648,6 +651,69 @@ void MacroAssembler::RecordWrite(
}
}
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+ Register code_entry,
+ Register scratch) {
+ const int offset = JSFunction::kCodeEntryOffset;
+
+ // Since a code entry (value) is always in old space, we don't need to update
+ // remembered set. If incremental marking is off, there is nothing for us to
+ // do.
+ if (!FLAG_incremental_marking) return;
+
+ DCHECK(js_function.is(r1));
+ DCHECK(code_entry.is(r4));
+ DCHECK(scratch.is(r5));
+ AssertNotSmi(js_function);
+
+ if (emit_debug_code()) {
+ add(scratch, js_function, Operand(offset - kHeapObjectTag));
+ ldr(ip, MemOperand(scratch));
+ cmp(ip, code_entry);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ CheckPageFlag(code_entry, scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+ CheckPageFlag(js_function, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+ const Register dst = scratch;
+ add(dst, js_function, Operand(offset - kHeapObjectTag));
+
+ push(code_entry);
+
+ // Save caller-saved registers, which includes js_function.
+ DCHECK((kCallerSaved & js_function.bit()) != 0);
+ DCHECK_EQ(kCallerSaved & code_entry.bit(), 0);
+ stm(db_w, sp, (kCallerSaved | lr.bit()));
+
+ int argument_count = 3;
+ PrepareCallCFunction(argument_count, code_entry);
+
+ mov(r0, js_function);
+ mov(r1, dst);
+ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(this);
+ CallCFunction(
+ ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate()),
+ argument_count);
+ }
+
+ // Restore caller-saved registers (including js_function and code_entry).
+ ldm(ia_w, sp, (kCallerSaved | lr.bit()));
+
+ pop(code_entry);
+
+ bind(&done);
+}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address,
@@ -1330,7 +1396,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -2506,18 +2572,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- // Fake a parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- LoadNativeContextSlot(native_context_index, r1);
- InvokeFunctionCode(r1, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -2613,9 +2667,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
}
// will not return here
if (is_const_pool_blocked()) {
@@ -2822,6 +2876,20 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
+void MacroAssembler::AssertReceiver(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsASmiAndNotAReceiver);
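+ // CompareObjectType writes to its map and type registers; object is
+ // passed for all three here, so preserve it across the check.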
+ push(object);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
+ pop(object);
+ Check(hs, kOperandIsNotAReceiver);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -3259,6 +3327,7 @@ void MacroAssembler::CheckPageFlag(
int mask,
Condition cc,
Label* condition_met) {
+ DCHECK(cc == eq || cc == ne);
Bfc(scratch, object, 0, kPageSizeBits);
ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
tst(scratch, Operand(mask));
@@ -3396,7 +3465,8 @@ void MacroAssembler::LoadAccessor(Register dst, Register holder,
}
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+ Register null_value = r5;
Register empty_fixed_array_value = r6;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Label next, start;
@@ -3410,6 +3480,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
cmp(r3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
b(eq, call_runtime);
+ LoadRoot(null_value, Heap::kNullValueRootIndex);
jmp(&start);
bind(&next);
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 26811b988c..468f4b521a 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -16,6 +16,7 @@ namespace internal {
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_r0};
const Register kReturnRegister1 = {Register::kCode_r1};
+const Register kReturnRegister2 = {Register::kCode_r2};
const Register kJSFunctionRegister = {Register::kCode_r1};
const Register kContextRegister = {Register::kCode_r7};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
@@ -127,6 +128,7 @@ class MacroAssembler: public Assembler {
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count, Condition cond = al);
+ void Drop(Register count, Condition cond = al);
void Ret(int drop, Condition cond = al);
@@ -218,7 +220,7 @@ class MacroAssembler: public Assembler {
void JumpIfNotInNewSpace(Register object,
Register scratch,
Label* branch) {
- InNewSpace(object, scratch, ne, branch);
+ InNewSpace(object, scratch, eq, branch);
}
// Check if object is in new space. Jumps if the object is in new space.
@@ -226,7 +228,7 @@ class MacroAssembler: public Assembler {
void JumpIfInNewSpace(Register object,
Register scratch,
Label* branch) {
- InNewSpace(object, scratch, eq, branch);
+ InNewSpace(object, scratch, ne, branch);
}
// Check if an object has a given incremental marking color.
@@ -288,6 +290,11 @@ class MacroAssembler: public Assembler {
pointers_to_here_check_for_value);
}
+ // Notify the garbage collector that we wrote a code entry into a
+ // JSFunction. Only scratch is clobbered by the operation.
+ void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+ Register scratch);
+
void RecordWriteForMap(
Register object,
Register map,
@@ -315,7 +322,6 @@ class MacroAssembler: public Assembler {
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
- DCHECK(!src1.is(src2));
if (src1.code() > src2.code()) {
stm(db_w, sp, src1.bit() | src2.bit(), cond);
} else {
@@ -326,7 +332,6 @@ class MacroAssembler: public Assembler {
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Condition cond = al) {
- DCHECK(!AreAliased(src1, src2, src3));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@@ -346,7 +351,6 @@ class MacroAssembler: public Assembler {
Register src3,
Register src4,
Condition cond = al) {
- DCHECK(!AreAliased(src1, src2, src3, src4));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
@@ -371,7 +375,6 @@ class MacroAssembler: public Assembler {
// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
Register src5, Condition cond = al) {
- DCHECK(!AreAliased(src1, src2, src3, src4, src5));
if (src1.code() > src2.code()) {
if (src2.code() > src3.code()) {
if (src3.code() > src4.code()) {
@@ -1143,10 +1146,6 @@ class MacroAssembler: public Assembler {
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
- // Invoke specified builtin JavaScript function.
- void InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
@@ -1298,6 +1297,9 @@ class MacroAssembler: public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+ void AssertReceiver(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1407,7 +1409,7 @@ class MacroAssembler: public Assembler {
// Expects object in r0 and returns map with validated enum cache
// in r0. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Register null_value, Label* call_runtime);
+ void CheckEnumCache(Label* call_runtime);
// AllocationMemento support. Arrays may have an associated
// AllocationMemento object that can be checked for in order to pretransition
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 6e193885b0..4630b94b63 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -14,6 +14,7 @@
#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/disasm.h"
+#include "src/runtime/runtime-utils.h"
#if defined(USE_SIMULATOR)
@@ -391,7 +392,8 @@ void ArmDebugger::Debug() {
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
Heap* current_heap = sim_->isolate_->heap();
- if (((value & 1) == 0) || current_heap->Contains(obj)) {
+ if (((value & 1) == 0) ||
+ current_heap->ContainsSlow(obj->address())) {
PrintF(" (");
if ((value & 1) == 0) {
PrintF("smi %d", value / 2);
@@ -1717,6 +1719,10 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg4,
int32_t arg5);
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int32_t arg0, int32_t arg1,
+ int32_t arg2, int32_t arg3,
+ int32_t arg4);
+
// These prototypes handle the four types of FP calls.
typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
@@ -1900,9 +1906,36 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
external);
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+ } else if (redirection->type() ==
+ ExternalReference::BUILTIN_CALL_TRIPLE) {
+ // builtin call returning ObjectTriple.
+ SimulatorRuntimeTripleCall target =
+ reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF(
+ "Call to host triple returning runtime function %p "
+ "args %08x, %08x, %08x, %08x, %08x",
+ FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08x\n", get_register(sp));
+ }
+ PrintF("\n");
+ }
+ CHECK(stack_aligned);
+ // arg0 is a hidden argument pointing to the return location, so don't
+ // pass it to the target function.
+ ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+ }
+ // Return is passed back in address pointed to by hidden first argument.
+ ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
+ *sim_result = result;
+ set_register(r0, arg0);
} else {
// builtin call.
- DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -2887,7 +2920,15 @@ void Simulator::DecodeType3(Instruction* instr) {
}
}
} else {
- UNIMPLEMENTED();
+ // PU == 0b01, BW == 0b11, Bits(9, 6) != 0b0001
+ if ((instr->Bits(20, 16) == 0x1f) &&
+ (instr->Bits(11, 4) == 0xf3)) {
+ // Rbit.
+ uint32_t rm_val = get_register(instr->RmValue());
+ set_register(rd, base::bits::ReverseBits(rm_val));
+ } else {
+ UNIMPLEMENTED();
+ }
}
break;
}
@@ -3871,6 +3912,9 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
case 0xB:
if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
// pld: ignore instruction.
+ } else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
+ // dsb, dmb, isb: ignore instruction for now.
+ // TODO(binji): implement
} else {
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index d7769791ef..aeca563c37 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -731,8 +731,8 @@ void RelocInfo::set_target_object(Object* target,
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target));
}
}
@@ -853,24 +853,6 @@ void RelocInfo::WipeOut() {
}
-bool RelocInfo::IsPatchedReturnSequence() {
- // The sequence must be:
- // ldr ip0, [pc, #offset]
- // blr ip0
- // See arm64/debug-arm64.cc DebugCodegen::PatchDebugBreakSlot
- Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
- Instruction* i2 = i1->following();
- return i1->IsLdrLiteralX() && (i1->Rt() == kIp0Code) &&
- i2->IsBranchAndLinkToRegister() && (i2->Rn() == kIp0Code);
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
- return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
-}
-
-
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 5854704b68..47786eb710 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -369,6 +369,8 @@ bool AreSameSizeAndType(const CPURegister& reg1,
typedef FPRegister DoubleRegister;
+// TODO(arm64) Define SIMD registers.
+typedef FPRegister Simd128Register;
// -----------------------------------------------------------------------------
// Lists of registers.
@@ -925,7 +927,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const SourcePosition position);
+ void RecordDeoptReason(const int reason, int raw_position);
int buffer_space() const;
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
index b6bae4ad0e..11f66a4ef4 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -138,6 +138,97 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- lr : return address
+ // -- sp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- sp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_MathMaxMin");
+
+ Heap::RootListIndex const root_index =
+ (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+ : Heap::kMinusInfinityValueRootIndex;
+
+ // Load the accumulator with the default return value (either -Infinity or
+ // +Infinity), with the tagged value in x1 and the double value in d1.
+ __ LoadRoot(x1, root_index);
+ __ Ldr(d1, FieldMemOperand(x1, HeapNumber::kValueOffset));
+
+ // Remember how many slots to drop (including the receiver).
+ __ Add(x4, x0, 1);
+
+ Label done_loop, loop;
+ __ Bind(&loop);
+ {
+ // Check if all parameters done.
+ __ Subs(x0, x0, 1);
+ __ B(lt, &done_loop);
+
+ // Load the next parameter tagged value into x2.
+ __ Peek(x2, Operand(x0, LSL, kPointerSizeLog2));
+
+ // Load the double value of the parameter into d2, converting the
+ // parameter to a number with the ToNumberStub first if necessary.
+ Label convert_smi, convert_number, done_convert;
+ __ JumpIfSmi(x2, &convert_smi);
+ __ JumpIfHeapNumber(x2, &convert_number);
+ {
+ // Parameter is not a Number, use the ToNumberStub to convert it.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(x0);
+ __ SmiTag(x4);
+ __ Push(x0, x1, x4);
+ __ Mov(x0, x2);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Mov(x2, x0);
+ __ Pop(x4, x1, x0);
+ {
+ // Restore the double accumulator value (d1).
+ Label done_restore;
+ __ SmiUntagToDouble(d1, x1, kSpeculativeUntag);
+ __ JumpIfSmi(x1, &done_restore);
+ __ Ldr(d1, FieldMemOperand(x1, HeapNumber::kValueOffset));
+ __ Bind(&done_restore);
+ }
+ __ SmiUntag(x4);
+ __ SmiUntag(x0);
+ }
+ __ AssertNumber(x2);
+ __ JumpIfSmi(x2, &convert_smi);
+
+ __ Bind(&convert_number);
+ __ Ldr(d2, FieldMemOperand(x2, HeapNumber::kValueOffset));
+ __ B(&done_convert);
+
+ __ Bind(&convert_smi);
+ __ SmiUntagToDouble(d2, x2);
+ __ Bind(&done_convert);
+
+ // We can use a single fmin/fmax for the operation itself, but we then need
+ // to work out which HeapNumber (or smi) the result came from.
+ __ Fmov(x11, d1);
+ if (kind == MathMaxMinKind::kMin) {
+ __ Fmin(d1, d1, d2);
+ } else {
+ DCHECK(kind == MathMaxMinKind::kMax);
+ __ Fmax(d1, d1, d2);
+ }
+ __ Fmov(x10, d1);
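+ // Compare the raw bit patterns: if fmin/fmax left d1 unchanged, the result
+ // came from the accumulator (keep x1); otherwise select x2.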
+ __ Cmp(x10, x11);
+ __ Csel(x1, x1, x2, eq);
+ __ B(&loop);
+ }
+
+ __ Bind(&done_loop);
+ __ Mov(x0, x1);
+ __ Drop(x4);
+ __ Ret();
+}
+
+// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@@ -229,8 +320,9 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x2, x1, x3); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(x2); // first argument
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(x2);
}
__ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
@@ -356,48 +448,49 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x2, x1, x3); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(x2); // first argument
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(x2);
}
__ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
__ Ret();
}
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
+ __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x2);
+}
-static void CallRuntimePassFunction(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
+ // -- x0 : argument count (preserved for callee)
// -- x1 : target function (preserved for callee)
// -- x3 : new target (preserved for callee)
// -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the target function and the new target.
+ // Push another copy as a parameter to the runtime call.
+ __ SmiTag(x0);
+ __ Push(x0, x1, x3, x1);
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the target function and the new target.
- // Push another copy as a parameter to the runtime call.
- __ Push(x1, x3, x1);
-
- __ CallRuntime(function_id, 1);
-
- // Restore target function and new target.
- __ Pop(x3, x1);
-}
+ __ CallRuntime(function_id, 1);
+ __ Move(x2, x0);
+ // Restore target function and new target.
+ __ Pop(x3, x1, x0);
+ __ SmiUntag(x0);
+ }
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset));
__ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
__ Br(x2);
}
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
- __ Add(x0, x0, Code::kHeaderSize - kHeapObjectTag);
- __ Br(x0);
-}
-
-
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However, not
@@ -408,8 +501,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
__ B(hs, &ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ Bind(&ok);
GenerateTailCallToSharedCode(masm);
@@ -418,7 +510,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool create_implicit_receiver) {
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
@@ -448,148 +541,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(allocation_site, argc);
if (create_implicit_receiver) {
- // sp[0]: new.target
- // sp[1]: Constructor function.
- // sp[2]: number of arguments (smi-tagged)
- // sp[3]: allocation site
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- // Verify that the new target is a JSFunction.
- __ JumpIfNotObjectType(new_target, x10, x11, JS_FUNCTION_TYPE,
- &rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- Register init_map = x2;
- __ Ldr(init_map,
- FieldMemOperand(new_target,
- JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(init_map, &rt_call);
- __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call);
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ Ldr(x10,
- FieldMemOperand(init_map, Map::kConstructorOrBackPointerOffset));
- __ Cmp(constructor, x10);
- __ B(ne, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial
- // map's instance type would be JS_FUNCTION_TYPE.
- __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE);
- __ B(eq, &rt_call);
-
- // Now allocate the JSObject on the heap.
- Register obj_size = x10;
- Register new_obj = x4;
- Register next_obj = obj_size; // May overlap.
- __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
- __ Allocate(obj_size, new_obj, next_obj, x11, &rt_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // NB. the object pointer is not tagged, so MemOperand is used.
- Register write_address = x5;
- Register empty = x7;
- __ Mov(write_address, new_obj);
- __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
- STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
- __ Str(init_map, MemOperand(write_address, kPointerSize, PostIndex));
- STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
- STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
- __ Stp(empty, empty,
- MemOperand(write_address, 2 * kPointerSize, PostIndex));
- STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ Add(new_obj, new_obj, kHeapObjectTag);
-
- // Fill all of the in-object properties with the appropriate filler.
- Register filler = x7;
- __ LoadRoot(filler, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- Register constructon_count = x14;
- MemOperand bit_field3 =
- FieldMemOperand(init_map, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ Ldr(x11, bit_field3);
- __ DecodeField<Map::ConstructionCounter>(constructon_count, x11);
- __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
- __ B(lt, &no_inobject_slack_tracking);
- // Decrease generous allocation count.
- __ Subs(x11, x11, Operand(1 << Map::ConstructionCounter::kShift));
- __ Str(x11, bit_field3);
-
- // Allocate object with a slack.
- Register unused_props = x11;
- __ Ldr(unused_props,
- FieldMemOperand(init_map, Map::kInstanceAttributesOffset));
- __ Ubfx(unused_props, unused_props,
- Map::kUnusedPropertyFieldsByte * kBitsPerByte, kBitsPerByte);
-
- Register end_of_pre_allocated = x11;
- __ Sub(end_of_pre_allocated, next_obj,
- Operand(unused_props, LSL, kPointerSizeLog2));
- unused_props = NoReg;
-
- if (FLAG_debug_code) {
- __ Cmp(write_address, end_of_pre_allocated);
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
- }
-
- // Fill the pre-allocated fields with undef.
- __ InitializeFieldsWithFiller(write_address, end_of_pre_allocated,
- filler);
-
- // Fill the remaining fields with one pointer filler map.
- __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(write_address, next_obj, filler);
-
- __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
- __ B(ne, &allocated);
-
- // Push the constructor, new_target and the object to the stack,
- // and then the initial map as an argument to the runtime call.
- __ Push(constructor, new_target, new_obj, init_map);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(new_obj, new_target, constructor);
-
- // Continue with JSObject being successfully allocated.
- __ B(&allocated);
-
- __ bind(&no_inobject_slack_tracking);
- }
-
- __ InitializeFieldsWithFiller(write_address, next_obj, filler);
-
- // Continue with JSObject being successfully allocated.
- __ B(&allocated);
- }
-
- // Allocate the new receiver object using the runtime call.
- // x1: constructor function
- // x3: new target
- __ Bind(&rt_call);
-
- // Push the constructor and new_target twice, second pair as arguments
- // to the runtime call.
- __ Push(constructor, new_target, constructor, new_target);
- __ CallRuntime(Runtime::kNewObject);
+ // Allocate the new receiver object.
+ __ Push(constructor, new_target);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Mov(x4, x0);
__ Pop(new_target, constructor);
- // Receiver for constructor call allocated.
- // x1: constructor function
- // x3: new target
- // x4: JSObject
- __ Bind(&allocated);
+ // ----------- S t a t e -------------
+ // -- x1: constructor function
+ // -- x3: new target
+ // -- x4: newly allocated object
+ // -----------------------------------
// Reload the number of arguments from the stack.
// Set it up in x0 for the function call below.
@@ -697,6 +660,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Leave construct frame.
}
+ // ES6 9.2.2. Step 13+
+ // Check that the result is not a Smi, indicating that the constructor result
+ // from a derived class is neither undefined nor an Object.
+ if (check_derived_construct) {
+ Label dont_throw;
+ __ JumpIfNotSmi(x0, &dont_throw);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+ }
+ __ Bind(&dont_throw);
+ }
+
__ DropBySMI(x1);
__ Drop(1);
if (create_implicit_receiver) {
@@ -707,17 +683,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, true);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, true);
}
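For reference, the four entry points above configure the shared helper with three flags. A minimal C++ sketch of the mapping, inferred from the call sites in this hunk (the struct and parameter names are illustrative, not taken from the source):

    struct ConstructStubConfig {
      bool is_api_function;
      bool create_implicit_receiver;
      bool check_derived_construct;
    };

    // Inferred from the four call sites above:
    constexpr ConstructStubConfig kGeneric{false, true, false};
    constexpr ConstructStubConfig kApi{true, false, false};
    constexpr ConstructStubConfig kBuiltins{false, false, false};
    constexpr ConstructStubConfig kBuiltinsForDerived{false, false, true};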
@@ -877,10 +859,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// - jssp: stack pointer.
// - lr: return address.
//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-arm64.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -888,17 +868,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ Push(lr, fp, cp, x1);
__ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
- __ Push(x3);
-
- // Push zero for bytecode array offset.
- __ Mov(x0, Operand(0));
- __ Push(x0);
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ Register debug_info = kInterpreterBytecodeArrayRegister;
+ Label load_debug_bytecode_array, bytecode_array_loaded;
+ DCHECK(!debug_info.is(x0));
+ __ Ldr(debug_info, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
+ __ Cmp(debug_info, Operand(DebugInfo::uninitialized()));
+ __ B(ne, &load_debug_bytecode_array);
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
+ __ Bind(&bytecode_array_loaded);
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -909,6 +891,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Push new.target, bytecode array and zero for bytecode array offset.
+ __ Mov(x0, Operand(0));
+ __ Push(x3, kInterpreterBytecodeArrayRegister, x0);
+
// Allocate the local and temporary register file on the stack.
{
// Load frame size from the BytecodeArray object.
@@ -938,22 +924,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
- // - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Allow simulator stop operations if FLAG_stop_at is set.
// - Code aging of the BytecodeArray object.
- // Perform stack guard check.
- {
- Label ok;
- __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
- __ B(hs, &ok);
- __ Push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard);
- __ Pop(kInterpreterBytecodeArrayRegister);
- __ Bind(&ok);
- }
-
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
@@ -961,10 +934,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ Mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ Add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Mov(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Dispatch to the first bytecode handler for the function.
__ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
@@ -975,6 +947,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// and header removal.
__ Add(ip0, ip0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip0);
+
+ // Even though the first bytecode handler was invoked with a call, control
+ // never returns here.
+ __ Abort(kUnexpectedReturnFromBytecodeHandler);
+
+ // Load debug copy of the bytecode array.
+ __ Bind(&load_debug_bytecode_array);
+ __ Ldr(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ __ B(&bytecode_array_loaded);
}
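The tail of the trampoline is an indirect-threaded dispatch: the first bytecode indexes the dispatch table and the handler is called through ip0. A minimal C++ sketch of that step, with illustrative types (not V8's):

    #include <cstddef>
    #include <cstdint>

    using BytecodeHandler = void (*)();

    void DispatchFirstBytecode(const uint8_t* bytecode_array, size_t offset,
                               BytecodeHandler* dispatch_table) {
      uint8_t bytecode = bytecode_array[offset];           // the Ldrb above
      BytecodeHandler handler = dispatch_table[bytecode];  // table lookup
      handler();  // the Call through ip0; control never returns here
    }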
@@ -998,47 +979,24 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
}
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
-
- // Pass the deoptimization type to the runtime system.
- __ Mov(x1, Operand(Smi::FromInt(static_cast<int>(type))));
- __ Push(x1);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
-
- __ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
- // Tear down internal frame.
- }
-
- // Drop state (we don't use this for interpreter deopts).
- __ Drop(1);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Initialize register file register and dispatch table register.
__ Add(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ Add(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Mov(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Get the context from the frame.
- // TODO(rmcilroy): Update interpreter frame to expect current context at the
- // context slot instead of the function context.
__ Ldr(kContextRegister,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
- __ Ldr(x1,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
- __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
+ __ Ldr(
+ kInterpreterBytecodeArrayRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1066,6 +1024,29 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
}
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Pass the deoptimization type to the runtime system.
+ __ Mov(x1, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ Push(x1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use these for interpreter deopts) and pop the
+ // accumulator value into the accumulator register.
+ __ Drop(1);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ // Enter the bytecode dispatch.
+ Generate_EnterBytecodeDispatch(masm);
+}
+
+
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
@@ -1080,22 +1061,30 @@ void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the address of the interpreter entry trampoline as a return address.
+ // This simulates the initial call to bytecode handlers in the interpreter
+ // entry trampoline. The return will never actually be taken, but our stack
+ // walker uses this address to determine whether a frame is interpreted.
+ __ LoadObject(lr, masm->isolate()->builtins()->InterpreterEntryTrampoline());
+
+ Generate_EnterBytecodeDispatch(masm);
+}
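A sketch of how a stack walker can exploit the lr value planted above; this is an assumption about the walker's logic, for illustration only:

    #include <cstdint>

    // A frame whose return address falls inside the InterpreterEntryTrampoline
    // code object is classified as an interpreted frame.
    bool LooksInterpreted(uintptr_t return_address, uintptr_t trampoline_start,
                          uintptr_t trampoline_end) {
      return return_address >= trampoline_start &&
             return_address < trampoline_end;
    }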
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
@@ -1321,14 +1310,11 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
// Load the next prototype.
__ Bind(&next_prototype);
- __ Ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
- // End if the prototype is null or not hidden.
- __ CompareRoot(receiver, Heap::kNullValueRootIndex);
- __ B(eq, receiver_check_failed);
- __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Ldr(x16, FieldMemOperand(map, Map::kBitField3Offset));
- __ Tst(x16, Operand(Map::IsHiddenPrototype::kMask));
+ __ Tst(x16, Operand(Map::HasHiddenPrototype::kMask));
__ B(eq, receiver_check_failed);
+ __ Ldr(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Iterate.
__ B(&prototype_loop_start);
@@ -1868,10 +1854,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Try to create the list from an arguments object.
__ Bind(&create_arguments);
- __ Ldrsw(len, UntagSmiFieldMemOperand(
- arguments_list,
- JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ Ldrsw(len, UntagSmiFieldMemOperand(arguments_list,
+ JSArgumentsObject::kLengthOffset));
__ Ldr(x10, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
__ Ldrsw(x11, UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset));
__ CompareAndBranch(len, x11, ne, &create_runtime);
@@ -1953,10 +1937,136 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it
+// (if present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// | ...
+// | g()'s arg M
+// | ...
+// | g()'s arg 1
+// | g()'s receiver arg
+// | g()'s caller pc
+// ------- g()'s frame: -------
+// | g()'s caller fp <- fp
+// | g()'s context
+// | function pointer: g
+// | -------------------------
+// | ...
+// | ...
+// | f()'s arg N
+// | ...
+// | f()'s arg 1
+// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Comment cmnt(masm, "[ PrepareForTailCall");
+
+ // Prepare for tail call only if the debugger is not active.
+ Label done;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(masm->isolate());
+ __ Mov(scratch1, Operand(debug_is_active));
+ __ Ldrb(scratch1, MemOperand(scratch1));
+ __ Cmp(scratch1, Operand(0));
+ __ B(ne, &done);
+
+ // Drop possible interpreter handler/stub frame.
+ {
+ Label no_interpreter_frame;
+ __ Ldr(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
+ __ B(ne, &no_interpreter_frame);
+ __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&no_interpreter_frame);
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(scratch3,
+ MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ B(ne, &no_arguments_adaptor);
+
+ // Drop arguments adaptor frame and load arguments count.
+ __ mov(fp, scratch2);
+ __ Ldr(scratch1,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(scratch1);
+ __ B(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load the caller's formal parameter count.
+ __ Ldr(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(scratch1,
+ FieldMemOperand(scratch1,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ __ bind(&formal_parameter_count_loaded);
+
+ // Calculate the end of the destination area where we will put the
+ // arguments after we drop the current frame. We add kPointerSize to count
+ // the receiver argument, which is not included in the formal parameter
+ // count.
+ Register dst_reg = scratch2;
+ __ add(dst_reg, fp, Operand(scratch1, LSL, kPointerSizeLog2));
+ __ add(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = scratch1;
+ __ add(src_reg, jssp, Operand(args_reg, LSL, kPointerSizeLog2));
+ // Count receiver argument as well (not included in args_reg).
+ __ add(src_reg, src_reg, Operand(kPointerSize));
+
+ if (FLAG_debug_code) {
+ __ Cmp(src_reg, dst_reg);
+ __ Check(lo, kStackAccessBelowStackPointer);
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Now copy the callee's arguments to the caller frame, going backwards to
+ // avoid corrupting them (the source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch3;
+ Label loop, entry;
+ __ B(&entry);
+ __ bind(&loop);
+ __ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
+ __ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
+ __ bind(&entry);
+ __ Cmp(jssp, src_reg);
+ __ B(ne, &loop);
+
+ // Leave current frame.
+ __ Mov(jssp, dst_reg);
+ __ SetStackPointer(jssp);
+ __ AssertStackConsistency();
+
+ __ bind(&done);
+}
+} // namespace
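The copy loop above has overlapping source and destination regions (the destination sits higher in memory), so it copies from the highest word downward, memmove-style. A minimal C++ sketch of the same traversal:

    #include <cstdint>

    // src_end/dst_end point one word past the last word to copy, mirroring
    // the pre-decrementing Ldr/Str pair in the stub; src_begin plays jssp.
    void CopyArgsDownwards(intptr_t* src_end, intptr_t* src_begin,
                           intptr_t* dst_end) {
      while (src_end != src_begin) {
        *--dst_end = *--src_end;
      }
      // dst_end is now where the stub points jssp before returning.
    }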
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode) {
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
ASM_LOCATION("Builtins::Generate_CallFunction");
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
@@ -2044,6 +2154,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, x0, x3, x4, x5);
+ }
+
__ Ldrsw(
x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(x0);
@@ -2140,13 +2254,18 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(x1);
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, x0, x3, x4, x5);
+ }
+
// Patch the receiver to [[BoundThis]].
__ Ldr(x10, FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
__ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
@@ -2165,7 +2284,8 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the target to call (can be any Object).
@@ -2175,14 +2295,24 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ JumpIfSmi(x1, &non_callable);
__ Bind(&non_smi);
__ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
RelocInfo::CODE_TARGET, eq);
__ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
RelocInfo::CODE_TARGET, eq);
+
+ // Check if target has a [[Call]] internal method.
+ __ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(x4, 1 << Map::kIsCallable, &non_callable);
+
__ Cmp(x5, JS_PROXY_TYPE);
__ B(ne, &non_function);
+ // 0. Prepare for tail call if necessary.
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, x0, x3, x4, x5);
+ }
+
// 1. Runtime fallback for Proxy [[Call]].
__ Push(x1);
// Increase the arguments size to include the pushed function and the
@@ -2195,15 +2325,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
__ Bind(&non_function);
- // Check if target has a [[Call]] internal method.
- __ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
- __ TestAndBranchIfAllClear(x4, 1 << Map::kIsCallable, &non_callable);
// Overwrite the original receiver with the (original) target.
__ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2341,7 +2468,8 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x2 : the address of the first argument to be pushed. Subsequent
@@ -2369,7 +2497,9 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
__ B(gt, &loop_header);
// Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
}
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index a1e920755d..57a0ffde92 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -207,8 +207,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
Register right, Register scratch,
FPRegister double_scratch,
- Label* slow, Condition cond,
- Strength strength) {
+ Label* slow, Condition cond) {
DCHECK(!AreAliased(left, right, scratch));
Label not_identical, return_equal, heap_number;
Register result = x0;
@@ -231,14 +230,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
// Call runtime on identical SIMD values since we must throw a TypeError.
__ Cmp(right_type, SIMD128_VALUE_TYPE);
__ B(eq, slow);
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics, since
- // we need to throw a TypeError. Smis have already been ruled out.
- __ Cmp(right_type, Operand(HEAP_NUMBER_TYPE));
- __ B(eq, &return_equal);
- __ Tst(right_type, Operand(kIsNotStringMask));
- __ B(ne, slow);
- }
} else if (cond == eq) {
__ JumpIfHeapNumber(right, &heap_number);
} else {
@@ -253,13 +244,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
// Call runtime on identical SIMD values since we must throw a TypeError.
__ Cmp(right_type, SIMD128_VALUE_TYPE);
__ B(eq, slow);
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics,
- // since we need to throw a TypeError. Smis and heap numbers have
- // already been ruled out.
- __ Tst(right_type, Operand(kIsNotStringMask));
- __ B(ne, slow);
- }
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -443,54 +427,49 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Fast negative check for internalized-to-internalized equality.
// See call site for description.
-static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
- Register left,
- Register right,
- Register left_map,
- Register right_map,
- Register left_type,
- Register right_type,
- Label* possible_strings,
- Label* not_both_strings) {
+static void EmitCheckForInternalizedStringsOrObjects(
+ MacroAssembler* masm, Register left, Register right, Register left_map,
+ Register right_map, Register left_type, Register right_type,
+ Label* possible_strings, Label* runtime_call) {
DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type));
Register result = x0;
+ DCHECK(left.is(x0) || right.is(x0));
- Label object_test;
+ Label object_test, return_unequal, undetectable;
STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
// TODO(all): reexamine this branch sequence for optimisation wrt branch
// prediction.
__ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
__ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
- __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings);
+ __ Tbnz(left_type, MaskToBit(kIsNotStringMask), runtime_call);
__ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
- // Both are internalized. We already checked that they weren't the same
- // pointer, so they are not equal.
- __ Mov(result, NOT_EQUAL);
+ // Both are internalized. We already checked they weren't the same pointer so
+ // they are not equal. Return non-equal by returning the non-zero object
+ // pointer in x0.
__ Ret();
__ Bind(&object_test);
- __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
-
- // If right >= FIRST_JS_RECEIVER_TYPE, test left.
- // Otherwise, right < FIRST_JS_RECEIVER_TYPE, so set lt condition.
- __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NFlag, ge);
-
- __ B(lt, not_both_strings);
-
- // If both objects are undetectable, they are equal. Otherwise, they are not
- // equal, since they are different objects and an object is not equal to
- // undefined.
-
- // Returning here, so we can corrupt right_type and left_type.
- Register right_bitfield = right_type;
Register left_bitfield = left_type;
+ Register right_bitfield = right_type;
__ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
__ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
- __ And(result, right_bitfield, left_bitfield);
- __ And(result, result, 1 << Map::kIsUndetectable);
- __ Eor(result, result, 1 << Map::kIsUndetectable);
+ __ Tbnz(right_bitfield, MaskToBit(1 << Map::kIsUndetectable), &undetectable);
+ __ Tbnz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);
+
+ __ CompareInstanceType(right_map, right_type, FIRST_JS_RECEIVER_TYPE);
+ __ B(lt, runtime_call);
+ __ CompareInstanceType(left_map, left_type, FIRST_JS_RECEIVER_TYPE);
+ __ B(lt, runtime_call);
+
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in x0.
+ __ Ret();
+
+ __ bind(&undetectable);
+ __ Tbz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);
+ __ Mov(result, EQUAL);
__ Ret();
}
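The rewritten fast path reads most easily as a decision tree. A C++ sketch of the outcomes, with the predicates as plain booleans (illustrative, derived from the branches above):

    enum class Outcome { kEqual, kNotEqual, kPossibleStrings, kRuntimeCall };

    Outcome CompareNonIdenticalFastPath(
        bool right_is_string, bool right_internalized, bool left_is_string,
        bool left_internalized, bool right_undetectable, bool left_undetectable,
        bool right_is_receiver, bool left_is_receiver) {
      if (right_is_string) {
        if (!right_internalized) return Outcome::kPossibleStrings;
        if (!left_is_string) return Outcome::kRuntimeCall;
        if (!left_internalized) return Outcome::kPossibleStrings;
        return Outcome::kNotEqual;  // two distinct internalized strings
      }
      if (right_undetectable) {
        // Undetectable objects compare equal only to other undetectables.
        return left_undetectable ? Outcome::kEqual : Outcome::kNotEqual;
      }
      if (left_undetectable) return Outcome::kNotEqual;
      if (!right_is_receiver || !left_is_receiver) return Outcome::kRuntimeCall;
      return Outcome::kNotEqual;  // two distinct JS receivers
    }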
@@ -536,8 +515,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond,
- strength());
+ EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
// If either is a smi (we know that at least one is not a smi), then they can
// only be strictly equal if the other is a HeapNumber.
@@ -667,8 +645,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
- : Runtime::kCompare);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ Bind(&miss);
@@ -971,8 +948,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
result_double);
DCHECK(result_tagged.is(x0));
- __ IncrementCounter(
- isolate()->counters()->math_pow(), 1, scratch0, scratch1);
__ Ret();
} else {
AllowExternalCallThatCantCauseGC scope(masm);
@@ -984,8 +959,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
0, 2);
__ Mov(lr, saved_lr);
__ Bind(&done);
- __ IncrementCounter(
- isolate()->counters()->math_pow(), 1, scratch0, scratch1);
__ Ret();
}
}
@@ -1104,10 +1077,13 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Sub(temp_argv, temp_argv, 1 * kPointerSize);
}
- // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
- // registers.
+ // Reserve three slots to preserve x21-x23 callee-saved registers. If the
+ // result size is too large to be returned in registers, then also reserve
+ // space for the return value.
+ int extra_stack_space = 3 + (result_size() <= 2 ? 0 : result_size());
+ // Enter the exit frame.
FrameScope scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(save_doubles(), x10, 3);
+ __ EnterExitFrame(save_doubles(), x10, extra_stack_space);
DCHECK(csp.Is(__ StackPointer()));
// Poke callee-saved registers into reserved space.
@@ -1115,6 +1091,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Poke(argc, 2 * kPointerSize);
__ Poke(target, 3 * kPointerSize);
+ if (result_size() > 2) {
+ // Save the location of the return value into x8 for the call.
+ __ Add(x8, __ StackPointer(), Operand(4 * kPointerSize));
+ }
+
// We normally only keep tagged values in callee-saved registers, as they
// could be pushed onto the stack by called stubs and functions, and on the
// stack they can confuse the GC. However, we're only calling C functions
@@ -1184,7 +1165,18 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Blr(target);
__ Bind(&return_location);
- // x0 result The return code from the call.
+ if (result_size() > 2) {
+ DCHECK_EQ(3, result_size());
+ // Read result values stored on stack.
+ __ Ldr(x0, MemOperand(__ StackPointer(), 4 * kPointerSize));
+ __ Ldr(x1, MemOperand(__ StackPointer(), 5 * kPointerSize));
+ __ Ldr(x2, MemOperand(__ StackPointer(), 6 * kPointerSize));
+ }
+ // Result returned in x0, x1:x0 or x2:x1:x0 - do not destroy these registers!
+
+ // x0 result0 The return code from the call.
+ // x1 result1 For calls which return ObjectPair or ObjectTriple.
+ // x2 result2 For calls which return ObjectTriple.
// x21 argv
// x22 argc
// x23 target
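The x8 handling above follows the AArch64 procedure call standard (AAPCS64): an aggregate larger than 16 bytes is returned through caller-provided memory whose address arrives in x8. ObjectTriple is three tagged pointers (24 bytes), so the stub reserves stack slots, seeds x8 with their address before the call, and reads x0..x2 back afterwards. A compilable illustration of the C side:

    struct ObjectTriple {  // three pointers = 24 bytes on arm64, > 16 bytes
      void* x;
      void* y;
      void* z;
    };

    // Per AAPCS64 the caller allocates space for the result and passes its
    // address in x8; the callee writes the struct there instead of using
    // registers. Returning ObjectTriple from C therefore matches the stub's
    // stack-slot protocol without extra glue.
    ObjectTriple MakeTriple(void* a, void* b, void* c) {
      return ObjectTriple{a, b, c};
    }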
@@ -1616,363 +1608,6 @@ void InstanceOfStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- Register arg_count = ArgumentsAccessReadDescriptor::parameter_count();
- Register key = ArgumentsAccessReadDescriptor::index();
- DCHECK(arg_count.is(x0));
- DCHECK(key.is(x1));
-
- // The displacement is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(key, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Register local_fp = x11;
- Register caller_fp = x11;
- Register caller_ctx = x12;
- Label skip_adaptor;
- __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(caller_ctx, MemOperand(caller_fp,
- StandardFrameConstants::kContextOffset));
- __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ Csel(local_fp, fp, caller_fp, ne);
- __ B(ne, &skip_adaptor);
-
- // Load the actual arguments limit found in the arguments adaptor frame.
- __ Ldr(arg_count, MemOperand(caller_fp,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Bind(&skip_adaptor);
-
- // Check index against formal parameters count limit. Use unsigned comparison
- // to get negative check for free: branch if key < 0 or key >= arg_count.
- __ Cmp(key, arg_count);
- __ B(hs, &slow);
-
- // Read the argument from the stack and return it.
- __ Sub(x10, arg_count, key);
- __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2));
- __ Ldr(x0, MemOperand(x10, kDisplacement));
- __ Ret();
-
- // Slow case: handle non-smi or out-of-bounds access to arguments by calling
- // the runtime system.
- __ Bind(&slow);
- __ Push(key);
- __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // x1 : function
- // x2 : number of parameters (tagged)
- // x3 : parameters pointer
-
- DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- Register caller_fp = x10;
- __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- // Load and untag the context.
- __ Ldr(w11, UntagSmiMemOperand(caller_fp,
- StandardFrameConstants::kContextOffset));
- __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
- __ B(ne, &runtime);
-
- // Patch the arguments.length and parameters pointer in the current frame.
- __ Ldr(x2,
- MemOperand(caller_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Add(x3, caller_fp, Operand::UntagSmiAndScale(x2, kPointerSizeLog2));
- __ Add(x3, x3, StandardFrameConstants::kCallerSPOffset);
-
- __ Bind(&runtime);
- __ Push(x1, x3, x2);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // x1 : function
- // x2 : number of parameters (tagged)
- // x3 : parameters pointer
- //
- // Returns pointer to result object in x0.
-
- DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Make an untagged copy of the parameter count.
- // Note: arg_count_smi is an alias of param_count_smi.
- Register function = x1;
- Register arg_count_smi = x2;
- Register param_count_smi = x2;
- Register recv_arg = x3;
- Register param_count = x7;
- __ SmiUntag(param_count, param_count_smi);
-
- // Check if the calling frame is an arguments adaptor frame.
- Register caller_fp = x11;
- Register caller_ctx = x12;
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(caller_ctx, MemOperand(caller_fp,
- StandardFrameConstants::kContextOffset));
- __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(eq, &adaptor_frame);
-
- // No adaptor, parameter count = argument count.
-
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped params, min(params, args) (uninit)
- // x7 param_count number of function parameters
- // x11 caller_fp caller's frame pointer
- // x14 arg_count number of function arguments (uninit)
-
- Register arg_count = x14;
- Register mapped_params = x4;
- __ Mov(arg_count, param_count);
- __ Mov(mapped_params, param_count);
- __ B(&try_allocate);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ Bind(&adaptor_frame);
- __ Ldr(arg_count_smi,
- MemOperand(caller_fp,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(arg_count, arg_count_smi);
- __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
- __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
-
- // Compute the mapped parameter count = min(param_count, arg_count)
- __ Cmp(param_count, arg_count);
- __ Csel(mapped_params, param_count, arg_count, lt);
-
- __ Bind(&try_allocate);
-
- // x0 alloc_obj pointer to allocated objects: param map, backing
- // store, arguments (uninit)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x7 param_count number of function parameters
- // x10 size size of objects to allocate (uninit)
- // x14 arg_count number of function arguments
-
- // Compute the size of backing store, parameter map, and arguments object.
- // 1. Parameter map, has two extra words containing context and backing
- // store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
-
- // Calculate the parameter map size, assuming it exists.
- Register size = x10;
- __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
- __ Add(size, size, kParameterMapHeaderSize);
-
- // If there are no mapped parameters, set the running size total to zero.
- // Otherwise, use the parameter map size calculated earlier.
- __ Cmp(mapped_params, 0);
- __ CzeroX(size, eq);
-
- // 2. Add the size of the backing store and arguments object.
- __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
- __ Add(size, size,
- FixedArray::kHeaderSize + Heap::kSloppyArgumentsObjectSize);
-
- // Do the allocation of all three objects in one go. Assign this to x0, as it
- // will be returned to the caller.
- Register alloc_obj = x0;
- __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
-
- // Get the arguments boilerplate from the current (global) context.
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x7 param_count number of function parameters
- // x11 sloppy_args_map offset to args (or aliased args) map (uninit)
- // x14 arg_count number of function arguments
-
- Register global_ctx = x10;
- Register sloppy_args_map = x11;
- Register aliased_args_map = x10;
- __ Ldr(global_ctx, NativeContextMemOperand());
-
- __ Ldr(sloppy_args_map,
- ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ Ldr(
- aliased_args_map,
- ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
- __ Cmp(mapped_params, 0);
- __ CmovX(sloppy_args_map, aliased_args_map, ne);
-
- // Copy the JS object part.
- __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
- __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
- __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
- __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ AssertNotSmi(function);
- __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
-
- // Use the length and set that as an in-object property.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- const int kLengthOffset = JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize;
- __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, "elements" will point there, otherwise
- // it will point to the backing store.
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x5 elements pointer to parameter map or backing store (uninit)
- // x6 backing_store pointer to backing store (uninit)
- // x7 param_count number of function parameters
- // x14 arg_count number of function arguments
-
- Register elements = x5;
- __ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize);
- __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ Cmp(mapped_params, 0);
- // Set up backing store address, because it is needed later for filling in
- // the unmapped arguments.
- Register backing_store = x6;
- __ CmovX(backing_store, elements, eq);
- __ B(eq, &skip_parameter_map);
-
- __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
- __ Add(x10, mapped_params, 2);
- __ SmiTag(x10);
- __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Str(cp, FieldMemOperand(elements,
- FixedArray::kHeaderSize + 0 * kPointerSize));
- __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
- __ Add(x10, x10, kParameterMapHeaderSize);
- __ Str(x10, FieldMemOperand(elements,
- FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. Then index the context,
- // where parameters are stored in reverse order, at:
- //
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
- //
- // The mapped parameter thus needs to get indices:
- //
- // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
- // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
- //
- // We loop from right to left.
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x5 elements pointer to parameter map or backing store (uninit)
- // x6 backing_store pointer to backing store (uninit)
- // x7 param_count number of function parameters
- // x11 loop_count parameter loop counter (uninit)
- // x12 index parameter index (smi, uninit)
- // x13 the_hole hole value (uninit)
- // x14 arg_count number of function arguments
-
- Register loop_count = x11;
- Register index = x12;
- Register the_hole = x13;
- Label parameters_loop, parameters_test;
- __ Mov(loop_count, mapped_params);
- __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
- __ Sub(index, index, mapped_params);
- __ SmiTag(index);
- __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
- __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
- __ Add(backing_store, backing_store, kParameterMapHeaderSize);
-
- __ B(&parameters_test);
-
- __ Bind(&parameters_loop);
- __ Sub(loop_count, loop_count, 1);
- __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
- __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
- __ Str(index, MemOperand(elements, x10));
- __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
- __ Str(the_hole, MemOperand(backing_store, x10));
- __ Add(index, index, Smi::FromInt(1));
- __ Bind(&parameters_test);
- __ Cbnz(loop_count, &parameters_loop);
-
- __ Bind(&skip_parameter_map);
- // Copy arguments header and remaining slots (if there are any.)
- __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
- __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
- __ Str(arg_count_smi, FieldMemOperand(backing_store,
- FixedArray::kLengthOffset));
-
- // x0 alloc_obj pointer to allocated objects (param map, backing
- // store, arguments)
- // x1 function function pointer
- // x2 arg_count_smi number of function arguments (smi)
- // x3 recv_arg pointer to receiver arguments
- // x4 mapped_params number of mapped parameters, min(params, args)
- // x6 backing_store pointer to backing store (uninit)
- // x14 arg_count number of function arguments
-
- Label arguments_loop, arguments_test;
- __ Mov(x10, mapped_params);
- __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
- __ B(&arguments_test);
-
- __ Bind(&arguments_loop);
- __ Sub(recv_arg, recv_arg, kPointerSize);
- __ Ldr(x11, MemOperand(recv_arg));
- __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
- __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
- __ Add(x10, x10, 1);
-
- __ Bind(&arguments_test);
- __ Cmp(x10, arg_count);
- __ B(lt, &arguments_loop);
-
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ Bind(&runtime);
- __ Push(function, recv_arg, arg_count_smi);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Return address is in lr.
Label slow;
@@ -1993,182 +1628,6 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // x1 : function
- // x2 : number of parameters (tagged)
- // x3 : parameters pointer
- //
- // Returns pointer to result object in x0.
-
- DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Make an untagged copy of the parameter count.
- Register function = x1;
- Register param_count_smi = x2;
- Register params = x3;
- Register param_count = x13;
- __ SmiUntag(param_count, param_count_smi);
-
- // Test if arguments adaptor needed.
- Register caller_fp = x11;
- Register caller_ctx = x12;
- Label try_allocate, runtime;
- __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(caller_ctx, MemOperand(caller_fp,
- StandardFrameConstants::kContextOffset));
- __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &try_allocate);
-
- // x1 function function pointer
- // x2 param_count_smi number of parameters passed to function (smi)
- // x3 params pointer to parameters
- // x11 caller_fp caller's frame pointer
- // x13 param_count number of parameters passed to function
-
- // Patch the argument length and parameters pointer.
- __ Ldr(param_count_smi,
- MemOperand(caller_fp,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(param_count, param_count_smi);
- __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
- __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
-
- // Try the new space allocation. Start out with computing the size of the
- // arguments object and the elements array in words.
- Register size = x10;
- __ Bind(&try_allocate);
- __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize);
- __ Cmp(param_count, 0);
- __ CzeroX(size, eq);
- __ Add(size, size, Heap::kStrictArgumentsObjectSize / kPointerSize);
-
- // Do the allocation of both objects in one go. Assign this to x0, as it will
- // be returned to the caller.
- Register alloc_obj = x0;
- __ Allocate(size, alloc_obj, x11, x12, &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current (native) context.
- Register strict_args_map = x4;
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX,
- strict_args_map);
-
- // x0 alloc_obj pointer to allocated objects: parameter array and
- // arguments object
- // x1 function function pointer
- // x2 param_count_smi number of parameters passed to function (smi)
- // x3 params pointer to parameters
- // x4 strict_args_map offset to arguments map
- // x13 param_count number of parameters passed to function
- __ Str(strict_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
- __ LoadRoot(x5, Heap::kEmptyFixedArrayRootIndex);
- __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
- __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-
- // Set the smi-tagged length as an in-object property.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- const int kLengthOffset = JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize;
- __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset));
-
- // If there are no actual arguments, we're done.
- Label done;
- __ Cbz(param_count, &done);
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- Register elements = x5;
- __ Add(elements, alloc_obj, Heap::kStrictArgumentsObjectSize);
- __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
- __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
- __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
- __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // x0 alloc_obj pointer to allocated objects: parameter array and
- // arguments object
- // x1 function function pointer
- // x2 param_count_smi number of parameters passed to function (smi)
- // x3 params pointer to parameters
- // x4 array pointer to array slot (uninit)
- // x5 elements pointer to elements array of alloc_obj
- // x13 param_count number of parameters passed to function
-
- // Copy the fixed array slots.
- Label loop;
- Register array = x4;
- // Set up pointer to first array slot.
- __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-
- __ Bind(&loop);
- // Pre-decrement the parameters pointer by kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex));
- // Post-increment elements by kPointerSize on each iteration.
- __ Str(x10, MemOperand(array, kPointerSize, PostIndex));
- __ Sub(param_count, param_count, 1);
- __ Cbnz(param_count, &loop);
-
- // Return from stub.
- __ Bind(&done);
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ Bind(&runtime);
- __ Push(function, params, param_count_smi);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // x2 : number of parameters (tagged)
- // x3 : parameters pointer
- // x4 : rest parameter index (tagged)
- //
- // Returns pointer to result object in x0.
-
- DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(x3.is(RestParamAccessDescriptor::parameter_pointer()));
- DCHECK(x4.is(RestParamAccessDescriptor::rest_parameter_index()));
-
- // Get the stub arguments from the frame, and make an untagged copy of the
- // parameter count.
- Register rest_index_smi = x4;
- Register param_count_smi = x2;
- Register params = x3;
- Register param_count = x13;
- __ SmiUntag(param_count, param_count_smi);
-
- // Test if arguments adaptor needed.
- Register caller_fp = x11;
- Register caller_ctx = x12;
- Label runtime;
- __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(caller_ctx,
- MemOperand(caller_fp, StandardFrameConstants::kContextOffset));
- __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &runtime);
-
- // x4 rest_index_smi index of rest parameter
- // x2 param_count_smi number of parameters passed to function (smi)
- // x3 params pointer to parameters
- // x11 caller_fp caller's frame pointer
- // x13 param_count number of parameters passed to function
-
- // Patch the argument length and parameters pointer.
- __ Ldr(param_count_smi,
- MemOperand(caller_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(param_count, param_count_smi);
- __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
- __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
-
- __ Bind(&runtime);
- __ Push(param_count_smi, params, rest_index_smi);
- __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec);
@@ -2917,7 +2376,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Bind(&call_function);
__ Mov(x0, argc);
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+ tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
@@ -2951,7 +2411,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Bind(&call);
__ Mov(x0, argc);
- __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -3151,18 +2611,14 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(x1, x2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(x0, x3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
- if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
- } else {
- if (!Token::IsEqualityOp(op())) {
- __ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset));
- __ AssertSmi(x1);
- __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
- __ AssertSmi(x0);
- }
- __ Sub(x0, x1, x0);
- __ Ret();
+ if (!Token::IsEqualityOp(op())) {
+ __ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset));
+ __ AssertSmi(x1);
+ __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
+ __ AssertSmi(x0);
}
+ __ Sub(x0, x1, x0);
+ __ Ret();
__ Bind(&miss);
GenerateMiss(masm);
@@ -3236,7 +2692,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ Ret();
__ Bind(&unordered);
- CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -3467,8 +2923,6 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
if (Token::IsEqualityOp(op())) {
__ Sub(result, rhs, lhs);
__ Ret();
- } else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
Register ncr = x2;
if (op() == Token::LT || op() == Token::LTE) {
@@ -3859,6 +3313,39 @@ void ToStringStub::Generate(MacroAssembler* masm) {
}
+void ToNameStub::Generate(MacroAssembler* masm) {
+ // The ToName stub takes one argument in x0.
+ Label is_number;
+ __ JumpIfSmi(x0, &is_number);
+
+ Label not_name;
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ JumpIfObjectType(x0, x1, x1, LAST_NAME_TYPE, &not_name, hi);
+ // x0: receiver
+ // x1: receiver instance type
+ __ Ret();
+ __ Bind(&not_name);
+
+ Label not_heap_number;
+ __ Cmp(x1, HEAP_NUMBER_TYPE);
+ __ B(ne, &not_heap_number);
+ __ Bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ Bind(&not_heap_number);
+
+ Label not_oddball;
+ __ Cmp(x1, ODDBALL_TYPE);
+ __ B(ne, &not_oddball);
+ __ Ldr(x0, FieldMemOperand(x0, Oddball::kToStringOffset));
+ __ Ret();
+ __ Bind(&not_oddball);
+
+ __ Push(x0); // Push argument.
+ __ TailCallRuntime(Runtime::kToName);
+}
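The stub's dispatch order is compact enough to state as a table. A sketch in C++, with the tag classification as an illustrative enum:

    enum class Tag { kSmi, kHeapNumber, kName, kOddball, kOther };

    // Mirrors the branch order above: numbers go to NumberToStringStub,
    // Names return unchanged, Oddballs use their cached to_string value, and
    // everything else falls through to Runtime::kToName.
    const char* ToNameFastPath(Tag tag) {
      switch (tag) {
        case Tag::kSmi:
        case Tag::kHeapNumber: return "NumberToStringStub";
        case Tag::kName:       return "identity";
        case Tag::kOddball:    return "Oddball::to_string";
        default:               return "Runtime::kToName";
      }
    }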
+
+
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -4042,8 +3529,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
__ Ldr(val, MemOperand(regs_.address()));
__ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
- __ CheckPageFlagSet(regs_.object(), val, 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- &dont_need_remembered_set);
+ __ JumpIfInNewSpace(regs_.object(), &dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
@@ -5343,6 +4829,672 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x1 : target
+ // -- x3 : new target
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(x1);
+ __ AssertReceiver(x3);
+
+ // Verify that the new target is a JSFunction.
+ Label new_object;
+ __ JumpIfNotObjectType(x3, x2, x2, JS_FUNCTION_TYPE, &new_object);
+
+ // Load the initial map and verify that it's in fact a map.
+ __ Ldr(x2, FieldMemOperand(x3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(x2, &new_object);
+ __ JumpIfNotObjectType(x2, x0, x0, MAP_TYPE, &new_object);
+
+ // Fall back to runtime if the target differs from the new target's
+ // initial map constructor.
+ __ Ldr(x0, FieldMemOperand(x2, Map::kConstructorOrBackPointerOffset));
+ __ CompareAndBranch(x0, x1, ne, &new_object);
+
+ // Allocate the JSObject on the heap.
+ Label allocate, done_allocate;
+ __ Ldrb(x4, FieldMemOperand(x2, Map::kInstanceSizeOffset));
+ __ Allocate(x4, x0, x5, x6, &allocate, SIZE_IN_WORDS);
+ __ Bind(&done_allocate);
+
+ // Initialize the JSObject fields.
+ __ Mov(x1, x0);
+ STATIC_ASSERT(JSObject::kMapOffset == 0 * kPointerSize);
+ __ Str(x2, MemOperand(x1, kPointerSize, PostIndex));
+ __ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
+ STATIC_ASSERT(JSObject::kPropertiesOffset == 1 * kPointerSize);
+ STATIC_ASSERT(JSObject::kElementsOffset == 2 * kPointerSize);
+ __ Stp(x3, x3, MemOperand(x1, 2 * kPointerSize, PostIndex));
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+
+ // ----------- S t a t e -------------
+ // -- x0 : result (untagged)
+ // -- x1 : result fields (untagged)
+ // -- x5 : result end (untagged)
+ // -- x2 : initial map
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+
+ // Perform in-object slack tracking if requested.
+ Label slack_tracking;
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ __ LoadRoot(x6, Heap::kUndefinedValueRootIndex);
+ __ Ldr(w3, FieldMemOperand(x2, Map::kBitField3Offset));
+ __ TestAndBranchIfAnySet(w3, Map::ConstructionCounter::kMask,
+ &slack_tracking);
+ {
+ // Initialize all in-object fields with undefined.
+ __ InitializeFieldsWithFiller(x1, x5, x6);
+
+ // Add the object tag to make the JSObject real.
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Add(x0, x0, kHeapObjectTag);
+ __ Ret();
+ }
+ __ Bind(&slack_tracking);
+ {
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ __ Sub(w3, w3, 1 << Map::ConstructionCounter::kShift);
+ __ Str(w3, FieldMemOperand(x2, Map::kBitField3Offset));
+
+ // Initialize the in-object fields with undefined.
+ __ Ldrb(x4, FieldMemOperand(x2, Map::kUnusedPropertyFieldsOffset));
+ __ Sub(x4, x5, Operand(x4, LSL, kPointerSizeLog2));
+ __ InitializeFieldsWithFiller(x1, x4, x6);
+
+ // Initialize the remaining (reserved) fields with one pointer filler map.
+ __ LoadRoot(x6, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(x1, x5, x6);
+
+ // Add the object tag to make the JSObject real.
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Add(x0, x0, kHeapObjectTag);
+
+ // Check if we can finalize the instance size.
+ Label finalize;
+ STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+ __ TestAndBranchIfAllClear(w3, Map::ConstructionCounter::kMask, &finalize);
+ __ Ret();
+
+ // Finalize the instance size.
+ __ Bind(&finalize);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x0, x2);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(x0);
+ }
+ __ Ret();
+ }
+
+ // Fall back to %AllocateInNewSpace.
+ __ Bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ Mov(x4,
+ Operand(x4, LSL, kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
+ __ Push(x2, x4);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(x2);
+ }
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Sub(x0, x0, kHeapObjectTag);
+ __ Ldrb(x5, FieldMemOperand(x2, Map::kInstanceSizeOffset));
+ __ Add(x5, x0, Operand(x5, LSL, kPointerSizeLog2));
+ __ B(&done_allocate);
+
+ // Fall back to %NewObject.
+ __ Bind(&new_object);
+ __ Push(x1, x3);
+ __ TailCallRuntime(Runtime::kNewObject);
+}
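
    In the %AllocateInNewSpace fallback above, a single left shift turns the
    instance size in words into a Smi-tagged size in bytes: on arm64,
    kPointerSizeLog2 is 3 and a Smi carries its payload in the upper 32 bits
    (kSmiTagSize 1, kSmiShiftSize 31). A standalone check of that arithmetic,
    outside V8:

        #include <cassert>
        #include <cstdint>

        int main() {
          const int kPointerSizeLog2 = 3, kSmiTagSize = 1, kSmiShiftSize = 31;
          int64_t words = 5;  // instance size in words
          // One shift both scales words to bytes and tags the result as a Smi.
          int64_t smi = words << (kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize);
          // Untagging recovers the byte count: 5 words -> 40 bytes.
          assert((smi >> (kSmiTagSize + kSmiShiftSize)) ==
                 (words << kPointerSizeLog2));
          return 0;
        }
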
+
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(x1);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make x2 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ Mov(x2, fp);
+ __ B(&loop_entry);
+ __ Bind(&loop);
+ __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+ __ Bind(&loop_entry);
+ __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+ __ Cmp(x3, x1);
+ __ B(ne, &loop);
+ }
+
+ // Check if we have rest parameters (only possible if we have an
+ // arguments adaptor frame below the function frame).
+ Label no_rest_parameters;
+ __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kContextOffset));
+ __ Cmp(x3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(ne, &no_rest_parameters);
+
+ // Check if the arguments adaptor frame contains more arguments than
+ // specified by the function's internal formal parameter count.
+ Label rest_parameters;
+ __ Ldrsw(x0, UntagSmiMemOperand(
+ x2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(
+ x1, FieldMemOperand(x1, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Subs(x0, x0, x1);
+ __ B(gt, &rest_parameters);
+
+ // Return an empty rest parameter array.
+ __ Bind(&no_rest_parameters);
+ {
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate an empty rest parameter array.
+ Label allocate, done_allocate;
+ __ Allocate(JSArray::kSize, x0, x1, x2, &allocate, TAG_OBJECT);
+ __ Bind(&done_allocate);
+
+ // Setup the rest parameter array in x0.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
+ __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
+ __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
+ __ Str(x1, FieldMemOperand(x0, JSArray::kElementsOffset));
+ __ Mov(x1, Smi::FromInt(0));
+ __ Str(x1, FieldMemOperand(x0, JSArray::kLengthOffset));
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ Bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(Smi::FromInt(JSArray::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ }
+ __ B(&done_allocate);
+ }
+
+ __ Bind(&rest_parameters);
+ {
+ // Compute the pointer to the first rest parameter (skipping the receiver).
+ __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
+ __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- x0 : number of rest parameters
+ // -- x2 : pointer to the first rest parameter
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate space for the rest parameter array plus the backing store.
+ Label allocate, done_allocate;
+ __ Mov(x1, JSArray::kSize + FixedArray::kHeaderSize);
+ __ Add(x1, x1, Operand(x0, LSL, kPointerSizeLog2));
+ __ Allocate(x1, x3, x4, x5, &allocate, TAG_OBJECT);
+ __ Bind(&done_allocate);
+
+ // Compute arguments.length in x6.
+ __ SmiTag(x6, x0);
+
+ // Setup the elements array in x3.
+ __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
+ __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
+ __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
+ __ Add(x4, x3, FixedArray::kHeaderSize);
+ {
+ Label loop, done_loop;
+ __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
+ __ Bind(&loop);
+ __ Cmp(x4, x0);
+ __ B(eq, &done_loop);
+ __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
+ __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
+ __ Sub(x2, x2, Operand(1 * kPointerSize));
+ __ Add(x4, x4, Operand(1 * kPointerSize));
+ __ B(&loop);
+ __ Bind(&done_loop);
+ }
+
+ // Setup the rest parameter array in x0.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
+ __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
+ __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
+ __ Str(x3, FieldMemOperand(x0, JSArray::kElementsOffset));
+ __ Str(x6, FieldMemOperand(x0, JSArray::kLengthOffset));
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ Bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(x0);
+ __ SmiTag(x1);
+ __ Push(x0, x2, x1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Mov(x3, x0);
+ __ Pop(x2, x0);
+ __ SmiUntag(x0);
+ }
+ __ B(&done_allocate);
+ }
+}
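
    The Subs/B(gt) pair near the top of this stub computes the rest count as
    the actual argument count minus the formal parameter count, taking the
    empty-array path when the difference is not positive. The same computation
    in plain C++:

        #include <algorithm>
        #include <cstdio>

        int main() {
          int actual_args = 5, formal_params = 2;  // f(a, b, ...rest) with 5 args
          int rest_count = std::max(actual_args - formal_params, 0);
          printf("rest parameters: %d\n", rest_count);  // 3
          return 0;
        }
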
+
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(x1);
+
+ // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(
+ x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Add(x3, fp, Operand(x2, LSL, kPointerSizeLog2));
+ __ Add(x3, x3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ SmiTag(x2);
+
+ // x1 : function
+ // x2 : number of parameters (tagged)
+ // x3 : parameters pointer
+ //
+ // Returns pointer to result object in x0.
+
+ // Make an untagged copy of the parameter count.
+ // Note: arg_count_smi is an alias of param_count_smi.
+ Register function = x1;
+ Register arg_count_smi = x2;
+ Register param_count_smi = x2;
+ Register recv_arg = x3;
+ Register param_count = x7;
+ __ SmiUntag(param_count, param_count_smi);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Register caller_fp = x11;
+ Register caller_ctx = x12;
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(caller_ctx, MemOperand(caller_fp,
+ StandardFrameConstants::kContextOffset));
+ __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(eq, &adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped params, min(params, args) (uninit)
+ // x7 param_count number of function parameters
+ // x11 caller_fp caller's frame pointer
+ // x14 arg_count number of function arguments (uninit)
+
+ Register arg_count = x14;
+ Register mapped_params = x4;
+ __ Mov(arg_count, param_count);
+ __ Mov(mapped_params, param_count);
+ __ B(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ Bind(&adaptor_frame);
+ __ Ldr(arg_count_smi,
+ MemOperand(caller_fp,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(arg_count, arg_count_smi);
+ __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
+ __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
+
+ // Compute the mapped parameter count = min(param_count, arg_count)
+ __ Cmp(param_count, arg_count);
+ __ Csel(mapped_params, param_count, arg_count, lt);
+
+ __ Bind(&try_allocate);
+
+ // x0 alloc_obj pointer to allocated objects: param map, backing
+ // store, arguments (uninit)
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
+ // x7 param_count number of function parameters
+ // x10 size size of objects to allocate (uninit)
+ // x14 arg_count number of function arguments
+
+ // Compute the size of backing store, parameter map, and arguments object.
+ // 1. The parameter map has two extra words containing the context and the
+ // backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+
+ // Calculate the parameter map size, assuming it exists.
+ Register size = x10;
+ __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
+ __ Add(size, size, kParameterMapHeaderSize);
+
+ // If there are no mapped parameters, set the running size total to zero.
+ // Otherwise, use the parameter map size calculated earlier.
+ __ Cmp(mapped_params, 0);
+ __ CzeroX(size, eq);
+
+ // 2. Add the size of the backing store and arguments object.
+ __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
+ __ Add(size, size, FixedArray::kHeaderSize + JSSloppyArgumentsObject::kSize);
+
+ // Do the allocation of all three objects in one go. Assign this to x0, as it
+ // will be returned to the caller.
+ Register alloc_obj = x0;
+ __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT);
+
+ // Get the arguments boilerplate from the current (global) context.
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
+ // x7 param_count number of function parameters
+ // x11 sloppy_args_map offset to args (or aliased args) map (uninit)
+ // x14 arg_count number of function arguments
+
+ Register global_ctx = x10;
+ Register sloppy_args_map = x11;
+ Register aliased_args_map = x10;
+ __ Ldr(global_ctx, NativeContextMemOperand());
+
+ __ Ldr(sloppy_args_map,
+ ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ Ldr(
+ aliased_args_map,
+ ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
+ __ Cmp(mapped_params, 0);
+ __ CmovX(sloppy_args_map, aliased_args_map, ne);
+
+ // Copy the JS object part.
+ __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
+ __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
+ __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
+
+ // Set up the callee in-object property.
+ __ AssertNotSmi(function);
+ __ Str(function,
+ FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kCalleeOffset));
+
+ // Use the length and set that as an in-object property.
+ __ Str(arg_count_smi,
+ FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kLengthOffset));
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, "elements" will point there, otherwise
+ // it will point to the backing store.
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
+ // x5 elements pointer to parameter map or backing store (uninit)
+ // x6 backing_store pointer to backing store (uninit)
+ // x7 param_count number of function parameters
+ // x14 arg_count number of function arguments
+
+ Register elements = x5;
+ __ Add(elements, alloc_obj, JSSloppyArgumentsObject::kSize);
+ __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ Cmp(mapped_params, 0);
+ // Set up backing store address, because it is needed later for filling in
+ // the unmapped arguments.
+ Register backing_store = x6;
+ __ CmovX(backing_store, elements, eq);
+ __ B(eq, &skip_parameter_map);
+
+ __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
+ __ Add(x10, mapped_params, 2);
+ __ SmiTag(x10);
+ __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Str(cp, FieldMemOperand(elements,
+ FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
+ __ Add(x10, x10, kParameterMapHeaderSize);
+ __ Str(x10, FieldMemOperand(elements,
+ FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. Then index the context,
+ // where parameters are stored in reverse order, at:
+ //
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
+ //
+ // The mapped parameter thus needs to get indices:
+ //
+ // MIN_CONTEXT_SLOTS + parameter_count - 1 ..
+ // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
+ //
+ // We loop from right to left.
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
+ // x5 elements pointer to parameter map or backing store (uninit)
+ // x6 backing_store pointer to backing store (uninit)
+ // x7 param_count number of function parameters
+ // x11 loop_count parameter loop counter (uninit)
+ // x12 index parameter index (smi, uninit)
+ // x13 the_hole hole value (uninit)
+ // x14 arg_count number of function arguments
+
+ Register loop_count = x11;
+ Register index = x12;
+ Register the_hole = x13;
+ Label parameters_loop, parameters_test;
+ __ Mov(loop_count, mapped_params);
+ __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
+ __ Sub(index, index, mapped_params);
+ __ SmiTag(index);
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
+ __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
+ __ Add(backing_store, backing_store, kParameterMapHeaderSize);
+
+ __ B(&parameters_test);
+
+ __ Bind(&parameters_loop);
+ __ Sub(loop_count, loop_count, 1);
+ __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
+ __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
+ __ Str(index, MemOperand(elements, x10));
+ __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
+ __ Str(the_hole, MemOperand(backing_store, x10));
+ __ Add(index, index, Smi::FromInt(1));
+ __ Bind(&parameters_test);
+ __ Cbnz(loop_count, &parameters_loop);
+
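    Worked through with concrete numbers (MIN_CONTEXT_SLOTS is taken as 4 here
    purely for illustration): with param_count = 4 and mapped_params = 2, the
    loop above starts at index 6 and fills map slots from the right, giving
    slot 1 context index 6 and slot 0 context index 7, matching the range
    described in the comment.

        #include <cstdio>

        int main() {
          const int kMinContextSlots = 4;  // illustrative value
          int param_count = 4, mapped_params = 2;
          int index = kMinContextSlots + param_count - mapped_params;  // 6
          for (int loop = mapped_params; loop > 0; --loop) {
            printf("map slot %d -> context index %d\n", loop - 1, index);
            ++index;  // Add(index, index, Smi::FromInt(1))
          }
          return 0;
        }
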
+ __ Bind(&skip_parameter_map);
+ // Copy the arguments header and remaining slots (if there are any).
+ __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
+ __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
+ __ Str(arg_count_smi, FieldMemOperand(backing_store,
+ FixedArray::kLengthOffset));
+
+ // x0 alloc_obj pointer to allocated objects (param map, backing
+ // store, arguments)
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
+ // x6 backing_store pointer to backing store (uninit)
+ // x14 arg_count number of function arguments
+
+ Label arguments_loop, arguments_test;
+ __ Mov(x10, mapped_params);
+ __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
+ __ B(&arguments_test);
+
+ __ Bind(&arguments_loop);
+ __ Sub(recv_arg, recv_arg, kPointerSize);
+ __ Ldr(x11, MemOperand(recv_arg));
+ __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
+ __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
+ __ Add(x10, x10, 1);
+
+ __ Bind(&arguments_test);
+ __ Cmp(x10, arg_count);
+ __ B(lt, &arguments_loop);
+
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ __ Bind(&runtime);
+ __ Push(function, recv_arg, arg_count_smi);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
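
    The three-object allocation above is sized in one pass; in plain C++, with
    an assumed 5-word JSSloppyArgumentsObject (JSObject header plus callee and
    length slots), the computation looks like this:

        #include <cstdio>

        int main() {
          const int kPointerSize = 8;
          const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
          const int kParameterMapHeaderSize =
              kFixedArrayHeaderSize + 2 * kPointerSize;        // + context, store
          const int kSloppyArgumentsObjectSize = 5 * kPointerSize;  // assumed
          int mapped_params = 2, arg_count = 3;
          int size = (mapped_params > 0)
                         ? kParameterMapHeaderSize + mapped_params * kPointerSize
                         : 0;  // CzeroX drops the map when nothing is mapped
          size += kFixedArrayHeaderSize + arg_count * kPointerSize;  // backing store
          size += kSloppyArgumentsObjectSize;                        // arguments object
          printf("bytes to allocate: %d\n", size);  // 128
          return 0;
        }
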
+
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(x1);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make x2 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ Mov(x2, fp);
+ __ B(&loop_entry);
+ __ Bind(&loop);
+ __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+ __ Bind(&loop_entry);
+ __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
+ __ Cmp(x3, x1);
+ __ B(ne, &loop);
+ }
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(x4, MemOperand(x3, StandardFrameConstants::kContextOffset));
+ __ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ B(eq, &arguments_adaptor);
+ {
+ __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(x0, FieldMemOperand(
+ x1, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
+ __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
+ }
+ __ B(&arguments_done);
+ __ Bind(&arguments_adaptor);
+ {
+ __ Ldrsw(x0, UntagSmiMemOperand(
+ x3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Add(x2, x3, Operand(x0, LSL, kPointerSizeLog2));
+ __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
+ }
+ __ Bind(&arguments_done);
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- x0 : number of rest parameters
+ // -- x2 : pointer to the first rest parameter
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate space for the strict arguments object plus the backing store.
+ Label allocate, done_allocate;
+ __ Mov(x1, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize);
+ __ Add(x1, x1, Operand(x0, LSL, kPointerSizeLog2));
+ __ Allocate(x1, x3, x4, x5, &allocate, TAG_OBJECT);
+ __ Bind(&done_allocate);
+
+ // Compute arguments.length in x6.
+ __ SmiTag(x6, x0);
+
+ // Setup the elements array in x3.
+ __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
+ __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
+ __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
+ __ Add(x4, x3, FixedArray::kHeaderSize);
+ {
+ Label loop, done_loop;
+ __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
+ __ Bind(&loop);
+ __ Cmp(x4, x0);
+ __ B(eq, &done_loop);
+ __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
+ __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
+ __ Sub(x2, x2, Operand(1 * kPointerSize));
+ __ Add(x4, x4, Operand(1 * kPointerSize));
+ __ B(&loop);
+ __ Bind(&done_loop);
+ }
+
+ // Setup the strict arguments object in x0.
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, x1);
+ __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kMapOffset));
+ __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kPropertiesOffset));
+ __ Str(x3, FieldMemOperand(x0, JSStrictArgumentsObject::kElementsOffset));
+ __ Str(x6, FieldMemOperand(x0, JSStrictArgumentsObject::kLengthOffset));
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ Bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(x0);
+ __ SmiTag(x1);
+ __ Push(x0, x2, x1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Mov(x3, x0);
+ __ Pop(x2, x0);
+ __ SmiUntag(x0);
+ }
+ __ B(&done_allocate);
+}
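
    The copy loop above reads downward through the stack arguments (x2
    decrements) while filling the new FixedArray upward (x4 increments);
    assuming the first argument sits at the highest copied address, as in
    V8's JS frames, the values land in call order. A standalone sketch:

        #include <cstdio>

        int main() {
          long stack[3] = {30, 20, 10};  // memory order: last argument lowest
          long elements[3];
          long* src = &stack[2];  // "pointer to first rest parameter" (value 10)
          long* dst = elements;
          for (int i = 0; i < 3; i++) *dst++ = *src--;  // walk down, fill up
          for (int i = 0; i < 3; i++) printf("%ld ", elements[i]);  // 10 20 30
          printf("\n");
          return 0;
        }
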
+
+
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context = cp;
Register result = x0;
@@ -5656,11 +5808,10 @@ static void CallApiFunctionAndReturn(
__ B(&leave_exit_frame);
}
-
static void CallApiFunctionStubHelper(MacroAssembler* masm,
const ParameterCount& argc,
bool return_first_arg,
- bool call_data_undefined) {
+ bool call_data_undefined, bool is_lazy) {
// ----------- S t a t e -------------
// -- x0 : callee
// -- x4 : call_data
@@ -5697,8 +5848,10 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// FunctionCallbackArguments: context, callee and call data.
__ Push(context, callee, call_data);
- // Load context from callee
- __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ if (!is_lazy) {
+ // Load context from callee
+ __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ }
if (!call_data_undefined) {
__ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
@@ -5783,7 +5936,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
bool call_data_undefined = this->call_data_undefined();
CallApiFunctionStubHelper(masm, ParameterCount(x3), false,
- call_data_undefined);
+ call_data_undefined, false);
}
@@ -5791,24 +5944,29 @@ void CallApiAccessorStub::Generate(MacroAssembler* masm) {
bool is_store = this->is_store();
int argc = this->argc();
bool call_data_undefined = this->call_data_undefined();
+ bool is_lazy = this->is_lazy();
CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined);
+ call_data_undefined, is_lazy);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[8 - kArgsLength*8] : PropertyCallbackArguments object
+ // -- sp[0] : name
+ // -- sp[8 .. (8 + kArgsLength*8)] : v8::PropertyCallbackInfo::args_
// -- ...
- // -- x2 : api_function_address
+ // -- x2 : api_function_address
// -----------------------------------
Register api_function_address = ApiGetterDescriptor::function_address();
DCHECK(api_function_address.is(x2));
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
__ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
- __ Add(x1, x0, 1 * kPointerSize); // x1 = PCA
+ __ Add(x1, x0, 1 * kPointerSize); // x1 = v8::PCI::args_
const int kApiStackSpace = 1;
@@ -5819,20 +5977,22 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
- // Create PropertyAccessorInfo instance on the stack above the exit frame with
- // x1 (internal::Object** args_) as the data.
+ // Create v8::PropertyCallbackInfo object on the stack and initialize
+ // its args_ field.
__ Poke(x1, 1 * kPointerSize);
- __ Add(x1, masm->StackPointer(), 1 * kPointerSize); // x1 = AccessorInfo&
-
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ __ Add(x1, masm->StackPointer(), 1 * kPointerSize);
+ // x1 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
const int spill_offset = 1 + kApiStackSpace;
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, NULL, spill_offset,
- MemOperand(fp, 6 * kPointerSize), NULL);
+ return_value_operand, NULL);
}
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index cf2cc57215..37bb4a22ba 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -19,8 +19,8 @@ class CacheLineSizes {
cache_type_register_ = 0;
#else
// Copy the content of the cache type register to a core register.
- __asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
- : [ctr] "=r" (cache_type_register_));
+ __asm__ __volatile__("mrs %[ctr], ctr_el0" // NOLINT
+ : [ctr] "=r"(cache_type_register_));
#endif
}
@@ -37,7 +37,6 @@ class CacheLineSizes {
uint32_t cache_type_register_;
};
-
void CpuFeatures::FlushICache(void* address, size_t length) {
#ifdef V8_HOST_ARCH_ARM64
// The code below assumes user space cache operations are allowed. The goal
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 081405037a..3aa1e4dfa1 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -65,30 +65,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers fp and sp are set to the correct values though.
- for (int i = 0; i < Register::NumRegisters(); i++) {
- input_->SetRegister(i, 0);
- }
-
- // TODO(all): Do we also need to set a value to csp?
- input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
- }
-}
-
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
// There is no dynamic alignment padding on ARM64 in the input frame.
return false;
}
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 485aa780e3..c6ae37e733 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -56,20 +56,6 @@ const Register StringCompareDescriptor::LeftRegister() { return x1; }
const Register StringCompareDescriptor::RightRegister() { return x0; }
-const Register ArgumentsAccessReadDescriptor::index() { return x1; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return x0; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return x1; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return x2; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return x3; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return x2; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return x3; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return x4; }
-
-
const Register ApiGetterDescriptor::function_address() { return x2; }
@@ -98,6 +84,35 @@ void FastNewContextDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {x1, x3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x1: function
+ Register registers[] = {x1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x1: function
+ Register registers[] = {x1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x1: function
+ Register registers[] = {x1};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -116,6 +131,10 @@ const Register ToStringDescriptor::ReceiverRegister() { return x0; }
// static
+const Register ToNameDescriptor::ReceiverRegister() { return x0; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return x0; }
@@ -185,13 +204,6 @@ void CreateWeakCellDescriptor::InitializePlatformSpecific(
}
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {x3, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1 function the function to call
@@ -465,6 +477,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
&default_descriptor);
}
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+ kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+ kInterpreterDispatchTableRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -476,7 +496,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -488,7 +507,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index fbf459db46..953c3fd7f2 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -1488,18 +1488,15 @@ void MacroAssembler::LoadAccessor(Register dst, Register holder,
}
-void MacroAssembler::CheckEnumCache(Register object,
- Register null_value,
- Register scratch0,
- Register scratch1,
- Register scratch2,
- Register scratch3,
+void MacroAssembler::CheckEnumCache(Register object, Register scratch0,
+ Register scratch1, Register scratch2,
+ Register scratch3, Register scratch4,
Label* call_runtime) {
- DCHECK(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
- scratch3));
+ DCHECK(!AreAliased(object, scratch0, scratch1, scratch2, scratch3, scratch4));
Register empty_fixed_array_value = scratch0;
Register current_object = scratch1;
+ Register null_value = scratch4;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Label next, start;
@@ -1516,6 +1513,7 @@ void MacroAssembler::CheckEnumCache(Register object,
Cmp(enum_length, kInvalidEnumCacheSentinel);
B(eq, call_runtime);
+ LoadRoot(null_value, Heap::kNullValueRootIndex);
B(&start);
Bind(&next);
@@ -1576,10 +1574,9 @@ void MacroAssembler::InNewSpace(Register object,
Label* branch) {
DCHECK(cond == eq || cond == ne);
UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- And(temp, object, ExternalReference::new_space_mask(isolate()));
- Cmp(temp, ExternalReference::new_space_start(isolate()));
- B(cond, branch);
+ const int mask =
+ (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+ CheckPageFlag(object, temps.AcquireSameSizeAs(object), mask, cond, branch);
}
@@ -1641,6 +1638,20 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
+void MacroAssembler::AssertReceiver(Register object) {
+ if (emit_debug_code()) {
+ AssertNotSmi(object, kOperandIsASmiAndNotAReceiver);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ CompareObjectType(object, temp, temp, FIRST_JS_RECEIVER_TYPE);
+ Check(hs, kOperandIsNotAReceiver);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -1679,6 +1690,15 @@ void MacroAssembler::AssertPositiveOrZero(Register value) {
}
}
+void MacroAssembler::AssertNumber(Register value) {
+ if (emit_debug_code()) {
+ Label done;
+ JumpIfSmi(value, &done);
+ JumpIfHeapNumber(value, &done);
+ Abort(kOperandIsNotANumber);
+ Bind(&done);
+ }
+}
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
@@ -1727,19 +1747,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- ASM_LOCATION("MacroAssembler::InvokeBuiltin");
- // You can't call a builtin without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- // Fake a parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- LoadNativeContextSlot(native_context_index, x1);
- InvokeFunctionCode(x1, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
@@ -2423,7 +2430,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -3824,6 +3831,65 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Ldr(result, FieldMemOperand(scratch2, kValueOffset));
}
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+ Register code_entry,
+ Register scratch) {
+ const int offset = JSFunction::kCodeEntryOffset;
+
+ // Since a code entry (value) is always in old space, we don't need to update
+ // the remembered set. If incremental marking is off, there is nothing for us
+ // to do.
+ if (!FLAG_incremental_marking) return;
+
+ DCHECK(js_function.is(x1));
+ DCHECK(code_entry.is(x7));
+ DCHECK(scratch.is(x5));
+ AssertNotSmi(js_function);
+
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Add(scratch, js_function, offset - kHeapObjectTag);
+ Ldr(temp, MemOperand(scratch));
+ Cmp(temp, code_entry);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ CheckPageFlagClear(code_entry, scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, &done);
+ CheckPageFlagClear(js_function, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, &done);
+
+ const Register dst = scratch;
+ Add(dst, js_function, offset - kHeapObjectTag);
+
+ // Save caller-saved registers. Both input registers (x1 and x7) are
+ // caller-saved, so there is no need to push them.
+ PushCPURegList(kCallerSaved);
+
+ int argument_count = 3;
+
+ Mov(x0, js_function);
+ Mov(x1, dst);
+ Mov(x2, ExternalReference::isolate_address(isolate()));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(this);
+ CallCFunction(
+ ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate()),
+ argument_count);
+ }
+
+ // Restore caller-saved registers.
+ PopCPURegList(kCallerSaved);
+
+ Bind(&done);
+}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address,
@@ -3938,6 +4004,17 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
}
}
+void MacroAssembler::CheckPageFlag(const Register& object,
+ const Register& scratch, int mask,
+ Condition cc, Label* condition_met) {
+ And(scratch, object, ~Page::kPageAlignmentMask);
+ Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ if (cc == eq) {
+ TestAndBranchIfAnySet(scratch, mask, condition_met);
+ } else {
+ TestAndBranchIfAllClear(scratch, mask, condition_met);
+ }
+}
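
    CheckPageFlag relies on every MemoryChunk being page-aligned: masking off
    the low page-offset bits of any address inside the chunk yields the chunk
    header, where the flags word lives at a fixed offset. A standalone sketch
    with an assumed page size:

        #include <cstdint>
        #include <cstdio>

        int main() {
          const uintptr_t kPageSize = uintptr_t{1} << 19;  // assumed page size
          const uintptr_t kPageAlignmentMask = kPageSize - 1;
          uintptr_t object = 0x7f0000292345;               // address inside a chunk
          uintptr_t chunk = object & ~kPageAlignmentMask;  // And(scratch, object, ~mask)
          printf("chunk header at %#lx\n", (unsigned long)chunk);  // 0x7f0000280000
          return 0;
        }
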
void MacroAssembler::CheckPageFlagSet(const Register& object,
const Register& scratch,
@@ -4409,9 +4486,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
}
} else {
// Load the string to pass to Printf.
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 78997d6d02..ff41c4f27f 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -34,9 +34,9 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
-// TODO(titzer): arm64 is a pain for aliasing; get rid of these macros
#define kReturnRegister0 x0
#define kReturnRegister1 x1
+#define kReturnRegister2 x2
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kInterpreterAccumulatorRegister x0
@@ -970,6 +970,9 @@ class MacroAssembler : public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+ void AssertReceiver(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -981,6 +984,9 @@ class MacroAssembler : public Assembler {
// --debug-code.
void AssertPositiveOrZero(Register value);
+ // Abort execution if argument is not a number (heap number or smi).
+ void AssertNumber(Register value);
+
void JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
@@ -1138,10 +1144,6 @@ class MacroAssembler : public Assembler {
int num_arguments);
- // Invoke specified builtin JavaScript function.
- void InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
void Jump(Register target);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
@@ -1586,12 +1588,8 @@ class MacroAssembler : public Assembler {
void LeaveFrame(StackFrame::Type type);
// Returns map with validated enum cache in object register.
- void CheckEnumCache(Register object,
- Register null_value,
- Register scratch0,
- Register scratch1,
- Register scratch2,
- Register scratch3,
+ void CheckEnumCache(Register object, Register scratch0, Register scratch1,
+ Register scratch2, Register scratch3, Register scratch4,
Label* call_runtime);
// AllocationMemento support. Arrays may have an associated
@@ -1730,6 +1728,9 @@ class MacroAssembler : public Assembler {
Peek(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
}
+ void CheckPageFlag(const Register& object, const Register& scratch, int mask,
+ Condition cc, Label* condition_met);
+
void CheckPageFlagSet(const Register& object,
const Register& scratch,
int mask,
@@ -1793,6 +1794,11 @@ class MacroAssembler : public Assembler {
pointers_to_here_check_for_value);
}
+ // Notify the garbage collector that we wrote a code entry into a
+ // JSFunction. Only scratch is clobbered by the operation.
+ void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+ Register scratch);
+
void RecordWriteForMap(
Register object,
Register map,
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 8f72669f49..81dbdf8850 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -15,6 +15,7 @@
#include "src/disasm.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
+#include "src/runtime/runtime-utils.h"
namespace v8 {
namespace internal {
@@ -533,12 +534,6 @@ void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
// uses the ObjectPair structure.
// The simulator assumes all runtime calls return two 64-bits values. If they
// don't, register x1 is clobbered. This is fine because x1 is caller-saved.
-struct ObjectPair {
- int64_t res0;
- int64_t res1;
-};
-
-
typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
int64_t arg1,
int64_t arg2,
@@ -548,6 +543,11 @@ typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
int64_t arg6,
int64_t arg7);
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7);
+
typedef int64_t (*SimulatorRuntimeCompareCall)(double arg1, double arg2);
typedef double (*SimulatorRuntimeFPFPCall)(double arg1, double arg2);
typedef double (*SimulatorRuntimeFPCall)(double arg1);
@@ -589,8 +589,10 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
UNREACHABLE();
break;
- case ExternalReference::BUILTIN_CALL: {
- // Object* f(v8::internal::Arguments).
+ case ExternalReference::BUILTIN_CALL:
+ case ExternalReference::BUILTIN_CALL_PAIR: {
+ // Object* f(v8::internal::Arguments) or
+ // ObjectPair f(v8::internal::Arguments).
TraceSim("Type: BUILTIN_CALL\n");
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
@@ -607,13 +609,41 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
xreg(4), xreg(5), xreg(6), xreg(7));
ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
xreg(4), xreg(5), xreg(6), xreg(7));
- TraceSim("Returned: {0x%" PRIx64 ", 0x%" PRIx64 "}\n",
- result.res0, result.res1);
+ TraceSim("Returned: {%p, %p}\n", result.x, result.y);
+#ifdef DEBUG
+ CorruptAllCallerSavedCPURegisters();
+#endif
+ set_xreg(0, reinterpret_cast<int64_t>(result.x));
+ set_xreg(1, reinterpret_cast<int64_t>(result.y));
+ break;
+ }
+
+ case ExternalReference::BUILTIN_CALL_TRIPLE: {
+ // ObjectTriple f(v8::internal::Arguments).
+ TraceSim("Type: BUILTIN_CALL TRIPLE\n");
+ SimulatorRuntimeTripleCall target =
+ reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+
+ // We don't know how many arguments are being passed, but we can
+ // pass 8 without touching the stack. They will be ignored by the
+ // host function if they aren't used.
+ TraceSim(
+ "Arguments: "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64,
+ xreg(0), xreg(1), xreg(2), xreg(3), xreg(4), xreg(5), xreg(6),
+ xreg(7));
+ // Return location passed in x8.
+ ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(xreg(8));
+ ObjectTriple result = target(xreg(0), xreg(1), xreg(2), xreg(3), xreg(4),
+ xreg(5), xreg(6), xreg(7));
+ TraceSim("Returned: {%p, %p, %p}\n", result.x, result.y, result.z);
#ifdef DEBUG
CorruptAllCallerSavedCPURegisters();
#endif
- set_xreg(0, result.res0);
- set_xreg(1, result.res1);
+ *sim_result = result;
break;
}
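
    The triple case above works because AAPCS64 returns structs too large for
    the register pair through a hidden pointer passed in x8; the simulator
    reads that register explicitly and writes the result where the caller
    expects it. A standalone C++ illustration, with plain pointers standing in
    for V8's Object*:

        #include <cstdio>

        struct ObjectTriple { void* x; void* y; void* z; };

        ObjectTriple MakeTriple(void* a, void* b, void* c) {
          // On arm64 the compiler stores the result through the hidden x8 pointer.
          return {a, b, c};
        }

        int main() {
          int v = 42;
          ObjectTriple t = MakeTriple(&v, nullptr, &v);
          printf("{%p, %p, %p}\n", t.x, t.y, t.z);
          return 0;
        }
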
@@ -1966,10 +1996,10 @@ void Simulator::VisitDataProcessing1Source(Instruction* instr) {
switch (instr->Mask(DataProcessing1SourceMask)) {
case RBIT_w:
- set_wreg(dst, ReverseBits(wreg(src)));
+ set_wreg(dst, base::bits::ReverseBits(wreg(src)));
break;
case RBIT_x:
- set_xreg(dst, ReverseBits(xreg(src)));
+ set_xreg(dst, base::bits::ReverseBits(xreg(src)));
break;
case REV16_w:
set_wreg(dst, ReverseBytes(wreg(src), 1));
@@ -3510,7 +3540,8 @@ void Simulator::Debug() {
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int64_t value = *cur;
Heap* current_heap = isolate_->heap();
- if (((value & 1) == 0) || current_heap->Contains(obj)) {
+ if (((value & 1) == 0) ||
+ current_heap->ContainsSlow(obj->address())) {
PrintF(" (");
if ((value & kSmiTagMask) == 0) {
STATIC_ASSERT(kSmiValueSize == 32);
diff --git a/deps/v8/src/arm64/utils-arm64.h b/deps/v8/src/arm64/utils-arm64.h
index 1e1c0a33c2..35d9824837 100644
--- a/deps/v8/src/arm64/utils-arm64.h
+++ b/deps/v8/src/arm64/utils-arm64.h
@@ -55,19 +55,6 @@ int MaskToBit(uint64_t mask);
template <typename T>
-T ReverseBits(T value) {
- DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) || (sizeof(value) == 4) ||
- (sizeof(value) == 8));
- T result = 0;
- for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
- result = (result << 1) | (value & 1);
- value >>= 1;
- }
- return result;
-}
-
-
-template <typename T>
T ReverseBytes(T value, int block_bytes_log2) {
DCHECK((sizeof(value) == 4) || (sizeof(value) == 8));
DCHECK((1U << block_bytes_log2) <= sizeof(value));
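
    For reference, the helper removed here (now provided by
    base::bits::ReverseBits) shifts bits out of one end of the value and into
    the other; a runnable copy of the same loop:

        #include <cstdint>
        #include <cstdio>

        static uint32_t ReverseBits(uint32_t value) {
          uint32_t result = 0;
          for (unsigned i = 0; i < 32; i++) {
            result = (result << 1) | (value & 1);
            value >>= 1;
          }
          return result;
        }

        int main() {
          printf("%#x\n", ReverseBits(0x1u));   // 0x80000000
          printf("%#x\n", ReverseBits(0xf0u));  // 0xf000000
          return 0;
        }
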
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 4aac08d541..5c8c2ce16d 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -34,6 +34,7 @@
#include "src/assembler.h"
+#include <math.h>
#include <cmath>
#include "src/api.h"
#include "src/base/cpu.h"
@@ -50,8 +51,8 @@
#include "src/execution.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/interpreter/interpreter.h"
#include "src/ostreams.h"
-#include "src/parsing/token.h"
#include "src/profiler/cpu-profiler.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
@@ -265,8 +266,8 @@ CpuFeatureScope::~CpuFeatureScope() {
bool CpuFeatures::initialized_ = false;
unsigned CpuFeatures::supported_ = 0;
-unsigned CpuFeatures::cache_line_size_ = 0;
-
+unsigned CpuFeatures::icache_line_size_ = 0;
+unsigned CpuFeatures::dcache_line_size_ = 0;
// -----------------------------------------------------------------------------
// Implementation of Label
@@ -770,6 +771,9 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ return DebugCodegen::DebugBreakSlotIsPatched(pc_);
+}
#ifdef DEBUG
bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
@@ -943,6 +947,20 @@ void RelocInfo::Verify(Isolate* isolate) {
// Implementation of ExternalReference
+static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
+ switch (result_size) {
+ case 1:
+ return ExternalReference::BUILTIN_CALL;
+ case 2:
+ return ExternalReference::BUILTIN_CALL_PAIR;
+ case 3:
+ return ExternalReference::BUILTIN_CALL_TRIPLE;
+ }
+ UNREACHABLE();
+ return ExternalReference::BUILTIN_CALL;
+}
+
+
void ExternalReference::SetUp() {
double_constants.min_int = kMinInt;
double_constants.one_half = 0.5;
@@ -1025,18 +1043,23 @@ ExternalReference::ExternalReference(Builtins::Name name, Isolate* isolate)
ExternalReference::ExternalReference(Runtime::FunctionId id, Isolate* isolate)
- : address_(Redirect(isolate, Runtime::FunctionForId(id)->entry)) {}
+ : ExternalReference(Runtime::FunctionForId(id), isolate) {}
ExternalReference::ExternalReference(const Runtime::Function* f,
Isolate* isolate)
- : address_(Redirect(isolate, f->entry)) {}
+ : address_(Redirect(isolate, f->entry,
+ BuiltinCallTypeForResultSize(f->result_size))) {}
ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
return ExternalReference(isolate);
}
+ExternalReference ExternalReference::interpreter_dispatch_table_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->interpreter()->dispatch_table_address());
+}
ExternalReference::ExternalReference(StatsCounter* counter)
: address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
@@ -1057,9 +1080,16 @@ ExternalReference ExternalReference::
FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
}
+ExternalReference
+ExternalReference::incremental_marking_record_write_code_entry_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate,
+ FUNCTION_ADDR(IncrementalMarking::RecordWriteOfCodeEntryFromCode)));
+}
-ExternalReference ExternalReference::
- store_buffer_overflow_function(Isolate* isolate) {
+ExternalReference ExternalReference::store_buffer_overflow_function(
+ Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
@@ -1117,6 +1147,67 @@ ExternalReference ExternalReference::compute_output_frames_function(
Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
}
+static void f32_trunc_wrapper(float* param) { *param = truncf(*param); }
+
+ExternalReference ExternalReference::f32_trunc_wrapper_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_trunc_wrapper)));
+}
+
+static void f32_floor_wrapper(float* param) { *param = floorf(*param); }
+
+ExternalReference ExternalReference::f32_floor_wrapper_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_floor_wrapper)));
+}
+
+static void f32_ceil_wrapper(float* param) { *param = ceilf(*param); }
+
+ExternalReference ExternalReference::f32_ceil_wrapper_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f32_ceil_wrapper)));
+}
+
+static void f32_nearest_int_wrapper(float* param) {
+ *param = nearbyintf(*param);
+}
+
+ExternalReference ExternalReference::f32_nearest_int_wrapper_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(f32_nearest_int_wrapper)));
+}
+
+static void f64_trunc_wrapper(double* param) { *param = trunc(*param); }
+
+ExternalReference ExternalReference::f64_trunc_wrapper_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_trunc_wrapper)));
+}
+
+static void f64_floor_wrapper(double* param) { *param = floor(*param); }
+
+ExternalReference ExternalReference::f64_floor_wrapper_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_floor_wrapper)));
+}
+
+static void f64_ceil_wrapper(double* param) { *param = ceil(*param); }
+
+ExternalReference ExternalReference::f64_ceil_wrapper_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_ceil_wrapper)));
+}
+
+static void f64_nearest_int_wrapper(double* param) {
+ *param = nearbyint(*param);
+}
+
+ExternalReference ExternalReference::f64_nearest_int_wrapper_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(f64_nearest_int_wrapper)));
+}
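
    These wrappers all share the same in/out-parameter shape, updating the
    value in place so the redirected C call needs no floating-point
    return-value plumbing. The f64 truncation one, compiled standalone:

        #include <cmath>
        #include <cstdio>

        static void f64_trunc_wrapper(double* param) { *param = trunc(*param); }

        int main() {
          double v = 2.9;
          f64_trunc_wrapper(&v);
          printf("%.1f\n", v);  // 2.0
          return 0;
        }
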
ExternalReference ExternalReference::log_enter_external_function(
Isolate* isolate) {
@@ -1182,12 +1273,6 @@ ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
}
-ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
- return ExternalReference(reinterpret_cast<Address>(
- isolate->heap()->NewSpaceMask()));
-}
-
-
ExternalReference ExternalReference::new_space_allocation_top_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
@@ -1521,23 +1606,6 @@ ExternalReference ExternalReference::power_double_int_function(
}
-bool EvalComparison(Token::Value op, double op1, double op2) {
- DCHECK(Token::IsCompareOp(op));
- switch (op) {
- case Token::EQ:
- case Token::EQ_STRICT: return (op1 == op2);
- case Token::NE: return (op1 != op2);
- case Token::LT: return (op1 < op2);
- case Token::GT: return (op1 > op2);
- case Token::LTE: return (op1 <= op2);
- case Token::GTE: return (op1 >= op2);
- default:
- UNREACHABLE();
- return false;
- }
-}
-
-
ExternalReference ExternalReference::mod_two_doubles_operation(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
@@ -1837,11 +1905,9 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
// Platform specific but identical code for all the platforms.
-void Assembler::RecordDeoptReason(const int reason,
- const SourcePosition position) {
+void Assembler::RecordDeoptReason(const int reason, int raw_position) {
if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) {
EnsureSpace ensure_space(this);
- int raw_position = position.IsUnknown() ? 0 : position.raw();
RecordRelocInfo(RelocInfo::POSITION, raw_position);
RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
}
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 08c6b38541..7bd9ee65f2 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -38,7 +38,6 @@
#include "src/allocation.h"
#include "src/builtins.h"
#include "src/isolate.h"
-#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -49,7 +48,6 @@ class ApiFunction;
namespace internal {
// Forward declarations.
-class SourcePosition;
class StatsCounter;
// -----------------------------------------------------------------------------
@@ -225,9 +223,14 @@ class CpuFeatures : public AllStatic {
static inline bool SupportsCrankshaft();
- static inline unsigned cache_line_size() {
- DCHECK(cache_line_size_ != 0);
- return cache_line_size_;
+ static inline unsigned icache_line_size() {
+ DCHECK(icache_line_size_ != 0);
+ return icache_line_size_;
+ }
+
+ static inline unsigned dcache_line_size() {
+ DCHECK(dcache_line_size_ != 0);
+ return dcache_line_size_;
}
static void PrintTarget();
@@ -243,7 +246,8 @@ class CpuFeatures : public AllStatic {
static void ProbeImpl(bool cross_compile);
static unsigned supported_;
- static unsigned cache_line_size_;
+ static unsigned icache_line_size_;
+ static unsigned dcache_line_size_;
static bool initialized_;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@@ -614,13 +618,9 @@ class RelocInfo {
template<typename StaticVisitor> inline void Visit(Heap* heap);
inline void Visit(Isolate* isolate, ObjectVisitor* v);
- // Check whether this return sequence has been patched
- // with a call to the debugger.
- INLINE(bool IsPatchedReturnSequence());
-
// Check whether this debug break slot has been patched with a call to the
// debugger.
- INLINE(bool IsPatchedDebugBreakSlotSequence());
+ bool IsPatchedDebugBreakSlotSequence();
#ifdef DEBUG
// Check whether the given code contains relocation information that
@@ -819,6 +819,14 @@ class ExternalReference BASE_EMBEDDED {
// Object* f(v8::internal::Arguments).
BUILTIN_CALL, // default
+ // Builtin call returning object pair.
+ // ObjectPair f(v8::internal::Arguments).
+ BUILTIN_CALL_PAIR,
+
+ // Builtin call returning object triple.
+ // ObjectTriple f(v8::internal::Arguments).
+ BUILTIN_CALL_TRIPLE,
+
// Builtin that takes float arguments and returns an int.
// int f(double, double).
BUILTIN_COMPARE_CALL,
@@ -885,8 +893,12 @@ class ExternalReference BASE_EMBEDDED {
// pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually.
+ static ExternalReference interpreter_dispatch_table_address(Isolate* isolate);
+
static ExternalReference incremental_marking_record_write_function(
Isolate* isolate);
+ static ExternalReference incremental_marking_record_write_code_entry_function(
+ Isolate* isolate);
static ExternalReference store_buffer_overflow_function(
Isolate* isolate);
static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
@@ -901,6 +913,15 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);
+ static ExternalReference f32_trunc_wrapper_function(Isolate* isolate);
+ static ExternalReference f32_floor_wrapper_function(Isolate* isolate);
+ static ExternalReference f32_ceil_wrapper_function(Isolate* isolate);
+ static ExternalReference f32_nearest_int_wrapper_function(Isolate* isolate);
+ static ExternalReference f64_trunc_wrapper_function(Isolate* isolate);
+ static ExternalReference f64_floor_wrapper_function(Isolate* isolate);
+ static ExternalReference f64_ceil_wrapper_function(Isolate* isolate);
+ static ExternalReference f64_nearest_int_wrapper_function(Isolate* isolate);
+
// Log support.
static ExternalReference log_enter_external_function(Isolate* isolate);
static ExternalReference log_leave_external_function(Isolate* isolate);
@@ -933,7 +954,6 @@ class ExternalReference BASE_EMBEDDED {
// Static variable Heap::NewSpaceStart()
static ExternalReference new_space_start(Isolate* isolate);
- static ExternalReference new_space_mask(Isolate* isolate);
// Write barrier.
static ExternalReference store_buffer_top(Isolate* isolate);
@@ -1120,8 +1140,6 @@ inline int NumberOfBitsSet(uint32_t x) {
return num_bits_set;
}
-bool EvalComparison(Token::Value op, double op1, double op2);
-
// Computes pow(x, y) with the special cases in the spec for Math.pow.
double power_helper(Isolate* isolate, double x, double y);
double power_double_int(double x, int y);
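The f32_/f64_ wrapper references back the WebAssembly rounding operations with calls into C library math. A plausible shape for them, sketched under the assumption that each wrapper reads and writes its operand through a single pointer slot (the real definitions live elsewhere in the V8 tree):

    #include <cmath>

    // Sketch: in-place rounding wrappers of the kind the ExternalReferences
    // above would point at. Names mirror the accessors; bodies are assumed.
    static void f32_trunc_wrapper(float* p) { *p = std::trunc(*p); }
    static void f32_floor_wrapper(float* p) { *p = std::floor(*p); }
    static void f32_ceil_wrapper(float* p) { *p = std::ceil(*p); }
    static void f32_nearest_int_wrapper(float* p) { *p = std::nearbyint(*p); }
    static void f64_trunc_wrapper(double* p) { *p = std::trunc(*p); }
    static void f64_floor_wrapper(double* p) { *p = std::floor(*p); }
    static void f64_ceil_wrapper(double* p) { *p = std::ceil(*p); }
    static void f64_nearest_int_wrapper(double* p) { *p = std::nearbyint(*p); }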
diff --git a/deps/v8/src/ast/OWNERS b/deps/v8/src/ast/OWNERS
index 7cd947998d..4fdc3f9540 100644
--- a/deps/v8/src/ast/OWNERS
+++ b/deps/v8/src/ast/OWNERS
@@ -5,3 +5,4 @@ bmeurer@chromium.org
littledan@chromium.org
mstarzinger@chromium.org
rossberg@chromium.org
+
diff --git a/deps/v8/src/ast/ast-expression-rewriter.cc b/deps/v8/src/ast/ast-expression-rewriter.cc
index 49cc7f6ff4..edee91d3a1 100644
--- a/deps/v8/src/ast/ast-expression-rewriter.cc
+++ b/deps/v8/src/ast/ast-expression-rewriter.cc
@@ -398,10 +398,10 @@ void AstExpressionRewriter::VisitDoExpression(DoExpression* node) {
}
-void AstExpressionRewriter::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* node) {
+void AstExpressionRewriter::VisitRewritableExpression(
+ RewritableExpression* node) {
REWRITE_THIS(node);
- AST_REWRITE_PROPERTY(Expression, node, expression);
+ AST_REWRITE(Expression, node->expression(), node->Rewrite(replacement));
}
diff --git a/deps/v8/src/ast/ast-expression-rewriter.h b/deps/v8/src/ast/ast-expression-rewriter.h
index 916842ab20..1da3fa8247 100644
--- a/deps/v8/src/ast/ast-expression-rewriter.h
+++ b/deps/v8/src/ast/ast-expression-rewriter.h
@@ -8,9 +8,7 @@
#include "src/allocation.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
-#include "src/effects.h"
#include "src/type-info.h"
-#include "src/types.h"
#include "src/zone.h"
namespace v8 {
diff --git a/deps/v8/src/ast/ast-expression-visitor.cc b/deps/v8/src/ast/ast-expression-visitor.cc
index 6b2550c541..dbf4ea463c 100644
--- a/deps/v8/src/ast/ast-expression-visitor.cc
+++ b/deps/v8/src/ast/ast-expression-visitor.cc
@@ -208,6 +208,7 @@ void AstExpressionVisitor::VisitNativeFunctionLiteral(
void AstExpressionVisitor::VisitDoExpression(DoExpression* expr) {
+ VisitExpression(expr);
RECURSE(VisitBlock(expr->block()));
RECURSE(VisitVariableProxy(expr->result()));
}
@@ -399,8 +400,8 @@ void AstExpressionVisitor::VisitSuperCallReference(SuperCallReference* expr) {
}
-void AstExpressionVisitor::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* expr) {
+void AstExpressionVisitor::VisitRewritableExpression(
+ RewritableExpression* expr) {
VisitExpression(expr);
RECURSE(Visit(expr->expression()));
}
diff --git a/deps/v8/src/ast/ast-expression-visitor.h b/deps/v8/src/ast/ast-expression-visitor.h
index cda624d5b7..545a45c416 100644
--- a/deps/v8/src/ast/ast-expression-visitor.h
+++ b/deps/v8/src/ast/ast-expression-visitor.h
@@ -8,9 +8,7 @@
#include "src/allocation.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
-#include "src/effects.h"
#include "src/type-info.h"
-#include "src/types.h"
#include "src/zone.h"
namespace v8 {
diff --git a/deps/v8/src/ast/ast-literal-reindexer.cc b/deps/v8/src/ast/ast-literal-reindexer.cc
index fce33e70b8..1f79b12217 100644
--- a/deps/v8/src/ast/ast-literal-reindexer.cc
+++ b/deps/v8/src/ast/ast-literal-reindexer.cc
@@ -44,7 +44,8 @@ void AstLiteralReindexer::VisitNativeFunctionLiteral(
void AstLiteralReindexer::VisitDoExpression(DoExpression* node) {
- // TODO(caitp): literals in do expressions need re-indexing too.
+ Visit(node->block());
+ Visit(node->result());
}
@@ -76,8 +77,8 @@ void AstLiteralReindexer::VisitSuperCallReference(SuperCallReference* node) {
}
-void AstLiteralReindexer::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* node) {
+void AstLiteralReindexer::VisitRewritableExpression(
+ RewritableExpression* node) {
Visit(node->expression());
}
@@ -187,6 +188,8 @@ void AstLiteralReindexer::VisitCompareOperation(CompareOperation* node) {
void AstLiteralReindexer::VisitSpread(Spread* node) {
+ // This is reachable because ParserBase::ParseArrowFunctionLiteral calls
+ // ReindexLiterals before calling RewriteDestructuringAssignments.
Visit(node->expression());
}
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
index 6c2b696a5d..272f9bde11 100644
--- a/deps/v8/src/ast/ast-numbering.cc
+++ b/deps/v8/src/ast/ast-numbering.cc
@@ -306,7 +306,6 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
IncrementNodeCount();
DisableOptimization(kTryCatchStatement);
- node->set_base_id(ReserveIdRange(TryCatchStatement::num_ids()));
Visit(node->try_block());
Visit(node->catch_block());
}
@@ -315,7 +314,6 @@ void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
IncrementNodeCount();
DisableOptimization(kTryFinallyStatement);
- node->set_base_id(ReserveIdRange(TryFinallyStatement::num_ids()));
Visit(node->try_block());
Visit(node->finally_block());
}
@@ -372,11 +370,7 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
}
-void AstNumberingVisitor::VisitSpread(Spread* node) {
- IncrementNodeCount();
- DisableCrankshaft(kSpread);
- Visit(node->expression());
-}
+void AstNumberingVisitor::VisitSpread(Spread* node) { UNREACHABLE(); }
void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
@@ -510,6 +504,9 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
void AstNumberingVisitor::VisitCall(Call* node) {
IncrementNodeCount();
+ if (node->tail_call_mode() == TailCallMode::kAllow) {
+ DisableOptimization(kTailCall);
+ }
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(Call::num_ids()));
Visit(node->expression());
@@ -557,10 +554,10 @@ void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
}
-void AstNumberingVisitor::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* node) {
+void AstNumberingVisitor::VisitRewritableExpression(
+ RewritableExpression* node) {
IncrementNodeCount();
- node->set_base_id(ReserveIdRange(RewritableAssignmentExpression::num_ids()));
+ node->set_base_id(ReserveIdRange(RewritableExpression::num_ids()));
Visit(node->expression());
}
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 2e17fbcfaf..189d4cc0f5 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -172,6 +172,8 @@ void AstValue::Internalize(Isolate* isolate) {
if (symbol_name_[0] == 'i') {
DCHECK_EQ(0, strcmp(symbol_name_, "iterator_symbol"));
value_ = isolate->factory()->iterator_symbol();
+ } else if (strcmp(symbol_name_, "hasInstance_symbol") == 0) {
+ value_ = isolate->factory()->has_instance_symbol();
} else {
DCHECK_EQ(0, strcmp(symbol_name_, "home_object_symbol"));
value_ = isolate->factory()->home_object_symbol();
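The Internalize fast path dispatches on the raw symbol name: the leading-'i' check catches iterator_symbol, the new strcmp branch routes hasInstance_symbol, and everything else must be home_object_symbol. The same three-way dispatch restated standalone (factory lookups replaced by plain string results, so this is a sketch rather than V8 code):

    #include <cassert>
    #include <cstring>

    // Sketch of AstValue::Internalize's name dispatch with the factory
    // lookups replaced by descriptive strings.
    const char* InternalizeSymbolName(const char* symbol_name) {
      if (symbol_name[0] == 'i') {
        assert(std::strcmp(symbol_name, "iterator_symbol") == 0);
        return "Symbol.iterator";
      } else if (std::strcmp(symbol_name, "hasInstance_symbol") == 0) {
        return "Symbol.hasInstance";
      }
      assert(std::strcmp(symbol_name, "home_object_symbol") == 0);
      return "home object";
    }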
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 4ae912ea82..85e8277d80 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -255,6 +255,7 @@ class AstValue : public ZoneObject {
F(dot_catch, ".catch") \
F(empty, "") \
F(eval, "eval") \
+ F(function, "function") \
F(get_space, "get ") \
F(let, "let") \
F(native, "native") \
@@ -263,9 +264,11 @@ class AstValue : public ZoneObject {
F(proto, "__proto__") \
F(prototype, "prototype") \
F(rest_parameter, ".rest_parameter") \
+ F(return, "return") \
F(set_space, "set ") \
F(this, "this") \
F(this_function, ".this_function") \
+ F(throw, "throw") \
F(undefined, "undefined") \
F(use_asm, "use asm") \
F(use_strong, "use strong") \
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index 69e7351a7d..9b2c6388c1 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -5,6 +5,8 @@
#include "src/ast/ast.h"
#include <cmath> // For isfinite.
+
+#include "src/ast/prettyprinter.h"
#include "src/ast/scopes.h"
#include "src/builtins.h"
#include "src/code-stubs.h"
@@ -32,6 +34,25 @@ AST_NODE_LIST(DECL_ACCEPT)
// ----------------------------------------------------------------------------
// Implementation of other node functionality.
+#ifdef DEBUG
+
+void AstNode::Print() { Print(Isolate::Current()); }
+
+
+void AstNode::Print(Isolate* isolate) {
+ AstPrinter::PrintOut(isolate, this);
+}
+
+
+void AstNode::PrettyPrint() { PrettyPrint(Isolate::Current()); }
+
+
+void AstNode::PrettyPrint(Isolate* isolate) {
+ PrettyPrinter::PrintOut(isolate, this);
+}
+
+#endif // DEBUG
+
bool Expression::IsSmiLiteral() const {
return IsLiteral() && AsLiteral()->value()->IsSmi();
@@ -254,14 +275,21 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
}
}
+bool ObjectLiteralProperty::NeedsSetFunctionName() const {
+ return is_computed_name_ &&
+ (value_->IsAnonymousFunctionDefinition() ||
+ (value_->IsFunctionLiteral() &&
+ IsConciseMethod(value_->AsFunctionLiteral()->kind())));
+}
void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
// The logic that computes the number of slots needed for vector store
// ICs must mirror FullCodeGenerator::VisitClassLiteral.
+ prototype_slot_ = spec->AddLoadICSlot();
if (NeedsProxySlot()) {
- slot_ = spec->AddStoreICSlot();
+ proxy_slot_ = spec->AddStoreICSlot();
}
for (int i = 0; i < properties()->length(); i++) {
@@ -476,10 +504,11 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
+ DCHECK_LT(first_spread_index_, 0);
+
if (!constant_elements_.is_null()) return;
- int constants_length =
- first_spread_index_ >= 0 ? first_spread_index_ : values()->length();
+ int constants_length = values()->length();
// Allocate a fixed array to hold all the object literals.
Handle<JSArray> array = isolate->factory()->NewJSArray(
@@ -487,7 +516,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
Strength::WEAK, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
// Fill in the literals.
- bool is_simple = (first_spread_index_ < 0);
+ bool is_simple = true;
int depth_acc = 1;
bool is_holey = false;
int array_index = 0;
@@ -553,7 +582,7 @@ void ArrayLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
int array_index = 0;
for (; array_index < values()->length(); array_index++) {
Expression* subexpr = values()->at(array_index);
- if (subexpr->IsSpread()) break;
+ DCHECK(!subexpr->IsSpread());
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
// We'll reuse the same literal slot for all of the non-constant
@@ -797,14 +826,12 @@ void AstVisitor::VisitExpressions(ZoneList<Expression*>* expressions) {
}
}
-
CaseClause::CaseClause(Zone* zone, Expression* label,
ZoneList<Statement*>* statements, int pos)
: Expression(zone, pos),
label_(label),
statements_(statements),
- compare_type_(Type::None(zone)) {}
-
+ compare_type_(Type::None()) {}
uint32_t Literal::Hash() {
return raw_value()->IsString()
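The new NeedsSetFunctionName() predicate captures the ES2015 rule that a computed property key whose value is an anonymous function or class definition (or a concise method) must have its name assigned at runtime, since the key is not known statically. The predicate restated with V8's AST types flattened to flags (a sketch, not the V8 code):

    // Sketch: runtime function-naming predicate with AST queries reduced
    // to booleans.
    struct PropertySketch {
      bool is_computed_name;
      bool value_is_anonymous_definition;  // anonymous function/class expr
      bool value_is_concise_method;        // { [key]() {} } style method
    };

    bool NeedsSetFunctionName(const PropertySketch& p) {
      return p.is_computed_name && (p.value_is_anonymous_definition ||
                                    p.value_is_concise_method);
    }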
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 7f00955a64..dcb440d7c7 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -91,7 +91,7 @@ namespace internal {
V(CaseClause) \
V(EmptyParentheses) \
V(DoExpression) \
- V(RewritableAssignmentExpression)
+ V(RewritableExpression)
#define AST_NODE_LIST(V) \
DECLARATION_NODE_LIST(V) \
@@ -196,15 +196,18 @@ class AstNode: public ZoneObject {
virtual NodeType node_type() const = 0;
int position() const { return position_; }
+#ifdef DEBUG
+ void PrettyPrint(Isolate* isolate);
+ void PrettyPrint();
+ void Print(Isolate* isolate);
+ void Print();
+#endif // DEBUG
+
// Type testing & conversion functions overridden by concrete subclasses.
#define DECLARE_NODE_FUNCTIONS(type) \
- bool Is##type() const { return node_type() == AstNode::k##type; } \
- type* As##type() { \
- return Is##type() ? reinterpret_cast<type*>(this) : NULL; \
- } \
- const type* As##type() const { \
- return Is##type() ? reinterpret_cast<const type*>(this) : NULL; \
- }
+ V8_INLINE bool Is##type() const; \
+ V8_INLINE type* As##type(); \
+ V8_INLINE const type* As##type() const;
AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
#undef DECLARE_NODE_FUNCTIONS
@@ -237,7 +240,6 @@ class Statement : public AstNode {
bool IsEmpty() { return AsEmptyStatement() != NULL; }
virtual bool IsJump() const { return false; }
- virtual void MarkTail() {}
};
@@ -317,6 +319,10 @@ class Expression : public AstNode {
// names because [] for string objects is handled only by keyed ICs.
virtual bool IsPropertyName() const { return false; }
+ // True iff the expression is a class or function expression without
+ // a syntactic name.
+ virtual bool IsAnonymousFunctionDefinition() const { return false; }
+
// True iff the expression is a literal represented as a smi.
bool IsSmiLiteral() const;
@@ -365,14 +371,6 @@ class Expression : public AstNode {
BailoutId id() const { return BailoutId(local_id(0)); }
TypeFeedbackId test_id() const { return TypeFeedbackId(local_id(1)); }
- // Parenthesized expressions in the form `( Expression )`.
- void set_is_parenthesized() {
- bit_field_ = ParenthesizedField::update(bit_field_, true);
- }
- bool is_parenthesized() const {
- return ParenthesizedField::decode(bit_field_);
- }
-
protected:
Expression(Zone* zone, int pos)
: AstNode(pos),
@@ -395,8 +393,6 @@ class Expression : public AstNode {
int base_id_;
Bounds bounds_;
class ToBooleanTypesField : public BitField16<uint16_t, 0, 9> {};
- class ParenthesizedField
- : public BitField16<bool, ToBooleanTypesField::kNext, 1> {};
uint16_t bit_field_;
// Ends with 16-bit field; deriving classes in turn begin with
// 16-bit fields for optimum packing efficiency.
@@ -471,10 +467,6 @@ class Block final : public BreakableStatement {
&& labels() == NULL; // Good enough as an approximation...
}
- void MarkTail() override {
- if (!statements_.is_empty()) statements_.last()->MarkTail();
- }
-
Scope* scope() const { return scope_; }
void set_scope(Scope* scope) { scope_ = scope; }
@@ -505,8 +497,6 @@ class DoExpression final : public Expression {
VariableProxy* result() { return result_; }
void set_result(VariableProxy* v) { result_ = v; }
- void MarkTail() override { block_->MarkTail(); }
-
protected:
DoExpression(Zone* zone, Block* block, VariableProxy* result, int pos)
: Expression(zone, pos), block_(block), result_(result) {
@@ -555,24 +545,10 @@ class VariableDeclaration final : public Declaration {
return mode() == VAR ? kCreatedInitialized : kNeedsInitialization;
}
- bool is_class_declaration() const { return is_class_declaration_; }
-
- // VariableDeclarations can be grouped into consecutive declaration
- // groups. Each VariableDeclaration is associated with the start position of
- // the group it belongs to. The positions are used for strong mode scope
- // checks for classes and functions.
- int declaration_group_start() const { return declaration_group_start_; }
-
protected:
VariableDeclaration(Zone* zone, VariableProxy* proxy, VariableMode mode,
- Scope* scope, int pos, bool is_class_declaration = false,
- int declaration_group_start = -1)
- : Declaration(zone, proxy, mode, scope, pos),
- is_class_declaration_(is_class_declaration),
- declaration_group_start_(declaration_group_start) {}
-
- bool is_class_declaration_;
- int declaration_group_start_;
+ Scope* scope, int pos)
+ : Declaration(zone, proxy, mode, scope, pos) {}
};
@@ -820,6 +796,10 @@ class ForEachStatement : public IterationStatement {
FeedbackVectorSlotCache* cache) override;
FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
+ static const char* VisitModeString(VisitMode mode) {
+ return mode == ITERATE ? "for-of" : "for-in";
+ }
+
protected:
ForEachStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
: IterationStatement(zone, labels, pos), each_(NULL), subject_(NULL) {}
@@ -857,9 +837,9 @@ class ForInStatement final : public ForEachStatement {
static int num_ids() { return parent_num_ids() + 6; }
BailoutId BodyId() const { return BailoutId(local_id(0)); }
- BailoutId PrepareId() const { return BailoutId(local_id(1)); }
- BailoutId EnumId() const { return BailoutId(local_id(2)); }
- BailoutId ToObjectId() const { return BailoutId(local_id(3)); }
+ BailoutId EnumId() const { return BailoutId(local_id(1)); }
+ BailoutId ToObjectId() const { return BailoutId(local_id(2)); }
+ BailoutId PrepareId() const { return BailoutId(local_id(3)); }
BailoutId FilterId() const { return BailoutId(local_id(4)); }
BailoutId AssignmentId() const { return BailoutId(local_id(5)); }
BailoutId ContinueId() const override { return EntryId(); }
@@ -885,11 +865,13 @@ class ForOfStatement final : public ForEachStatement {
void Initialize(Expression* each,
Expression* subject,
Statement* body,
+ Variable* iterator,
Expression* assign_iterator,
Expression* next_result,
Expression* result_done,
Expression* assign_each) {
ForEachStatement::Initialize(each, subject, body);
+ iterator_ = iterator;
assign_iterator_ = assign_iterator;
next_result_ = next_result;
result_done_ = result_done;
@@ -900,6 +882,10 @@ class ForOfStatement final : public ForEachStatement {
return subject();
}
+ Variable* iterator() const {
+ return iterator_;
+ }
+
// iterator = subject[Symbol.iterator]()
Expression* assign_iterator() const {
return assign_iterator_;
@@ -934,6 +920,7 @@ class ForOfStatement final : public ForEachStatement {
protected:
ForOfStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
: ForEachStatement(zone, labels, pos),
+ iterator_(NULL),
assign_iterator_(NULL),
next_result_(NULL),
result_done_(NULL),
@@ -943,6 +930,7 @@ class ForOfStatement final : public ForEachStatement {
private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+ Variable* iterator_;
Expression* assign_iterator_;
Expression* next_result_;
Expression* result_done_;
@@ -957,7 +945,6 @@ class ExpressionStatement final : public Statement {
void set_expression(Expression* e) { expression_ = e; }
Expression* expression() const { return expression_; }
bool IsJump() const override { return expression_->IsThrow(); }
- void MarkTail() override { expression_->MarkTail(); }
protected:
ExpressionStatement(Zone* zone, Expression* expression, int pos)
@@ -1039,8 +1026,6 @@ class WithStatement final : public Statement {
BailoutId ToObjectId() const { return BailoutId(local_id(0)); }
BailoutId EntryId() const { return BailoutId(local_id(1)); }
- void MarkTail() override { statement_->MarkTail(); }
-
protected:
WithStatement(Zone* zone, Scope* scope, Expression* expression,
Statement* statement, int pos)
@@ -1083,10 +1068,6 @@ class CaseClause final : public Expression {
BailoutId EntryId() const { return BailoutId(local_id(0)); }
TypeFeedbackId CompareId() { return TypeFeedbackId(local_id(1)); }
- void MarkTail() override {
- if (!statements_->is_empty()) statements_->last()->MarkTail();
- }
-
Type* compare_type() { return compare_type_; }
void set_compare_type(Type* type) { compare_type_ = type; }
@@ -1119,10 +1100,6 @@ class SwitchStatement final : public BreakableStatement {
void set_tag(Expression* t) { tag_ = t; }
- void MarkTail() override {
- if (!cases_->is_empty()) cases_->last()->MarkTail();
- }
-
protected:
SwitchStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
: BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
@@ -1160,11 +1137,6 @@ class IfStatement final : public Statement {
&& HasElseStatement() && else_statement()->IsJump();
}
- void MarkTail() override {
- then_statement_->MarkTail();
- else_statement_->MarkTail();
- }
-
void set_base_id(int id) { base_id_ = id; }
static int num_ids() { return parent_num_ids() + 3; }
BailoutId IfId() const { return BailoutId(local_id(0)); }
@@ -1201,27 +1173,12 @@ class TryStatement : public Statement {
Block* try_block() const { return try_block_; }
void set_try_block(Block* b) { try_block_ = b; }
- void set_base_id(int id) { base_id_ = id; }
- static int num_ids() { return parent_num_ids() + 1; }
- BailoutId HandlerId() const { return BailoutId(local_id(0)); }
-
protected:
TryStatement(Zone* zone, Block* try_block, int pos)
- : Statement(zone, pos),
- try_block_(try_block),
- base_id_(BailoutId::None().ToInt()) {}
- static int parent_num_ids() { return 0; }
-
- int base_id() const {
- DCHECK(!BailoutId(base_id_).IsNone());
- return base_id_;
- }
+ : Statement(zone, pos), try_block_(try_block) {}
private:
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
Block* try_block_;
- int base_id_;
};
@@ -1234,8 +1191,6 @@ class TryCatchStatement final : public TryStatement {
Block* catch_block() const { return catch_block_; }
void set_catch_block(Block* b) { catch_block_ = b; }
- void MarkTail() override { catch_block_->MarkTail(); }
-
protected:
TryCatchStatement(Zone* zone, Block* try_block, Scope* scope,
Variable* variable, Block* catch_block, int pos)
@@ -1258,8 +1213,6 @@ class TryFinallyStatement final : public TryStatement {
Block* finally_block() const { return finally_block_; }
void set_finally_block(Block* b) { finally_block_ = b; }
- void MarkTail() override { finally_block_->MarkTail(); }
-
protected:
TryFinallyStatement(Zone* zone, Block* try_block, Block* finally_block,
int pos)
@@ -1472,6 +1425,8 @@ class ObjectLiteralProperty final : public ZoneObject {
void set_receiver_type(Handle<Map> map) { receiver_type_ = map; }
+ bool NeedsSetFunctionName() const;
+
protected:
friend class AstNodeFactory;
@@ -1510,6 +1465,9 @@ class ObjectLiteral final : public MaterializedLiteral {
bool may_store_doubles() const { return may_store_doubles_; }
bool has_function() const { return has_function_; }
bool has_elements() const { return has_elements_; }
+ bool has_shallow_properties() const {
+ return depth() == 1 && !has_elements() && !may_store_doubles();
+ }
// Decide if a property should be in the object boilerplate.
static bool IsBoilerplateProperty(Property* property);
@@ -1526,7 +1484,7 @@ class ObjectLiteral final : public MaterializedLiteral {
int ComputeFlags(bool disable_mementos = false) const {
int flags = fast_elements() ? kFastElements : kNoFlags;
flags |= has_function() ? kHasFunction : kNoFlags;
- if (depth() == 1 && !has_elements() && !may_store_doubles()) {
+ if (has_shallow_properties()) {
flags |= kShallowProperties;
}
if (disable_mementos) {
@@ -1683,6 +1641,19 @@ class ArrayLiteral final : public MaterializedLiteral {
return flags;
}
+ // Provide a mechanism for iterating through values to rewrite spreads.
+ ZoneList<Expression*>::iterator FirstSpread() const {
+ return (first_spread_index_ >= 0) ? values_->begin() + first_spread_index_
+ : values_->end();
+ }
+ ZoneList<Expression*>::iterator EndValue() const { return values_->end(); }
+
+ // Rewind an array literal omitting everything from the first spread on.
+ void RewindSpreads() {
+ values_->Rewind(first_spread_index_);
+ first_spread_index_ = -1;
+ }
+
enum Flags {
kNoFlags = 0,
kShallowElements = 1,
@@ -1975,7 +1946,10 @@ class Call final : public Expression {
bit_field_ = IsUninitializedField::update(bit_field_, b);
}
- bool is_tail() const { return IsTailField::decode(bit_field_); }
+ TailCallMode tail_call_mode() const {
+ return IsTailField::decode(bit_field_) ? TailCallMode::kAllow
+ : TailCallMode::kDisallow;
+ }
void MarkTail() override {
bit_field_ = IsTailField::update(bit_field_, true);
}
@@ -2349,7 +2323,7 @@ class CompareOperation final : public Expression {
op_(op),
left_(left),
right_(right),
- combined_type_(Type::None(zone)) {
+ combined_type_(Type::None()) {
DCHECK(Token::IsCompareOp(op));
}
static int parent_num_ids() { return Expression::num_ids(); }
@@ -2372,17 +2346,20 @@ class Spread final : public Expression {
Expression* expression() const { return expression_; }
void set_expression(Expression* e) { expression_ = e; }
+ int expression_position() const { return expr_pos_; }
+
static int num_ids() { return parent_num_ids(); }
protected:
- Spread(Zone* zone, Expression* expression, int pos)
- : Expression(zone, pos), expression_(expression) {}
+ Spread(Zone* zone, Expression* expression, int pos, int expr_pos)
+ : Expression(zone, pos), expression_(expression), expr_pos_(expr_pos) {}
static int parent_num_ids() { return Expression::num_ids(); }
private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Expression* expression_;
+ int expr_pos_;
};
@@ -2505,18 +2482,32 @@ class Assignment final : public Expression {
};
-class RewritableAssignmentExpression : public Expression {
+// The RewritableExpression class is a wrapper for AST nodes that may be
+// rewritten at a later stage. Even when such a node is indeed rewritten,
+// the RewritableExpression wrapper survives in the final AST and should
+// simply be ignored, i.e., treated as equivalent to the wrapped node. For
+// this reason, and to simplify later phases, RewritableExpressions are
+// treated as special cases among AST nodes in the following sense:
+//
+// 1. IsRewritableExpression and AsRewritableExpression behave as usual.
+// 2. All other Is* and As* methods are practically delegated to the
+// wrapped node, i.e. IsArrayLiteral() will return true iff the
+// wrapped node is an array literal.
+//
+// Furthermore, an invariant that should be respected is that the wrapped
+// node is not a RewritableExpression.
+class RewritableExpression : public Expression {
public:
- DECLARE_NODE_TYPE(RewritableAssignmentExpression)
+ DECLARE_NODE_TYPE(RewritableExpression)
- Expression* expression() { return expr_; }
+ Expression* expression() const { return expr_; }
bool is_rewritten() const { return is_rewritten_; }
- void set_expression(Expression* e) { expr_ = e; }
-
void Rewrite(Expression* new_expression) {
DCHECK(!is_rewritten());
DCHECK_NOT_NULL(new_expression);
+ DCHECK(!new_expression->IsRewritableExpression());
expr_ = new_expression;
is_rewritten_ = true;
}
@@ -2524,10 +2515,12 @@ class RewritableAssignmentExpression : public Expression {
static int num_ids() { return parent_num_ids(); }
protected:
- RewritableAssignmentExpression(Zone* zone, Expression* expression)
+ RewritableExpression(Zone* zone, Expression* expression)
: Expression(zone, expression->position()),
is_rewritten_(false),
- expr_(expression) {}
+ expr_(expression) {
+ DCHECK(!expression->IsRewritableExpression());
+ }
private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
@@ -2555,26 +2548,6 @@ class Yield final : public Expression {
void set_generator_object(Expression* e) { generator_object_ = e; }
void set_expression(Expression* e) { expression_ = e; }
- // Type feedback information.
- bool HasFeedbackSlots() const { return yield_kind() == kDelegating; }
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) override {
- if (HasFeedbackSlots()) {
- yield_first_feedback_slot_ = spec->AddKeyedLoadICSlot();
- keyed_load_feedback_slot_ = spec->AddLoadICSlot();
- done_feedback_slot_ = spec->AddLoadICSlot();
- }
- }
-
- FeedbackVectorSlot KeyedLoadFeedbackSlot() {
- DCHECK(!HasFeedbackSlots() || !yield_first_feedback_slot_.IsInvalid());
- return yield_first_feedback_slot_;
- }
-
- FeedbackVectorSlot DoneFeedbackSlot() { return keyed_load_feedback_slot_; }
-
- FeedbackVectorSlot ValueFeedbackSlot() { return done_feedback_slot_; }
-
protected:
Yield(Zone* zone, Expression* generator_object, Expression* expression,
Kind yield_kind, int pos)
@@ -2587,9 +2560,6 @@ class Yield final : public Expression {
Expression* generator_object_;
Expression* expression_;
Kind yield_kind_;
- FeedbackVectorSlot yield_first_feedback_slot_;
- FeedbackVectorSlot keyed_load_feedback_slot_;
- FeedbackVectorSlot done_feedback_slot_;
};
@@ -2615,15 +2585,13 @@ class FunctionLiteral final : public Expression {
kAnonymousExpression,
kNamedExpression,
kDeclaration,
- kGlobalOrEval
+ kAccessorOrMethod
};
enum ParameterFlag { kNoDuplicateParameters, kHasDuplicateParameters };
enum EagerCompileHint { kShouldEagerCompile, kShouldLazyCompile };
- enum ArityRestriction { kNormalArity, kGetterArity, kSetterArity };
-
DECLARE_NODE_TYPE(FunctionLiteral)
Handle<String> name() const { return raw_name_->string(); }
@@ -2636,8 +2604,13 @@ class FunctionLiteral final : public Expression {
int start_position() const;
int end_position() const;
int SourceSize() const { return end_position() - start_position(); }
- bool is_expression() const { return IsExpression::decode(bitfield_); }
- bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
+ bool is_declaration() const { return IsDeclaration::decode(bitfield_); }
+ bool is_named_expression() const {
+ return IsNamedExpression::decode(bitfield_);
+ }
+ bool is_anonymous_expression() const {
+ return IsAnonymousExpression::decode(bitfield_);
+ }
LanguageMode language_mode() const;
static bool NeedsHomeObject(Expression* expr);
@@ -2729,6 +2702,10 @@ class FunctionLiteral final : public Expression {
dont_optimize_reason_ = reason;
}
+ bool IsAnonymousFunctionDefinition() const final {
+ return is_anonymous_expression();
+ }
+
protected:
FunctionLiteral(Zone* zone, const AstString* name,
AstValueFactory* ast_value_factory, Scope* scope,
@@ -2737,7 +2714,7 @@ class FunctionLiteral final : public Expression {
FunctionType function_type,
ParameterFlag has_duplicate_parameters,
EagerCompileHint eager_compile_hint, FunctionKind kind,
- int position)
+ int position, bool is_function)
: Expression(zone, position),
raw_name_(name),
scope_(scope),
@@ -2750,26 +2727,28 @@ class FunctionLiteral final : public Expression {
parameter_count_(parameter_count),
function_token_position_(RelocInfo::kNoPosition) {
bitfield_ =
- IsExpression::encode(function_type != kDeclaration) |
- IsAnonymous::encode(function_type == kAnonymousExpression) |
+ IsDeclaration::encode(function_type == kDeclaration) |
+ IsNamedExpression::encode(function_type == kNamedExpression) |
+ IsAnonymousExpression::encode(function_type == kAnonymousExpression) |
Pretenure::encode(false) |
HasDuplicateParameters::encode(has_duplicate_parameters ==
kHasDuplicateParameters) |
- IsFunction::encode(function_type != kGlobalOrEval) |
+ IsFunction::encode(is_function) |
ShouldEagerCompile::encode(eager_compile_hint == kShouldEagerCompile) |
FunctionKindBits::encode(kind) | ShouldBeUsedOnceHint::encode(false);
DCHECK(IsValidFunctionKind(kind));
}
private:
- class IsExpression : public BitField16<bool, 0, 1> {};
- class IsAnonymous : public BitField16<bool, 1, 1> {};
- class Pretenure : public BitField16<bool, 2, 1> {};
- class HasDuplicateParameters : public BitField16<bool, 3, 1> {};
- class IsFunction : public BitField16<bool, 4, 1> {};
- class ShouldEagerCompile : public BitField16<bool, 5, 1> {};
- class FunctionKindBits : public BitField16<FunctionKind, 6, 8> {};
- class ShouldBeUsedOnceHint : public BitField16<bool, 15, 1> {};
+ class IsDeclaration : public BitField16<bool, 0, 1> {};
+ class IsNamedExpression : public BitField16<bool, 1, 1> {};
+ class IsAnonymousExpression : public BitField16<bool, 2, 1> {};
+ class Pretenure : public BitField16<bool, 3, 1> {};
+ class HasDuplicateParameters : public BitField16<bool, 4, 1> {};
+ class IsFunction : public BitField16<bool, 5, 1> {};
+ class ShouldEagerCompile : public BitField16<bool, 6, 1> {};
+ class ShouldBeUsedOnceHint : public BitField16<bool, 7, 1> {};
+ class FunctionKindBits : public BitField16<FunctionKind, 8, 8> {};
// Start with 16-bit field, which should get packed together
// with Expression's trailing 16-bit field.
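With IsExpression/IsAnonymous replaced by three orthogonal flags, the repacked layout is eight single-bit fields in bits 0 through 7 and the 8-bit FunctionKind in bits 8 through 15, exactly filling bitfield_. A compile-time restatement of that accounting (a sketch; V8's BitField16 template enforces the same bound itself):

    // Sketch: the repacked FunctionLiteral flags must still fit the 16-bit
    // bitfield_. Widths are copied from the BitField16 declarations above.
    constexpr int kSingleBitFlags = 8;     // bits 0..7
    constexpr int kFunctionKindWidth = 8;  // bits 8..15
    static_assert(kSingleBitFlags + kFunctionKindWidth == 16,
                  "FunctionLiteral bit fields must pack into 16 bits");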
@@ -2796,13 +2775,6 @@ class ClassLiteral final : public Expression {
DECLARE_NODE_TYPE(ClassLiteral)
- Handle<String> name() const { return raw_name_->string(); }
- const AstRawString* raw_name() const { return raw_name_; }
- void set_raw_name(const AstRawString* name) {
- DCHECK_NULL(raw_name_);
- raw_name_ = name;
- }
-
Scope* scope() const { return scope_; }
VariableProxy* class_variable_proxy() const { return class_variable_proxy_; }
Expression* extends() const { return extends_; }
@@ -2817,13 +2789,14 @@ class ClassLiteral final : public Expression {
BailoutId DeclsId() const { return BailoutId(local_id(1)); }
BailoutId ExitId() { return BailoutId(local_id(2)); }
BailoutId CreateLiteralId() const { return BailoutId(local_id(3)); }
+ BailoutId PrototypeId() { return BailoutId(local_id(4)); }
// Return an AST id for a property that is used in simulate instructions.
- BailoutId GetIdForProperty(int i) { return BailoutId(local_id(i + 4)); }
+ BailoutId GetIdForProperty(int i) { return BailoutId(local_id(i + 5)); }
// Unlike other AST nodes, the number of bailout IDs allocated for a
// ClassLiteral can vary, so num_ids() is not a static method.
- int num_ids() const { return parent_num_ids() + 4 + properties()->length(); }
+ int num_ids() const { return parent_num_ids() + 5 + properties()->length(); }
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
@@ -2835,15 +2808,19 @@ class ClassLiteral final : public Expression {
class_variable_proxy()->var()->IsUnallocated();
}
- FeedbackVectorSlot ProxySlot() const { return slot_; }
+ FeedbackVectorSlot PrototypeSlot() const { return prototype_slot_; }
+ FeedbackVectorSlot ProxySlot() const { return proxy_slot_; }
+
+ bool IsAnonymousFunctionDefinition() const final {
+ return constructor()->raw_name()->length() == 0;
+ }
protected:
- ClassLiteral(Zone* zone, const AstRawString* name, Scope* scope,
- VariableProxy* class_variable_proxy, Expression* extends,
- FunctionLiteral* constructor, ZoneList<Property*>* properties,
- int start_position, int end_position)
+ ClassLiteral(Zone* zone, Scope* scope, VariableProxy* class_variable_proxy,
+ Expression* extends, FunctionLiteral* constructor,
+ ZoneList<Property*>* properties, int start_position,
+ int end_position)
: Expression(zone, start_position),
- raw_name_(name),
scope_(scope),
class_variable_proxy_(class_variable_proxy),
extends_(extends),
@@ -2856,14 +2833,14 @@ class ClassLiteral final : public Expression {
private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- const AstRawString* raw_name_;
Scope* scope_;
VariableProxy* class_variable_proxy_;
Expression* extends_;
FunctionLiteral* constructor_;
ZoneList<Property*>* properties_;
int end_position_;
- FeedbackVectorSlot slot_;
+ FeedbackVectorSlot prototype_slot_;
+ FeedbackVectorSlot proxy_slot_;
};
@@ -3095,12 +3072,11 @@ class AstNodeFactory final BASE_EMBEDDED {
AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
- VariableDeclaration* NewVariableDeclaration(
- VariableProxy* proxy, VariableMode mode, Scope* scope, int pos,
- bool is_class_declaration = false, int declaration_group_start = -1) {
+ VariableDeclaration* NewVariableDeclaration(VariableProxy* proxy,
+ VariableMode mode, Scope* scope,
+ int pos) {
return new (parser_zone_)
- VariableDeclaration(parser_zone_, proxy, mode, scope, pos,
- is_class_declaration, declaration_group_start);
+ VariableDeclaration(parser_zone_, proxy, mode, scope, pos);
}
FunctionDeclaration* NewFunctionDeclaration(VariableProxy* proxy,
@@ -3389,8 +3365,8 @@ class AstNodeFactory final BASE_EMBEDDED {
CompareOperation(local_zone_, op, left, right, pos);
}
- Spread* NewSpread(Expression* expression, int pos) {
- return new (local_zone_) Spread(local_zone_, expression, pos);
+ Spread* NewSpread(Expression* expression, int pos, int expr_pos) {
+ return new (local_zone_) Spread(local_zone_, expression, pos, expr_pos);
}
Conditional* NewConditional(Expression* condition,
@@ -3401,12 +3377,9 @@ class AstNodeFactory final BASE_EMBEDDED {
local_zone_, condition, then_expression, else_expression, position);
}
- RewritableAssignmentExpression* NewRewritableAssignmentExpression(
- Expression* expression) {
+ RewritableExpression* NewRewritableExpression(Expression* expression) {
DCHECK_NOT_NULL(expression);
- DCHECK(expression->IsAssignment());
- return new (local_zone_)
- RewritableAssignmentExpression(local_zone_, expression);
+ return new (local_zone_) RewritableExpression(local_zone_, expression);
}
Assignment* NewAssignment(Token::Value op,
@@ -3449,16 +3422,31 @@ class AstNodeFactory final BASE_EMBEDDED {
parser_zone_, name, ast_value_factory_, scope, body,
materialized_literal_count, expected_property_count, parameter_count,
function_type, has_duplicate_parameters, eager_compile_hint, kind,
- position);
+ position, true);
+ }
+
+ // Creates a FunctionLiteral representing a top-level script, the
+ // result of an eval (top-level or otherwise), or the result of calling
+ // the Function constructor.
+ FunctionLiteral* NewScriptOrEvalFunctionLiteral(
+ Scope* scope, ZoneList<Statement*>* body, int materialized_literal_count,
+ int expected_property_count) {
+ return new (parser_zone_) FunctionLiteral(
+ parser_zone_, ast_value_factory_->empty_string(), ast_value_factory_,
+ scope, body, materialized_literal_count, expected_property_count, 0,
+ FunctionLiteral::kAnonymousExpression,
+ FunctionLiteral::kNoDuplicateParameters,
+ FunctionLiteral::kShouldLazyCompile, FunctionKind::kNormalFunction, 0,
+ false);
}
- ClassLiteral* NewClassLiteral(const AstRawString* name, Scope* scope,
- VariableProxy* proxy, Expression* extends,
+ ClassLiteral* NewClassLiteral(Scope* scope, VariableProxy* proxy,
+ Expression* extends,
FunctionLiteral* constructor,
ZoneList<ObjectLiteral::Property*>* properties,
int start_position, int end_position) {
return new (parser_zone_)
- ClassLiteral(parser_zone_, name, scope, proxy, extends, constructor,
+ ClassLiteral(parser_zone_, scope, proxy, extends, constructor,
properties, start_position, end_position);
}
@@ -3529,6 +3517,46 @@ class AstNodeFactory final BASE_EMBEDDED {
};
+// Type testing & conversion functions for AstNode, implemented inline
+// below rather than overridden by concrete subclasses.
+
+#define DECLARE_NODE_FUNCTIONS(type) \
+ bool AstNode::Is##type() const { \
+ NodeType mine = node_type(); \
+ if (mine == AstNode::kRewritableExpression && \
+ AstNode::k##type != AstNode::kRewritableExpression) \
+ mine = reinterpret_cast<const RewritableExpression*>(this) \
+ ->expression() \
+ ->node_type(); \
+ return mine == AstNode::k##type; \
+ } \
+ type* AstNode::As##type() { \
+ NodeType mine = node_type(); \
+ AstNode* result = this; \
+ if (mine == AstNode::kRewritableExpression && \
+ AstNode::k##type != AstNode::kRewritableExpression) { \
+ result = \
+ reinterpret_cast<const RewritableExpression*>(this)->expression(); \
+ mine = result->node_type(); \
+ } \
+ return mine == AstNode::k##type ? reinterpret_cast<type*>(result) : NULL; \
+ } \
+ const type* AstNode::As##type() const { \
+ NodeType mine = node_type(); \
+ const AstNode* result = this; \
+ if (mine == AstNode::kRewritableExpression && \
+ AstNode::k##type != AstNode::kRewritableExpression) { \
+ result = \
+ reinterpret_cast<const RewritableExpression*>(this)->expression(); \
+ mine = result->node_type(); \
+ } \
+ return mine == AstNode::k##type ? reinterpret_cast<const type*>(result) \
+ : NULL; \
+ }
+AST_NODE_LIST(DECLARE_NODE_FUNCTIONS)
+#undef DECLARE_NODE_FUNCTIONS
+
+
} // namespace internal
} // namespace v8
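The inline DECLARE_NODE_FUNCTIONS bodies above are what make a RewritableExpression transparent: every Is*/As* query except the one about the wrapper itself falls through to the wrapped expression. The same delegation rule in a self-contained sketch (a toy two-kind hierarchy; none of these names are V8's):

    // Sketch: type queries on a wrapper answer for the wrapped child,
    // except queries about the wrapper kind itself.
    enum NodeKind { kLiteralNode, kWrapperNode };

    struct Node {
      NodeKind kind;
      const Node* wrapped;  // non-null only when kind == kWrapperNode

      bool IsLiteralNode() const {
        const Node* self = (kind == kWrapperNode) ? wrapped : this;
        return self->kind == kLiteralNode;
      }
      bool IsWrapperNode() const { return kind == kWrapperNode; }
    };

A wrapper around a literal then answers true to both IsLiteralNode() and IsWrapperNode(), which is exactly invariants 1 and 2 from the class comment in ast.h.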
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index 225cd8d62c..f895756e4a 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -13,7 +13,6 @@ namespace internal {
void ModuleDescriptor::AddLocalExport(const AstRawString* export_name,
const AstRawString* local_name,
Zone* zone, bool* ok) {
- DCHECK(!IsFrozen());
void* key = const_cast<AstRawString*>(export_name);
ZoneAllocationPolicy allocator(zone);
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h
index e3c66dce94..1fdf526cd1 100644
--- a/deps/v8/src/ast/modules.h
+++ b/deps/v8/src/ast/modules.h
@@ -26,8 +26,7 @@ class ModuleDescriptor : public ZoneObject {
// ---------------------------------------------------------------------------
// Mutators.
- // Add a name to the list of exports. If it already exists, or this descriptor
- // is frozen, that's an error.
+ // Add a name to the list of exports. If it already exists, that's an error.
void AddLocalExport(const AstRawString* export_name,
const AstRawString* local_name, Zone* zone, bool* ok);
@@ -35,30 +34,22 @@ class ModuleDescriptor : public ZoneObject {
// if not already present.
void AddModuleRequest(const AstRawString* module_specifier, Zone* zone);
- // Do not allow any further refinements, directly or through unification.
- void Freeze() { frozen_ = true; }
-
// Assign an index.
void Allocate(int index) {
- DCHECK(IsFrozen() && index_ == -1);
+ DCHECK_EQ(-1, index_);
index_ = index;
}
// ---------------------------------------------------------------------------
// Accessors.
- // Check whether this is closed (i.e. fully determined).
- bool IsFrozen() { return frozen_; }
-
int Length() {
- DCHECK(IsFrozen());
ZoneHashMap* exports = exports_;
return exports ? exports->occupancy() : 0;
}
// The context slot in the hosting script context pointing to this module.
int Index() {
- DCHECK(IsFrozen());
return index_;
}
@@ -104,12 +95,8 @@ class ModuleDescriptor : public ZoneObject {
// Implementation.
private:
explicit ModuleDescriptor(Zone* zone)
- : frozen_(false),
- exports_(NULL),
- requested_modules_(1, zone),
- index_(-1) {}
+ : exports_(NULL), requested_modules_(1, zone), index_(-1) {}
- bool frozen_;
ZoneHashMap* exports_; // Module exports and their types (allocated lazily)
ZoneList<const AstRawString*> requested_modules_;
int index_;
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index 1f6b8c31de..0e9986a438 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -412,8 +412,7 @@ void CallPrinter::VisitSuperCallReference(SuperCallReference* node) {
}
-void CallPrinter::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* node) {
+void CallPrinter::VisitRewritableExpression(RewritableExpression* node) {
Find(node->expression());
}
@@ -719,7 +718,7 @@ void PrettyPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
void PrettyPrinter::VisitClassLiteral(ClassLiteral* node) {
Print("(class ");
- PrintLiteral(node->name(), false);
+ PrintLiteral(node->constructor()->name(), false);
if (node->extends()) {
Print(" extends ");
Visit(node->extends());
@@ -929,8 +928,7 @@ void PrettyPrinter::VisitSuperCallReference(SuperCallReference* node) {
}
-void PrettyPrinter::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* node) {
+void PrettyPrinter::VisitRewritableExpression(RewritableExpression* node) {
Visit(node->expression());
}
@@ -1203,6 +1201,14 @@ const char* AstPrinter::PrintProgram(FunctionLiteral* program) {
}
+void AstPrinter::PrintOut(Isolate* isolate, AstNode* node) {
+ AstPrinter printer(isolate);
+ printer.Init();
+ printer.Visit(node);
+ PrintF("%s", printer.Output());
+}
+
+
void AstPrinter::PrintDeclarations(ZoneList<Declaration*>* declarations) {
if (declarations->length() > 0) {
IndentedScope indent(this, "DECLS");
@@ -1390,6 +1396,10 @@ void AstPrinter::VisitForOfStatement(ForOfStatement* node) {
PrintIndentedVisit("FOR", node->each());
PrintIndentedVisit("OF", node->iterable());
PrintIndentedVisit("BODY", node->body());
+ PrintIndentedVisit("INIT", node->assign_iterator());
+ PrintIndentedVisit("NEXT", node->next_result());
+ PrintIndentedVisit("EACH", node->assign_each());
+ PrintIndentedVisit("DONE", node->result_done());
}
@@ -1429,9 +1439,7 @@ void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
void AstPrinter::VisitClassLiteral(ClassLiteral* node) {
IndentedScope indent(this, "CLASS LITERAL", node->position());
- if (node->raw_name() != nullptr) {
- PrintLiteralIndented("NAME", node->name(), false);
- }
+ PrintLiteralIndented("NAME", node->constructor()->name(), false);
if (node->extends() != nullptr) {
PrintIndentedVisit("EXTENDS", node->extends());
}
@@ -1544,31 +1552,36 @@ void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
void AstPrinter::VisitVariableProxy(VariableProxy* node) {
- Variable* var = node->var();
EmbeddedVector<char, 128> buf;
int pos =
FormatSlotNode(&buf, node, "VAR PROXY", node->VariableFeedbackSlot());
- switch (var->location()) {
- case VariableLocation::UNALLOCATED:
- break;
- case VariableLocation::PARAMETER:
- SNPrintF(buf + pos, " parameter[%d]", var->index());
- break;
- case VariableLocation::LOCAL:
- SNPrintF(buf + pos, " local[%d]", var->index());
- break;
- case VariableLocation::CONTEXT:
- SNPrintF(buf + pos, " context[%d]", var->index());
- break;
- case VariableLocation::GLOBAL:
- SNPrintF(buf + pos, " global[%d]", var->index());
- break;
- case VariableLocation::LOOKUP:
- SNPrintF(buf + pos, " lookup");
- break;
+ if (!node->is_resolved()) {
+ SNPrintF(buf + pos, " unresolved");
+ PrintLiteralWithModeIndented(buf.start(), nullptr, node->name());
+ } else {
+ Variable* var = node->var();
+ switch (var->location()) {
+ case VariableLocation::UNALLOCATED:
+ break;
+ case VariableLocation::PARAMETER:
+ SNPrintF(buf + pos, " parameter[%d]", var->index());
+ break;
+ case VariableLocation::LOCAL:
+ SNPrintF(buf + pos, " local[%d]", var->index());
+ break;
+ case VariableLocation::CONTEXT:
+ SNPrintF(buf + pos, " context[%d]", var->index());
+ break;
+ case VariableLocation::GLOBAL:
+ SNPrintF(buf + pos, " global[%d]", var->index());
+ break;
+ case VariableLocation::LOOKUP:
+ SNPrintF(buf + pos, " lookup");
+ break;
+ }
+ PrintLiteralWithModeIndented(buf.start(), var, node->name());
}
- PrintLiteralWithModeIndented(buf.start(), var, node->name());
}
@@ -1580,7 +1593,9 @@ void AstPrinter::VisitAssignment(Assignment* node) {
void AstPrinter::VisitYield(Yield* node) {
- IndentedScope indent(this, "YIELD", node->position());
+ EmbeddedVector<char, 128> buf;
+ SNPrintF(buf, "YIELD (kind %d)", node->yield_kind());
+ IndentedScope indent(this, buf.start(), node->position());
Visit(node->expression());
}
@@ -1608,7 +1623,9 @@ void AstPrinter::VisitProperty(Property* node) {
void AstPrinter::VisitCall(Call* node) {
EmbeddedVector<char, 128> buf;
- FormatSlotNode(&buf, node, "CALL", node->CallFeedbackICSlot());
+ const char* name =
+ node->tail_call_mode() == TailCallMode::kAllow ? "TAIL CALL" : "CALL";
+ FormatSlotNode(&buf, node, name, node->CallFeedbackICSlot());
IndentedScope indent(this, buf.start());
Visit(node->expression());
@@ -1686,8 +1703,7 @@ void AstPrinter::VisitSuperCallReference(SuperCallReference* node) {
}
-void AstPrinter::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* node) {
+void AstPrinter::VisitRewritableExpression(RewritableExpression* node) {
Visit(node->expression());
}
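The static PrintOut gives the new debug-only AstNode::Print() entry points a one-shot path through the printer: construct, Init, Visit, dump. The construct-visit-print flow in isolation (visiting reduced to string accumulation, so this is a sketch of the pattern, not the V8 class):

    #include <cstdio>
    #include <string>

    // Sketch: accumulate output while visiting, with a static convenience
    // entry point mirroring AstPrinter::PrintOut.
    class PrinterSketch {
     public:
      void Init() { out_.clear(); }
      void Visit(const std::string& node_description) {
        out_ += node_description;
      }
      const char* Output() const { return out_.c_str(); }

      static void PrintOut(const std::string& node_description) {
        PrinterSketch printer;
        printer.Init();
        printer.Visit(node_description);
        std::printf("%s\n", printer.Output());
      }

     private:
      std::string out_;
    };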
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index 7e4dcdc804..0186203d27 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -104,6 +104,9 @@ class AstPrinter: public PrettyPrinter {
const char* PrintProgram(FunctionLiteral* program);
+ // Print a node to stdout.
+ static void PrintOut(Isolate* isolate, AstNode* node);
+
// Individual nodes
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
diff --git a/deps/v8/src/ast/scopeinfo.cc b/deps/v8/src/ast/scopeinfo.cc
index 668879fe51..4ffc020f61 100644
--- a/deps/v8/src/ast/scopeinfo.cc
+++ b/deps/v8/src/ast/scopeinfo.cc
@@ -19,16 +19,12 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
ZoneList<Variable*> stack_locals(scope->StackLocalCount(), zone);
ZoneList<Variable*> context_locals(scope->ContextLocalCount(), zone);
ZoneList<Variable*> context_globals(scope->ContextGlobalCount(), zone);
- ZoneList<Variable*> strong_mode_free_variables(0, zone);
scope->CollectStackAndContextLocals(&stack_locals, &context_locals,
- &context_globals,
- &strong_mode_free_variables);
+ &context_globals);
const int stack_local_count = stack_locals.length();
const int context_local_count = context_locals.length();
const int context_global_count = context_globals.length();
- const int strong_mode_free_variable_count =
- strong_mode_free_variables.length();
// Make sure we allocate the correct amount.
DCHECK_EQ(scope->ContextLocalCount(), context_local_count);
DCHECK_EQ(scope->ContextGlobalCount(), context_global_count);
@@ -77,7 +73,6 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
const int length = kVariablePartIndex + parameter_count +
(1 + stack_local_count) + 2 * context_local_count +
2 * context_global_count +
- 3 * strong_mode_free_variable_count +
(has_receiver ? 1 : 0) + (has_function_name ? 2 : 0);
Factory* factory = isolate->factory();
@@ -104,7 +99,6 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
scope_info->SetStackLocalCount(stack_local_count);
scope_info->SetContextLocalCount(context_local_count);
scope_info->SetContextGlobalCount(context_global_count);
- scope_info->SetStrongModeFreeVariableCount(strong_mode_free_variable_count);
int index = kVariablePartIndex;
// Add parameters.
@@ -173,25 +167,6 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
scope_info->set(index++, Smi::FromInt(value));
}
- DCHECK(index == scope_info->StrongModeFreeVariableNameEntriesIndex());
- for (int i = 0; i < strong_mode_free_variable_count; ++i) {
- scope_info->set(index++, *strong_mode_free_variables[i]->name());
- }
-
- DCHECK(index == scope_info->StrongModeFreeVariablePositionEntriesIndex());
- for (int i = 0; i < strong_mode_free_variable_count; ++i) {
- // Unfortunately, the source code positions are stored as int even though
- // int32_t would be enough (given the maximum source code length).
- Handle<Object> start_position = factory->NewNumberFromInt(
- static_cast<int32_t>(strong_mode_free_variables[i]
- ->strong_mode_reference_start_position()));
- scope_info->set(index++, *start_position);
- Handle<Object> end_position = factory->NewNumberFromInt(
- static_cast<int32_t>(strong_mode_free_variables[i]
- ->strong_mode_reference_end_position()));
- scope_info->set(index++, *end_position);
- }
-
// If the receiver is allocated, add its index.
DCHECK(index == scope_info->ReceiverEntryIndex());
if (has_receiver) {
@@ -226,7 +201,6 @@ Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
const int stack_local_count = 0;
const int context_local_count = 1;
const int context_global_count = 0;
- const int strong_mode_free_variable_count = 0;
const bool has_simple_parameters = true;
const VariableAllocationInfo receiver_info = CONTEXT;
const VariableAllocationInfo function_name_info = NONE;
@@ -237,7 +211,6 @@ Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
const int length = kVariablePartIndex + parameter_count +
(1 + stack_local_count) + 2 * context_local_count +
2 * context_global_count +
- 3 * strong_mode_free_variable_count +
(has_receiver ? 1 : 0) + (has_function_name ? 2 : 0);
Factory* factory = isolate->factory();
@@ -259,7 +232,6 @@ Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
scope_info->SetStackLocalCount(stack_local_count);
scope_info->SetContextLocalCount(context_local_count);
scope_info->SetContextGlobalCount(context_global_count);
- scope_info->SetStrongModeFreeVariableCount(strong_mode_free_variable_count);
int index = kVariablePartIndex;
const int first_slot_index = 0;
@@ -276,9 +248,6 @@ Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
ContextLocalMaybeAssignedFlag::encode(kNotAssigned);
scope_info->set(index++, Smi::FromInt(value));
- DCHECK(index == scope_info->StrongModeFreeVariableNameEntriesIndex());
- DCHECK(index == scope_info->StrongModeFreeVariablePositionEntriesIndex());
-
// And here we record that this scopeinfo binds a receiver.
DCHECK(index == scope_info->ReceiverEntryIndex());
const int receiver_index = Context::MIN_CONTEXT_SLOTS + 0;
@@ -482,35 +451,6 @@ bool ScopeInfo::LocalIsSynthetic(int var) {
}
-String* ScopeInfo::StrongModeFreeVariableName(int var) {
- DCHECK(0 <= var && var < StrongModeFreeVariableCount());
- int info_index = StrongModeFreeVariableNameEntriesIndex() + var;
- return String::cast(get(info_index));
-}
-
-
-int ScopeInfo::StrongModeFreeVariableStartPosition(int var) {
- DCHECK(0 <= var && var < StrongModeFreeVariableCount());
- int info_index = StrongModeFreeVariablePositionEntriesIndex() + var * 2;
- int32_t value = 0;
- bool ok = get(info_index)->ToInt32(&value);
- USE(ok);
- DCHECK(ok);
- return value;
-}
-
-
-int ScopeInfo::StrongModeFreeVariableEndPosition(int var) {
- DCHECK(0 <= var && var < StrongModeFreeVariableCount());
- int info_index = StrongModeFreeVariablePositionEntriesIndex() + var * 2 + 1;
- int32_t value = 0;
- bool ok = get(info_index)->ToInt32(&value);
- USE(ok);
- DCHECK(ok);
- return value;
-}
-
-
int ScopeInfo::StackSlotIndex(String* name) {
DCHECK(name->IsInternalizedString());
if (length() > 0) {
@@ -691,20 +631,8 @@ int ScopeInfo::ContextGlobalInfoEntriesIndex() {
}
-int ScopeInfo::StrongModeFreeVariableNameEntriesIndex() {
- return ContextGlobalInfoEntriesIndex() + ContextGlobalCount();
-}
-
-
-int ScopeInfo::StrongModeFreeVariablePositionEntriesIndex() {
- return StrongModeFreeVariableNameEntriesIndex() +
- StrongModeFreeVariableCount();
-}
-
-
int ScopeInfo::ReceiverEntryIndex() {
- return StrongModeFreeVariablePositionEntriesIndex() +
- 2 * StrongModeFreeVariableCount();
+ return ContextGlobalInfoEntriesIndex() + ContextGlobalCount();
}
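With the three strong-mode free-variable sections gone, the serialized ScopeInfo layout is: parameters, a count-prefixed stack-local block, two words per context local and per context global, then the optional receiver and function-name entries. Re-deriving the length formula as a compile-time sketch (kVariablePartIndex is a placeholder value here, not V8's real constant):

    // Sketch: the ScopeInfo length formula after the strong-mode removal,
    // checked against the CreateGlobalThisBinding case from the diff.
    constexpr int kVariablePartIndex = 4;  // placeholder for this sketch

    constexpr int ScopeInfoLength(int parameter_count, int stack_local_count,
                                  int context_local_count,
                                  int context_global_count, bool has_receiver,
                                  bool has_function_name) {
      return kVariablePartIndex + parameter_count + (1 + stack_local_count) +
             2 * context_local_count + 2 * context_global_count +
             (has_receiver ? 1 : 0) + (has_function_name ? 2 : 0);
    }

    // One context local (the global this binding), a receiver, no function
    // name: 4 + 0 + 1 + 2 + 0 + 1 slots.
    static_assert(ScopeInfoLength(0, 0, 1, 0, true, false) == 8,
                  "global this binding layout");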
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index c2b05b7c04..7c87ce39e9 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -27,12 +27,10 @@ VariableMap::VariableMap(Zone* zone)
zone_(zone) {}
VariableMap::~VariableMap() {}
-
Variable* VariableMap::Declare(Scope* scope, const AstRawString* name,
VariableMode mode, Variable::Kind kind,
InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag,
- int declaration_group_start) {
+ MaybeAssignedFlag maybe_assigned_flag) {
// AstRawStrings are unambiguous, i.e., the same string is always represented
// by the same AstRawString*.
// FIXME(marja): fix the type of Lookup.
@@ -42,14 +40,8 @@ Variable* VariableMap::Declare(Scope* scope, const AstRawString* name,
if (p->value == NULL) {
// The variable has not been declared yet -> insert it.
DCHECK(p->key == name);
- if (kind == Variable::CLASS) {
- p->value = new (zone())
- ClassVariable(scope, name, mode, initialization_flag,
- maybe_assigned_flag, declaration_group_start);
- } else {
- p->value = new (zone()) Variable(
- scope, name, mode, kind, initialization_flag, maybe_assigned_flag);
- }
+ p->value = new (zone()) Variable(scope, name, mode, kind,
+ initialization_flag, maybe_assigned_flag);
}
return reinterpret_cast<Variable*>(p->value);
}
@@ -103,8 +95,7 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
sloppy_block_function_map_(zone),
already_resolved_(false),
ast_value_factory_(ast_value_factory),
- zone_(zone),
- class_declaration_group_start_(-1) {
+ zone_(zone) {
SetDefaults(scope_type, outer_scope, Handle<ScopeInfo>::null(),
function_kind);
// The outermost scope must be a script scope.
@@ -112,7 +103,6 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
DCHECK(!HasIllegalRedeclaration());
}
-
Scope::Scope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
Handle<ScopeInfo> scope_info, AstValueFactory* value_factory)
: inner_scopes_(4, zone),
@@ -125,8 +115,7 @@ Scope::Scope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
sloppy_block_function_map_(zone),
already_resolved_(true),
ast_value_factory_(value_factory),
- zone_(zone),
- class_declaration_group_start_(-1) {
+ zone_(zone) {
SetDefaults(scope_type, NULL, scope_info);
if (!scope_info.is_null()) {
num_heap_slots_ = scope_info_->ContextLength();
@@ -137,7 +126,6 @@ Scope::Scope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
AddInnerScope(inner_scope);
}
-
Scope::Scope(Zone* zone, Scope* inner_scope,
const AstRawString* catch_variable_name,
AstValueFactory* value_factory)
@@ -151,8 +139,7 @@ Scope::Scope(Zone* zone, Scope* inner_scope,
sloppy_block_function_map_(zone),
already_resolved_(true),
ast_value_factory_(value_factory),
- zone_(zone),
- class_declaration_group_start_(-1) {
+ zone_(zone) {
SetDefaults(CATCH_SCOPE, NULL, Handle<ScopeInfo>::null());
AddInnerScope(inner_scope);
++num_var_or_const_;
@@ -528,19 +515,17 @@ Variable* Scope::DeclareParameter(
return var;
}
-
Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
InitializationFlag init_flag, Variable::Kind kind,
- MaybeAssignedFlag maybe_assigned_flag,
- int declaration_group_start) {
+ MaybeAssignedFlag maybe_assigned_flag) {
DCHECK(!already_resolved());
// This function handles VAR, LET, and CONST modes. DYNAMIC variables are
- // introduces during variable allocation, and TEMPORARY variables are
+ // introduced during variable allocation, and TEMPORARY variables are
// allocated via NewTemporary().
DCHECK(IsDeclaredVariableMode(mode));
++num_var_or_const_;
return variables_.Declare(this, name, mode, kind, init_flag,
- maybe_assigned_flag, declaration_group_start);
+ maybe_assigned_flag);
}
@@ -660,11 +645,9 @@ class VarAndOrder {
int order_;
};
-
-void Scope::CollectStackAndContextLocals(
- ZoneList<Variable*>* stack_locals, ZoneList<Variable*>* context_locals,
- ZoneList<Variable*>* context_globals,
- ZoneList<Variable*>* strong_mode_free_variables) {
+void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
+ ZoneList<Variable*>* context_locals,
+ ZoneList<Variable*>* context_globals) {
DCHECK(stack_locals != NULL);
DCHECK(context_locals != NULL);
DCHECK(context_globals != NULL);
@@ -691,11 +674,6 @@ void Scope::CollectStackAndContextLocals(
p != NULL;
p = variables_.Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
- if (strong_mode_free_variables && var->has_strong_mode_reference() &&
- var->mode() == DYNAMIC_GLOBAL) {
- strong_mode_free_variables->Add(var, zone());
- }
-
if (var->is_used()) {
vars.Add(VarAndOrder(var, p->order), zone());
}
@@ -1017,9 +995,7 @@ void Scope::Print(int n) {
if (HasTrivialOuterContext()) {
Indent(n1, "// scope has trivial outer context\n");
}
- if (is_strong(language_mode())) {
- Indent(n1, "// strong mode scope\n");
- } else if (is_strict(language_mode())) {
+ if (is_strict(language_mode())) {
Indent(n1, "// strict mode scope\n");
}
if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
@@ -1204,10 +1180,6 @@ bool Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy,
switch (binding_kind) {
case BOUND:
- // We found a variable binding.
- if (is_strong(language_mode())) {
- if (!CheckStrongModeDeclaration(proxy, var)) return false;
- }
break;
case BOUND_EVAL_SHADOWED:
@@ -1245,126 +1217,12 @@ bool Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy,
DCHECK(var != NULL);
if (proxy->is_assigned()) var->set_maybe_assigned();
- if (is_strong(language_mode())) {
- // Record that the variable is referred to from strong mode. Also, record
- // the position.
- var->RecordStrongModeReference(proxy->position(), proxy->end_position());
- }
-
proxy->BindTo(var);
return true;
}
-bool Scope::CheckStrongModeDeclaration(VariableProxy* proxy, Variable* var) {
- // Check for declaration-after-use (for variables) in strong mode. Note that
- // we can only do this in the case where we have seen the declaration. And we
- // always allow referencing functions (for now).
-
- // This might happen during lazy compilation; we don't keep track of
- // initializer positions for variables stored in ScopeInfo, so we cannot check
- // bindings against them. TODO(marja, rossberg): remove this hack.
- if (var->initializer_position() == RelocInfo::kNoPosition) return true;
-
- // Allow referencing the class name from methods of that class, even though
- // the initializer position for class names is only after the body.
- Scope* scope = this;
- while (scope) {
- if (scope->ClassVariableForMethod() == var) return true;
- scope = scope->outer_scope();
- }
-
- // Allow references from methods to classes declared later, if we detect no
- // problematic dependency cycles. Note that we can be inside multiple methods
- // at the same time, and it's enough if we find one where the reference is
- // allowed.
- if (var->is_class() &&
- var->AsClassVariable()->declaration_group_start() >= 0) {
- for (scope = this; scope && scope != var->scope();
- scope = scope->outer_scope()) {
- ClassVariable* class_var = scope->ClassVariableForMethod();
- // A method is referring to some other class, possibly declared
- // later. Referring to a class declared earlier is always OK and covered
- // by the code outside this if. Here we only need to allow special cases
- // for referring to a class which is declared later.
-
- // Referring to a class C declared later is OK under the following
- // circumstances:
-
- // 1. The class declarations are in a consecutive group with no other
- // declarations or statements in between, and
-
- // 2. There is no dependency cycle where the first edge is an
- // initialization time dependency (computed property name or extends
- // clause) from C to something that depends on this class directly or
- // transitively.
- if (class_var &&
- class_var->declaration_group_start() ==
- var->AsClassVariable()->declaration_group_start()) {
- return true;
- }
-
- // TODO(marja,rossberg): implement the dependency cycle detection. Here we
- // undershoot the target and allow referring to any class in the same
- // consecutive declaration group.
-
- // The cycle detection can work roughly like this: 1) detect init-time
- // references here (they are free variables which are inside the class
- // scope but not inside a method scope - no parser changes needed to
- // detect them) 2) if we encounter an init-time reference here, allow it,
- // but record it for a later dependency cycle check 3) also record
- // non-init-time references here 4) after scope analysis is done, analyse
- // the dependency cycles: an illegal cycle is one starting with an
- // init-time reference and leading back to the starting point with either
- // non-init-time or init-time references.
- }
- }
-
- // If both the use and the declaration are inside an eval scope (possibly
- // indirectly), or one of them is, we need to check whether they are inside
- // the same eval scope or different ones.
-
- // TODO(marja,rossberg): Detect errors across different evals (depends on the
- // future of eval in strong mode).
- const Scope* eval_for_use = NearestOuterEvalScope();
- const Scope* eval_for_declaration = var->scope()->NearestOuterEvalScope();
-
- if (proxy->position() != RelocInfo::kNoPosition &&
- proxy->position() < var->initializer_position() && !var->is_function() &&
- eval_for_use == eval_for_declaration) {
- DCHECK(proxy->end_position() != RelocInfo::kNoPosition);
- ReportMessage(proxy->position(), proxy->end_position(),
- MessageTemplate::kStrongUseBeforeDeclaration,
- proxy->raw_name());
- return false;
- }
- return true;
-}
-
-
-ClassVariable* Scope::ClassVariableForMethod() const {
- // TODO(marja, rossberg): This fails to find a class variable in the following
- // cases:
- // let A = class { ... }
- // It needs to be investigated whether this causes any practical problems.
- if (!is_function_scope()) return nullptr;
- if (IsInObjectLiteral(function_kind_)) return nullptr;
- if (!IsConciseMethod(function_kind_) && !IsClassConstructor(function_kind_) &&
- !IsAccessorFunction(function_kind_)) {
- return nullptr;
- }
- DCHECK_NOT_NULL(outer_scope_);
- // The class scope contains at most one variable, the class name.
- DCHECK(outer_scope_->variables_.occupancy() <= 1);
- if (outer_scope_->variables_.occupancy() == 0) return nullptr;
- VariableMap::Entry* p = outer_scope_->variables_.Start();
- Variable* var = reinterpret_cast<Variable*>(p->value);
- if (!var->is_class()) return nullptr;
- return var->AsClassVariable();
-}
-
-
bool Scope::ResolveVariablesRecursively(ParseInfo* info,
AstNodeFactory* factory) {
DCHECK(info->script_scope()->is_script_scope());
@@ -1646,7 +1504,7 @@ void Scope::AllocateVariablesRecursively(Isolate* isolate) {
}
// If scope is already resolved, we still need to allocate
- // variables in inner scopes which might not had been resolved yet.
+ // variables in inner scopes which might not have been resolved yet.
if (already_resolved()) return;
// The number of slots required for variables.
num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index 6c261f63c3..76f761dba3 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -24,8 +24,7 @@ class VariableMap: public ZoneHashMap {
Variable* Declare(Scope* scope, const AstRawString* name, VariableMode mode,
Variable::Kind kind, InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
- int declaration_group_start = -1);
+ MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
Variable* Lookup(const AstRawString* name);
@@ -163,8 +162,7 @@ class Scope: public ZoneObject {
// declared before, the previously declared variable is returned.
Variable* DeclareLocal(const AstRawString* name, VariableMode mode,
InitializationFlag init_flag, Variable::Kind kind,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
- int declaration_group_start = -1);
+ MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
// Declare an implicit global variable in this scope which must be a
// script scope. The variable was introduced (possibly from an inner
@@ -377,12 +375,6 @@ class Scope: public ZoneObject {
IsClassConstructor(function_kind())));
}
- const Scope* NearestOuterEvalScope() const {
- if (is_eval_scope()) return this;
- if (outer_scope() == nullptr) return nullptr;
- return outer_scope()->NearestOuterEvalScope();
- }
-
// ---------------------------------------------------------------------------
// Accessors.
@@ -428,7 +420,24 @@ class Scope: public ZoneObject {
// Returns the default function arity excluding default or rest parameters.
int default_function_length() const { return arity_; }
- int num_parameters() const { return params_.length(); }
+ // Returns the number of formal parameters, up to but not including the
+ // rest parameter index (if the function has rest parameters), i.e. it
+ // returns 2 for
+ //
+ // function foo(a, b) { ... }
+ //
+ // and
+ //
+ // function foo(a, b, ...c) { ... }
+ //
+ // but for
+ //
+ // function foo(a, b, c = 1) { ... }
+ //
+ // we return 3 here.
+ int num_parameters() const {
+ return has_rest_parameter() ? params_.length() - 1 : params_.length();
+ }
// A function can have at most one rest parameter. Returns Variable* or NULL.
Variable* rest_parameter(int* index) const {
@@ -486,25 +495,15 @@ class Scope: public ZoneObject {
// The ModuleDescriptor for this scope; only for module scopes.
ModuleDescriptor* module() const { return module_descriptor_; }
-
- void set_class_declaration_group_start(int position) {
- class_declaration_group_start_ = position;
- }
-
- int class_declaration_group_start() const {
- return class_declaration_group_start_;
- }
-
// ---------------------------------------------------------------------------
// Variable allocation.
// Collect stack and context allocated local variables in this scope. Note
// that the function variable - if present - is not collected and should be
// handled separately.
- void CollectStackAndContextLocals(
- ZoneList<Variable*>* stack_locals, ZoneList<Variable*>* context_locals,
- ZoneList<Variable*>* context_globals,
- ZoneList<Variable*>* strong_mode_free_variables = nullptr);
+ void CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
+ ZoneList<Variable*>* context_locals,
+ ZoneList<Variable*>* context_globals);
// Current number of var or const locals.
int num_var_or_const() { return num_var_or_const_; }
@@ -767,12 +766,6 @@ class Scope: public ZoneObject {
MUST_USE_RESULT
bool ResolveVariablesRecursively(ParseInfo* info, AstNodeFactory* factory);
- bool CheckStrongModeDeclaration(VariableProxy* proxy, Variable* var);
-
- // If this scope is a method scope of a class, return the corresponding
- // class variable, otherwise nullptr.
- ClassVariable* ClassVariableForMethod() const;
-
// Scope analysis.
void PropagateScopeInfo(bool outer_scope_calls_sloppy_eval);
bool HasTrivialContext() const;
@@ -837,10 +830,6 @@ class Scope: public ZoneObject {
Zone* zone_;
PendingCompilationErrorHandler pending_error_handler_;
-
- // For tracking which classes are declared consecutively. Needed for strong
- // mode.
- int class_declaration_group_start_;
};
} // namespace internal
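
The new num_parameters() comment above encodes a small counting rule: a rest parameter occupies the last params_ slot but is excluded from the count, while default parameters count normally. A standalone sketch of just that rule (a free function, not V8's Scope class):

    #include <cassert>

    // Rule from the patch: drop the trailing rest-parameter slot, if any.
    int NumParameters(int params_length, bool has_rest_parameter) {
      return has_rest_parameter ? params_length - 1 : params_length;
    }

    int main() {
      assert(NumParameters(2, false) == 2);  // function foo(a, b) {}
      assert(NumParameters(3, true) == 2);   // function foo(a, b, ...c) {}
      assert(NumParameters(3, false) == 3);  // function foo(a, b, c = 1) {}
    }
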
diff --git a/deps/v8/src/ast/variables.cc b/deps/v8/src/ast/variables.cc
index 8e00782386..7b9a5d2957 100644
--- a/deps/v8/src/ast/variables.cc
+++ b/deps/v8/src/ast/variables.cc
@@ -40,9 +40,6 @@ Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
location_(VariableLocation::UNALLOCATED),
index_(-1),
initializer_position_(RelocInfo::kNoPosition),
- has_strong_mode_reference_(false),
- strong_mode_reference_start_position_(RelocInfo::kNoPosition),
- strong_mode_reference_end_position_(RelocInfo::kNoPosition),
local_if_not_shadowed_(NULL),
is_from_eval_(false),
force_context_allocation_(false),
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index ca5d1cdd40..b8bb07eab7 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -15,12 +15,9 @@ namespace internal {
 // variables. Variables themselves are never directly referred to from the AST;
// they are maintained by scopes, and referred to from VariableProxies and Slots
// after binding and variable allocation.
-
-class ClassVariable;
-
class Variable: public ZoneObject {
public:
- enum Kind { NORMAL, FUNCTION, CLASS, THIS, ARGUMENTS };
+ enum Kind { NORMAL, FUNCTION, THIS, ARGUMENTS };
Variable(Scope* scope, const AstRawString* name, VariableMode mode, Kind kind,
InitializationFlag initialization_flag,
@@ -84,7 +81,6 @@ class Variable: public ZoneObject {
}
bool is_function() const { return kind_ == FUNCTION; }
- bool is_class() const { return kind_ == CLASS; }
bool is_this() const { return kind_ == THIS; }
bool is_arguments() const { return kind_ == ARGUMENTS; }
@@ -98,11 +94,6 @@ class Variable: public ZoneObject {
return is_this() || *name() == *isolate->factory()->this_string();
}
- ClassVariable* AsClassVariable() {
- DCHECK(is_class());
- return reinterpret_cast<ClassVariable*>(this);
- }
-
// True if the variable is named eval and not known to be shadowed.
bool is_possibly_eval(Isolate* isolate) const {
return IsVariable(isolate->factory()->eval_string());
@@ -132,24 +123,6 @@ class Variable: public ZoneObject {
static int CompareIndex(Variable* const* v, Variable* const* w);
- void RecordStrongModeReference(int start_position, int end_position) {
- // Record the earliest reference to the variable. Used in error messages for
- // strong mode references to undeclared variables.
- if (has_strong_mode_reference_ &&
- strong_mode_reference_start_position_ < start_position)
- return;
- has_strong_mode_reference_ = true;
- strong_mode_reference_start_position_ = start_position;
- strong_mode_reference_end_position_ = end_position;
- }
-
- bool has_strong_mode_reference() const { return has_strong_mode_reference_; }
- int strong_mode_reference_start_position() const {
- return strong_mode_reference_start_position_;
- }
- int strong_mode_reference_end_position() const {
- return strong_mode_reference_end_position_;
- }
PropertyAttributes DeclarationPropertyAttributes() const {
int property_attributes = NONE;
if (IsImmutableVariableMode(mode_)) {
@@ -169,11 +142,6 @@ class Variable: public ZoneObject {
VariableLocation location_;
int index_;
int initializer_position_;
- // Tracks whether the variable is bound to a VariableProxy which is in strong
- // mode, and if yes, the source location of the reference.
- bool has_strong_mode_reference_;
- int strong_mode_reference_start_position_;
- int strong_mode_reference_end_position_;
// If this field is set, this variable references the stored locally bound
// variable, but it might be shadowed by variable bindings introduced by
@@ -190,28 +158,6 @@ class Variable: public ZoneObject {
InitializationFlag initialization_flag_;
MaybeAssignedFlag maybe_assigned_;
};
-
-class ClassVariable : public Variable {
- public:
- ClassVariable(Scope* scope, const AstRawString* name, VariableMode mode,
- InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
- int declaration_group_start = -1)
- : Variable(scope, name, mode, Variable::CLASS, initialization_flag,
- maybe_assigned_flag),
- declaration_group_start_(declaration_group_start) {}
-
- int declaration_group_start() const { return declaration_group_start_; }
- void set_declaration_group_start(int declaration_group_start) {
- declaration_group_start_ = declaration_group_start;
- }
-
- private:
- // For classes we keep track of consecutive groups of declarations. They are
- // needed for strong mode scoping checks. TODO(marja, rossberg): Implement
- // checks for functions too.
- int declaration_group_start_;
-};
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 83898d12bf..272b6a4180 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -78,8 +78,6 @@ namespace internal {
V(kExportDeclaration, "Export declaration") \
V(kExternalStringExpectedButNotFound, \
"External string expected, but not found") \
- V(kForInStatementOptimizationIsDisabled, \
- "ForInStatement optimization is disabled") \
V(kForInStatementWithNonLocalEachVariable, \
"ForInStatement with non-local each variable") \
V(kForOfStatement, "ForOfStatement") \
@@ -99,8 +97,6 @@ namespace internal {
V(kImportDeclaration, "Import declaration") \
V(kIndexIsNegative, "Index is negative") \
V(kIndexIsTooLarge, "Index is too large") \
- V(kInlinedRuntimeFunctionFastOneByteArrayJoin, \
- "Inlined runtime function: FastOneByteArrayJoin") \
V(kInliningBailedOut, "Inlining bailed out") \
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
"Input GPR is expected to have upper32 cleared") \
@@ -131,8 +127,6 @@ namespace internal {
V(kNativeFunctionLiteral, "Native function literal") \
V(kNeedSmiLiteral, "Need a Smi literal here") \
V(kNoCasesLeft, "No cases left") \
- V(kNoEmptyArraysHereInEmitFastOneByteArrayJoin, \
- "No empty arrays here in EmitFastOneByteArrayJoin") \
V(kNonInitializerAssignmentToConst, "Non-initializer assignment to const") \
V(kNonSmiIndex, "Non-smi index") \
V(kNonSmiKeyInArrayLiteral, "Non-smi key in array literal") \
@@ -150,6 +144,7 @@ namespace internal {
"Operand is a smi and not a bound function") \
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
+ V(kOperandIsASmiAndNotAReceiver, "Operand is a smi and not a receiver") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsNotADate, "Operand is not a date") \
@@ -157,6 +152,7 @@ namespace internal {
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotAName, "Operand is not a name") \
V(kOperandIsNotANumber, "Operand is not a number") \
+ V(kOperandIsNotAReceiver, "Operand is not a receiver") \
V(kOperandIsNotASmi, "Operand is not a smi") \
V(kOperandIsNotAString, "Operand is not a string") \
V(kOperandIsNotSmi, "Operand is not smi") \
@@ -183,10 +179,10 @@ namespace internal {
"Sloppy function expects JSReceiver as receiver.") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
- V(kSpread, "Spread in array literal") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kSuperReference, "Super reference") \
+ V(kTailCall, "Tail call") \
V(kTheCurrentStackPointerIsBelowCsp, \
"The current stack pointer is below csp") \
V(kTheSourceAndDestinationAreTheSame, \
@@ -236,6 +232,7 @@ namespace internal {
"Unexpected number of pre-allocated property fields") \
V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
V(kUnexpectedSmi, "Unexpected smi value") \
+ V(kUnexpectedStackDepth, "Unexpected operand stack depth in full-codegen") \
V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
V(kUnexpectedStringType, "Unexpected string type") \
V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
@@ -253,6 +250,8 @@ namespace internal {
V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
V(kUnsupportedPhiUseOfConstVariable, \
"Unsupported phi use of const variable") \
+ V(kUnexpectedReturnFromBytecodeHandler, \
+ "Unexpectedly returned from a bytecode handler") \
V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
V(kUnsupportedSwitchStatement, "Unsupported switch statement") \
V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate") \
@@ -267,7 +266,6 @@ namespace internal {
"Should not directly enter OSR-compiled function") \
V(kYield, "Yield")
-
#define ERROR_MESSAGES_CONSTANTS(C, T) C,
enum BailoutReason {
ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
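
The V(...) entries edited above feed an X-macro: ERROR_MESSAGES_LIST is expanded once to declare the enum constants and again elsewhere to build the matching message-string table, so the two can never drift apart. A self-contained sketch of the pattern, with demo names standing in for V8's list:

    #include <cstdio>

    // One list, expanded twice with different definitions of V.
    #define DEMO_MESSAGES_LIST(V)               \
      V(kTailCall, "Tail call")                 \
      V(kUnexpectedSmi, "Unexpected smi value")

    #define DEMO_CONSTANT(C, T) C,
    enum DemoReason { DEMO_MESSAGES_LIST(DEMO_CONSTANT) kLastDemoMessage };
    #undef DEMO_CONSTANT

    #define DEMO_TEXT(C, T) T,
    static const char* kDemoMessages[] = {DEMO_MESSAGES_LIST(DEMO_TEXT)};
    #undef DEMO_TEXT

    int main() {
      // Prints "1: Unexpected smi value".
      std::printf("%d: %s\n", kUnexpectedSmi, kDemoMessages[kUnexpectedSmi]);
    }
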
diff --git a/deps/v8/src/base.isolate b/deps/v8/src/base.isolate
index 8422ec7b60..b51de01ac7 100644
--- a/deps/v8/src/base.isolate
+++ b/deps/v8/src/base.isolate
@@ -22,6 +22,13 @@
],
},
}],
+ ['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and v8_separate_ignition_snapshot==1', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/snapshot_blob_ignition.bin',
+ ],
+ },
+ }],
['OS=="linux" and component=="shared_library" and target_arch=="ia32"', {
'variables': {
'files': [
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index 3e628fead9..ea33e48928 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -157,6 +157,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "src/base/atomicops_internals_mips_gcc.h"
#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS64
#include "src/base/atomicops_internals_mips64_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_S390
+#include "src/base/atomicops_internals_s390_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif
diff --git a/deps/v8/src/base/atomicops_internals_s390_gcc.h b/deps/v8/src/base/atomicops_internals_s390_gcc.h
new file mode 100644
index 0000000000..6e34f305e3
--- /dev/null
+++ b/deps/v8/src/base/atomicops_internals_s390_gcc.h
@@ -0,0 +1,152 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_S390_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_S390_H_
+
+namespace v8 {
+namespace base {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return (__sync_val_compare_and_swap(ptr, old_value, new_value));
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return __sync_add_and_fetch(ptr, increment);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void MemoryBarrier() { __sync_synchronize(); }
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#ifdef V8_TARGET_ARCH_S390X
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return (__sync_val_compare_and_swap(ptr, old_value, new_value));
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 old_value;
+ do {
+ old_value = *ptr;
+ } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
+ return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return __sync_add_and_fetch(ptr, increment);
+}
+
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#endif
+
+} // namespace base
+} // namespace v8
+
+#endif // V8_BASE_ATOMICOPS_INTERNALS_S390_H_
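
Everything in the new s390 header is built from two GCC primitives: the __sync compare-and-swap family (exchange is a CAS retry loop) and __sync_synchronize() as a full fence around plain loads and stores. A standalone sketch of those building blocks using the same intrinsics, with a plain int standing in for v8::base::Atomic32:

    #include <cassert>

    typedef int Atomic32;  // stand-in for v8::base::Atomic32

    // Exchange via CAS retry, as in NoBarrier_AtomicExchange above: reread
    // the current value until the swap wins against concurrent writers.
    Atomic32 Exchange(volatile Atomic32* ptr, Atomic32 new_value) {
      Atomic32 old_value;
      do {
        old_value = *ptr;
      } while (!__sync_bool_compare_and_swap(ptr, old_value, new_value));
      return old_value;
    }

    int main() {
      volatile Atomic32 cell = 7;
      assert(Exchange(&cell, 42) == 7 && cell == 42);
      assert(__sync_add_and_fetch(&cell, 1) == 43);  // Barrier_AtomicIncrement
      __sync_synchronize();  // full fence, as in MemoryBarrier()
    }
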
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index 4ba3c47ad9..0e76624884 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -92,6 +92,20 @@ inline unsigned CountLeadingZeros64(uint64_t value) {
}
+// ReverseBits(value) returns |value| in reverse bit order.
+template <typename T>
+T ReverseBits(T value) {
+ DCHECK((sizeof(value) == 1) || (sizeof(value) == 2) || (sizeof(value) == 4) ||
+ (sizeof(value) == 8));
+ T result = 0;
+ for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
+ result = (result << 1) | (value & 1);
+ value >>= 1;
+ }
+ return result;
+}
+
+
// CountTrailingZeros32(value) returns the number of zero bits preceding the
// least significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns 32.
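
ReverseBits works by peeling the low bit off value each iteration and appending it to the low end of result; after sizeof(value) * 8 rounds the bit order is mirrored. A worked copy of the loop with spot checks:

    #include <cassert>
    #include <cstdint>

    // Same loop as the patch: shift bits out of |value| LSB-first and into
    // |result|, mirroring the bit order.
    template <typename T>
    T ReverseBits(T value) {
      T result = 0;
      for (unsigned i = 0; i < sizeof(value) * 8; i++) {
        result = (result << 1) | (value & 1);
        value >>= 1;
      }
      return result;
    }

    int main() {
      assert(ReverseBits<uint8_t>(0x01) == 0x80);  // 00000001 -> 10000000
      assert(ReverseBits<uint8_t>(0xb4) == 0x2d);  // 10110100 -> 00101101
      assert(ReverseBits<uint32_t>(1u) == 0x80000000u);
    }
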
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 692494afcb..777f379bae 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -312,6 +312,8 @@ CPU::CPU()
architecture_(0),
variant_(-1),
part_(0),
+ icache_line_size_(UNKNOWN_CACHE_LINE_SIZE),
+ dcache_line_size_(UNKNOWN_CACHE_LINE_SIZE),
has_fpu_(false),
has_cmov_(false),
has_sahf_(false),
@@ -644,9 +646,16 @@ CPU::CPU()
if (n == 0 || entry.a_type == AT_NULL) {
break;
}
- if (entry.a_type == AT_PLATFORM) {
- auxv_cpu_type = reinterpret_cast<char*>(entry.a_un.a_val);
- break;
+ switch (entry.a_type) {
+ case AT_PLATFORM:
+ auxv_cpu_type = reinterpret_cast<char*>(entry.a_un.a_val);
+ break;
+ case AT_ICACHEBSIZE:
+ icache_line_size_ = entry.a_un.a_val;
+ break;
+ case AT_DCACHEBSIZE:
+ dcache_line_size_ = entry.a_un.a_val;
+ break;
}
}
fclose(fp);
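
The cpu.cc hunk reads the kernel-provided ELF auxiliary vector for the cache-block-size entries. On glibc (>= 2.16) the same entries are reachable through getauxval(); a minimal probe, noting that AT_ICACHEBSIZE/AT_DCACHEBSIZE are only populated on some architectures (notably PowerPC), so 0 (the patch's UNKNOWN_CACHE_LINE_SIZE) is the usual answer elsewhere:

    #include <cstdio>
    #include <elf.h>       // AT_ICACHEBSIZE / AT_DCACHEBSIZE
    #include <sys/auxv.h>  // getauxval(), glibc >= 2.16

    int main() {
      // getauxval() returns 0 when the kernel supplied no such entry,
      // matching the UNKNOWN_CACHE_LINE_SIZE default in the patch.
      unsigned long icache = getauxval(AT_ICACHEBSIZE);
      unsigned long dcache = getauxval(AT_DCACHEBSIZE);
      std::printf("icache line: %lu, dcache line: %lu\n", icache, dcache);
      return 0;
    }
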
diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h
index ca108fa2bf..3778d27233 100644
--- a/deps/v8/src/base/cpu.h
+++ b/deps/v8/src/base/cpu.h
@@ -75,6 +75,9 @@ class CPU final {
// General features
bool has_fpu() const { return has_fpu_; }
+ int icache_line_size() const { return icache_line_size_; }
+ int dcache_line_size() const { return dcache_line_size_; }
+ static const int UNKNOWN_CACHE_LINE_SIZE = 0;
// x86 features
bool has_cmov() const { return has_cmov_; }
@@ -118,6 +121,8 @@ class CPU final {
int architecture_;
int variant_;
int part_;
+ int icache_line_size_;
+ int dcache_line_size_;
bool has_fpu_;
bool has_cmov_;
bool has_sahf_;
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index f68a12ab14..252c51cae4 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -134,7 +134,8 @@ class Genesis BASE_EMBEDDED {
public:
Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
- v8::ExtensionConfiguration* extensions, ContextType context_type);
+ v8::ExtensionConfiguration* extensions,
+ GlobalContextType context_type);
~Genesis() { }
Isolate* isolate() const { return isolate_; }
@@ -187,10 +188,10 @@ class Genesis BASE_EMBEDDED {
// New context initialization. Used for creating a context from scratch.
void InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> empty_function,
- ContextType context_type);
+ GlobalContextType context_type);
void InitializeExperimentalGlobal();
// Depending on the situation, expose and/or get rid of the utils object.
- void ConfigureUtilsObject(ContextType context_type);
+ void ConfigureUtilsObject(GlobalContextType context_type);
#define DECLARE_FEATURE_INITIALIZATION(id, descr) \
void InitializeGlobal_##id();
@@ -206,7 +207,7 @@ class Genesis BASE_EMBEDDED {
Handle<JSFunction> InstallInternalArray(Handle<JSObject> target,
const char* name,
ElementsKind elements_kind);
- bool InstallNatives(ContextType context_type);
+ bool InstallNatives(GlobalContextType context_type);
void InstallTypedArray(const char* name, ElementsKind elements_kind,
Handle<JSFunction>* fun);
@@ -318,11 +319,10 @@ void Bootstrapper::Iterate(ObjectVisitor* v) {
v->Synchronize(VisitorSynchronization::kExtensions);
}
-
Handle<Context> Bootstrapper::CreateEnvironment(
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
- v8::ExtensionConfiguration* extensions, ContextType context_type) {
+ v8::ExtensionConfiguration* extensions, GlobalContextType context_type) {
HandleScope scope(isolate_);
Genesis genesis(isolate_, maybe_global_proxy, global_proxy_template,
extensions, context_type);
@@ -483,7 +483,7 @@ void Genesis::SetFunctionInstanceDescriptor(Handle<Map> map,
Handle<Map> Genesis::CreateSloppyFunctionMap(FunctionMode function_mode) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetFunctionInstanceDescriptor(map, function_mode);
- if (IsFunctionModeWithPrototype(function_mode)) map->set_is_constructor();
+ map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
map->set_is_callable();
return map;
}
@@ -715,7 +715,7 @@ Handle<Map> Genesis::CreateStrictFunctionMap(
FunctionMode function_mode, Handle<JSFunction> empty_function) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetStrictFunctionInstanceDescriptor(map, function_mode);
- if (IsFunctionModeWithPrototype(function_mode)) map->set_is_constructor();
+ map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
map->set_is_callable();
Map::SetPrototype(map, empty_function);
return map;
@@ -726,7 +726,7 @@ Handle<Map> Genesis::CreateStrongFunctionMap(
Handle<JSFunction> empty_function, bool is_constructor) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetStrongFunctionInstanceDescriptor(map);
- if (is_constructor) map->set_is_constructor();
+ map->set_is_constructor(is_constructor);
Map::SetPrototype(map, empty_function);
map->set_is_callable();
map->set_is_extensible(is_constructor);
@@ -789,6 +789,7 @@ void Genesis::CreateIteratorMaps() {
// Generator functions do not have "caller" or "arguments" accessors.
Handle<Map> sloppy_generator_function_map =
Map::Copy(strict_function_map, "SloppyGeneratorFunction");
+ sloppy_generator_function_map->set_is_constructor(false);
Map::SetPrototype(sloppy_generator_function_map,
generator_function_prototype);
native_context()->set_sloppy_generator_function_map(
@@ -796,6 +797,7 @@ void Genesis::CreateIteratorMaps() {
Handle<Map> strict_generator_function_map =
Map::Copy(strict_function_map, "StrictGeneratorFunction");
+ strict_generator_function_map->set_is_constructor(false);
Map::SetPrototype(strict_generator_function_map,
generator_function_prototype);
native_context()->set_strict_generator_function_map(
@@ -804,6 +806,7 @@ void Genesis::CreateIteratorMaps() {
Handle<Map> strong_function_map(native_context()->strong_function_map());
Handle<Map> strong_generator_function_map =
Map::Copy(strong_function_map, "StrongGeneratorFunction");
+ strong_generator_function_map->set_is_constructor(false);
Map::SetPrototype(strong_generator_function_map,
generator_function_prototype);
native_context()->set_strong_generator_function_map(
@@ -822,7 +825,7 @@ static void ReplaceAccessors(Handle<Map> map,
PropertyAttributes attributes,
Handle<AccessorPair> accessor_pair) {
DescriptorArray* descriptors = map->instance_descriptors();
- int idx = descriptors->SearchWithCache(*name, *map);
+ int idx = descriptors->SearchWithCache(map->GetIsolate(), *name, *map);
AccessorConstantDescriptor descriptor(name, accessor_pair, attributes);
descriptors->Replace(idx, &descriptor);
}
@@ -952,7 +955,6 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
}
js_global_object_function->initial_map()->set_is_prototype_map(true);
- js_global_object_function->initial_map()->set_is_hidden_prototype();
js_global_object_function->initial_map()->set_dictionary_map(true);
Handle<JSGlobalObject> global_object =
factory()->NewJSGlobalObject(js_global_object_function);
@@ -973,10 +975,10 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
isolate(), global_constructor, factory()->the_hole_value(),
ApiNatives::GlobalProxyType);
}
-
Handle<String> global_name = factory()->global_string();
global_proxy_function->shared()->set_instance_class_name(*global_name);
global_proxy_function->initial_map()->set_is_access_check_needed(true);
+ global_proxy_function->initial_map()->set_has_hidden_prototype(true);
// Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
// Return the global proxy.
@@ -1063,7 +1065,7 @@ static void InstallWithIntrinsicDefaultProto(Isolate* isolate,
// work in the snapshot case is done in HookUpGlobalObject.
void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> empty_function,
- ContextType context_type) {
+ GlobalContextType context_type) {
// --- N a t i v e C o n t e x t ---
// Use the empty function as closure (no scope info).
native_context()->set_closure(*empty_function);
@@ -1095,6 +1097,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> object_freeze = SimpleInstallFunction(
object_function, "freeze", Builtins::kObjectFreeze, 1, false);
native_context()->set_object_freeze(*object_freeze);
+ SimpleInstallFunction(object_function, "getOwnPropertyDescriptor",
+ Builtins::kObjectGetOwnPropertyDescriptor, 2, false);
+ SimpleInstallFunction(object_function, "getOwnPropertyNames",
+ Builtins::kObjectGetOwnPropertyNames, 1, false);
+ SimpleInstallFunction(object_function, "getOwnPropertySymbols",
+ Builtins::kObjectGetOwnPropertySymbols, 1, false);
+ SimpleInstallFunction(object_function, "is", Builtins::kObjectIs, 2, true);
Handle<JSFunction> object_is_extensible =
SimpleInstallFunction(object_function, "isExtensible",
Builtins::kObjectIsExtensible, 1, false);
@@ -1140,6 +1149,22 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(prototype, factory->toString_string(),
Builtins::kFunctionPrototypeToString, 0, false);
+ // Install the @@hasInstance function.
+ Handle<JSFunction> has_instance = InstallFunction(
+ prototype, factory->has_instance_symbol(), JS_OBJECT_TYPE,
+ JSObject::kHeaderSize, MaybeHandle<JSObject>(),
+ Builtins::kFunctionHasInstance,
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY));
+
+ // Set the expected parameters for @@hasInstance to 1; required by builtin.
+ has_instance->shared()->set_internal_formal_parameter_count(1);
+
+ // Set the length for the function to satisfy ECMA-262.
+ has_instance->shared()->set_length(1);
+
+ // Install in the native context
+ native_context()->set_ordinary_has_instance(*has_instance);
+
// Install the "constructor" property on the %FunctionPrototype%.
JSObject::AddProperty(prototype, factory->constructor_string(),
function_fun, DONT_ENUM);
@@ -1216,9 +1241,29 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> boolean_fun =
InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
isolate->initial_object_prototype(),
- Builtins::kIllegal);
+ Builtins::kBooleanConstructor);
+ boolean_fun->shared()->DontAdaptArguments();
+ boolean_fun->shared()->set_construct_stub(
+ *isolate->builtins()->BooleanConstructor_ConstructStub());
+ boolean_fun->shared()->set_length(1);
InstallWithIntrinsicDefaultProto(isolate, boolean_fun,
Context::BOOLEAN_FUNCTION_INDEX);
+
+ // Create the %BooleanPrototype%
+ Handle<JSValue> prototype =
+ Handle<JSValue>::cast(factory->NewJSObject(boolean_fun, TENURED));
+ prototype->set_value(isolate->heap()->false_value());
+ Accessors::FunctionSetPrototype(boolean_fun, prototype).Assert();
+
+ // Install the "constructor" property on the {prototype}.
+ JSObject::AddProperty(prototype, factory->constructor_string(), boolean_fun,
+ DONT_ENUM);
+
+ // Install the Boolean.prototype methods.
+ SimpleInstallFunction(prototype, "toString",
+ Builtins::kBooleanPrototypeToString, 0, false);
+ SimpleInstallFunction(prototype, "valueOf",
+ Builtins::kBooleanPrototypeValueOf, 0, false);
}
{ // --- S t r i n g ---
@@ -1234,6 +1279,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<Map> string_map =
Handle<Map>(native_context()->string_function()->initial_map());
+ string_map->set_elements_kind(FAST_STRING_WRAPPER_ELEMENTS);
Map::EnsureDescriptorSlack(string_map, 1);
PropertyAttributes attribs = static_cast<PropertyAttributes>(
@@ -1250,14 +1296,20 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
// --- S y m b o l ---
- Handle<JSFunction> symbol_fun = InstallFunction(
- global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(), Builtins::kSymbolConstructor);
+ Handle<JSObject> prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ Handle<JSFunction> symbol_fun =
+ InstallFunction(global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
+ prototype, Builtins::kSymbolConstructor);
symbol_fun->shared()->set_construct_stub(
*isolate->builtins()->SymbolConstructor_ConstructStub());
symbol_fun->shared()->set_length(1);
symbol_fun->shared()->DontAdaptArguments();
native_context()->set_symbol_function(*symbol_fun);
+
+ // Install the "constructor" property on the {prototype}.
+ JSObject::AddProperty(prototype, factory->constructor_string(), symbol_fun,
+ DONT_ENUM);
}
{ // --- D a t e ---
@@ -1290,12 +1342,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kDatePrototypeToDateString, 0, false);
SimpleInstallFunction(prototype, "toTimeString",
Builtins::kDatePrototypeToTimeString, 0, false);
- SimpleInstallFunction(prototype, "toGMTString",
- Builtins::kDatePrototypeToUTCString, 0, false);
SimpleInstallFunction(prototype, "toISOString",
Builtins::kDatePrototypeToISOString, 0, false);
- SimpleInstallFunction(prototype, "toUTCString",
- Builtins::kDatePrototypeToUTCString, 0, false);
+ Handle<JSFunction> to_utc_string =
+ SimpleInstallFunction(prototype, "toUTCString",
+ Builtins::kDatePrototypeToUTCString, 0, false);
+ InstallFunction(prototype, to_utc_string,
+ factory->InternalizeUtf8String("toGMTString"), DONT_ENUM);
SimpleInstallFunction(prototype, "getDate", Builtins::kDatePrototypeGetDate,
0, true);
SimpleInstallFunction(prototype, "setDate", Builtins::kDatePrototypeSetDate,
@@ -1504,9 +1557,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
cons,
Handle<Object>(native_context()->initial_object_prototype(), isolate));
cons->shared()->set_instance_class_name(*name);
- Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
- DCHECK(json_object->IsJSObject());
- JSObject::AddProperty(global, name, json_object, DONT_ENUM);
+ Handle<JSObject> math = factory->NewJSObject(cons, TENURED);
+ DCHECK(math->IsJSObject());
+ JSObject::AddProperty(global, name, math, DONT_ENUM);
+ SimpleInstallFunction(math, "max", Builtins::kMathMax, 2, false);
+ SimpleInstallFunction(math, "min", Builtins::kMathMin, 2, false);
}
{ // -- A r r a y B u f f e r
@@ -1527,16 +1582,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
#undef INSTALL_TYPED_ARRAY
- Handle<JSFunction> data_view_fun =
- InstallFunction(
- global, "DataView", JS_DATA_VIEW_TYPE,
- JSDataView::kSizeWithInternalFields,
- isolate->initial_object_prototype(),
- Builtins::kIllegal);
+ Handle<JSFunction> data_view_fun = InstallFunction(
+ global, "DataView", JS_DATA_VIEW_TYPE,
+ JSDataView::kSizeWithInternalFields,
+ isolate->initial_object_prototype(), Builtins::kDataViewConstructor);
InstallWithIntrinsicDefaultProto(isolate, data_view_fun,
Context::DATA_VIEW_FUN_INDEX);
data_view_fun->shared()->set_construct_stub(
- *isolate->builtins()->JSBuiltinsConstructStub());
+ *isolate->builtins()->DataViewConstructor_ConstructStub());
+ data_view_fun->shared()->set_length(3);
+ data_view_fun->shared()->DontAdaptArguments();
}
{ // -- M a p
@@ -1557,7 +1612,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- I t e r a t o r R e s u l t
Handle<Map> map =
- factory->NewMap(JS_ITERATOR_RESULT_TYPE, JSIteratorResult::kSize);
+ factory->NewMap(JS_OBJECT_TYPE, JSIteratorResult::kSize);
Map::SetPrototype(map, isolate->initial_object_prototype());
Map::EnsureDescriptorSlack(map, 2);
@@ -1573,6 +1628,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
map->AppendDescriptor(&d);
}
+ map->SetConstructor(native_context()->object_function());
map->SetInObjectProperties(2);
native_context()->set_iterator_result_map(*map);
}
@@ -1618,7 +1674,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_bound_function_without_constructor_map(*map);
map = Map::Copy(map, "IsConstructor");
- map->set_is_constructor();
+ map->set_is_constructor(true);
native_context()->set_bound_function_with_constructor_map(*map);
}
@@ -1633,18 +1689,20 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
function->shared()->set_instance_class_name(*arguments_string);
Handle<Map> map = factory->NewMap(
- JS_OBJECT_TYPE, Heap::kSloppyArgumentsObjectSize, FAST_ELEMENTS);
+ JS_OBJECT_TYPE, JSSloppyArgumentsObject::kSize, FAST_ELEMENTS);
// Create the descriptor array for the arguments object.
Map::EnsureDescriptorSlack(map, 2);
{ // length
- DataDescriptor d(factory->length_string(), Heap::kArgumentsLengthIndex,
- DONT_ENUM, Representation::Tagged());
+ DataDescriptor d(factory->length_string(),
+ JSSloppyArgumentsObject::kLengthIndex, DONT_ENUM,
+ Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // callee
- DataDescriptor d(factory->callee_string(), Heap::kArgumentsCalleeIndex,
- DONT_ENUM, Representation::Tagged());
+ DataDescriptor d(factory->callee_string(),
+ JSSloppyArgumentsObject::kCalleeIndex, DONT_ENUM,
+ Representation::Tagged());
map->AppendDescriptor(&d);
}
// @@iterator method is added later.
@@ -1656,8 +1714,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSFunction::SetInitialMap(function, map,
isolate->initial_object_prototype());
- DCHECK(map->GetInObjectProperties() > Heap::kArgumentsCalleeIndex);
- DCHECK(map->GetInObjectProperties() > Heap::kArgumentsLengthIndex);
DCHECK(!map->is_dictionary_map());
DCHECK(IsFastObjectElementsKind(map->elements_kind()));
}
@@ -1693,13 +1749,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Create the map. Allocate one in-object field for length.
Handle<Map> map = factory->NewMap(
- JS_OBJECT_TYPE, Heap::kStrictArgumentsObjectSize, FAST_ELEMENTS);
+ JS_OBJECT_TYPE, JSStrictArgumentsObject::kSize, FAST_ELEMENTS);
// Create the descriptor array for the arguments object.
Map::EnsureDescriptorSlack(map, 3);
{ // length
- DataDescriptor d(factory->length_string(), Heap::kArgumentsLengthIndex,
- DONT_ENUM, Representation::Tagged());
+ DataDescriptor d(factory->length_string(),
+ JSStrictArgumentsObject::kLengthIndex, DONT_ENUM,
+ Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // callee
@@ -1725,7 +1782,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_strict_arguments_map(*map);
- DCHECK(map->GetInObjectProperties() > Heap::kArgumentsLengthIndex);
DCHECK(!map->is_dictionary_map());
DCHECK(IsFastObjectElementsKind(map->elements_kind()));
}
@@ -1805,7 +1861,7 @@ bool Bootstrapper::CompileBuiltin(Isolate* isolate, int index) {
Handle<Object> args[] = {global, utils, extras_utils};
return Bootstrapper::CompileNative(isolate, name, source_code,
- arraysize(args), args);
+ arraysize(args), args, NATIVES_CODE);
}
@@ -1818,7 +1874,7 @@ bool Bootstrapper::CompileExperimentalBuiltin(Isolate* isolate, int index) {
Handle<Object> utils = isolate->natives_utils_object();
Handle<Object> args[] = {global, utils};
return Bootstrapper::CompileNative(isolate, name, source_code,
- arraysize(args), args);
+ arraysize(args), args, NATIVES_CODE);
}
@@ -1832,7 +1888,7 @@ bool Bootstrapper::CompileExtraBuiltin(Isolate* isolate, int index) {
Handle<Object> extras_utils = isolate->extras_utils_object();
Handle<Object> args[] = {global, binding, extras_utils};
return Bootstrapper::CompileNative(isolate, name, source_code,
- arraysize(args), args);
+ arraysize(args), args, EXTENSION_CODE);
}
@@ -1847,13 +1903,13 @@ bool Bootstrapper::CompileExperimentalExtraBuiltin(Isolate* isolate,
Handle<Object> extras_utils = isolate->extras_utils_object();
Handle<Object> args[] = {global, binding, extras_utils};
return Bootstrapper::CompileNative(isolate, name, source_code,
- arraysize(args), args);
+ arraysize(args), args, EXTENSION_CODE);
}
-
bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
Handle<String> source, int argc,
- Handle<Object> argv[]) {
+ Handle<Object> argv[],
+ NativesFlag natives_flag) {
SuppressDebug compiling_natives(isolate->debug());
// During genesis, the boilerplate for stack overflow won't work until the
// environment has been at least partially initialized. Add a stack check
@@ -1870,7 +1926,7 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
isolate->factory()->NewStringFromUtf8(name).ToHandleChecked();
Handle<SharedFunctionInfo> function_info = Compiler::CompileScript(
source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
- context, NULL, NULL, ScriptCompiler::kNoCompileOptions, NATIVES_CODE,
+ context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag,
false);
if (function_info.is_null()) return false;
@@ -1928,7 +1984,7 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
function_info = Compiler::CompileScript(
source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
context, extension, NULL, ScriptCompiler::kNoCompileOptions,
- NOT_NATIVES_CODE, false);
+ EXTENSION_CODE, false);
if (function_info.is_null()) return false;
cache->Add(name, function_info);
}
@@ -1977,8 +2033,7 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
return Handle<JSObject>::cast(value);
}
-
-void Genesis::ConfigureUtilsObject(ContextType context_type) {
+void Genesis::ConfigureUtilsObject(GlobalContextType context_type) {
switch (context_type) {
// We still need the utils object to find debug functions.
case DEBUG_CONTEXT:
@@ -2027,24 +2082,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
#undef EXPORT_PUBLIC_SYMBOL
{
- Handle<JSFunction> apply = InstallFunction(
- container, "reflect_apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), Builtins::kReflectApply);
- apply->shared()->DontAdaptArguments();
- apply->shared()->set_length(3);
- native_context->set_reflect_apply(*apply);
- }
-
- {
- Handle<JSFunction> construct = InstallFunction(
- container, "reflect_construct", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), Builtins::kReflectConstruct);
- construct->shared()->DontAdaptArguments();
- construct->shared()->set_length(2);
- native_context->set_reflect_construct(*construct);
- }
-
- {
Handle<JSFunction> to_string = InstallFunction(
container, "object_to_string", JS_OBJECT_TYPE, JSObject::kHeaderSize,
MaybeHandle<JSObject>(), Builtins::kObjectProtoToString);
@@ -2279,7 +2316,6 @@ void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
}
INITIALIZE_FLAG(FLAG_harmony_tostring)
- INITIALIZE_FLAG(FLAG_harmony_tolength)
INITIALIZE_FLAG(FLAG_harmony_species)
#undef INITIALIZE_FLAG
@@ -2299,13 +2335,15 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring_assignment)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_observe)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexps)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode_regexps)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_completion)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tolength)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_do_expressions)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_iterator_close)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_lookbehind)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_name)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(promise_extra)
-
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_instanceof)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@@ -2328,13 +2366,6 @@ void Genesis::InitializeGlobal_harmony_tostring() {
}
-void Genesis::InitializeGlobal_harmony_concat_spreadable() {
- if (!FLAG_harmony_concat_spreadable) return;
- InstallPublicSymbol(factory(), native_context(), "isConcatSpreadable",
- factory()->is_concat_spreadable_symbol());
-}
-
-
void Genesis::InitializeGlobal_harmony_regexp_subclass() {
if (!FLAG_harmony_regexp_subclass) return;
InstallPublicSymbol(factory(), native_context(), "match",
@@ -2364,6 +2395,15 @@ void Genesis::InitializeGlobal_harmony_reflect() {
Builtins::kReflectDeleteProperty, 2, true);
native_context()->set_reflect_delete_property(*delete_property);
+ Handle<JSFunction> apply = SimpleCreateFunction(
+ isolate(), factory->apply_string(), Builtins::kReflectApply, 3, false);
+ native_context()->set_reflect_apply(*apply);
+
+ Handle<JSFunction> construct =
+ SimpleCreateFunction(isolate(), factory->construct_string(),
+ Builtins::kReflectConstruct, 2, false);
+ native_context()->set_reflect_construct(*construct);
+
if (!FLAG_harmony_reflect) return;
Handle<JSGlobalObject> global(JSGlobalObject::cast(
@@ -2375,6 +2415,8 @@ void Genesis::InitializeGlobal_harmony_reflect() {
InstallFunction(reflect, define_property, factory->defineProperty_string());
InstallFunction(reflect, delete_property, factory->deleteProperty_string());
+ InstallFunction(reflect, apply, factory->apply_string());
+ InstallFunction(reflect, construct, factory->construct_string());
SimpleInstallFunction(reflect, factory->get_string(),
Builtins::kReflectGet, 2, false);
@@ -2438,6 +2480,35 @@ void Genesis::InitializeGlobal_harmony_simd() {
}
+void Genesis::InitializeGlobal_harmony_object_values_entries() {
+ if (!FLAG_harmony_object_values_entries) return;
+
+ Handle<JSGlobalObject> global(
+ JSGlobalObject::cast(native_context()->global_object()));
+ Isolate* isolate = global->GetIsolate();
+ Factory* factory = isolate->factory();
+
+ Handle<JSFunction> object_function = isolate->object_function();
+ SimpleInstallFunction(object_function, factory->entries_string(),
+ Builtins::kObjectEntries, 1, false);
+ SimpleInstallFunction(object_function, factory->values_string(),
+ Builtins::kObjectValues, 1, false);
+}
+
+void Genesis::InitializeGlobal_harmony_object_own_property_descriptors() {
+ if (!FLAG_harmony_object_own_property_descriptors) return;
+
+ Handle<JSGlobalObject> global(
+ JSGlobalObject::cast(native_context()->global_object()));
+ Isolate* isolate = global->GetIsolate();
+ Factory* factory = isolate->factory();
+
+ Handle<JSFunction> object_function = isolate->object_function();
+ SimpleInstallFunction(object_function,
+ factory->getOwnPropertyDescriptors_string(),
+ Builtins::kObjectGetOwnPropertyDescriptors, 1, false);
+}
+
void Genesis::InstallJSProxyMaps() {
// Allocate the different maps for all Proxy types.
// Next to the default proxy, we need maps indicating callable and
@@ -2445,7 +2516,7 @@ void Genesis::InstallJSProxyMaps() {
Handle<Map> proxy_function_map =
Map::Copy(isolate()->sloppy_function_without_prototype_map(), "Proxy");
- proxy_function_map->set_is_constructor();
+ proxy_function_map->set_is_constructor(true);
native_context()->set_proxy_function_map(*proxy_function_map);
Handle<Map> proxy_map =
@@ -2460,7 +2531,7 @@ void Genesis::InstallJSProxyMaps() {
Handle<Map> proxy_constructor_map =
Map::Copy(proxy_callable_map, "constructor Proxy");
- proxy_constructor_map->set_is_constructor();
+ proxy_constructor_map->set_is_constructor(true);
native_context()->set_proxy_constructor_map(*proxy_constructor_map);
}
@@ -2478,8 +2549,9 @@ void Genesis::InitializeGlobal_harmony_proxies() {
Handle<String> name = factory->Proxy_string();
Handle<Code> code(isolate->builtins()->ProxyConstructor());
- Handle<JSFunction> proxy_function = factory->NewFunction(
- isolate->proxy_function_map(), factory->Proxy_string(), code);
+ Handle<JSFunction> proxy_function =
+ factory->NewFunction(isolate->proxy_function_map(),
+ factory->Proxy_string(), MaybeHandle<Code>(code));
JSFunction::SetInitialMap(proxy_function,
Handle<Map>(native_context()->proxy_map(), isolate),
@@ -2574,8 +2646,7 @@ Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
return array_function;
}
-
-bool Genesis::InstallNatives(ContextType context_type) {
+bool Genesis::InstallNatives(GlobalContextType context_type) {
HandleScope scope(isolate());
// Set up the utils object as shared container between native scripts.
@@ -2637,10 +2708,11 @@ bool Genesis::InstallNatives(ContextType context_type) {
if (!CallUtilsFunction(isolate(), "PostNatives")) return false;
- auto function_cache =
+ auto template_instantiations_cache =
ObjectHashTable::New(isolate(), ApiNatives::kInitialFunctionCacheSize,
USE_CUSTOM_MINIMUM_CAPACITY);
- native_context()->set_function_cache(*function_cache);
+ native_context()->set_template_instantiations_cache(
+ *template_instantiations_cache);
  // Store the map for the %ObjectPrototype% after the natives have been compiled
// and the Object function has been set up.
@@ -2717,6 +2789,91 @@ bool Genesis::InstallNatives(ContextType context_type) {
InstallBuiltinFunctionIds();
+ // Create a map for accessor property descriptors (a variant of JSObject
+ // that predefines four properties get, set, configurable and enumerable).
+ {
+ // AccessorPropertyDescriptor initial map.
+ Handle<Map> map =
+ factory()->NewMap(JS_OBJECT_TYPE, JSAccessorPropertyDescriptor::kSize);
+ // Create the descriptor array for the property descriptor object.
+ Map::EnsureDescriptorSlack(map, 4);
+
+ { // get
+ DataDescriptor d(factory()->get_string(),
+ JSAccessorPropertyDescriptor::kGetIndex, NONE,
+ Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+ { // set
+ DataDescriptor d(factory()->set_string(),
+ JSAccessorPropertyDescriptor::kSetIndex, NONE,
+ Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+ { // enumerable
+ DataDescriptor d(factory()->enumerable_string(),
+ JSAccessorPropertyDescriptor::kEnumerableIndex, NONE,
+ Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+ { // configurable
+ DataDescriptor d(factory()->configurable_string(),
+ JSAccessorPropertyDescriptor::kConfigurableIndex, NONE,
+ Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ Map::SetPrototype(map, isolate()->initial_object_prototype());
+ map->SetConstructor(native_context()->object_function());
+ map->SetInObjectProperties(4);
+ map->set_unused_property_fields(0);
+
+ native_context()->set_accessor_property_descriptor_map(*map);
+ }
+
+ // Create a map for data property descriptors (a variant of JSObject
+ // that predefines four properties value, writable, configurable and
+ // enumerable).
+ {
+ // DataPropertyDescriptor initial map.
+ Handle<Map> map =
+ factory()->NewMap(JS_OBJECT_TYPE, JSDataPropertyDescriptor::kSize);
+ // Create the descriptor array for the property descriptor object.
+ Map::EnsureDescriptorSlack(map, 4);
+
+ { // value
+ DataDescriptor d(factory()->value_string(),
+ JSDataPropertyDescriptor::kValueIndex, NONE,
+ Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+ { // writable
+ DataDescriptor d(factory()->writable_string(),
+ JSDataPropertyDescriptor::kWritableIndex, NONE,
+ Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+ { // enumerable
+ DataDescriptor d(factory()->enumerable_string(),
+ JSDataPropertyDescriptor::kEnumerableIndex, NONE,
+ Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+ { // configurable
+ DataDescriptor d(factory()->configurable_string(),
+ JSDataPropertyDescriptor::kConfigurableIndex, NONE,
+ Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ Map::SetPrototype(map, isolate()->initial_object_prototype());
+ map->SetConstructor(native_context()->object_function());
+ map->SetInObjectProperties(4);
+ map->set_unused_property_fields(0);
+
+ native_context()->set_data_property_descriptor_map(*map);
+ }
+
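
The two maps above give property descriptor objects a fixed in-object layout, so results of Object.getOwnPropertyDescriptor can be built without a dictionary-backed property store. As a rough standalone illustration of the same idea (nothing here is V8 API; slot names are stand-ins mirroring kValueIndex and friends), each property name maps to a compile-time slot index:

#include <array>
#include <cstdio>

// Hypothetical fixed-layout data property descriptor: every instance has
// exactly these four slots, at known offsets, so no hashing is needed.
enum DataDescriptorSlot { kValue = 0, kWritable, kEnumerable, kConfigurable, kSlotCount };

struct DataDescriptorObject {
  std::array<double, kSlotCount> slots;  // tagged values in a real engine
  double get(DataDescriptorSlot s) const { return slots[s]; }
  void set(DataDescriptorSlot s, double v) { slots[s] = v; }
};

int main() {
  DataDescriptorObject d{};
  d.set(kValue, 42);    // direct indexed store, no property lookup
  d.set(kWritable, 1);
  std::printf("value=%g writable=%g\n", d.get(kValue), d.get(kWritable));
}
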
// Create a constructor for RegExp results (a variant of Array that
// predefines the two properties index and match).
{
@@ -2745,7 +2902,7 @@ bool Genesis::InstallNatives(ContextType context_type) {
array_function->initial_map()->instance_descriptors());
Handle<String> length = factory()->length_string();
int old = array_descriptors->SearchWithCache(
- *length, array_function->initial_map());
+ isolate(), *length, array_function->initial_map());
DCHECK(old != DescriptorArray::kNotFound);
AccessorConstantDescriptor desc(
length, handle(array_descriptors->GetValue(old), isolate()),
@@ -2817,11 +2974,13 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_regexps_natives[] = {"native harmony-regexp.js",
nullptr};
static const char* harmony_tostring_natives[] = {nullptr};
+ static const char* harmony_iterator_close_natives[] = {nullptr};
static const char* harmony_sloppy_natives[] = {nullptr};
static const char* harmony_sloppy_function_natives[] = {nullptr};
static const char* harmony_sloppy_let_natives[] = {nullptr};
static const char* harmony_species_natives[] = {"native harmony-species.js",
nullptr};
+ static const char* harmony_tailcalls_natives[] = {nullptr};
static const char* harmony_unicode_regexps_natives[] = {
"native harmony-unicode-regexps.js", nullptr};
static const char* harmony_default_parameters_natives[] = {nullptr};
@@ -2833,17 +2992,20 @@ bool Genesis::InstallExperimentalNatives() {
"native harmony-object-observe.js", nullptr};
static const char* harmony_sharedarraybuffer_natives[] = {
"native harmony-sharedarraybuffer.js", "native harmony-atomics.js", NULL};
- static const char* harmony_concat_spreadable_natives[] = {nullptr};
static const char* harmony_simd_natives[] = {"native harmony-simd.js",
nullptr};
- static const char* harmony_tolength_natives[] = {nullptr};
- static const char* harmony_completion_natives[] = {nullptr};
static const char* harmony_do_expressions_natives[] = {nullptr};
static const char* harmony_regexp_subclass_natives[] = {nullptr};
static const char* harmony_regexp_lookbehind_natives[] = {nullptr};
+ static const char* harmony_instanceof_natives[] = {nullptr};
+ static const char* harmony_regexp_property_natives[] = {nullptr};
static const char* harmony_function_name_natives[] = {nullptr};
+ static const char* harmony_function_sent_natives[] = {nullptr};
static const char* promise_extra_natives[] = {"native promise-extra.js",
nullptr};
+ static const char* harmony_object_values_entries_natives[] = {nullptr};
+ static const char* harmony_object_own_property_descriptors_natives[] = {
+ nullptr};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -3371,12 +3533,11 @@ class NoTrackDoubleFieldsForSerializerScope {
bool enabled_;
};
-
Genesis::Genesis(Isolate* isolate,
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
v8::ExtensionConfiguration* extensions,
- ContextType context_type)
+ GlobalContextType context_type)
: isolate_(isolate), active_(isolate->bootstrapper()) {
NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
result_ = Handle<Context>::null();
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 44f0f1b2a5..d1bf201139 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -61,7 +61,7 @@ class SourceCodeCache final BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
};
-enum ContextType { FULL_CONTEXT, THIN_CONTEXT, DEBUG_CONTEXT };
+enum GlobalContextType { FULL_CONTEXT, THIN_CONTEXT, DEBUG_CONTEXT };
// The Bootstrapper is the public interface for creating a JavaScript global
// context.
@@ -80,7 +80,7 @@ class Bootstrapper final {
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_object_template,
v8::ExtensionConfiguration* extensions,
- ContextType context_type = FULL_CONTEXT);
+ GlobalContextType context_type = FULL_CONTEXT);
// Detach the environment from its outer global object.
void DetachGlobal(Handle<Context> env);
@@ -109,7 +109,7 @@ class Bootstrapper final {
static bool CompileNative(Isolate* isolate, Vector<const char> name,
Handle<String> source, int argc,
- Handle<Object> argv[]);
+ Handle<Object> argv[], NativesFlag natives_flag);
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
static bool CompileExtraBuiltin(Isolate* isolate, int index);
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 2df9503302..23c41f706e 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -59,7 +59,8 @@ class BuiltinArguments : public Arguments {
return Arguments::at<Object>(0);
}
- Handle<JSFunction> target();
+ template <class S>
+ Handle<S> target();
Handle<HeapObject> new_target();
// Gets the total number of arguments including the receiver (but
@@ -81,8 +82,9 @@ int BuiltinArguments<BuiltinExtraArguments::kTarget>::length() const {
}
template <>
-Handle<JSFunction> BuiltinArguments<BuiltinExtraArguments::kTarget>::target() {
- return Arguments::at<JSFunction>(Arguments::length() - 1);
+template <class S>
+Handle<S> BuiltinArguments<BuiltinExtraArguments::kTarget>::target() {
+ return Arguments::at<S>(Arguments::length() - 1);
}
template <>
@@ -103,9 +105,10 @@ int BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::length()
}
template <>
-Handle<JSFunction>
+template <class S>
+Handle<S>
BuiltinArguments<BuiltinExtraArguments::kTargetAndNewTarget>::target() {
- return Arguments::at<JSFunction>(Arguments::length() - 2);
+ return Arguments::at<S>(Arguments::length() - 2);
}
template <>
@@ -134,17 +137,21 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
// In the body of the builtin function the arguments can be accessed
// through the BuiltinArguments object args.
-#define BUILTIN(name) \
- MUST_USE_RESULT static Object* Builtin_Impl_##name( \
- name##ArgumentsType args, Isolate* isolate); \
- MUST_USE_RESULT static Object* Builtin_##name( \
- int args_length, Object** args_object, Isolate* isolate) { \
- name##ArgumentsType args(args_length, args_object); \
- return Builtin_Impl_##name(args, isolate); \
- } \
- MUST_USE_RESULT static Object* Builtin_Impl_##name( \
- name##ArgumentsType args, Isolate* isolate)
-
+#define BUILTIN(name) \
+ MUST_USE_RESULT static Object* Builtin_Impl_##name(name##ArgumentsType args, \
+ Isolate* isolate); \
+ MUST_USE_RESULT static Object* Builtin_##name( \
+ int args_length, Object** args_object, Isolate* isolate) { \
+ isolate->counters()->runtime_calls()->Increment(); \
+ RuntimeCallStats* stats = isolate->counters()->runtime_call_stats(); \
+ RuntimeCallTimerScope timer(isolate, &stats->Builtin_##name); \
+ name##ArgumentsType args(args_length, args_object); \
+ Object* value = Builtin_Impl_##name(args, isolate); \
+ return value; \
+ } \
+ \
+ MUST_USE_RESULT static Object* Builtin_Impl_##name(name##ArgumentsType args, \
+ Isolate* isolate)
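
The reworked BUILTIN macro now wraps every builtin body in a counter increment plus a RuntimeCallTimerScope, so per-builtin time is attributed via RAII. A self-contained sketch of that scope-timer pattern in plain C++ (CallStats and ScopedCallTimer here are stand-ins, not the V8 types):

#include <chrono>
#include <cstdio>

struct CallStats { long calls = 0; double total_ms = 0; };

// RAII scope: starts timing on construction, attributes elapsed time on
// destruction, exactly once per invocation, on every exit path.
class ScopedCallTimer {
 public:
  explicit ScopedCallTimer(CallStats* stats)
      : stats_(stats), start_(std::chrono::steady_clock::now()) {}
  ~ScopedCallTimer() {
    auto end = std::chrono::steady_clock::now();
    stats_->calls++;
    stats_->total_ms +=
        std::chrono::duration<double, std::milli>(end - start_).count();
  }
 private:
  CallStats* stats_;
  std::chrono::steady_clock::time_point start_;
};

static CallStats g_pow_stats;

double Builtin_MathPowLike(double base, int exp) {
  ScopedCallTimer timer(&g_pow_stats);  // same shape as the macro above
  double r = 1;
  for (int i = 0; i < exp; i++) r *= base;
  return r;
}

int main() {
  Builtin_MathPowLike(2.0, 10);
  std::printf("calls=%ld total_ms=%f\n", g_pow_stats.calls,
              g_pow_stats.total_ms);
}
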
// ----------------------------------------------------------------------------
@@ -194,7 +201,7 @@ inline bool GetSloppyArgumentsLength(Isolate* isolate, Handle<JSObject> object,
Map* arguments_map = isolate->native_context()->sloppy_arguments_map();
if (object->map() != arguments_map) return false;
DCHECK(object->HasFastElements());
- Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
+ Object* len_obj = object->InObjectPropertyAt(JSArgumentsObject::kLengthIndex);
if (!len_obj->IsSmi()) return false;
*out = Max(0, Smi::cast(len_obj)->value());
return *out <= object->elements()->length();
@@ -208,13 +215,12 @@ inline bool PrototypeHasNoElements(PrototypeIterator* iter) {
JSObject* current = iter->GetCurrent<JSObject>();
if (current->IsAccessCheckNeeded()) return false;
if (current->HasIndexedInterceptor()) return false;
- if (current->IsJSValue()) return false;
+ if (current->HasStringWrapperElements()) return false;
if (current->elements()->length() != 0) return false;
}
return true;
}
-
inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
JSArray* receiver) {
DisallowHeapAllocation no_gc;
@@ -232,16 +238,14 @@ inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
return PrototypeHasNoElements(&iter);
}
-
inline bool HasSimpleElements(JSObject* current) {
if (current->IsAccessCheckNeeded()) return false;
if (current->HasIndexedInterceptor()) return false;
- if (current->IsJSValue()) return false;
+ if (current->HasStringWrapperElements()) return false;
if (current->GetElementsAccessor()->HasAccessors(current)) return false;
return true;
}
-
inline bool HasOnlySimpleReceiverElements(Isolate* isolate,
JSReceiver* receiver) {
// Check that we have no accessors on the receiver's elements.
@@ -253,7 +257,6 @@ inline bool HasOnlySimpleReceiverElements(Isolate* isolate,
return PrototypeHasNoElements(&iter);
}
-
inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
// Check that ther are not elements on the prototype.
DisallowHeapAllocation no_gc;
@@ -267,12 +270,14 @@ inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
return true;
}
-
// Returns empty handle if not applicable.
MUST_USE_RESULT
inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
Isolate* isolate, Handle<Object> receiver, Arguments* args,
int first_added_arg) {
+ // We explicitly add a HandleScope to avoid creating several copies of the
+ // same handle, which would otherwise cause issues when left-trimming later on.
+ HandleScope scope(isolate);
if (!receiver->IsJSArray()) return MaybeHandle<FixedArrayBase>();
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
// If there may be elements accessors in the prototype chain, the fast path
@@ -286,12 +291,18 @@ inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
Handle<FixedArrayBase> elms(array->elements(), isolate);
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
- if (args == NULL || array->HasFastObjectElements()) return elms;
+ if (args == NULL || array->HasFastObjectElements()) {
+ return scope.CloseAndEscape(elms);
+ }
} else if (map == heap->fixed_cow_array_map()) {
elms = JSObject::EnsureWritableFastElements(array);
- if (args == NULL || array->HasFastObjectElements()) return elms;
+ if (args == NULL || array->HasFastObjectElements()) {
+ return scope.CloseAndEscape(elms);
+ }
} else if (map == heap->fixed_double_array_map()) {
- if (args == NULL) return elms;
+ if (args == NULL) {
+ return scope.CloseAndEscape(elms);
+ }
} else {
return MaybeHandle<FixedArrayBase>();
}
@@ -305,7 +316,9 @@ inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
// Need to ensure that the arguments passed in args can be contained in
// the array.
int args_length = args->length();
- if (first_added_arg >= args_length) return handle(array->elements(), isolate);
+ if (first_added_arg >= args_length) {
+ return scope.CloseAndEscape(elms);
+ }
ElementsKind origin_kind = array->map()->elements_kind();
DCHECK(!IsFastObjectElementsKind(origin_kind));
@@ -328,9 +341,9 @@ inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
}
if (target_kind != origin_kind) {
JSObject::TransitionElementsKind(array, target_kind);
- return handle(array->elements(), isolate);
+ elms = handle(array->elements(), isolate);
}
- return elms;
+ return scope.CloseAndEscape(elms);
}
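
The HandleScope/CloseAndEscape pattern above ensures every early return hands the caller one canonical handle rather than several aliases of the same elements array, which matters once left-trimming starts moving the backing store. A toy sketch of the escape pattern with hypothetical Scope machinery (not V8's handle implementation):

#include <cstdio>
#include <vector>

// Hypothetical handle scope: owns a range of slots; Escape() re-creates
// one value in the parent scope before the child scope dies.
struct Scope {
  explicit Scope(std::vector<int>* slots) : slots_(slots), base_(slots->size()) {}
  ~Scope() { slots_->resize(base_); }  // drop all handles from this scope
  int* New(int v) { slots_->push_back(v); return &slots_->back(); }
  int* Escape(int* h) {
    int v = *h;
    slots_->resize(base_);        // close this scope...
    slots_->push_back(v);         // ...then re-allocate the one survivor
    base_ = slots_->size();       // the survivor now belongs to the parent
    return &slots_->back();
  }
  std::vector<int>* slots_;
  size_t base_;
};

int* EnsureWritable(std::vector<int>* slots) {
  Scope scope(slots);
  int* tmp = scope.New(1);    // temporaries die with the scope
  int* out = scope.New(42);
  (void)tmp;
  return scope.Escape(out);   // exactly one canonical handle survives
}

int main() {
  std::vector<int> slots;
  slots.reserve(16);  // keep pointers stable for this toy example
  std::printf("%d\n", *EnsureWritable(&slots));
}
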
@@ -494,19 +507,14 @@ BUILTIN(ArraySlice) {
int relative_end = 0;
bool is_sloppy_arguments = false;
- // TODO(littledan): Look up @@species only once, not once here and
- // again in the JS builtin. Pass the species out?
- Handle<Object> species;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
- if (*species != isolate->context()->native_context()->array_function()) {
- return CallJsIntrinsic(isolate, isolate->array_slice(), args);
- }
if (receiver->IsJSArray()) {
DisallowHeapAllocation no_gc;
JSArray* array = JSArray::cast(*receiver);
if (!array->HasFastElements() ||
- !IsJSArrayFastElementMovingAllowed(isolate, array)) {
+ !IsJSArrayFastElementMovingAllowed(isolate, array) ||
+ !isolate->IsArraySpeciesLookupChainIntact() ||
+ // If this is a subclass of Array, then call out to JS
+ !array->map()->new_target_is_base()) {
AllowHeapAllocation allow_allocation;
return CallJsIntrinsic(isolate, isolate->array_slice(), args);
}
@@ -584,15 +592,11 @@ BUILTIN(ArraySplice) {
MaybeHandle<FixedArrayBase> maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3);
Handle<FixedArrayBase> elms_obj;
- if (!maybe_elms_obj.ToHandle(&elms_obj)) {
- return CallJsIntrinsic(isolate, isolate->array_splice(), args);
- }
- // TODO(littledan): Look up @@species only once, not once here and
- // again in the JS builtin. Pass the species out?
- Handle<Object> species;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
- if (*species != isolate->context()->native_context()->array_function()) {
+ if (!maybe_elms_obj.ToHandle(&elms_obj) ||
+ // If this is a subclass of Array, then call out to JS
+ !JSArray::cast(*receiver)->map()->new_target_is_base() ||
+ // If anything with @@species has been messed with, call out to JS
+ !isolate->IsArraySpeciesLookupChainIntact()) {
return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -663,28 +667,42 @@ namespace {
*/
class ArrayConcatVisitor {
public:
- ArrayConcatVisitor(Isolate* isolate, Handle<FixedArray> storage,
+ ArrayConcatVisitor(Isolate* isolate, Handle<Object> storage,
bool fast_elements)
: isolate_(isolate),
- storage_(Handle<FixedArray>::cast(
- isolate->global_handles()->Create(*storage))),
+ storage_(isolate->global_handles()->Create(*storage)),
index_offset_(0u),
bit_field_(FastElementsField::encode(fast_elements) |
- ExceedsLimitField::encode(false)) {}
+ ExceedsLimitField::encode(false) |
+ IsFixedArrayField::encode(storage->IsFixedArray())) {
+ DCHECK(!(this->fast_elements() && !is_fixed_array()));
+ }
~ArrayConcatVisitor() { clear_storage(); }
- void visit(uint32_t i, Handle<Object> elm) {
+ bool visit(uint32_t i, Handle<Object> elm) {
+ uint32_t index = index_offset_ + i;
+
+ if (!is_fixed_array()) {
+ Handle<Object> element_value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, element_value,
+ Object::SetElement(isolate_, storage_, index, elm, STRICT), false);
+ return true;
+ }
+
if (i >= JSObject::kMaxElementCount - index_offset_) {
set_exceeds_array_limit(true);
- return;
+ // No exception has been thrown at this point. Return true to
+ // break out; the caller will throw. A false return from visit()
+ // would imply that there is already a pending exception.
+ return true;
}
- uint32_t index = index_offset_ + i;
if (fast_elements()) {
- if (index < static_cast<uint32_t>(storage_->length())) {
- storage_->set(index, *elm);
- return;
+ if (index < static_cast<uint32_t>(storage_fixed_array()->length())) {
+ storage_fixed_array()->set(index, *elm);
+ return true;
}
// Our initial estimate of length was foiled, possibly by
// getters on the arrays increasing the length of later arrays
@@ -705,6 +723,7 @@ class ArrayConcatVisitor {
clear_storage();
set_storage(*result);
}
+ return true;
}
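
Changing visit() from void to bool threads exception state through the element walk: false means "an exception is already pending, unwind now", while the array-limit overflow deliberately returns true and lets the caller raise the RangeError itself. A minimal sketch of this error-signalling convention, with hypothetical names:

#include <cstdio>

struct Visitor {
  bool pending_exception = false;
  // Returns false only when an exception is already pending; the caller
  // must then unwind without doing further work.
  bool visit(unsigned index, int value) {
    if (value < 0) {  // stand-in for a throwing element access
      pending_exception = true;
      return false;
    }
    std::printf("elem[%u]=%d\n", index, value);
    return true;
  }
};

bool IterateElements(const int* elems, unsigned len, Visitor* v) {
  for (unsigned i = 0; i < len; i++) {
    if (!v->visit(i, elems[i])) return false;  // propagate, don't continue
  }
  return true;
}

int main() {
  int ok[] = {1, 2, 3};
  int bad[] = {1, -1, 3};
  Visitor v;
  std::printf("ok=%d\n", IterateElements(ok, 3, &v));
  std::printf("bad=%d pending=%d\n", IterateElements(bad, 3, &v),
              v.pending_exception);
}
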
void increase_index_offset(uint32_t delta) {
@@ -728,6 +747,7 @@ class ArrayConcatVisitor {
}
Handle<JSArray> ToArray() {
+ DCHECK(is_fixed_array());
Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
Handle<Object> length =
isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
@@ -735,15 +755,26 @@ class ArrayConcatVisitor {
array, fast_elements() ? FAST_HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
array->set_map(*map);
array->set_length(*length);
- array->set_elements(*storage_);
+ array->set_elements(*storage_fixed_array());
return array;
}
+ // Storage is either a FixedArray (if is_fixed_array()) or a JSReceiver
+ // (otherwise)
+ Handle<FixedArray> storage_fixed_array() {
+ DCHECK(is_fixed_array());
+ return Handle<FixedArray>::cast(storage_);
+ }
+ Handle<JSReceiver> storage_jsreceiver() {
+ DCHECK(!is_fixed_array());
+ return Handle<JSReceiver>::cast(storage_);
+ }
+
private:
// Convert storage to dictionary mode.
void SetDictionaryMode() {
- DCHECK(fast_elements());
- Handle<FixedArray> current_storage(*storage_);
+ DCHECK(fast_elements() && is_fixed_array());
+ Handle<FixedArray> current_storage = storage_fixed_array();
Handle<SeededNumberDictionary> slow_storage(
SeededNumberDictionary::New(isolate_, current_storage->length()));
uint32_t current_length = static_cast<uint32_t>(current_storage->length());
@@ -771,12 +802,13 @@ class ArrayConcatVisitor {
}
inline void set_storage(FixedArray* storage) {
- storage_ =
- Handle<FixedArray>::cast(isolate_->global_handles()->Create(storage));
+ DCHECK(is_fixed_array());
+ storage_ = isolate_->global_handles()->Create(storage);
}
class FastElementsField : public BitField<bool, 0, 1> {};
class ExceedsLimitField : public BitField<bool, 1, 1> {};
+ class IsFixedArrayField : public BitField<bool, 2, 1> {};
bool fast_elements() const { return FastElementsField::decode(bit_field_); }
void set_fast_elements(bool fast) {
@@ -785,9 +817,10 @@ class ArrayConcatVisitor {
void set_exceeds_array_limit(bool exceeds) {
bit_field_ = ExceedsLimitField::update(bit_field_, exceeds);
}
+ bool is_fixed_array() const { return IsFixedArrayField::decode(bit_field_); }
Isolate* isolate_;
- Handle<FixedArray> storage_; // Always a global handle.
+ Handle<Object> storage_; // Always a global handle.
// Index after last seen index. Always less than or equal to
// JSObject::kMaxElementCount.
uint32_t index_offset_;
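
FastElementsField, ExceedsLimitField, and the new IsFixedArrayField pack three booleans into one word via V8's BitField template. The underlying encode/update/decode arithmetic, sketched with plain shifts and masks in standard C++:

#include <cstdint>
#include <cstdio>

// Plain-C++ rendition of BitField<bool, kShift, 1>: one bit per flag.
template <unsigned kShift>
struct BoolField {
  static constexpr uint32_t kMask = 1u << kShift;
  static uint32_t encode(bool v) { return v ? kMask : 0; }
  static uint32_t update(uint32_t word, bool v) {
    return (word & ~kMask) | encode(v);
  }
  static bool decode(uint32_t word) { return (word & kMask) != 0; }
};

using FastElements = BoolField<0>;
using ExceedsLimit = BoolField<1>;
using IsFixedArray = BoolField<2>;

int main() {
  uint32_t bits = FastElements::encode(true) | IsFixedArray::encode(true);
  bits = ExceedsLimit::update(bits, true);
  std::printf("fast=%d exceeds=%d fixed=%d\n", FastElements::decode(bits),
              ExceedsLimit::decode(bits), IsFixedArray::decode(bits));
}
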
@@ -842,14 +875,20 @@ uint32_t EstimateElementCount(Handle<JSArray> array) {
}
break;
}
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
// External arrays are always dense.
return length;
+ case NO_ELEMENTS:
+ return 0;
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ UNREACHABLE();
+ return 0;
}
// As an estimate, we assume that the prototype doesn't contain any
// inherited elements.
@@ -857,48 +896,6 @@ uint32_t EstimateElementCount(Handle<JSArray> array) {
}
-template <class ExternalArrayClass, class ElementType>
-void IterateTypedArrayElements(Isolate* isolate, Handle<JSObject> receiver,
- bool elements_are_ints,
- bool elements_are_guaranteed_smis,
- ArrayConcatVisitor* visitor) {
- Handle<ExternalArrayClass> array(
- ExternalArrayClass::cast(receiver->elements()));
- uint32_t len = static_cast<uint32_t>(array->length());
-
- DCHECK(visitor != NULL);
- if (elements_are_ints) {
- if (elements_are_guaranteed_smis) {
- for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope(isolate);
- Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get_scalar(j))),
- isolate);
- visitor->visit(j, e);
- }
- } else {
- for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope(isolate);
- int64_t val = static_cast<int64_t>(array->get_scalar(j));
- if (Smi::IsValid(static_cast<intptr_t>(val))) {
- Handle<Smi> e(Smi::FromInt(static_cast<int>(val)), isolate);
- visitor->visit(j, e);
- } else {
- Handle<Object> e =
- isolate->factory()->NewNumber(static_cast<ElementType>(val));
- visitor->visit(j, e);
- }
- }
- }
- } else {
- for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope(isolate);
- Handle<Object> e = isolate->factory()->NewNumber(array->get_scalar(j));
- visitor->visit(j, e);
- }
- }
-}
-
-
// Used for sorting indices in a List<uint32_t>.
int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
uint32_t a = *ap;
@@ -989,6 +986,28 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
}
break;
}
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS: {
+ DCHECK(object->IsJSValue());
+ Handle<JSValue> js_value = Handle<JSValue>::cast(object);
+ DCHECK(js_value->value()->IsString());
+ Handle<String> string(String::cast(js_value->value()), isolate);
+ uint32_t length = static_cast<uint32_t>(string->length());
+ uint32_t i = 0;
+ uint32_t limit = Min(length, range);
+ for (; i < limit; i++) {
+ indices->Add(i);
+ }
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ for (; i < range; i++) {
+ if (accessor->HasElement(object, i)) {
+ indices->Add(i);
+ }
+ }
+ break;
+ }
+ case NO_ELEMENTS:
+ break;
}
PrototypeIterator iter(isolate, object);
@@ -1012,7 +1031,7 @@ bool IterateElementsSlow(Isolate* isolate, Handle<JSReceiver> receiver,
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_value,
Object::GetElement(isolate, receiver, i),
false);
- visitor->visit(i, element_value);
+ if (!visitor->visit(i, element_value)) return false;
}
}
visitor->increase_index_offset(length);
@@ -1049,12 +1068,11 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
if (!val->ToUint32(&length)) {
length = 0;
}
+ // TODO(cbruni): handle other element kinds as well
return IterateElementsSlow(isolate, receiver, length, visitor);
}
if (!HasOnlySimpleElements(isolate, *receiver)) {
- // For classes which are not known to be safe to access via elements alone,
- // use the slow case.
return IterateElementsSlow(isolate, receiver, length, visitor);
}
Handle<JSObject> array = Handle<JSObject>::cast(receiver);
@@ -1073,7 +1091,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
HandleScope loop_scope(isolate);
Handle<Object> element_value(elements->get(j), isolate);
if (!element_value->IsTheHole()) {
- visitor->visit(j, element_value);
+ if (!visitor->visit(j, element_value)) return false;
} else {
Maybe<bool> maybe = JSReceiver::HasElement(array, j);
if (!maybe.IsJust()) return false;
@@ -1083,7 +1101,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, element_value, Object::GetElement(isolate, array, j),
false);
- visitor->visit(j, element_value);
+ if (!visitor->visit(j, element_value)) return false;
}
}
}
@@ -1109,7 +1127,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
double double_value = elements->get_scalar(j);
Handle<Object> element_value =
isolate->factory()->NewNumber(double_value);
- visitor->visit(j, element_value);
+ if (!visitor->visit(j, element_value)) return false;
} else {
Maybe<bool> maybe = JSReceiver::HasElement(array, j);
if (!maybe.IsJust()) return false;
@@ -1120,12 +1138,13 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, element_value, Object::GetElement(isolate, array, j),
false);
- visitor->visit(j, element_value);
+ if (!visitor->visit(j, element_value)) return false;
}
}
}
break;
}
+
case DICTIONARY_ELEMENTS: {
Handle<SeededNumberDictionary> dict(array->element_dictionary());
List<uint32_t> indices(dict->Capacity() / 2);
@@ -1141,7 +1160,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
Handle<Object> element;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, element, Object::GetElement(isolate, array, index), false);
- visitor->visit(index, element);
+ if (!visitor->visit(index, element)) return false;
// Skip to next different index (i.e., omit duplicates).
do {
j++;
@@ -1149,55 +1168,6 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
}
break;
}
- case UINT8_CLAMPED_ELEMENTS: {
- Handle<FixedUint8ClampedArray> pixels(
- FixedUint8ClampedArray::cast(array->elements()));
- for (uint32_t j = 0; j < length; j++) {
- Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate);
- visitor->visit(j, e);
- }
- break;
- }
- case INT8_ELEMENTS: {
- IterateTypedArrayElements<FixedInt8Array, int8_t>(isolate, array, true,
- true, visitor);
- break;
- }
- case UINT8_ELEMENTS: {
- IterateTypedArrayElements<FixedUint8Array, uint8_t>(isolate, array, true,
- true, visitor);
- break;
- }
- case INT16_ELEMENTS: {
- IterateTypedArrayElements<FixedInt16Array, int16_t>(isolate, array, true,
- true, visitor);
- break;
- }
- case UINT16_ELEMENTS: {
- IterateTypedArrayElements<FixedUint16Array, uint16_t>(
- isolate, array, true, true, visitor);
- break;
- }
- case INT32_ELEMENTS: {
- IterateTypedArrayElements<FixedInt32Array, int32_t>(isolate, array, true,
- false, visitor);
- break;
- }
- case UINT32_ELEMENTS: {
- IterateTypedArrayElements<FixedUint32Array, uint32_t>(
- isolate, array, true, false, visitor);
- break;
- }
- case FLOAT32_ELEMENTS: {
- IterateTypedArrayElements<FixedFloat32Array, float>(isolate, array, false,
- false, visitor);
- break;
- }
- case FLOAT64_ELEMENTS: {
- IterateTypedArrayElements<FixedFloat64Array, double>(
- isolate, array, false, false, visitor);
- break;
- }
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
for (uint32_t index = 0; index < length; index++) {
@@ -1205,10 +1175,21 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
Handle<Object> element;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, element, Object::GetElement(isolate, array, index), false);
- visitor->visit(index, element);
+ if (!visitor->visit(index, element)) return false;
}
break;
}
+ case NO_ELEMENTS:
+ break;
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ return IterateElementsSlow(isolate, receiver, length, visitor);
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ // |array| is guaranteed to be an array or typed array.
+ UNREACHABLE();
+ break;
}
visitor->increase_index_offset(length);
return true;
@@ -1216,7 +1197,6 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
bool HasConcatSpreadableModifier(Isolate* isolate, Handle<JSArray> obj) {
- if (!FLAG_harmony_concat_spreadable) return false;
Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
Maybe<bool> maybe = JSReceiver::HasProperty(obj, key);
return maybe.FromMaybe(false);
@@ -1226,21 +1206,22 @@ bool HasConcatSpreadableModifier(Isolate* isolate, Handle<JSArray> obj) {
static Maybe<bool> IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
HandleScope handle_scope(isolate);
if (!obj->IsJSReceiver()) return Just(false);
- if (FLAG_harmony_concat_spreadable) {
- Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
- Handle<Object> value;
- MaybeHandle<Object> maybeValue =
- i::Runtime::GetObjectProperty(isolate, obj, key);
- if (!maybeValue.ToHandle(&value)) return Nothing<bool>();
- if (!value->IsUndefined()) return Just(value->BooleanValue());
- }
+ Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
+ Handle<Object> value;
+ MaybeHandle<Object> maybeValue =
+ i::Runtime::GetObjectProperty(isolate, obj, key);
+ if (!maybeValue.ToHandle(&value)) return Nothing<bool>();
+ if (!value->IsUndefined()) return Just(value->BooleanValue());
return Object::IsArray(obj);
}
-Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
+Object* Slow_ArrayConcat(Arguments* args, Handle<Object> species,
+ Isolate* isolate) {
int argument_count = args->length();
+ bool is_array_species = *species == isolate->context()->array_function();
+
// Pass 1: estimate the length and number of elements of the result.
// The actual length can be larger if any of the arguments have getters
// that mutate other arguments (but will otherwise be precise).
@@ -1288,7 +1269,8 @@ Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
// If estimated number of elements is more than half of length, a
// fixed array (fast case) is more time and space-efficient than a
// dictionary.
- bool fast_case = (estimate_nof_elements * 2) >= estimate_result_length;
+ bool fast_case =
+ is_array_species && (estimate_nof_elements * 2) >= estimate_result_length;
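
The heuristic reads: use flat (fast) storage when the estimated element count is at least half the estimated result length, i.e. density of 50% or more; below that a dictionary wins on space. The patch additionally gates this on the species still being the stock Array constructor. Worked numbers, as a sketch:

#include <cstdio>

// Density heuristic from above: flat backing store iff elements*2 >= length.
static bool UseFastCase(unsigned nof_elements, unsigned result_length) {
  return nof_elements * 2 >= result_length;
}

int main() {
  std::printf("%d\n", UseFastCase(600, 1000));  // 60% dense -> fast array
  std::printf("%d\n", UseFastCase(100, 1000));  // 10% dense -> dictionary
}
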
if (fast_case && kind == FAST_DOUBLE_ELEMENTS) {
Handle<FixedArrayBase> storage =
@@ -1350,6 +1332,7 @@ Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS:
case DICTIONARY_ELEMENTS:
+ case NO_ELEMENTS:
DCHECK_EQ(0u, length);
break;
default:
@@ -1365,18 +1348,25 @@ Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
// In case of failure, fall through.
}
- Handle<FixedArray> storage;
+ Handle<Object> storage;
if (fast_case) {
// The backing storage array must have non-existing elements to preserve
// holes across concat operations.
storage =
isolate->factory()->NewFixedArrayWithHoles(estimate_result_length);
- } else {
+ } else if (is_array_species) {
// TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
uint32_t at_least_space_for =
estimate_nof_elements + (estimate_nof_elements >> 2);
- storage = Handle<FixedArray>::cast(
- SeededNumberDictionary::New(isolate, at_least_space_for));
+ storage = SeededNumberDictionary::New(isolate, at_least_space_for);
+ } else {
+ DCHECK(species->IsConstructor());
+ Handle<Object> length(Smi::FromInt(0), isolate);
+ Handle<Object> storage_object;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, storage_object,
+ Execution::New(isolate, species, species, 1, &length));
+ storage = storage_object;
}
ArrayConcatVisitor visitor(isolate, storage, fast_case);
@@ -1400,7 +1390,12 @@ Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidArrayLength));
}
- return *visitor.ToArray();
+
+ if (is_array_species) {
+ return *visitor.ToArray();
+ } else {
+ return *visitor.storage_jsreceiver();
+ }
}
@@ -1446,26 +1441,37 @@ MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate, Arguments* args) {
} // namespace
+
// ES6 22.1.3.1 Array.prototype.concat
BUILTIN(ArrayConcat) {
HandleScope scope(isolate);
- Handle<Object> receiver;
- if (!Object::ToObject(isolate, handle(args[0], isolate))
- .ToHandle(&receiver)) {
+ Handle<Object> receiver = args.receiver();
+ // TODO(bmeurer): Do we really care about the exact exception message here?
+ if (receiver->IsNull() || receiver->IsUndefined()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
isolate->factory()->NewStringFromAsciiChecked(
"Array.prototype.concat")));
}
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, receiver, Object::ToObject(isolate, args.receiver()));
args[0] = *receiver;
Handle<JSArray> result_array;
- if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
- return *result_array;
+
+ // Reading @@species happens before anything else with a side effect, so
+ // we can do it here to determine whether to take the fast path.
+ Handle<Object> species;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, species, Object::ArraySpeciesConstructor(isolate, receiver));
+ if (*species == isolate->context()->native_context()->array_function()) {
+ if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
+ return *result_array;
+ }
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
}
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
- return Slow_ArrayConcat(&args, isolate);
+ return Slow_ArrayConcat(&args, species, isolate);
}
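
ArrayConcat now samples @@species exactly once, up front, before anything with a side effect runs, and takes the fast path only when it is still the builtin Array constructor. The ordering matters: a user-defined getter cannot redirect the operation halfway through. A sketch of this "read the hook once, then branch" shape, with stand-in names:

#include <cstdio>

using Constructor = int (*)(int);
static int DefaultArrayCtor(int n) { return n; }

// The hook is read once before any work with side effects, so a getter
// cannot change the chosen path mid-operation.
static int ConcatLike(Constructor species, int a, int b) {
  if (species == &DefaultArrayCtor) {
    return a + b;          // fast path: build the result directly
  }
  return species(a + b);   // generic path: defer to the constructor
}

static int CountingCtor(int n) { std::printf("ctor(%d)\n", n); return n; }

int main() {
  std::printf("%d\n", ConcatLike(&DefaultArrayCtor, 2, 3));
  std::printf("%d\n", ConcatLike(&CountingCtor, 2, 3));
}
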
@@ -1479,6 +1485,77 @@ BUILTIN(ArrayIsArray) {
return *isolate->factory()->ToBoolean(result.FromJust());
}
+namespace {
+
+MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> to,
+ Handle<Object> next_source) {
+ // Non-empty strings are the only non-JSReceivers that need to be handled
+ // explicitly by Object.assign.
+ if (!next_source->IsJSReceiver()) {
+ return Just(!next_source->IsString() ||
+ String::cast(*next_source)->length() == 0);
+ }
+
+ Isolate* isolate = to->GetIsolate();
+ Handle<Map> map(JSReceiver::cast(*next_source)->map(), isolate);
+
+ if (!map->IsJSObjectMap()) return Just(false);
+ if (!map->OnlyHasSimpleProperties()) return Just(false);
+
+ Handle<JSObject> from = Handle<JSObject>::cast(next_source);
+ if (from->elements() != isolate->heap()->empty_fixed_array()) {
+ return Just(false);
+ }
+
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ int length = map->NumberOfOwnDescriptors();
+
+ bool stable = true;
+
+ for (int i = 0; i < length; i++) {
+ Handle<Name> next_key(descriptors->GetKey(i), isolate);
+ Handle<Object> prop_value;
+ // Directly decode from the descriptor array if |from| did not change shape.
+ if (stable) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (!details.IsEnumerable()) continue;
+ if (details.kind() == kData) {
+ if (details.location() == kDescriptor) {
+ prop_value = handle(descriptors->GetValue(i), isolate);
+ } else {
+ Representation representation = details.representation();
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ prop_value = JSObject::FastPropertyAt(from, representation, index);
+ }
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, prop_value,
+ Object::GetProperty(from, next_key),
+ Nothing<bool>());
+ stable = from->map() == *map;
+ }
+ } else {
+ // If the map did change, do a slower lookup. We are still guaranteed that
+ // the object has a simple shape, and that the key is a name.
+ LookupIterator it(from, next_key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (!it.IsFound()) continue;
+ DCHECK(it.state() == LookupIterator::DATA ||
+ it.state() == LookupIterator::ACCESSOR);
+ if (!it.IsEnumerable()) continue;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
+ }
+ LookupIterator it(to, next_key);
+ bool call_to_js = it.IsFound() && it.state() != LookupIterator::DATA;
+ Maybe<bool> result = Object::SetProperty(
+ &it, prop_value, STRICT, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
+ if (result.IsNothing()) return result;
+ if (stable && call_to_js) stable = from->map() == *map;
+ }
+
+ return Just(true);
+}
+
+} // namespace
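
FastAssign walks the source's descriptor array directly while the source keeps its original map, and downgrades to a per-key lookup the moment a getter (or a store on the target) changes that shape. The control pattern — iterate a cached layout while a "stable" flag holds, degrade gracefully otherwise — in standalone form with hypothetical types:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct Object {
  const void* shape;                              // stand-in for the map pointer
  std::vector<std::pair<std::string, int>> fast;  // descriptor-order layout
  std::map<std::string, int> slow;                // fallback lookup
};

static void Assign(Object* to, Object* from) {
  const void* shape = from->shape;
  bool stable = true;
  for (size_t i = 0; i < from->fast.size(); i++) {
    std::string key = from->fast[i].first;
    int value;
    if (stable) {
      value = from->fast[i].second;    // direct indexed read, no lookup
    } else {
      auto it = from->slow.find(key);  // shape changed: slower, safe path
      if (it == from->slow.end()) continue;
      value = it->second;
    }
    to->slow[key] = value;                     // a real setter could mutate |from|...
    stable = stable && from->shape == shape;   // ...so re-check stability
  }
}

int main() {
  Object from{&from, {{"a", 1}, {"b", 2}}, {{"a", 1}, {"b", 2}}};
  Object to{&to, {}, {}};
  Assign(&to, &from);
  std::printf("a=%d b=%d\n", to.slow["a"], to.slow["b"]);
}
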
// ES6 19.1.2.1 Object.assign
BUILTIN(ObjectAssign) {
@@ -1487,7 +1564,7 @@ BUILTIN(ObjectAssign) {
// 1. Let to be ? ToObject(target).
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, target,
- Execution::ToObject(isolate, target));
+ Object::ToObject(isolate, target));
Handle<JSReceiver> to = Handle<JSReceiver>::cast(target);
// 2. If only one argument was passed, return to.
if (args.length() == 2) return *to;
@@ -1496,17 +1573,20 @@ BUILTIN(ObjectAssign) {
// 4. For each element nextSource of sources, in ascending index order,
for (int i = 2; i < args.length(); ++i) {
Handle<Object> next_source = args.at<Object>(i);
+ Maybe<bool> fast_assign = FastAssign(to, next_source);
+ if (fast_assign.IsNothing()) return isolate->heap()->exception();
+ if (fast_assign.FromJust()) continue;
// 4a. If nextSource is undefined or null, let keys be an empty List.
- if (next_source->IsUndefined() || next_source->IsNull()) continue;
// 4b. Else,
// 4b i. Let from be ToObject(nextSource).
+ // Only non-empty strings and JSReceivers have enumerable properties.
Handle<JSReceiver> from =
Object::ToObject(isolate, next_source).ToHandleChecked();
// 4b ii. Let keys be ? from.[[OwnPropertyKeys]]().
Handle<FixedArray> keys;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, keys, JSReceiver::GetKeys(from, JSReceiver::OWN_ONLY,
- ALL_PROPERTIES, KEEP_NUMBERS));
+ isolate, keys,
+ JSReceiver::GetKeys(from, OWN_ONLY, ALL_PROPERTIES, KEEP_NUMBERS));
// 4c. Repeat for each element nextKey of keys in List order,
for (int j = 0; j < keys->length(); ++j) {
Handle<Object> next_key(keys->get(j), isolate);
@@ -1521,7 +1601,7 @@ BUILTIN(ObjectAssign) {
Handle<Object> prop_value;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, prop_value,
- Runtime::GetObjectProperty(isolate, from, next_key, STRICT));
+ Runtime::GetObjectProperty(isolate, from, next_key));
// 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
Handle<Object> status;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -1581,6 +1661,72 @@ BUILTIN(ObjectFreeze) {
}
+// ES6 section 19.1.2.6 Object.getOwnPropertyDescriptor ( O, P )
+BUILTIN(ObjectGetOwnPropertyDescriptor) {
+ HandleScope scope(isolate);
+ // 1. Let obj be ? ToObject(O).
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+ // 2. Let key be ? ToPropertyKey(P).
+ Handle<Object> property = args.atOrUndefined(isolate, 2);
+ Handle<Name> key;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+ Object::ToName(isolate, property));
+ // 3. Let desc be ? obj.[[GetOwnProperty]](key).
+ PropertyDescriptor desc;
+ Maybe<bool> found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, receiver, key, &desc);
+ MAYBE_RETURN(found, isolate->heap()->exception());
+ // 4. Return FromPropertyDescriptor(desc).
+ if (!found.FromJust()) return isolate->heap()->undefined_value();
+ return *desc.ToObject(isolate);
+}
+
+
+namespace {
+
+Object* GetOwnPropertyKeys(Isolate* isolate,
+ BuiltinArguments<BuiltinExtraArguments::kNone> args,
+ PropertyFilter filter) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys,
+ JSReceiver::GetKeys(receiver, OWN_ONLY, filter, CONVERT_TO_STRING));
+ return *isolate->factory()->NewJSArrayWithElements(keys);
+}
+
+} // namespace
+
+
+// ES6 section 19.1.2.7 Object.getOwnPropertyNames ( O )
+BUILTIN(ObjectGetOwnPropertyNames) {
+ return GetOwnPropertyKeys(isolate, args, SKIP_SYMBOLS);
+}
+
+
+// ES6 section 19.1.2.8 Object.getOwnPropertySymbols ( O )
+BUILTIN(ObjectGetOwnPropertySymbols) {
+ return GetOwnPropertyKeys(isolate, args, SKIP_STRINGS);
+}
+
+
+// ES#sec-object.is Object.is ( value1, value2 )
+BUILTIN(ObjectIs) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<Object> value1 = args.at<Object>(1);
+ Handle<Object> value2 = args.at<Object>(2);
+ return isolate->heap()->ToBoolean(value1->SameValue(*value2));
+}
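
Object.is uses SameValue, which differs from == only for NaN (equal to itself) and signed zeros (distinct). The numeric core of that comparison, sketched under IEEE-754 assumptions:

#include <cmath>
#include <cstdio>

// SameValue on doubles: NaN matches NaN; +0 and -0 are distinguished.
static bool SameValueNumber(double a, double b) {
  if (std::isnan(a) && std::isnan(b)) return true;
  if (a == 0 && b == 0) return std::signbit(a) == std::signbit(b);
  return a == b;
}

int main() {
  std::printf("%d\n", SameValueNumber(NAN, NAN));   // 1, unlike ==
  std::printf("%d\n", SameValueNumber(0.0, -0.0));  // 0, unlike ==
  std::printf("%d\n", SameValueNumber(1.5, 1.5));   // 1
}
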
+
+
// ES6 section 19.1.2.11 Object.isExtensible ( O )
BUILTIN(ObjectIsExtensible) {
HandleScope scope(isolate);
@@ -1626,9 +1772,9 @@ BUILTIN(ObjectKeys) {
Handle<Object> object = args.atOrUndefined(isolate, 1);
Handle<JSReceiver> receiver;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Execution::ToObject(isolate, object));
- Handle<FixedArray> keys;
+ Object::ToObject(isolate, object));
+ Handle<FixedArray> keys;
int enum_length = receiver->map()->EnumLength();
if (enum_length != kInvalidEnumCacheSentinel &&
JSObject::cast(*receiver)->elements() ==
@@ -1636,29 +1782,87 @@ BUILTIN(ObjectKeys) {
DCHECK(receiver->IsJSObject());
DCHECK(!JSObject::cast(*receiver)->HasNamedInterceptor());
DCHECK(!JSObject::cast(*receiver)->IsAccessCheckNeeded());
- DCHECK(!HeapObject::cast(receiver->map()->prototype())
- ->map()
- ->is_hidden_prototype());
+ DCHECK(!receiver->map()->has_hidden_prototype());
DCHECK(JSObject::cast(*receiver)->HasFastProperties());
if (enum_length == 0) {
keys = isolate->factory()->empty_fixed_array();
} else {
Handle<FixedArray> cache(
receiver->map()->instance_descriptors()->GetEnumCache());
- keys = isolate->factory()->NewFixedArray(enum_length);
- for (int i = 0; i < enum_length; i++) {
- keys->set(i, cache->get(i));
- }
+ keys = isolate->factory()->CopyFixedArrayUpTo(cache, enum_length);
}
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, keys,
- JSReceiver::GetKeys(receiver, JSReceiver::OWN_ONLY, ENUMERABLE_STRINGS,
+ JSReceiver::GetKeys(receiver, OWN_ONLY, ENUMERABLE_STRINGS,
CONVERT_TO_STRING));
}
return *isolate->factory()->NewJSArrayWithElements(keys, FAST_ELEMENTS);
}
+BUILTIN(ObjectValues) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+ Handle<FixedArray> values;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, values, JSReceiver::GetOwnValues(receiver, ENUMERABLE_STRINGS));
+ return *isolate->factory()->NewJSArrayWithElements(values);
+}
+
+
+BUILTIN(ObjectEntries) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+ Handle<FixedArray> entries;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, entries,
+ JSReceiver::GetOwnEntries(receiver, ENUMERABLE_STRINGS));
+ return *isolate->factory()->NewJSArrayWithElements(entries);
+}
+
+BUILTIN(ObjectGetOwnPropertyDescriptors) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+ Handle<Object> undefined = isolate->factory()->undefined_value();
+
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys, JSReceiver::GetKeys(receiver, OWN_ONLY, ALL_PROPERTIES,
+ CONVERT_TO_STRING));
+
+ Handle<Object> descriptors =
+ isolate->factory()->NewJSObject(isolate->object_function());
+
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Name> key = Handle<Name>::cast(FixedArray::get(*keys, i, isolate));
+ PropertyDescriptor descriptor;
+ Maybe<bool> did_get_descriptor = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, receiver, key, &descriptor);
+ MAYBE_RETURN(did_get_descriptor, isolate->heap()->exception());
+
+ Handle<Object> from_descriptor = did_get_descriptor.FromJust()
+ ? descriptor.ToObject(isolate)
+ : undefined;
+
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, descriptors, key, LookupIterator::OWN);
+ Maybe<bool> success = JSReceiver::CreateDataProperty(&it, from_descriptor,
+ Object::DONT_THROW);
+ CHECK(success.FromJust());
+ }
+
+ return *descriptors;
+}
// ES6 section 19.1.2.15 Object.preventExtensions ( O )
BUILTIN(ObjectPreventExtensions) {
@@ -1737,7 +1941,7 @@ MaybeHandle<JSFunction> CompileString(Handle<Context> context,
BUILTIN(GlobalEval) {
HandleScope scope(isolate);
Handle<Object> x = args.atOrUndefined(isolate, 1);
- Handle<JSFunction> target = args.target();
+ Handle<JSFunction> target = args.target<JSFunction>();
Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
if (!x->IsString()) return *x;
Handle<JSFunction> function;
@@ -1831,7 +2035,7 @@ BUILTIN(ReflectGet) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result, Object::GetPropertyOrElement(
- Handle<JSReceiver>::cast(target), name, receiver));
+ receiver, name, Handle<JSReceiver>::cast(target)));
return *result;
}
@@ -1877,8 +2081,9 @@ BUILTIN(ReflectGetPrototypeOf) {
"Reflect.getPrototypeOf")));
}
Handle<Object> prototype;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
- Object::GetPrototype(isolate, target));
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(target);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, prototype, JSReceiver::GetPrototype(isolate, receiver));
return *prototype;
}
@@ -1943,9 +2148,9 @@ BUILTIN(ReflectOwnKeys) {
Handle<FixedArray> keys;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, keys, JSReceiver::GetKeys(Handle<JSReceiver>::cast(target),
- JSReceiver::OWN_ONLY, ALL_PROPERTIES,
- CONVERT_TO_STRING));
+ isolate, keys,
+ JSReceiver::GetKeys(Handle<JSReceiver>::cast(target), OWN_ONLY,
+ ALL_PROPERTIES, CONVERT_TO_STRING));
return *isolate->factory()->NewJSArrayWithElements(keys);
}
@@ -2025,6 +2230,180 @@ BUILTIN(ReflectSetPrototypeOf) {
// -----------------------------------------------------------------------------
+// ES6 section 19.3 Boolean Objects
+
+
+// ES6 section 19.3.1.1 Boolean ( value ) for the [[Call]] case.
+BUILTIN(BooleanConstructor) {
+ HandleScope scope(isolate);
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ return isolate->heap()->ToBoolean(value->BooleanValue());
+}
+
+
+// ES6 section 19.3.1.1 Boolean ( value ) for the [[Construct]] case.
+BUILTIN(BooleanConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ Handle<Object> value = args.atOrUndefined(isolate, 1);
+ Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ DCHECK(*target == target->native_context()->boolean_function());
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+ Handle<JSValue>::cast(result)->set_value(
+ isolate->heap()->ToBoolean(value->BooleanValue()));
+ return *result;
+}
+
+
+// ES6 section 19.3.3.2 Boolean.prototype.toString ( )
+BUILTIN(BooleanPrototypeToString) {
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ if (receiver->IsJSValue()) {
+ receiver = handle(Handle<JSValue>::cast(receiver)->value(), isolate);
+ }
+ if (!receiver->IsBoolean()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotGeneric,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Boolean.prototype.toString")));
+ }
+ return Handle<Oddball>::cast(receiver)->to_string();
+}
+
+
+// ES6 section 19.3.3.3 Boolean.prototype.valueOf ( )
+BUILTIN(BooleanPrototypeValueOf) {
+ HandleScope scope(isolate);
+ Handle<Object> receiver = args.receiver();
+ if (receiver->IsJSValue()) {
+ receiver = handle(Handle<JSValue>::cast(receiver)->value(), isolate);
+ }
+ if (!receiver->IsBoolean()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotGeneric,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Boolean.prototype.valueOf")));
+ }
+ return *receiver;
+}
+
+
+// -----------------------------------------------------------------------------
+// ES6 section 24.2 DataView Objects
+
+
+// ES6 section 24.2.2 The DataView Constructor for the [[Call]] case.
+BUILTIN(DataViewConstructor) {
+ HandleScope scope(isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kConstructorNotFunction,
+ isolate->factory()->NewStringFromAsciiChecked("DataView")));
+}
+
+
+// ES6 section 24.2.2 The DataView Constructor for the [[Construct]] case.
+BUILTIN(DataViewConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ Handle<JSFunction> target = args.target<JSFunction>();
+ Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
+ Handle<Object> buffer = args.atOrUndefined(isolate, 1);
+ Handle<Object> byte_offset = args.atOrUndefined(isolate, 2);
+ Handle<Object> byte_length = args.atOrUndefined(isolate, 3);
+
+ // 2. If Type(buffer) is not Object, throw a TypeError exception.
+ // 3. If buffer does not have an [[ArrayBufferData]] internal slot, throw a
+ // TypeError exception.
+ if (!buffer->IsJSArrayBuffer()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDataViewNotArrayBuffer));
+ }
+ Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(buffer);
+
+ // 4. Let numberOffset be ? ToNumber(byteOffset).
+ Handle<Object> number_offset;
+ if (byte_offset->IsUndefined()) {
+ // We intentionally violate the specification at this point to allow
+ // for new DataView(buffer) invocations to be equivalent to the full
+ // new DataView(buffer, 0) invocation.
+ number_offset = handle(Smi::FromInt(0), isolate);
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_offset,
+ Object::ToNumber(byte_offset));
+ }
+
+ // 5. Let offset be ToInteger(numberOffset).
+ Handle<Object> offset;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, offset,
+ Object::ToInteger(isolate, number_offset));
+
+ // 6. If numberOffset ≠ offset or offset < 0, throw a RangeError exception.
+ if (number_offset->Number() != offset->Number() || offset->Number() < 0.0) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidDataViewOffset));
+ }
+
+ // 7. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ // We currently violate the specification at this point.
+
+ // 8. Let bufferByteLength be the value of buffer's [[ArrayBufferByteLength]]
+ // internal slot.
+ double const buffer_byte_length = array_buffer->byte_length()->Number();
+
+ // 9. If offset > bufferByteLength, throw a RangeError exception
+ if (offset->Number() > buffer_byte_length) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidDataViewOffset));
+ }
+
+ Handle<Object> view_byte_length;
+ if (byte_length->IsUndefined()) {
+ // 10. If byteLength is undefined, then
+ // a. Let viewByteLength be bufferByteLength - offset.
+ view_byte_length =
+ isolate->factory()->NewNumber(buffer_byte_length - offset->Number());
+ } else {
+ // 11. Else,
+ // a. Let viewByteLength be ? ToLength(byteLength).
+ // b. If offset+viewByteLength > bufferByteLength, throw a RangeError
+ // exception
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, view_byte_length, Object::ToLength(isolate, byte_length));
+ if (offset->Number() + view_byte_length->Number() > buffer_byte_length) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidDataViewLength));
+ }
+ }
+
+ // 12. Let O be ? OrdinaryCreateFromConstructor(NewTarget,
+ // "%DataViewPrototype%", Ā«[[DataView]], [[ViewedArrayBuffer]],
+ // [[ByteLength]], [[ByteOffset]]Ā»).
+ // 13. Set O's [[DataView]] internal slot to true.
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
+ for (int i = 0; i < ArrayBufferView::kInternalFieldCount; ++i) {
+ Handle<JSDataView>::cast(result)->SetInternalField(i, Smi::FromInt(0));
+ }
+
+ // 14. Set O's [[ViewedArrayBuffer]] internal slot to buffer.
+ Handle<JSDataView>::cast(result)->set_buffer(*array_buffer);
+
+ // 15. Set O's [[ByteLength]] internal slot to viewByteLength.
+ Handle<JSDataView>::cast(result)->set_byte_length(*view_byte_length);
+
+ // 16. Set O's [[ByteOffset]] internal slot to offset.
+ Handle<JSDataView>::cast(result)->set_byte_offset(*offset);
+
+ // 17. Return O.
+ return *result;
+}
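
The constructor's range checks boil down to three comparisons: a fractional or negative offset fails the ToInteger equality test (step 6), the offset must not pass the end of the buffer (step 9), and offset plus an explicit length must fit as well (step 11b). The same arithmetic on plain doubles, as a sketch:

#include <cmath>
#include <cstdio>

// DataView range validation: ToInteger truncates toward zero, so a
// fractional offset fails the numberOffset == offset comparison.
static bool ValidDataViewRange(double offset, double length,
                               double buffer_bytes) {
  if (offset != std::trunc(offset) || offset < 0) return false;  // step 6
  if (offset > buffer_bytes) return false;                       // step 9
  if (offset + length > buffer_bytes) return false;              // step 11b
  return true;
}

int main() {
  std::printf("%d\n", ValidDataViewRange(0, 8, 8));    // 1: exact fit
  std::printf("%d\n", ValidDataViewRange(1.5, 2, 8));  // 0: fractional offset
  std::printf("%d\n", ValidDataViewRange(4, 8, 8));    // 0: overruns buffer
}
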
+
+
+// -----------------------------------------------------------------------------
// ES6 section 20.3 Date Objects
@@ -2237,7 +2616,7 @@ BUILTIN(DateConstructor) {
BUILTIN(DateConstructor_ConstructStub) {
HandleScope scope(isolate);
int const argc = args.length() - 1;
- Handle<JSFunction> target = args.target();
+ Handle<JSFunction> target = args.target<JSFunction>();
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
double time_val;
if (argc == 0) {
@@ -3133,7 +3512,7 @@ MaybeHandle<JSFunction> CreateDynamicFunction(
// Compile the string in the constructor and not in a helper so that errors
// appear to come from here.
- Handle<JSFunction> target = args.target();
+ Handle<JSFunction> target = args.target<JSFunction>();
Handle<JSObject> target_global_proxy(target->global_proxy(), isolate);
Handle<JSFunction> function;
{
@@ -3280,6 +3659,45 @@ BUILTIN(GeneratorFunctionConstructor) {
return *result;
}
+// ES6 section 19.2.3.6 Function.prototype[@@hasInstance](V)
+BUILTIN(FunctionHasInstance) {
+ HandleScope scope(isolate);
+ Handle<Object> callable = args.receiver();
+ Handle<Object> object = args.atOrUndefined(isolate, 1);
+
+ // {callable} must have a [[Call]] internal method.
+ if (!callable->IsCallable()) {
+ return isolate->heap()->false_value();
+ }
+ // If {object} is not a receiver, return false.
+ if (!object->IsJSReceiver()) {
+ return isolate->heap()->false_value();
+ }
+ // Check if {callable} is bound, if so, get [[BoundTargetFunction]] from it
+ // and use that instead of {callable}.
+ while (callable->IsJSBoundFunction()) {
+ callable =
+ handle(Handle<JSBoundFunction>::cast(callable)->bound_target_function(),
+ isolate);
+ }
+ DCHECK(callable->IsCallable());
+ // Get the "prototype" of {callable}; raise an error if it's not a receiver.
+ Handle<Object> prototype;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, prototype,
+ Object::GetProperty(callable, isolate->factory()->prototype_string()));
+ if (!prototype->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
+ }
+ // Return whether or not {prototype} is in the prototype chain of {object}.
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+ Maybe<bool> result =
+ JSReceiver::HasInPrototypeChain(isolate, receiver, prototype);
+ MAYBE_RETURN(result, isolate->heap()->exception());
+ return isolate->heap()->ToBoolean(result.FromJust());
+}
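
The builtin unwraps bound functions, reads "prototype" from the callable, then asks whether that object sits anywhere on the receiver's prototype chain. The chain walk itself, over a toy linked structure:

#include <cstdio>

struct Obj { const Obj* proto; };

// HasInPrototypeChain: follow proto links from |receiver| looking for
// |prototype|; a null link terminates the chain.
static bool HasInPrototypeChain(const Obj* receiver, const Obj* prototype) {
  for (const Obj* p = receiver->proto; p != nullptr; p = p->proto) {
    if (p == prototype) return true;
  }
  return false;
}

int main() {
  Obj object_proto{nullptr};
  Obj fn_proto{&object_proto};
  Obj instance{&fn_proto};
  std::printf("%d\n", HasInPrototypeChain(&instance, &fn_proto));      // 1
  std::printf("%d\n", HasInPrototypeChain(&instance, &object_proto));  // 1
  Obj unrelated{nullptr};
  std::printf("%d\n", HasInPrototypeChain(&instance, &unrelated));     // 0
}
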
// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Call]] case.
BUILTIN(SymbolConstructor) {
@@ -3318,7 +3736,7 @@ BUILTIN(ObjectProtoToString) {
// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Call]] case.
BUILTIN(ArrayBufferConstructor) {
HandleScope scope(isolate);
- Handle<JSFunction> target = args.target();
+ Handle<JSFunction> target = args.target<JSFunction>();
DCHECK(*target == target->native_context()->array_buffer_fun() ||
*target == target->native_context()->shared_array_buffer_fun());
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -3330,7 +3748,7 @@ BUILTIN(ArrayBufferConstructor) {
// ES6 section 24.1.2.1 ArrayBuffer ( length ) for the [[Construct]] case.
BUILTIN(ArrayBufferConstructor_ConstructStub) {
HandleScope scope(isolate);
- Handle<JSFunction> target = args.target();
+ Handle<JSFunction> target = args.target<JSFunction>();
Handle<JSReceiver> new_target = Handle<JSReceiver>::cast(args.new_target());
Handle<Object> length = args.atOrUndefined(isolate, 1);
DCHECK(*target == target->native_context()->array_buffer_fun() ||
@@ -3342,22 +3760,20 @@ BUILTIN(ArrayBufferConstructor_ConstructStub) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
- Handle<Map> initial_map;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, initial_map,
- JSFunction::GetDerivedMap(isolate, target, new_target));
+ Handle<JSObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSObject::New(target, new_target));
size_t byte_length;
if (!TryNumberToSize(isolate, *number_length, &byte_length)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
- Handle<JSArrayBuffer> result = Handle<JSArrayBuffer>::cast(
- isolate->factory()->NewJSObjectFromMap(initial_map));
SharedFlag shared_flag =
(*target == target->native_context()->array_buffer_fun())
? SharedFlag::kNotShared
: SharedFlag::kShared;
- if (!JSArrayBuffer::SetupAllocatingData(result, isolate, byte_length, true,
+ if (!JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer>::cast(result),
+ isolate, byte_length, true,
shared_flag)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
@@ -3427,23 +3843,37 @@ template <bool is_construct>
MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
Isolate* isolate, BuiltinArguments<BuiltinExtraArguments::kTarget> args) {
HandleScope scope(isolate);
- Handle<JSFunction> function = args.target();
- DCHECK(args.receiver()->IsJSReceiver());
+ Handle<HeapObject> function = args.target<HeapObject>();
+ Handle<JSReceiver> receiver;
// TODO(ishell): turn this back to a DCHECK.
- CHECK(function->shared()->IsApiFunction());
+ CHECK(function->IsFunctionTemplateInfo() ||
+ Handle<JSFunction>::cast(function)->shared()->IsApiFunction());
- Handle<FunctionTemplateInfo> fun_data(
- function->shared()->get_api_func_data(), isolate);
+ Handle<FunctionTemplateInfo> fun_data =
+ function->IsFunctionTemplateInfo()
+ ? Handle<FunctionTemplateInfo>::cast(function)
+ : handle(JSFunction::cast(*function)->shared()->get_api_func_data());
if (is_construct) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, fun_data,
- ApiNatives::ConfigureInstance(isolate, fun_data,
- Handle<JSObject>::cast(args.receiver())),
- Object);
+ DCHECK(args.receiver()->IsTheHole());
+ if (fun_data->instance_template()->IsUndefined()) {
+ v8::Local<ObjectTemplate> templ =
+ ObjectTemplate::New(reinterpret_cast<v8::Isolate*>(isolate),
+ ToApiHandle<v8::FunctionTemplate>(fun_data));
+ fun_data->set_instance_template(*Utils::OpenHandle(*templ));
+ }
+ Handle<ObjectTemplateInfo> instance_template(
+ ObjectTemplateInfo::cast(fun_data->instance_template()), isolate);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
+ ApiNatives::InstantiateObject(instance_template),
+ Object);
+ args[0] = *receiver;
+ DCHECK_EQ(*receiver, *args.receiver());
+ } else {
+ DCHECK(args.receiver()->IsJSReceiver());
+ receiver = args.at<JSReceiver>(0);
}
if (!is_construct && !fun_data->accept_any_receiver()) {
- Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
if (receiver->IsJSObject() && receiver->IsAccessCheckNeeded()) {
Handle<JSObject> js_receiver = Handle<JSObject>::cast(receiver);
if (!isolate->MayAccess(handle(isolate->context()), js_receiver)) {
@@ -3453,7 +3883,7 @@ MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
}
}
- Object* raw_holder = fun_data->GetCompatibleReceiver(isolate, args[0]);
+ Object* raw_holder = fun_data->GetCompatibleReceiver(isolate, *receiver);
if (raw_holder->IsNull()) {
// This function cannot be called with the given receiver. Abort!
@@ -3497,7 +3927,7 @@ MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
}
}
- return scope.CloseAndEscape(args.receiver());
+ return scope.CloseAndEscape(receiver);
}
} // namespace
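HandleApiCallHelper above is a single body instantiated twice at compile time via template <bool is_construct>: the [[Construct]] instantiation materializes a fresh receiver from the instance template, while the [[Call]] instantiation uses the receiver it was given. A standalone model of that dispatch pattern (toy types, not the real signatures):

    #include <iostream>

    template <bool is_construct>
    int HandleApiCallHelper(int receiver) {
      if (is_construct) {
        receiver = 42;  // [[Construct]]: materialize a fresh receiver
      }
      return receiver;  // [[Call]]: use the receiver that was passed in
    }

    int main() {
      std::cout << HandleApiCallHelper<true>(0) << "\n";   // prints 42
      std::cout << HandleApiCallHelper<false>(7) << "\n";  // prints 7
    }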
@@ -3520,34 +3950,83 @@ BUILTIN(HandleApiCallConstruct) {
return *result;
}
-
-Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode) {
- switch (mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return CallFunction_ReceiverIsNullOrUndefined();
- case ConvertReceiverMode::kNotNullOrUndefined:
- return CallFunction_ReceiverIsNotNullOrUndefined();
- case ConvertReceiverMode::kAny:
- return CallFunction_ReceiverIsAny();
+Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
+ switch (tail_call_mode) {
+ case TailCallMode::kDisallow:
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return CallFunction_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return CallFunction_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return CallFunction_ReceiverIsAny();
+ }
+ break;
+ case TailCallMode::kAllow:
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return TailCallFunction_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return TailCallFunction_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return TailCallFunction_ReceiverIsAny();
+ }
+ break;
}
UNREACHABLE();
return Handle<Code>::null();
}
+Handle<Code> Builtins::Call(ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
+ switch (tail_call_mode) {
+ case TailCallMode::kDisallow:
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return Call_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return Call_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return Call_ReceiverIsAny();
+ }
+ break;
+ case TailCallMode::kAllow:
+ switch (mode) {
+ case ConvertReceiverMode::kNullOrUndefined:
+ return TailCall_ReceiverIsNullOrUndefined();
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return TailCall_ReceiverIsNotNullOrUndefined();
+ case ConvertReceiverMode::kAny:
+ return TailCall_ReceiverIsAny();
+ }
+ break;
+ }
+ UNREACHABLE();
+ return Handle<Code>::null();
+}
-Handle<Code> Builtins::Call(ConvertReceiverMode mode) {
- switch (mode) {
- case ConvertReceiverMode::kNullOrUndefined:
- return Call_ReceiverIsNullOrUndefined();
- case ConvertReceiverMode::kNotNullOrUndefined:
- return Call_ReceiverIsNotNullOrUndefined();
- case ConvertReceiverMode::kAny:
- return Call_ReceiverIsAny();
+Handle<Code> Builtins::CallBoundFunction(TailCallMode tail_call_mode) {
+ switch (tail_call_mode) {
+ case TailCallMode::kDisallow:
+ return CallBoundFunction();
+ case TailCallMode::kAllow:
+ return TailCallBoundFunction();
}
UNREACHABLE();
return Handle<Code>::null();
}
+Handle<Code> Builtins::InterpreterPushArgsAndCall(TailCallMode tail_call_mode) {
+ switch (tail_call_mode) {
+ case TailCallMode::kDisallow:
+ return InterpreterPushArgsAndCall();
+ case TailCallMode::kAllow:
+ return InterpreterPushArgsAndTailCall();
+ }
+ UNREACHABLE();
+ return Handle<Code>::null();
+}
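Builtins::CallFunction, Builtins::Call, Builtins::CallBoundFunction and Builtins::InterpreterPushArgsAndCall above all follow the same selection scheme: the (ConvertReceiverMode, TailCallMode) pair statically picks one pre-generated stub. A standalone model of the selection, returning stub names instead of Handle<Code>:

    #include <cstdio>

    enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };
    enum class TailCallMode { kDisallow, kAllow };

    const char* Call(ConvertReceiverMode mode, TailCallMode tail_call_mode) {
      switch (tail_call_mode) {
        case TailCallMode::kDisallow:
          switch (mode) {
            case ConvertReceiverMode::kNullOrUndefined:
              return "Call_ReceiverIsNullOrUndefined";
            case ConvertReceiverMode::kNotNullOrUndefined:
              return "Call_ReceiverIsNotNullOrUndefined";
            case ConvertReceiverMode::kAny:
              return "Call_ReceiverIsAny";
          }
          break;
        case TailCallMode::kAllow:
          switch (mode) {
            case ConvertReceiverMode::kNullOrUndefined:
              return "TailCall_ReceiverIsNullOrUndefined";
            case ConvertReceiverMode::kNotNullOrUndefined:
              return "TailCall_ReceiverIsNotNullOrUndefined";
            case ConvertReceiverMode::kAny:
              return "TailCall_ReceiverIsAny";
          }
          break;
      }
      return "unreachable";
    }

    int main() {
      std::puts(Call(ConvertReceiverMode::kAny, TailCallMode::kAllow));
    }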
namespace {
@@ -3570,8 +4049,7 @@ class RelocatableArguments
} // namespace
-
-MaybeHandle<Object> Builtins::InvokeApiFunction(Handle<JSFunction> function,
+MaybeHandle<Object> Builtins::InvokeApiFunction(Handle<HeapObject> function,
Handle<Object> receiver,
int argc,
Handle<Object> args[]) {
@@ -3678,12 +4156,7 @@ static void Generate_LoadIC_Miss(MacroAssembler* masm) {
static void Generate_LoadIC_Normal(MacroAssembler* masm) {
- LoadIC::GenerateNormal(masm, SLOPPY);
-}
-
-
-static void Generate_LoadIC_Normal_Strong(MacroAssembler* masm) {
- LoadIC::GenerateNormal(masm, STRONG);
+ LoadIC::GenerateNormal(masm);
}
@@ -3693,22 +4166,12 @@ static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
static void Generate_LoadIC_Slow(MacroAssembler* masm) {
- LoadIC::GenerateRuntimeGetProperty(masm, SLOPPY);
-}
-
-
-static void Generate_LoadIC_Slow_Strong(MacroAssembler* masm) {
- LoadIC::GenerateRuntimeGetProperty(masm, STRONG);
+ LoadIC::GenerateRuntimeGetProperty(masm);
}
static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
- KeyedLoadIC::GenerateRuntimeGetProperty(masm, SLOPPY);
-}
-
-
-static void Generate_KeyedLoadIC_Slow_Strong(MacroAssembler* masm) {
- KeyedLoadIC::GenerateRuntimeGetProperty(masm, STRONG);
+ KeyedLoadIC::GenerateRuntimeGetProperty(masm);
}
@@ -3718,12 +4181,7 @@ static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
static void Generate_KeyedLoadIC_Megamorphic(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMegamorphic(masm, SLOPPY);
-}
-
-
-static void Generate_KeyedLoadIC_Megamorphic_Strong(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMegamorphic(masm, STRONG);
+ KeyedLoadIC::GenerateMegamorphic(masm);
}
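The paired *_Strong generators deleted here, together with the Strength and LoadICState::kStrongModeState plumbing removed later in this diff, appear to be part of dropping V8's experimental strong mode: each IC that used to come in a SLOPPY and a STRONG flavor collapses into one generator with no LanguageMode parameter. A trivial standalone sketch of the resulting shape (stand-in types, not the real generators):

    #include <cstdio>

    struct MacroAssembler {};

    // One generator per IC now; the former *_Strong twin and its
    // LanguageMode argument are gone.
    void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
      (void)masm;
      std::puts("KeyedLoadIC::GenerateRuntimeGetProperty(masm)");
    }

    int main() {
      MacroAssembler masm;
      Generate_KeyedLoadIC_Slow(&masm);
    }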
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index a707a94752..93e6e3d7f2 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -69,6 +69,14 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(ArrayBufferConstructor_ConstructStub, kTargetAndNewTarget) \
V(ArrayBufferIsView, kNone) \
\
+ V(BooleanConstructor, kNone) \
+ V(BooleanConstructor_ConstructStub, kTargetAndNewTarget) \
+ V(BooleanPrototypeToString, kNone) \
+ V(BooleanPrototypeValueOf, kNone) \
+ \
+ V(DataViewConstructor, kNone) \
+ V(DataViewConstructor_ConstructStub, kTargetAndNewTarget) \
+ \
V(DateConstructor, kNone) \
V(DateConstructor_ConstructStub, kTargetAndNewTarget) \
V(DateNow, kNone) \
@@ -102,6 +110,7 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(FunctionConstructor, kTargetAndNewTarget) \
V(FunctionPrototypeBind, kNone) \
V(FunctionPrototypeToString, kNone) \
+ V(FunctionHasInstance, kNone) \
\
V(GeneratorFunctionConstructor, kTargetAndNewTarget) \
\
@@ -110,10 +119,17 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(ObjectAssign, kNone) \
V(ObjectCreate, kNone) \
V(ObjectFreeze, kNone) \
+ V(ObjectGetOwnPropertyDescriptor, kNone) \
+ V(ObjectGetOwnPropertyNames, kNone) \
+ V(ObjectGetOwnPropertySymbols, kNone) \
+ V(ObjectIs, kNone) \
V(ObjectIsExtensible, kNone) \
V(ObjectIsFrozen, kNone) \
V(ObjectIsSealed, kNone) \
V(ObjectKeys, kNone) \
+ V(ObjectValues, kNone) \
+ V(ObjectEntries, kNone) \
+ V(ObjectGetOwnPropertyDescriptors, kNone) \
V(ObjectPreventExtensions, kNone) \
V(ObjectSeal, kNone) \
V(ObjectProtoToString, kNone) \
@@ -155,11 +171,22 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(CallFunction_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
kNoExtraICState) \
V(CallFunction_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(TailCallFunction_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(TailCallFunction_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(TailCallFunction_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(CallBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(TailCallBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(Call_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(Call_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
kNoExtraICState) \
V(Call_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(TailCall_ReceiverIsNullOrUndefined, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(TailCall_ReceiverIsNotNullOrUndefined, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
+ V(TailCall_ReceiverIsAny, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(ConstructFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(ConstructBoundFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
@@ -173,6 +200,8 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSBuiltinsConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSBuiltinsConstructStubForDerived, BUILTIN, UNINITIALIZED, \
+ kNoExtraICState) \
V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
@@ -188,10 +217,12 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(InterpreterExitTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(InterpreterPushArgsAndCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterPushArgsAndTailCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(InterpreterPushArgsAndConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(InterpreterNotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(InterpreterNotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(InterpreterNotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterEnterBytecodeDispatch, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
@@ -200,9 +231,6 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, kNoExtraICState) \
V(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState) \
\
- V(KeyedLoadIC_Megamorphic_Strong, KEYED_LOAD_IC, MEGAMORPHIC, \
- LoadICState::kStrongModeState) \
- \
V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
StoreICState::kStrictModeState) \
\
@@ -246,6 +274,9 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
+ V(MathMax, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(MathMin, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
V(NumberConstructor, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(NumberConstructor_ConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
@@ -265,13 +296,10 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
// Define list of builtin handlers implemented in assembly.
#define BUILTIN_LIST_H(V) \
V(LoadIC_Slow, LOAD_IC) \
- V(LoadIC_Slow_Strong, LOAD_IC) \
V(KeyedLoadIC_Slow, KEYED_LOAD_IC) \
- V(KeyedLoadIC_Slow_Strong, KEYED_LOAD_IC) \
V(StoreIC_Slow, STORE_IC) \
V(KeyedStoreIC_Slow, KEYED_STORE_IC) \
V(LoadIC_Normal, LOAD_IC) \
- V(LoadIC_Normal_Strong, LOAD_IC) \
V(StoreIC_Normal, STORE_IC)
// Define list of builtins used by the debugger implemented in assembly.
@@ -332,8 +360,13 @@ class Builtins {
#undef DECLARE_BUILTIN_ACCESSOR_A
// Convenience wrappers.
- Handle<Code> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
- Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny);
+ Handle<Code> CallFunction(
+ ConvertReceiverMode = ConvertReceiverMode::kAny,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ Handle<Code> CallBoundFunction(TailCallMode tail_call_mode);
+ Handle<Code> InterpreterPushArgsAndCall(TailCallMode tail_call_mode);
Code* builtin(Name name) {
// Code::cast cannot be used here since we access builtins
@@ -358,7 +391,7 @@ class Builtins {
bool is_initialized() const { return initialized_; }
MUST_USE_RESULT static MaybeHandle<Object> InvokeApiFunction(
- Handle<JSFunction> function, Handle<Object> receiver, int argc,
+ Handle<HeapObject> function, Handle<Object> receiver, int argc,
Handle<Object> args[]);
private:
@@ -383,6 +416,7 @@ class Builtins {
static void Generate_CompileOptimizedConcurrent(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSBuiltinsConstructStub(MacroAssembler* masm);
+ static void Generate_JSBuiltinsConstructStubForDerived(MacroAssembler* masm);
static void Generate_JSConstructStubApi(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
@@ -397,30 +431,71 @@ class Builtins {
// ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
static void Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode);
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode);
static void Generate_CallFunction_ReceiverIsNullOrUndefined(
MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined);
+ Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined,
+ TailCallMode::kDisallow);
}
static void Generate_CallFunction_ReceiverIsNotNullOrUndefined(
MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined);
+ Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined,
+ TailCallMode::kDisallow);
}
static void Generate_CallFunction_ReceiverIsAny(MacroAssembler* masm) {
- Generate_CallFunction(masm, ConvertReceiverMode::kAny);
+ Generate_CallFunction(masm, ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow);
+ }
+ static void Generate_TailCallFunction_ReceiverIsNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kNullOrUndefined,
+ TailCallMode::kAllow);
+ }
+ static void Generate_TailCallFunction_ReceiverIsNotNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kNotNullOrUndefined,
+ TailCallMode::kAllow);
+ }
+ static void Generate_TailCallFunction_ReceiverIsAny(MacroAssembler* masm) {
+ Generate_CallFunction(masm, ConvertReceiverMode::kAny,
+ TailCallMode::kAllow);
}
// ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList)
- static void Generate_CallBoundFunction(MacroAssembler* masm);
+ static void Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+ TailCallMode tail_call_mode);
+ static void Generate_CallBoundFunction(MacroAssembler* masm) {
+ Generate_CallBoundFunctionImpl(masm, TailCallMode::kDisallow);
+ }
+ static void Generate_TailCallBoundFunction(MacroAssembler* masm) {
+ Generate_CallBoundFunctionImpl(masm, TailCallMode::kAllow);
+ }
// ES6 section 7.3.12 Call(F, V, [argumentsList])
- static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode);
+ static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode);
static void Generate_Call_ReceiverIsNullOrUndefined(MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined);
+ Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined,
+ TailCallMode::kDisallow);
}
static void Generate_Call_ReceiverIsNotNullOrUndefined(MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined);
+ Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined,
+ TailCallMode::kDisallow);
}
static void Generate_Call_ReceiverIsAny(MacroAssembler* masm) {
- Generate_Call(masm, ConvertReceiverMode::kAny);
+ Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kDisallow);
+ }
+ static void Generate_TailCall_ReceiverIsNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kNullOrUndefined,
+ TailCallMode::kAllow);
+ }
+ static void Generate_TailCall_ReceiverIsNotNullOrUndefined(
+ MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kNotNullOrUndefined,
+ TailCallMode::kAllow);
+ }
+ static void Generate_TailCall_ReceiverIsAny(MacroAssembler* masm) {
+ Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kAllow);
}
// ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget)
@@ -482,6 +557,17 @@ class Builtins {
static void Generate_InternalArrayCode(MacroAssembler* masm);
static void Generate_ArrayCode(MacroAssembler* masm);
+ enum class MathMaxMinKind { kMax, kMin };
+ static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
+ // ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values )
+ static void Generate_MathMax(MacroAssembler* masm) {
+ Generate_MathMaxMin(masm, MathMaxMinKind::kMax);
+ }
+ // ES6 section 20.2.2.25 Math.min ( value1, value2 , ...values )
+ static void Generate_MathMin(MacroAssembler* masm) {
+ Generate_MathMaxMin(masm, MathMaxMinKind::kMin);
+ }
+
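Math.max and Math.min share one generator parameterized by the new MathMaxMinKind enum, with two thin entry points. A standalone model of that one-generator-two-entry-points pattern, computing values directly rather than emitting code:

    #include <algorithm>
    #include <cstdio>

    enum class MathMaxMinKind { kMax, kMin };

    double MathMaxMin(MathMaxMinKind kind, double a, double b) {
      return kind == MathMaxMinKind::kMax ? std::max(a, b) : std::min(a, b);
    }

    double MathMax(double a, double b) { return MathMaxMin(MathMaxMinKind::kMax, a, b); }
    double MathMin(double a, double b) { return MathMaxMin(MathMaxMinKind::kMin, a, b); }

    int main() { std::printf("%g %g\n", MathMax(1, 2), MathMin(1, 2)); }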
// ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case.
static void Generate_NumberConstructor(MacroAssembler* masm);
// ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case.
@@ -496,11 +582,20 @@ class Builtins {
static void Generate_InterpreterEntryTrampoline(MacroAssembler* masm);
static void Generate_InterpreterExitTrampoline(MacroAssembler* masm);
- static void Generate_InterpreterPushArgsAndCall(MacroAssembler* masm);
+ static void Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+ return Generate_InterpreterPushArgsAndCallImpl(masm,
+ TailCallMode::kDisallow);
+ }
+ static void Generate_InterpreterPushArgsAndTailCall(MacroAssembler* masm) {
+ return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kAllow);
+ }
+ static void Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode);
static void Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm);
static void Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm);
static void Generate_InterpreterNotifySoftDeoptimized(MacroAssembler* masm);
static void Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm);
+ static void Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm);
#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
static void Generate_Make##C##CodeYoungAgainEvenMarking( \
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 6d31a5f530..9898282d31 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -12,44 +12,36 @@ namespace internal {
// static
-Callable CodeFactory::LoadIC(Isolate* isolate, TypeofMode typeof_mode,
- LanguageMode language_mode) {
- return Callable(
- LoadIC::initialize_stub(
- isolate, LoadICState(typeof_mode, language_mode).GetExtraICState()),
- LoadDescriptor(isolate));
+Callable CodeFactory::LoadIC(Isolate* isolate, TypeofMode typeof_mode) {
+ return Callable(LoadIC::initialize_stub(
+ isolate, LoadICState(typeof_mode).GetExtraICState()),
+ LoadDescriptor(isolate));
}
// static
Callable CodeFactory::LoadICInOptimizedCode(
- Isolate* isolate, TypeofMode typeof_mode, LanguageMode language_mode,
+ Isolate* isolate, TypeofMode typeof_mode,
InlineCacheState initialization_state) {
auto code = LoadIC::initialize_stub_in_optimized_code(
- isolate, LoadICState(typeof_mode, language_mode).GetExtraICState(),
+ isolate, LoadICState(typeof_mode).GetExtraICState(),
initialization_state);
return Callable(code, LoadWithVectorDescriptor(isolate));
}
// static
-Callable CodeFactory::KeyedLoadIC(Isolate* isolate,
- LanguageMode language_mode) {
- ExtraICState state = is_strong(language_mode) ? LoadICState::kStrongModeState
- : kNoExtraICState;
- return Callable(KeyedLoadIC::initialize_stub(isolate, state),
+Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
+ return Callable(KeyedLoadIC::initialize_stub(isolate, kNoExtraICState),
LoadDescriptor(isolate));
}
// static
Callable CodeFactory::KeyedLoadICInOptimizedCode(
- Isolate* isolate, LanguageMode language_mode,
- InlineCacheState initialization_state) {
- ExtraICState state = is_strong(language_mode) ? LoadICState::kStrongModeState
- : kNoExtraICState;
+ Isolate* isolate, InlineCacheState initialization_state) {
auto code = KeyedLoadIC::initialize_stub_in_optimized_code(
- isolate, initialization_state, state);
+ isolate, initialization_state, kNoExtraICState);
if (initialization_state != MEGAMORPHIC) {
return Callable(code, LoadWithVectorDescriptor(isolate));
}
@@ -59,18 +51,20 @@ Callable CodeFactory::KeyedLoadICInOptimizedCode(
// static
Callable CodeFactory::CallIC(Isolate* isolate, int argc,
- ConvertReceiverMode mode) {
- return Callable(CallIC::initialize_stub(isolate, argc, mode),
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
+ return Callable(CallIC::initialize_stub(isolate, argc, mode, tail_call_mode),
CallFunctionWithFeedbackDescriptor(isolate));
}
// static
Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate, int argc,
- ConvertReceiverMode mode) {
- return Callable(
- CallIC::initialize_stub_in_optimized_code(isolate, argc, mode),
- CallFunctionWithFeedbackAndVectorDescriptor(isolate));
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
+ return Callable(CallIC::initialize_stub_in_optimized_code(isolate, argc, mode,
+ tail_call_mode),
+ CallFunctionWithFeedbackAndVectorDescriptor(isolate));
}
@@ -118,9 +112,8 @@ Callable CodeFactory::KeyedStoreICInOptimizedCode(
// static
-Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op,
- Strength strength) {
- Handle<Code> code = CompareIC::GetUninitialized(isolate, op, strength);
+Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op) {
+ Handle<Code> code = CompareIC::GetUninitialized(isolate, op);
return Callable(code, CompareDescriptor(isolate));
}
@@ -133,9 +126,8 @@ Callable CodeFactory::CompareNilIC(Isolate* isolate, NilValue nil_value) {
// static
-Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op,
- Strength strength) {
- BinaryOpICStub stub(isolate, op, strength);
+Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op) {
+ BinaryOpICStub stub(isolate, op);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
@@ -169,6 +161,13 @@ Callable CodeFactory::ToString(Isolate* isolate) {
// static
+Callable CodeFactory::ToName(Isolate* isolate) {
+ ToNameStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::ToLength(Isolate* isolate) {
ToLengthStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -271,19 +270,29 @@ Callable CodeFactory::FastNewClosure(Isolate* isolate,
// static
-Callable CodeFactory::ArgumentsAccess(Isolate* isolate,
- bool is_unmapped_arguments,
- bool has_duplicate_parameters) {
- ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
- is_unmapped_arguments, has_duplicate_parameters);
- ArgumentsAccessStub stub(isolate, type);
+Callable CodeFactory::FastNewObject(Isolate* isolate) {
+ FastNewObjectStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
-Callable CodeFactory::RestArgumentsAccess(Isolate* isolate) {
- RestParamAccessStub stub(isolate);
+Callable CodeFactory::FastNewRestParameter(Isolate* isolate) {
+ FastNewRestParameterStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::FastNewSloppyArguments(Isolate* isolate) {
+ FastNewSloppyArgumentsStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::FastNewStrictArguments(Isolate* isolate) {
+ FastNewStrictArgumentsStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
@@ -345,9 +354,11 @@ Callable CodeFactory::ConstructFunction(Isolate* isolate) {
// static
-Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate) {
- return Callable(isolate->builtins()->InterpreterPushArgsAndCall(),
- InterpreterPushArgsAndCallDescriptor(isolate));
+Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate,
+ TailCallMode tail_call_mode) {
+ return Callable(
+ isolate->builtins()->InterpreterPushArgsAndCall(tail_call_mode),
+ InterpreterPushArgsAndCallDescriptor(isolate));
}
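Every CodeFactory method in this file returns a Callable: a code object paired with the call-interface descriptor a caller must honor when passing arguments. A minimal stand-in model of that pairing (not the real V8 classes):

    struct Code {};

    struct CallInterfaceDescriptor {
      int register_parameter_count;  // illustrative field only
    };

    // A Callable bundles what to call with how to call it.
    struct Callable {
      Code* code;
      CallInterfaceDescriptor descriptor;
    };

    Callable InterpreterPushArgsAndCall(bool tail_call) {
      static Code normal, tail;
      return Callable{tail_call ? &tail : &normal, CallInterfaceDescriptor{3}};
    }

    int main() {
      Callable c = InterpreterPushArgsAndCall(/*tail_call=*/true);
      (void)c;
    }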
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 2126790359..fb1a165053 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -32,21 +32,20 @@ class Callable final BASE_EMBEDDED {
class CodeFactory final {
public:
// Initial states for ICs.
- static Callable LoadIC(Isolate* isolate, TypeofMode typeof_mode,
- LanguageMode language_mode);
+ static Callable LoadIC(Isolate* isolate, TypeofMode typeof_mode);
static Callable LoadICInOptimizedCode(Isolate* isolate,
TypeofMode typeof_mode,
- LanguageMode language_mode,
InlineCacheState initialization_state);
- static Callable KeyedLoadIC(Isolate* isolate, LanguageMode language_mode);
+ static Callable KeyedLoadIC(Isolate* isolate);
static Callable KeyedLoadICInOptimizedCode(
- Isolate* isolate, LanguageMode language_mode,
- InlineCacheState initialization_state);
+ Isolate* isolate, InlineCacheState initialization_state);
static Callable CallIC(Isolate* isolate, int argc,
- ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ ConvertReceiverMode mode = ConvertReceiverMode::kAny,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
static Callable CallICInOptimizedCode(
Isolate* isolate, int argc,
- ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+ ConvertReceiverMode mode = ConvertReceiverMode::kAny,
+ TailCallMode tail_call_mode = TailCallMode::kDisallow);
static Callable StoreIC(Isolate* isolate, LanguageMode mode);
static Callable StoreICInOptimizedCode(Isolate* isolate, LanguageMode mode,
InlineCacheState initialization_state);
@@ -55,12 +54,10 @@ class CodeFactory final {
Isolate* isolate, LanguageMode mode,
InlineCacheState initialization_state);
- static Callable CompareIC(Isolate* isolate, Token::Value op,
- Strength strength);
+ static Callable CompareIC(Isolate* isolate, Token::Value op);
static Callable CompareNilIC(Isolate* isolate, NilValue nil_value);
- static Callable BinaryOpIC(Isolate* isolate, Token::Value op,
- Strength strength);
+ static Callable BinaryOpIC(Isolate* isolate, Token::Value op);
// Code stubs. Add methods here as needed to reduce dependency on
// code-stubs.h.
@@ -70,6 +67,7 @@ class CodeFactory final {
static Callable ToNumber(Isolate* isolate);
static Callable ToString(Isolate* isolate);
+ static Callable ToName(Isolate* isolate);
static Callable ToLength(Isolate* isolate);
static Callable ToObject(Isolate* isolate);
static Callable NumberToString(Isolate* isolate);
@@ -91,10 +89,10 @@ class CodeFactory final {
static Callable FastNewContext(Isolate* isolate, int slot_count);
static Callable FastNewClosure(Isolate* isolate, LanguageMode language_mode,
FunctionKind kind);
-
- static Callable ArgumentsAccess(Isolate* isolate, bool is_unmapped_arguments,
- bool has_duplicate_parameters);
- static Callable RestArgumentsAccess(Isolate* isolate);
+ static Callable FastNewObject(Isolate* isolate);
+ static Callable FastNewRestParameter(Isolate* isolate);
+ static Callable FastNewSloppyArguments(Isolate* isolate);
+ static Callable FastNewStrictArguments(Isolate* isolate);
static Callable AllocateHeapNumber(Isolate* isolate);
static Callable AllocateMutableHeapNumber(Isolate* isolate);
@@ -108,7 +106,8 @@ class CodeFactory final {
static Callable Construct(Isolate* isolate);
static Callable ConstructFunction(Isolate* isolate);
- static Callable InterpreterPushArgsAndCall(Isolate* isolate);
+ static Callable InterpreterPushArgsAndCall(Isolate* isolate,
+ TailCallMode tail_call_mode);
static Callable InterpreterPushArgsAndConstruct(Isolate* isolate);
static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
};
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 2fab578b9b..461baaa4e6 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -34,11 +34,12 @@ static LChunk* OptimizeGraph(HGraph* graph) {
class CodeStubGraphBuilderBase : public HGraphBuilder {
public:
- explicit CodeStubGraphBuilderBase(CompilationInfo* info)
- : HGraphBuilder(info),
+ explicit CodeStubGraphBuilderBase(CompilationInfo* info, CodeStub* code_stub)
+ : HGraphBuilder(info, code_stub->GetCallInterfaceDescriptor()),
arguments_length_(NULL),
info_(info),
- descriptor_(info->code_stub()),
+ code_stub_(code_stub),
+ descriptor_(code_stub),
context_(NULL) {
int parameter_count = GetParameterCount();
parameters_.Reset(new HParameter*[parameter_count]);
@@ -68,7 +69,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
return arguments_length_;
}
CompilationInfo* info() { return info_; }
- CodeStub* stub() { return info_->code_stub(); }
+ CodeStub* stub() { return code_stub_; }
HContext* context() { return context_; }
Isolate* isolate() { return info_->isolate(); }
@@ -124,6 +125,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
base::SmartArrayPointer<HParameter*> parameters_;
HValue* arguments_length_;
CompilationInfo* info_;
+ CodeStub* code_stub_;
CodeStubDescriptor descriptor_;
HContext* context_;
};
@@ -178,6 +180,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
context_ = Add<HContext>();
start_environment->BindContext(context_);
+ start_environment->Bind(param_count, context_);
Add<HSimulate>(BailoutId::StubEntry());
@@ -214,8 +217,8 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
template <class Stub>
class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
public:
- explicit CodeStubGraphBuilder(CompilationInfo* info)
- : CodeStubGraphBuilderBase(info) {}
+ explicit CodeStubGraphBuilder(CompilationInfo* info, CodeStub* stub)
+ : CodeStubGraphBuilderBase(info, stub) {}
protected:
virtual HValue* BuildCodeStub() {
@@ -269,13 +272,8 @@ Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(
masm.GetCode(&desc);
// Copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(
- GetCodeKind(),
- GetICState(),
- GetExtraICState(),
- GetStubType());
Handle<Code> new_object = factory->NewCode(
- desc, flags, masm.CodeObject(), NeedsImmovableCode());
+ desc, GetCodeFlags(), masm.CodeObject(), NeedsImmovableCode());
return new_object;
}
@@ -297,8 +295,15 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
timer.Start();
}
Zone zone;
- CompilationInfo info(stub, isolate, &zone);
- CodeStubGraphBuilder<Stub> builder(&info);
+ CompilationInfo info(CodeStub::MajorName(stub->MajorKey()), isolate, &zone,
+ stub->GetCodeFlags());
+ // Parameter count is number of stack parameters.
+ int parameter_count = descriptor.GetStackParameterCount();
+ if (descriptor.function_mode() == NOT_JS_FUNCTION_STUB_MODE) {
+ parameter_count--;
+ }
+ info.set_parameter_count(parameter_count);
+ CodeStubGraphBuilder<Stub> builder(&info, stub);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
Handle<Code> code = chunk->Codegen();
if (FLAG_profile_hydrogen_code_stub_compilation) {
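The parameter-count computation moved into DoGenerateCode above from the CompilationInfo constructor (see the compiler.cc hunk later in this diff): the count comes from the descriptor's stack parameters, minus one when the stub is compiled in NOT_JS_FUNCTION_STUB_MODE. Stated as a standalone function:

    #include <cassert>

    enum FunctionMode { JS_FUNCTION_STUB_MODE, NOT_JS_FUNCTION_STUB_MODE };

    // Mirrors the computation in DoGenerateCode above.
    int ComputeParameterCount(int stack_parameter_count, FunctionMode mode) {
      if (mode == NOT_JS_FUNCTION_STUB_MODE) stack_parameter_count--;
      return stack_parameter_count;
    }

    int main() { assert(ComputeParameterCount(4, NOT_JS_FUNCTION_STUB_MODE) == 3); }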
@@ -314,7 +319,7 @@ template <>
HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() {
info()->MarkAsSavesCallerDoubles();
HValue* number = GetParameter(NumberToStringStub::kNumber);
- return BuildNumberToString(number, Type::Number(zone()));
+ return BuildNumberToString(number, Type::Number());
}
@@ -1464,16 +1469,15 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
if_leftisstring.If<HIsStringAndBranch>(left);
if_leftisstring.Then();
{
- Push(BuildBinaryOperation(state.op(), left, right, Type::String(zone()),
+ Push(BuildBinaryOperation(state.op(), left, right, Type::String(),
right_type, result_type,
- state.fixed_right_arg(), allocation_mode,
- state.strength()));
+ state.fixed_right_arg(), allocation_mode));
}
if_leftisstring.Else();
{
- Push(BuildBinaryOperation(
- state.op(), left, right, left_type, right_type, result_type,
- state.fixed_right_arg(), allocation_mode, state.strength()));
+ Push(BuildBinaryOperation(state.op(), left, right, left_type,
+ right_type, result_type,
+ state.fixed_right_arg(), allocation_mode));
}
if_leftisstring.End();
result = Pop();
@@ -1483,23 +1487,22 @@ HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
if_rightisstring.Then();
{
Push(BuildBinaryOperation(state.op(), left, right, left_type,
- Type::String(zone()), result_type,
- state.fixed_right_arg(), allocation_mode,
- state.strength()));
+ Type::String(), result_type,
+ state.fixed_right_arg(), allocation_mode));
}
if_rightisstring.Else();
{
- Push(BuildBinaryOperation(
- state.op(), left, right, left_type, right_type, result_type,
- state.fixed_right_arg(), allocation_mode, state.strength()));
+ Push(BuildBinaryOperation(state.op(), left, right, left_type,
+ right_type, result_type,
+ state.fixed_right_arg(), allocation_mode));
}
if_rightisstring.End();
result = Pop();
}
} else {
- result = BuildBinaryOperation(
- state.op(), left, right, left_type, right_type, result_type,
- state.fixed_right_arg(), allocation_mode, state.strength());
+ result = BuildBinaryOperation(state.op(), left, right, left_type,
+ right_type, result_type,
+ state.fixed_right_arg(), allocation_mode);
}
// If we encounter a generic argument, the number conversion is
@@ -1533,7 +1536,7 @@ HValue* CodeStubGraphBuilder<BinaryOpWithAllocationSiteStub>::BuildCodeStub() {
return BuildBinaryOperation(state.op(), left, right, left_type, right_type,
result_type, state.fixed_right_arg(),
- allocation_mode, state.strength());
+ allocation_mode);
}
@@ -2154,8 +2157,7 @@ HValue* CodeStubGraphBuilder<LoadDictionaryElementStub>::BuildCodeStub() {
HValue* hash = BuildElementIndexHash(key);
- return BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash,
- casted_stub()->language_mode());
+ return BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash);
}
@@ -2186,8 +2188,8 @@ template <>
class CodeStubGraphBuilder<KeyedLoadGenericStub>
: public CodeStubGraphBuilderBase {
public:
- explicit CodeStubGraphBuilder(CompilationInfo* info)
- : CodeStubGraphBuilderBase(info) {}
+ explicit CodeStubGraphBuilder(CompilationInfo* info, CodeStub* stub)
+ : CodeStubGraphBuilderBase(info, stub) {}
protected:
virtual HValue* BuildCodeStub();
@@ -2289,8 +2291,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
HValue* hash = BuildElementIndexHash(key);
- Push(BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash,
- casted_stub()->language_mode()));
+ Push(BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash));
}
kind_if.Else();
@@ -2334,8 +2335,8 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
hash = AddUncasted<HShr>(hash, Add<HConstant>(Name::kHashShift));
- HValue* value = BuildUncheckedDictionaryElementLoad(
- receiver, properties, key, hash, casted_stub()->language_mode());
+ HValue* value =
+ BuildUncheckedDictionaryElementLoad(receiver, properties, key, hash);
Push(value);
}
if_dict_properties.Else();
@@ -2412,10 +2413,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
// KeyedLookupCache miss; call runtime.
Add<HPushArguments>(receiver, key);
Push(Add<HCallRuntime>(
- Runtime::FunctionForId(is_strong(casted_stub()->language_mode())
- ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty),
- 2));
+ Runtime::FunctionForId(Runtime::kKeyedGetProperty), 2));
}
inline_or_runtime.End();
}
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 1754288b6e..4e5efcd8e0 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -96,6 +96,12 @@ Code::Kind CodeStub::GetCodeKind() const {
}
+Code::Flags CodeStub::GetCodeFlags() const {
+ return Code::ComputeFlags(GetCodeKind(), GetICState(), GetExtraICState(),
+ GetStubType());
+}
+
+
Handle<Code> CodeStub::GetCodeCopy(const Code::FindAndReplacePattern& pattern) {
Handle<Code> ic = GetCode();
ic = isolate()->factory()->CopyCode(ic);
@@ -270,7 +276,7 @@ MaybeHandle<Code> CodeStub::GetCode(Isolate* isolate, uint32_t key) {
void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate) {
// Generate the uninitialized versions of the stub.
for (int op = Token::BIT_OR; op <= Token::MOD; ++op) {
- BinaryOpICStub stub(isolate, static_cast<Token::Value>(op), Strength::WEAK);
+ BinaryOpICStub stub(isolate, static_cast<Token::Value>(op));
stub.GetCode();
}
@@ -453,9 +459,7 @@ void CompareNilICStub::UpdateStatus(Handle<Object> object) {
state.Add(NULL_TYPE);
} else if (object->IsUndefined()) {
state.Add(UNDEFINED);
- } else if (object->IsUndetectableObject() ||
- object->IsOddball() ||
- !object->IsHeapObject()) {
+ } else if (object->IsUndetectableObject() || object->IsSmi()) {
state.RemoveAll();
state.Add(GENERIC);
} else if (IsMonomorphic()) {
@@ -474,7 +478,7 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
Zone zone;
CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
compiler::CodeStubAssembler assembler(isolate(), &zone, descriptor,
- GetCodeKind(), name);
+ GetCodeFlags(), name);
GenerateAssembly(&assembler);
return assembler.GenerateCode();
}
@@ -549,18 +553,17 @@ std::ostream& operator<<(std::ostream& os, const CompareNilICStub::State& s) {
Type* CompareNilICStub::GetType(Zone* zone, Handle<Map> map) {
State state = this->state();
- if (state.Contains(CompareNilICStub::GENERIC)) return Type::Any(zone);
+ if (state.Contains(CompareNilICStub::GENERIC)) return Type::Any();
- Type* result = Type::None(zone);
+ Type* result = Type::None();
if (state.Contains(CompareNilICStub::UNDEFINED)) {
- result = Type::Union(result, Type::Undefined(zone), zone);
+ result = Type::Union(result, Type::Undefined(), zone);
}
if (state.Contains(CompareNilICStub::NULL_TYPE)) {
- result = Type::Union(result, Type::Null(zone), zone);
+ result = Type::Union(result, Type::Null(), zone);
}
if (state.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
- Type* type =
- map.is_null() ? Type::Detectable(zone) : Type::Class(map, zone);
+ Type* type = map.is_null() ? Type::Detectable() : Type::Class(map, zone);
result = Type::Union(result, type, zone);
}
@@ -570,8 +573,7 @@ Type* CompareNilICStub::GetType(Zone* zone, Handle<Map> map) {
Type* CompareNilICStub::GetInputType(Zone* zone, Handle<Map> map) {
Type* output_type = GetType(zone, map);
- Type* nil_type =
- nil_value() == kNullValue ? Type::Null(zone) : Type::Undefined(zone);
+ Type* nil_type = nil_value() == kNullValue ? Type::Null() : Type::Undefined();
return Type::Union(output_type, nil_type, zone);
}
@@ -599,9 +601,7 @@ void LoadDictionaryElementStub::InitializeDescriptor(
void KeyedLoadGenericStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize(
- Runtime::FunctionForId(is_strong(language_mode())
- ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty)->entry);
+ Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry);
}
@@ -798,28 +798,8 @@ void CreateWeakCellStub::GenerateAheadOfTime(Isolate* isolate) {
void StoreElementStub::Generate(MacroAssembler* masm) {
- switch (elements_kind()) {
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS:
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- UNREACHABLE();
- break;
- case DICTIONARY_ELEMENTS:
- ElementHandlerCompiler::GenerateStoreSlow(masm);
- break;
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
+ DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind());
+ ElementHandlerCompiler::GenerateStoreSlow(masm);
}
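StoreElementStub::Generate used to switch over every ElementsKind only to hit UNREACHABLE for all but one case; after this change the single live path is asserted instead (the TODO added in code-stubs.h later in this diff proposes renaming the stub accordingly). A standalone model:

    #include <cassert>

    enum ElementsKind { FAST_ELEMENTS, DICTIONARY_ELEMENTS };

    void GenerateStoreSlow() { /* emit the slow store handler */ }

    void StoreElementStub_Generate(ElementsKind kind) {
      // Every other kind is served by a different stub now.
      assert(kind == DICTIONARY_ELEMENTS);
      GenerateStoreSlow();
    }

    int main() { StoreElementStub_Generate(DICTIONARY_ELEMENTS); }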
@@ -838,52 +818,6 @@ void StoreFastElementStub::GenerateAheadOfTime(Isolate* isolate) {
}
-void RestParamAccessStub::Generate(MacroAssembler* masm) { GenerateNew(masm); }
-
-
-void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
- switch (type()) {
- case READ_ELEMENT:
- GenerateReadElement(masm);
- break;
- case NEW_SLOPPY_FAST:
- GenerateNewSloppyFast(masm);
- break;
- case NEW_SLOPPY_SLOW:
- GenerateNewSloppySlow(masm);
- break;
- case NEW_STRICT:
- GenerateNewStrict(masm);
- break;
- }
-}
-
-
-void ArgumentsAccessStub::PrintName(std::ostream& os) const { // NOLINT
- os << "ArgumentsAccessStub_";
- switch (type()) {
- case READ_ELEMENT:
- os << "ReadElement";
- break;
- case NEW_SLOPPY_FAST:
- os << "NewSloppyFast";
- break;
- case NEW_SLOPPY_SLOW:
- os << "NewSloppySlow";
- break;
- case NEW_STRICT:
- os << "NewStrict";
- break;
- }
- return;
-}
-
-
-void RestParamAccessStub::PrintName(std::ostream& os) const { // NOLINT
- os << "RestParamAccessStub_";
-}
-
-
void ArrayConstructorStub::PrintName(std::ostream& os) const { // NOLINT
os << "ArrayConstructorStub";
switch (argument_count()) {
@@ -964,9 +898,9 @@ bool ToBooleanStub::Types::UpdateStatus(Handle<Object> object) {
Add(SPEC_OBJECT);
return !object->IsUndetectableObject();
} else if (object->IsString()) {
+ DCHECK(!object->IsUndetectableObject());
Add(STRING);
- return !object->IsUndetectableObject() &&
- String::cast(*object)->length() != 0;
+ return String::cast(*object)->length() != 0;
} else if (object->IsSymbol()) {
Add(SYMBOL);
return true;
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 21e21356bb..f370ce6473 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -21,7 +21,6 @@ namespace internal {
// List of code stubs used on all platforms.
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
/* PlatformCodeStubs */ \
- V(ArgumentsAccess) \
V(ArrayConstructor) \
V(BinaryOpICWithAllocationSite) \
V(CallApiFunction) \
@@ -44,7 +43,6 @@ namespace internal {
V(MathPow) \
V(ProfileEntryHook) \
V(RecordWrite) \
- V(RestParamAccess) \
V(RegExpExec) \
V(StoreBufferOverflow) \
V(StoreElement) \
@@ -54,6 +52,7 @@ namespace internal {
V(ToNumber) \
V(ToLength) \
V(ToString) \
+ V(ToName) \
V(ToObject) \
V(VectorStoreICTrampoline) \
V(VectorKeyedStoreICTrampoline) \
@@ -77,6 +76,10 @@ namespace internal {
V(FastCloneShallowObject) \
V(FastNewClosure) \
V(FastNewContext) \
+ V(FastNewObject) \
+ V(FastNewRestParameter) \
+ V(FastNewSloppyArguments) \
+ V(FastNewStrictArguments) \
V(GrowArrayElements) \
V(InternalArrayNArgumentsConstructor) \
V(InternalArrayNoArgumentConstructor) \
@@ -240,6 +243,8 @@ class CodeStub BASE_EMBEDDED {
virtual ExtraICState GetExtraICState() const { return kNoExtraICState; }
virtual Code::StubType GetStubType() const { return Code::NORMAL; }
+ Code::Flags GetCodeFlags() const;
+
friend std::ostream& operator<<(std::ostream& os, const CodeStub& s) {
s.PrintName(os);
return os;
@@ -323,8 +328,10 @@ class CodeStub BASE_EMBEDDED {
#define DEFINE_CODE_STUB(NAME, SUPER) \
- protected: \
+ public: \
inline Major MajorKey() const override { return NAME; }; \
+ \
+ protected: \
DEFINE_CODE_STUB_BASE(NAME##Stub, SUPER)
@@ -720,6 +727,55 @@ class FastNewContextStub final : public HydrogenCodeStub {
};
+class FastNewObjectStub final : public PlatformCodeStub {
+ public:
+ explicit FastNewObjectStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewObject);
+ DEFINE_PLATFORM_CODE_STUB(FastNewObject, PlatformCodeStub);
+};
+
+
+// TODO(turbofan): This stub should be possible to write in TurboFan
+// using the CodeStubAssembler very soon in a way that is as efficient
+// and easy as the current handwritten version, which is partly a copy
+// of the strict arguments object materialization code.
+class FastNewRestParameterStub final : public PlatformCodeStub {
+ public:
+ explicit FastNewRestParameterStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewRestParameter);
+ DEFINE_PLATFORM_CODE_STUB(FastNewRestParameter, PlatformCodeStub);
+};
+
+
+// TODO(turbofan): This stub should be possible to write in TurboFan
+// using the CodeStubAssembler very soon in a way that is as efficient
+// and easy as the current handwritten version.
+class FastNewSloppyArgumentsStub final : public PlatformCodeStub {
+ public:
+ explicit FastNewSloppyArgumentsStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewSloppyArguments);
+ DEFINE_PLATFORM_CODE_STUB(FastNewSloppyArguments, PlatformCodeStub);
+};
+
+
+// TODO(turbofan): This stub should be possible to write in TurboFan
+// using the CodeStubAssembler very soon in a way that is as efficient
+// and easy as the current handwritten version.
+class FastNewStrictArgumentsStub final : public PlatformCodeStub {
+ public:
+ explicit FastNewStrictArgumentsStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewStrictArguments);
+ DEFINE_PLATFORM_CODE_STUB(FastNewStrictArguments, PlatformCodeStub);
+};
+
+
class FastCloneRegExpStub final : public HydrogenCodeStub {
public:
explicit FastCloneRegExpStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
@@ -920,6 +976,7 @@ class CallICStub: public PlatformCodeStub {
protected:
int arg_count() const { return state().argc(); }
ConvertReceiverMode convert_mode() const { return state().convert_mode(); }
+ TailCallMode tail_call_mode() const { return state().tail_call_mode(); }
CallICState state() const {
return CallICState(static_cast<ExtraICState>(minor_key_));
@@ -1383,11 +1440,13 @@ class CallApiFunctionStub : public PlatformCodeStub {
class CallApiAccessorStub : public PlatformCodeStub {
public:
- CallApiAccessorStub(Isolate* isolate, bool is_store, bool call_data_undefined)
+ CallApiAccessorStub(Isolate* isolate, bool is_store, bool call_data_undefined,
+ bool is_lazy)
: PlatformCodeStub(isolate) {
minor_key_ = IsStoreBits::encode(is_store) |
CallDataUndefinedBits::encode(call_data_undefined) |
- ArgumentBits::encode(is_store ? 1 : 0);
+ ArgumentBits::encode(is_store ? 1 : 0) |
+ IsLazyAccessorBits::encode(is_lazy);
}
protected:
@@ -1402,6 +1461,7 @@ class CallApiAccessorStub : public PlatformCodeStub {
private:
bool is_store() const { return IsStoreBits::decode(minor_key_); }
+ bool is_lazy() const { return IsLazyAccessorBits::decode(minor_key_); }
bool call_data_undefined() const {
return CallDataUndefinedBits::decode(minor_key_);
}
@@ -1410,6 +1470,7 @@ class CallApiAccessorStub : public PlatformCodeStub {
class IsStoreBits: public BitField<bool, 0, 1> {};
class CallDataUndefinedBits: public BitField<bool, 1, 1> {};
class ArgumentBits : public BitField<int, 2, kArgBits> {};
+ class IsLazyAccessorBits : public BitField<bool, 3 + kArgBits, 1> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiAccessor);
DEFINE_PLATFORM_CODE_STUB(CallApiAccessor, PlatformCodeStub);
@@ -1445,9 +1506,9 @@ class CallApiGetterStub : public PlatformCodeStub {
class BinaryOpICStub : public HydrogenCodeStub {
public:
- BinaryOpICStub(Isolate* isolate, Token::Value op, Strength strength)
+ BinaryOpICStub(Isolate* isolate, Token::Value op)
: HydrogenCodeStub(isolate, UNINITIALIZED) {
- BinaryOpICState state(isolate, op, strength);
+ BinaryOpICState state(isolate, op);
set_sub_minor_key(state.GetExtraICState());
}
@@ -1528,9 +1589,8 @@ class BinaryOpICWithAllocationSiteStub final : public PlatformCodeStub {
class BinaryOpWithAllocationSiteStub final : public BinaryOpICStub {
public:
- BinaryOpWithAllocationSiteStub(Isolate* isolate, Token::Value op,
- Strength strength)
- : BinaryOpICStub(isolate, op, strength) {}
+ BinaryOpWithAllocationSiteStub(Isolate* isolate, Token::Value op)
+ : BinaryOpICStub(isolate, op) {}
BinaryOpWithAllocationSiteStub(Isolate* isolate, const BinaryOpICState& state)
: BinaryOpICStub(isolate, state) {}
@@ -1581,13 +1641,11 @@ class StringAddStub final : public HydrogenCodeStub {
class CompareICStub : public PlatformCodeStub {
public:
- CompareICStub(Isolate* isolate, Token::Value op, Strength strength,
- CompareICState::State left, CompareICState::State right,
- CompareICState::State state)
+ CompareICStub(Isolate* isolate, Token::Value op, CompareICState::State left,
+ CompareICState::State right, CompareICState::State state)
: PlatformCodeStub(isolate) {
DCHECK(Token::IsCompareOp(op));
minor_key_ = OpBits::encode(op - Token::EQ) |
- StrengthBits::encode(is_strong(strength)) |
LeftStateBits::encode(left) | RightStateBits::encode(right) |
StateBits::encode(state);
}
@@ -1600,10 +1658,6 @@ class CompareICStub : public PlatformCodeStub {
return static_cast<Token::Value>(Token::EQ + OpBits::decode(minor_key_));
}
- Strength strength() const {
- return StrengthBits::decode(minor_key_) ? Strength::STRONG : Strength::WEAK;
- }
-
CompareICState::State left() const {
return LeftStateBits::decode(minor_key_);
}
@@ -1636,10 +1690,9 @@ class CompareICStub : public PlatformCodeStub {
}
class OpBits : public BitField<int, 0, 3> {};
- class StrengthBits : public BitField<bool, 3, 1> {};
- class LeftStateBits : public BitField<CompareICState::State, 4, 4> {};
- class RightStateBits : public BitField<CompareICState::State, 8, 4> {};
- class StateBits : public BitField<CompareICState::State, 12, 4> {};
+ class LeftStateBits : public BitField<CompareICState::State, 3, 4> {};
+ class RightStateBits : public BitField<CompareICState::State, 7, 4> {};
+ class StateBits : public BitField<CompareICState::State, 11, 4> {};
Handle<Map> known_map_;
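Dropping the one-bit StrengthBits field shifts every later field of minor_key_ down by one bit: Op keeps bits 0-2, and the three 4-bit CompareICState fields now start at bits 3, 7 and 11 (previously 4, 8 and 12). A standalone check of the new packing, with a simplified BitField (the real template is BitField<Type, shift, size>):

    #include <cassert>
    #include <cstdint>

    template <int kShift, int kBits>
    struct BitField {
      static uint32_t encode(uint32_t v) { return v << kShift; }
      static uint32_t decode(uint32_t key) {
        return (key >> kShift) & ((1u << kBits) - 1);
      }
    };

    using OpBits = BitField<0, 3>;
    using LeftStateBits = BitField<3, 4>;   // started at bit 4 before the patch
    using RightStateBits = BitField<7, 4>;  // was bit 8
    using StateBits = BitField<11, 4>;      // was bit 12

    int main() {
      uint32_t key = OpBits::encode(5) | LeftStateBits::encode(9) |
                     RightStateBits::encode(3) | StateBits::encode(12);
      assert(OpBits::decode(key) == 5 && StateBits::decode(key) == 12);
    }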
@@ -1746,10 +1799,8 @@ class CEntryStub : public PlatformCodeStub {
: PlatformCodeStub(isolate) {
minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs) |
ArgvMode::encode(argv_mode == kArgvInRegister);
- DCHECK(result_size == 1 || result_size == 2);
-#if _WIN64 || V8_TARGET_ARCH_PPC
+ DCHECK(result_size == 1 || result_size == 2 || result_size == 3);
minor_key_ = ResultSizeBits::update(minor_key_, result_size);
-#endif // _WIN64
}
// The version of this stub that doesn't save doubles is generated ahead of
@@ -1761,9 +1812,7 @@ class CEntryStub : public PlatformCodeStub {
private:
bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
bool argv_in_register() const { return ArgvMode::decode(minor_key_); }
-#if _WIN64 || V8_TARGET_ARCH_PPC
int result_size() const { return ResultSizeBits::decode(minor_key_); }
-#endif // _WIN64
bool NeedsImmovableCode() override;
@@ -1805,67 +1854,6 @@ class JSEntryStub : public PlatformCodeStub {
};
-class ArgumentsAccessStub: public PlatformCodeStub {
- public:
- enum Type {
- READ_ELEMENT,
- NEW_SLOPPY_FAST,
- NEW_SLOPPY_SLOW,
- NEW_STRICT
- };
-
- ArgumentsAccessStub(Isolate* isolate, Type type) : PlatformCodeStub(isolate) {
- minor_key_ = TypeBits::encode(type);
- }
-
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- if (type() == READ_ELEMENT) {
- return ArgumentsAccessReadDescriptor(isolate());
- } else {
- return ArgumentsAccessNewDescriptor(isolate());
- }
- }
-
- static Type ComputeType(bool is_unmapped, bool has_duplicate_parameters) {
- if (is_unmapped) {
- return Type::NEW_STRICT;
- } else if (has_duplicate_parameters) {
- return Type::NEW_SLOPPY_SLOW;
- } else {
- return Type::NEW_SLOPPY_FAST;
- }
- }
-
- private:
- Type type() const { return TypeBits::decode(minor_key_); }
-
- void GenerateReadElement(MacroAssembler* masm);
- void GenerateNewStrict(MacroAssembler* masm);
- void GenerateNewSloppyFast(MacroAssembler* masm);
- void GenerateNewSloppySlow(MacroAssembler* masm);
-
- void PrintName(std::ostream& os) const override; // NOLINT
-
- class TypeBits : public BitField<Type, 0, 2> {};
-
- DEFINE_PLATFORM_CODE_STUB(ArgumentsAccess, PlatformCodeStub);
-};
-
-
-class RestParamAccessStub : public PlatformCodeStub {
- public:
- explicit RestParamAccessStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- private:
- void GenerateNew(MacroAssembler* masm);
-
- void PrintName(std::ostream& os) const override; // NOLINT
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(RestParamAccess);
- DEFINE_PLATFORM_CODE_STUB(RestParamAccess, PlatformCodeStub);
-};
-
-
class RegExpExecStub: public PlatformCodeStub {
public:
explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
@@ -2096,10 +2084,6 @@ class LoadDictionaryElementStub : public HydrogenCodeStub {
return LoadWithVectorDescriptor(isolate());
}
- LanguageMode language_mode() const {
- return LoadICState::GetLanguageMode(MinorKey());
- }
-
DEFINE_HYDROGEN_CODE_STUB(LoadDictionaryElement, HydrogenCodeStub);
};
@@ -2114,10 +2098,6 @@ class KeyedLoadGenericStub : public HydrogenCodeStub {
Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
InlineCacheState GetICState() const override { return GENERIC; }
- LanguageMode language_mode() const {
- return LoadICState::GetLanguageMode(MinorKey());
- }
-
DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
DEFINE_HYDROGEN_CODE_STUB(KeyedLoadGeneric, HydrogenCodeStub);
@@ -2724,6 +2704,9 @@ class StoreElementStub : public PlatformCodeStub {
StoreElementStub(Isolate* isolate, ElementsKind elements_kind,
KeyedAccessStoreMode mode)
: PlatformCodeStub(isolate) {
+ // TODO(jkummerow): Rename this stub to StoreSlowElementStub,
+ // drop elements_kind parameter.
+ DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind);
minor_key_ = ElementsKindBits::encode(elements_kind) |
CommonStoreModeBits::encode(mode);
}
@@ -2950,6 +2933,15 @@ class ToStringStub final : public PlatformCodeStub {
};
+class ToNameStub final : public PlatformCodeStub {
+ public:
+ explicit ToNameStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ToName);
+ DEFINE_PLATFORM_CODE_STUB(ToName, PlatformCodeStub);
+};
+
+
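The new ToNameStub (exposed through CodeFactory::ToName earlier in this diff) implements the ToName conversion: a value that is already a Name (a String or Symbol) passes through, and anything else goes through ToString. A rough standalone model of that semantics (simplified value types, not the real object model):

    #include <string>
    #include <variant>

    struct Symbol { int id; };
    using Name = std::variant<Symbol, std::string>;
    using Value = std::variant<Symbol, std::string, double>;

    Name ToName(const Value& value) {
      if (const Symbol* sym = std::get_if<Symbol>(&value)) return *sym;        // pass through
      if (const std::string* str = std::get_if<std::string>(&value)) return *str;
      return std::to_string(std::get<double>(value));  // everything else: ToString
    }

    int main() {
      Name n = ToName(Value{3.0});
      (void)n;
    }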
class ToObjectStub final : public HydrogenCodeStub {
public:
explicit ToObjectStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index a57cbb3a5e..692fa64bb6 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -124,18 +124,9 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
CompilationInfo* info) {
Isolate* isolate = info->isolate();
- Code::Flags flags;
- if (info->IsStub() && info->code_stub()) {
- DCHECK_EQ(info->output_code_kind(), info->code_stub()->GetCodeKind());
- flags = Code::ComputeFlags(
- info->output_code_kind(), info->code_stub()->GetICState(),
- info->code_stub()->GetExtraICState(), info->code_stub()->GetStubType());
- } else {
- flags = Code::ComputeFlags(info->output_code_kind());
- }
-
// Allocate and install the code.
CodeDesc desc;
+ Code::Flags flags = info->code_flags();
bool is_crankshafted =
Code::ExtractKindFromFlags(flags) == Code::OPTIMIZED_FUNCTION ||
info->IsStub();
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 307b3b0e42..c47e1b7bee 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -37,16 +37,6 @@
namespace v8 {
namespace internal {
-std::ostream& operator<<(std::ostream& os, const SourcePosition& p) {
- if (p.IsUnknown()) {
- return os << "<?>";
- } else if (FLAG_hydrogen_track_positions) {
- return os << "<" << p.inlining_id() << ":" << p.position() << ">";
- } else {
- return os << "<0:" << p.raw() << ">";
- }
-}
-
#define PARSE_INFO_GETTER(type, name) \
type CompilationInfo::name() const { \
@@ -120,8 +110,8 @@ bool CompilationInfo::has_scope() const {
CompilationInfo::CompilationInfo(ParseInfo* parse_info)
- : CompilationInfo(parse_info, nullptr, nullptr, BASE, parse_info->isolate(),
- parse_info->zone()) {
+ : CompilationInfo(parse_info, nullptr, Code::ComputeFlags(Code::FUNCTION),
+ BASE, parse_info->isolate(), parse_info->zone()) {
// Compiling for the snapshot typically results in different code than
// compiling later on. This means that code recompiled with deoptimization
// support won't be "equivalent" (as defined by SharedFunctionInfo::
@@ -148,23 +138,17 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info)
}
-CompilationInfo::CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone)
- : CompilationInfo(nullptr, stub, CodeStub::MajorName(stub->MajorKey()),
- STUB, isolate, zone) {}
-
CompilationInfo::CompilationInfo(const char* debug_name, Isolate* isolate,
- Zone* zone)
- : CompilationInfo(nullptr, nullptr, debug_name, STUB, isolate, zone) {
- set_output_code_kind(Code::STUB);
-}
+ Zone* zone, Code::Flags code_flags)
+ : CompilationInfo(nullptr, debug_name, code_flags, STUB, isolate, zone) {}
-CompilationInfo::CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
- const char* debug_name, Mode mode,
+CompilationInfo::CompilationInfo(ParseInfo* parse_info, const char* debug_name,
+ Code::Flags code_flags, Mode mode,
Isolate* isolate, Zone* zone)
: parse_info_(parse_info),
isolate_(isolate),
flags_(0),
- code_stub_(code_stub),
+ code_flags_(code_flags),
mode_(mode),
osr_ast_id_(BailoutId::None()),
zone_(zone),
@@ -178,19 +162,7 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
parameter_count_(0),
optimization_id_(-1),
osr_expr_stack_height_(0),
- debug_name_(debug_name) {
- // Parameter count is number of stack parameters.
- if (code_stub_ != NULL) {
- CodeStubDescriptor descriptor(code_stub_);
- parameter_count_ = descriptor.GetStackParameterCount();
- if (descriptor.function_mode() == NOT_JS_FUNCTION_STUB_MODE) {
- parameter_count_--;
- }
- set_output_code_kind(code_stub->GetCodeKind());
- } else {
- set_output_code_kind(Code::FUNCTION);
- }
-}
+ debug_name_(debug_name) {}
CompilationInfo::~CompilationInfo() {
@@ -307,10 +279,13 @@ void CompilationInfo::LogDeoptCallPosition(int pc_offset, int inlining_id) {
base::SmartArrayPointer<char> CompilationInfo::GetDebugName() const {
- if (parse_info()) {
+ if (parse_info() && parse_info()->literal()) {
AllowHandleDereference allow_deref;
return parse_info()->literal()->debug_name()->ToCString();
}
+ if (parse_info() && !parse_info()->shared_info().is_null()) {
+ return parse_info()->shared_info()->DebugName()->ToCString();
+ }
const char* str = debug_name_ ? debug_name_ : "unknown";
size_t len = strlen(str) + 1;
base::SmartArrayPointer<char> name(new char[len]);
@@ -446,10 +421,14 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
if (info()->shared_info()->asm_function()) {
if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
info()->MarkAsFunctionContextSpecializing();
- } else if (info()->has_global_object() &&
- FLAG_native_context_specialization) {
- info()->MarkAsNativeContextSpecializing();
- info()->MarkAsTypingEnabled();
+ } else {
+ if (!FLAG_always_opt) {
+ info()->MarkAsBailoutOnUninitialized();
+ }
+ if (FLAG_native_context_specialization) {
+ info()->MarkAsNativeContextSpecializing();
+ info()->MarkAsTypingEnabled();
+ }
}
if (!info()->shared_info()->asm_function() ||
FLAG_turbo_asm_deoptimization) {
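The new else-branch above makes TurboFan bail out on uninitialized type feedback whenever --always-opt is off, rather than optimizing blind and deopting later. The flag itself is a single bit in CompilationInfo's flag word (see the compiler.h hunk further down); a standalone illustration of that pattern:

    unsigned flags = 0;
    const unsigned kBailoutOnUninitialized = 1 << 19;   // bit value per compiler.h
    flags |= kBailoutOnUninitialized;                    // MarkAsBailoutOnUninitialized()
    bool bail = (flags & kBailoutOnUninitialized) != 0;  // is_bailout_on_uninitialized()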
@@ -755,7 +734,6 @@ static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
}
}
-
static bool CompileUnoptimizedCode(CompilationInfo* info) {
DCHECK(AllowCompilation::IsAllowed(info->isolate()));
if (!Compiler::Analyze(info->parse_info()) ||
@@ -768,32 +746,12 @@ static bool CompileUnoptimizedCode(CompilationInfo* info) {
}
-// TODO(rmcilroy): Remove this temporary work-around when ignition supports
-// catch and eval.
-static bool IgnitionShouldFallbackToFullCodeGen(Scope* scope) {
- if (scope->is_eval_scope() || scope->is_catch_scope() ||
- scope->calls_eval()) {
- return true;
- }
- for (auto inner_scope : *scope->inner_scopes()) {
- if (IgnitionShouldFallbackToFullCodeGen(inner_scope)) return true;
- }
- return false;
-}
-
-
static bool UseIgnition(CompilationInfo* info) {
// Cannot use Ignition when the {function_data} is already used.
if (info->has_shared_info() && info->shared_info()->HasBuiltinFunctionId()) {
return false;
}
- // Checks whether the scope chain is supported.
- if (FLAG_ignition_fallback_on_eval_and_catch &&
- IgnitionShouldFallbackToFullCodeGen(info->scope())) {
- return false;
- }
-
// Checks whether top level functions should be passed by the filter.
if (info->closure().is_null()) {
Vector<const char> filter = CStrVector(FLAG_ignition_filter);
@@ -804,13 +762,39 @@ static bool UseIgnition(CompilationInfo* info) {
return info->closure()->PassesFilter(FLAG_ignition_filter);
}
+static int CodeAndMetadataSize(CompilationInfo* info) {
+ int size = 0;
+ if (info->has_bytecode_array()) {
+ Handle<BytecodeArray> bytecode_array = info->bytecode_array();
+ size += bytecode_array->BytecodeArraySize();
+ size += bytecode_array->constant_pool()->Size();
+ size += bytecode_array->handler_table()->Size();
+ size += bytecode_array->source_position_table()->Size();
+ } else {
+ Handle<Code> code = info->code();
+ size += code->CodeSize();
+ size += code->relocation_info()->Size();
+ size += code->deoptimization_data()->Size();
+ size += code->handler_table()->Size();
+ }
+ return size;
+}
+
static bool GenerateBaselineCode(CompilationInfo* info) {
+ bool success;
if (FLAG_ignition && UseIgnition(info)) {
- return interpreter::Interpreter::MakeBytecode(info);
+ success = interpreter::Interpreter::MakeBytecode(info);
} else {
- return FullCodeGenerator::MakeCode(info);
+ success = FullCodeGenerator::MakeCode(info);
+ }
+ if (success) {
+ Isolate* isolate = info->isolate();
+ Counters* counters = isolate->counters();
+ counters->total_baseline_code_size()->Increment(CodeAndMetadataSize(info));
+ counters->total_baseline_compile_count()->Increment(1);
}
+ return success;
}
@@ -947,10 +931,13 @@ bool Compiler::ParseAndAnalyze(ParseInfo* info) {
static bool GetOptimizedCodeNow(CompilationInfo* info) {
Isolate* isolate = info->isolate();
CanonicalHandleScope canonical(isolate);
+ TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
+ TRACE_EVENT0("v8", "V8.OptimizeCode");
if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
+ TRACE_EVENT0("v8", "V8.RecompileSynchronous");
OptimizedCompileJob job(info);
if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED ||
@@ -976,6 +963,8 @@ static bool GetOptimizedCodeNow(CompilationInfo* info) {
static bool GetOptimizedCodeLater(CompilationInfo* info) {
Isolate* isolate = info->isolate();
CanonicalHandleScope canonical(isolate);
+ TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
+ TRACE_EVENT0("v8", "V8.OptimizeCode");
if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
if (FLAG_trace_concurrent_recompilation) {
@@ -994,6 +983,7 @@ static bool GetOptimizedCodeLater(CompilationInfo* info) {
info->parse_info()->ReopenHandlesInNewHandleScope();
TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+ TRACE_EVENT0("v8", "V8.RecompileSynchronous");
OptimizedCompileJob* job = new (info->zone()) OptimizedCompileJob(info);
OptimizedCompileJob::Status status = job->CreateGraph();
@@ -1033,6 +1023,8 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
Isolate* isolate = function->GetIsolate();
DCHECK(!isolate->has_pending_exception());
DCHECK(!function->is_compiled());
+ TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
+ TRACE_EVENT0("v8", "V8.CompileCode");
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
// If the debugger is active, do not compile with turbofan unless we can
// deopt from turbofan code.
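Several hunks in this file pair an existing TimerEventScope with a new TRACE_EVENT0 so the same compile phases show up both in V8's own timers and in chrome://tracing. Both are scoped RAII objects that record on destruction; the shape of the pattern (hypothetical function, real macros):

    void CompilePhaseSketch(Isolate* isolate) {
      TimerEventScope<TimerEventCompileCode> timer(isolate);  // V8 timer event
      TRACE_EVENT0("v8", "V8.CompileCode");                   // tracing slice
      // ... compile work; both scopes end (and record) at the closing brace.
    }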
@@ -1044,7 +1036,7 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
VMState<COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
- info.SetOptimizing(BailoutId::None(), handle(function->shared()->code()));
+ info.SetOptimizing();
if (GetOptimizedCodeNow(&info)) {
DCHECK(function->shared()->is_compiled());
@@ -1066,9 +1058,8 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
if (FLAG_always_opt) {
Handle<Code> opt_code;
- if (Compiler::GetOptimizedCode(
- function, result,
- Compiler::NOT_CONCURRENT).ToHandle(&opt_code)) {
+ if (Compiler::GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
+ .ToHandle(&opt_code)) {
result = opt_code;
}
}
@@ -1241,6 +1232,8 @@ void Compiler::CompileForLiveEdit(Handle<Script> script) {
static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Isolate* isolate = info->isolate();
+ TimerEventScope<TimerEventCompileCode> timer(isolate);
+ TRACE_EVENT0("v8", "V8.CompileCode");
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
ParseInfo* parse_info = info->parse_info();
@@ -1300,6 +1293,7 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
? info->isolate()->counters()->compile_eval()
: info->isolate()->counters()->compile();
HistogramTimerScope timer(rate);
+ TRACE_EVENT0("v8", info->is_eval() ? "V8.CompileEval" : "V8.Compile");
// Compile the code.
if (!CompileBaselineCode(info)) {
@@ -1470,6 +1464,7 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
!isolate->debug()->is_loaded()) {
// Then check cached code provided by embedder.
HistogramTimerScope timer(isolate->counters()->compile_deserialize());
+ TRACE_EVENT0("v8", "V8.CompileDeserialize");
Handle<SharedFunctionInfo> result;
if (CodeSerializer::Deserialize(isolate, *cached_data, source)
.ToHandle(&result)) {
@@ -1495,6 +1490,9 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
if (natives == NATIVES_CODE) {
script->set_type(Script::TYPE_NATIVE);
script->set_hide_source(true);
+ } else if (natives == EXTENSION_CODE) {
+ script->set_type(Script::TYPE_EXTENSION);
+ script->set_hide_source(true);
}
if (!script_name.is_null()) {
script->set_name(*script_name);
@@ -1535,6 +1533,7 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
compile_options == ScriptCompiler::kProduceCodeCache) {
HistogramTimerScope histogram_timer(
isolate->counters()->compile_serialize());
+ TRACE_EVENT0("v8", "V8.CompileSerialize");
*cached_data = CodeSerializer::Serialize(isolate, result, source);
if (FLAG_profile_deserialization) {
PrintF("[Compiling and serializing took %0.3f ms]\n",
@@ -1635,6 +1634,8 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
bool lazy = FLAG_lazy && allow_lazy && !literal->should_eager_compile();
// Generate code
+ TimerEventScope<TimerEventCompileCode> timer(isolate);
+ TRACE_EVENT0("v8", "V8.CompileCode");
Handle<ScopeInfo> scope_info;
if (lazy) {
Handle<Code> code = isolate->builtins()->CompileLazy();
@@ -1700,9 +1701,39 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
return existing;
}
+Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForNative(
+ v8::Extension* extension, Handle<String> name) {
+ Isolate* isolate = name->GetIsolate();
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+
+ // Compute the function template for the native function.
+ v8::Local<v8::FunctionTemplate> fun_template =
+ extension->GetNativeFunctionTemplate(v8_isolate,
+ v8::Utils::ToLocal(name));
+ DCHECK(!fun_template.IsEmpty());
+
+ // Instantiate the function and create a shared function info from it.
+ Handle<JSFunction> fun = Handle<JSFunction>::cast(Utils::OpenHandle(
+ *fun_template->GetFunction(v8_isolate->GetCurrentContext())
+ .ToLocalChecked()));
+ const int literals = fun->NumberOfLiterals();
+ Handle<Code> code = Handle<Code>(fun->shared()->code());
+ Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
+ Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
+ name, literals, FunctionKind::kNormalFunction, code,
+ Handle<ScopeInfo>(fun->shared()->scope_info()),
+ Handle<TypeFeedbackVector>(fun->shared()->feedback_vector()));
+ shared->set_construct_stub(*construct_stub);
+
+ // Copy the function data to the shared function info.
+ shared->set_function_data(fun->shared()->function_data());
+ int parameters = fun->shared()->internal_formal_parameter_count();
+ shared->set_internal_formal_parameter_count(parameters);
+
+ return shared;
+}
MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
- Handle<Code> current_code,
ConcurrencyMode mode,
BailoutId osr_ast_id,
JavaScriptFrame* osr_frame) {
@@ -1726,6 +1757,7 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
DCHECK(AllowCompilation::IsAllowed(isolate));
+ Handle<Code> current_code(shared->code());
if (!shared->is_compiled() ||
shared->scope_info() == ScopeInfo::Empty(isolate)) {
// The function was never compiled. Compile it unoptimized first.
@@ -1758,7 +1790,7 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
- info->SetOptimizing(osr_ast_id, current_code);
+ info->SetOptimizingForOsr(osr_ast_id, current_code);
if (mode == CONCURRENT) {
if (GetOptimizedCodeLater(info.get())) {
@@ -1774,8 +1806,8 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
return MaybeHandle<Code>();
}
-
-Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
+MaybeHandle<Code> Compiler::GetConcurrentlyOptimizedCode(
+ OptimizedCompileJob* job) {
// Take ownership of compilation info. Deleting compilation info
// also tears down the zone and the recompile job.
base::SmartPointer<CompilationInfo> info(job->info());
@@ -1783,6 +1815,7 @@ Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
VMState<COMPILER> state(isolate);
TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+ TRACE_EVENT0("v8", "V8.RecompileSynchronous");
Handle<SharedFunctionInfo> shared = info->shared_info();
shared->code()->set_profiler_ticks(0);
@@ -1820,7 +1853,7 @@ Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
info->closure()->ShortPrint();
PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
}
- return Handle<Code>::null();
+ return MaybeHandle<Code>();
}
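GetConcurrentlyOptimizedCode now returns MaybeHandle<Code> instead of a possibly-null Handle, and the header hunk below adds MUST_USE_RESULT, so callers are forced to test for failure. The consuming pattern, sketched with assumed surrounding code:

    MaybeHandle<Code> maybe = Compiler::GetConcurrentlyOptimizedCode(job);
    Handle<Code> code;
    if (maybe.ToHandle(&code)) {
      function->ReplaceCode(*code);  // install the optimized code
    } else {
      // Bailed out: the function keeps running its unoptimized code.
    }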
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 9b439397c3..a56fa13c48 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -10,79 +10,18 @@
#include "src/bailout-reason.h"
#include "src/compilation-dependencies.h"
#include "src/signature.h"
+#include "src/source-position.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
-class AstValueFactory;
-class HydrogenCodeStub;
+// Forward declarations.
class JavaScriptFrame;
class ParseInfo;
class ScriptData;
-// This class encapsulates encoding and decoding of sources positions from
-// which hydrogen values originated.
-// When FLAG_track_hydrogen_positions is set this object encodes the
-// identifier of the inlining and absolute offset from the start of the
-// inlined function.
-// When the flag is not set we simply track absolute offset from the
-// script start.
-class SourcePosition {
- public:
- static SourcePosition Unknown() {
- return SourcePosition::FromRaw(kNoPosition);
- }
-
- bool IsUnknown() const { return value_ == kNoPosition; }
-
- uint32_t position() const { return PositionField::decode(value_); }
- void set_position(uint32_t position) {
- if (FLAG_hydrogen_track_positions) {
- value_ = static_cast<uint32_t>(PositionField::update(value_, position));
- } else {
- value_ = position;
- }
- }
-
- uint32_t inlining_id() const { return InliningIdField::decode(value_); }
- void set_inlining_id(uint32_t inlining_id) {
- if (FLAG_hydrogen_track_positions) {
- value_ =
- static_cast<uint32_t>(InliningIdField::update(value_, inlining_id));
- }
- }
-
- uint32_t raw() const { return value_; }
-
- private:
- static const uint32_t kNoPosition =
- static_cast<uint32_t>(RelocInfo::kNoPosition);
- typedef BitField<uint32_t, 0, 9> InliningIdField;
-
- // Offset from the start of the inlined function.
- typedef BitField<uint32_t, 9, 23> PositionField;
-
- friend class HPositionInfo;
- friend class Deoptimizer;
-
- static SourcePosition FromRaw(uint32_t raw_position) {
- SourcePosition position;
- position.value_ = raw_position;
- return position;
- }
-
- // If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField
- // and PositionField.
- // Otherwise contains absolute offset from the script start.
- uint32_t value_;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const SourcePosition& p);
-
-
struct InlinedFunctionInfo {
InlinedFunctionInfo(int parent_id, SourcePosition inline_position,
int script_id, int start_position)
@@ -125,11 +64,12 @@ class CompilationInfo {
kDeoptimizationEnabled = 1 << 16,
kSourcePositionsEnabled = 1 << 17,
kFirstCompile = 1 << 18,
+ kBailoutOnUninitialized = 1 << 19,
};
explicit CompilationInfo(ParseInfo* parse_info);
- CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone);
- CompilationInfo(const char* debug_name, Isolate* isolate, Zone* zone);
+ CompilationInfo(const char* debug_name, Isolate* isolate, Zone* zone,
+ Code::Flags code_flags = Code::ComputeFlags(Code::STUB));
virtual ~CompilationInfo();
ParseInfo* parse_info() const { return parse_info_; }
@@ -159,7 +99,7 @@ class CompilationInfo {
Zone* zone() { return zone_; }
bool is_osr() const { return !osr_ast_id_.IsNone(); }
Handle<Code> code() const { return code_; }
- CodeStub* code_stub() const { return code_stub_; }
+ Code::Flags code_flags() const { return code_flags_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
Handle<Code> unoptimized_code() const { return unoptimized_code_; }
int opt_count() const { return opt_count_; }
@@ -268,12 +208,18 @@ class CompilationInfo {
bool is_first_compile() const { return GetFlag(kFirstCompile); }
+ void MarkAsBailoutOnUninitialized() { SetFlag(kBailoutOnUninitialized); }
+
+ bool is_bailout_on_uninitialized() const {
+ return GetFlag(kBailoutOnUninitialized);
+ }
+
bool GeneratePreagedPrologue() const {
// Generate a pre-aged prologue if we are optimizing for size, which
// will make code flushing more aggressive. Only apply to Code::FUNCTION,
// since StaticMarkingVisitor::IsFlushable only flushes proper functions.
return FLAG_optimize_for_size && FLAG_age_code && !will_serialize() &&
- !is_debug() && output_code_kind_ == Code::FUNCTION;
+ !is_debug() && output_code_kind() == Code::FUNCTION;
}
void EnsureFeedbackVector();
@@ -308,13 +254,17 @@ class CompilationInfo {
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsStub() const { return mode_ == STUB; }
- void SetOptimizing(BailoutId osr_ast_id, Handle<Code> unoptimized) {
+ void SetOptimizing() {
DCHECK(has_shared_info());
SetMode(OPTIMIZE);
+ optimization_id_ = isolate()->NextOptimizationId();
+ code_flags_ =
+ Code::KindField::update(code_flags_, Code::OPTIMIZED_FUNCTION);
+ }
+ void SetOptimizingForOsr(BailoutId osr_ast_id, Handle<Code> unoptimized) {
+ SetOptimizing();
osr_ast_id_ = osr_ast_id;
unoptimized_code_ = unoptimized;
- optimization_id_ = isolate()->NextOptimizationId();
- set_output_code_kind(Code::OPTIMIZED_FUNCTION);
}
// Deoptimization support.
@@ -423,9 +373,9 @@ class CompilationInfo {
base::SmartArrayPointer<char> GetDebugName() const;
- Code::Kind output_code_kind() const { return output_code_kind_; }
-
- void set_output_code_kind(Code::Kind kind) { output_code_kind_ = kind; }
+ Code::Kind output_code_kind() const {
+ return Code::ExtractKindFromFlags(code_flags_);
+ }
protected:
ParseInfo* parse_info_;
@@ -446,8 +396,8 @@ class CompilationInfo {
STUB
};
- CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
- const char* debug_name, Mode mode, Isolate* isolate,
+ CompilationInfo(ParseInfo* parse_info, const char* debug_name,
+ Code::Flags code_flags, Mode mode, Isolate* isolate,
Zone* zone);
Isolate* isolate_;
@@ -466,10 +416,8 @@ class CompilationInfo {
unsigned flags_;
- Code::Kind output_code_kind_;
+ Code::Flags code_flags_;
- // For compiled stubs, the stub object
- CodeStub* code_stub_;
// The compiled code.
Handle<Code> code_;
@@ -683,19 +631,24 @@ class Compiler : public AllStatic {
static Handle<SharedFunctionInfo> GetSharedFunctionInfo(
FunctionLiteral* node, Handle<Script> script, CompilationInfo* outer);
+ // Create a shared function info object for a native function literal.
+ static Handle<SharedFunctionInfo> GetSharedFunctionInfoForNative(
+ v8::Extension* extension, Handle<String> name);
+
enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
// Generate and return optimized code or start a concurrent optimization job.
// In the latter case, return the InOptimizationQueue builtin. On failure,
// return the empty handle.
MUST_USE_RESULT static MaybeHandle<Code> GetOptimizedCode(
- Handle<JSFunction> function, Handle<Code> current_code,
- ConcurrencyMode mode, BailoutId osr_ast_id = BailoutId::None(),
+ Handle<JSFunction> function, ConcurrencyMode mode,
+ BailoutId osr_ast_id = BailoutId::None(),
JavaScriptFrame* osr_frame = nullptr);
// Generate and return code from previously queued optimization job.
// On failure, return the empty handle.
- static Handle<Code> GetConcurrentlyOptimizedCode(OptimizedCompileJob* job);
+ MUST_USE_RESULT static MaybeHandle<Code> GetConcurrentlyOptimizedCode(
+ OptimizedCompileJob* job);
};
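With the compiler.h changes above, the OSR-specific state moves out of SetOptimizing into a dedicated SetOptimizingForOsr that layers on top of it. Call-site shape, sketched (the info and function variables are assumptions):

    info->SetOptimizing();  // kind flag becomes OPTIMIZED_FUNCTION, fresh opt id

    // On-stack replacement additionally records the entry point and the
    // unoptimized code to patch against:
    info->SetOptimizingForOsr(osr_ast_id, handle(function->shared()->code()));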
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index ebd2789151..722bbf020e 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -6,9 +6,9 @@
#include "src/contexts.h"
#include "src/frames.h"
+#include "src/handles-inl.h"
#include "src/heap/heap.h"
#include "src/type-cache.h"
-#include "src/types-inl.h"
namespace v8 {
namespace internal {
@@ -268,20 +268,16 @@ FieldAccess AccessBuilder::ForValue() {
// static
FieldAccess AccessBuilder::ForArgumentsLength() {
- int offset =
- JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
- FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase, JSArgumentsObject::kLengthOffset,
+ Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
return access;
}
// static
FieldAccess AccessBuilder::ForArgumentsCallee() {
- int offset =
- JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
- FieldAccess access = {kTaggedBase, offset, Handle<Name>(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {kTaggedBase, JSSloppyArgumentsObject::kCalleeOffset,
+ Handle<Name>(), Type::Any(), MachineType::AnyTagged()};
return access;
}
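The access-builder.cc hunks swap hand-computed offsets for the new named constants. Presumably the constants are defined to equal exactly what the old expressions produced; a sanity-check sketch of that assumed relationship:

    // Assumed equivalence, per the hunk above; not verified here.
    STATIC_ASSERT(JSArgumentsObject::kLengthOffset ==
                  JSObject::kHeaderSize +
                      Heap::kArgumentsLengthIndex * kPointerSize);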
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 612170e5b1..4a2a857029 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -8,9 +8,9 @@
#include "src/compilation-dependencies.h"
#include "src/compiler/access-info.h"
#include "src/field-index-inl.h"
+#include "src/field-type.h"
#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
#include "src/type-cache.h"
-#include "src/types-inl.h"
namespace v8 {
namespace internal {
@@ -232,6 +232,9 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
// Compute the receiver type.
Handle<Map> receiver_map = map;
+ // Property lookups require the name to be internalized.
+ name = isolate()->factory()->InternalizeName(name);
+
// We support fast inline cases for certain JSObject getters.
if (access_mode == AccessMode::kLoad &&
LookupSpecialFieldAccessor(map, name, access_info)) {
@@ -242,7 +245,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
do {
// Lookup the named property on the {map}.
Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate());
- int const number = descriptors->SearchWithCache(*name, *map);
+ int const number = descriptors->SearchWithCache(isolate(), *name, *map);
if (number != DescriptorArray::kNotFound) {
PropertyDetails const details = descriptors->GetDetails(number);
if (access_mode == AccessMode::kStore) {
@@ -277,8 +280,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
field_type = Type::Intersect(
- Type::Convert<HeapType>(
- handle(descriptors->GetFieldType(number), isolate()), zone()),
+ descriptors->GetFieldType(number)->Convert(zone()),
Type::TaggedPointer(), zone());
if (field_type->Is(Type::None())) {
// Store is not safe if the field type was cleared.
@@ -454,10 +456,7 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
field_type = Type::Intersect(
- Type::Convert<HeapType>(
- handle(
- transition_map->instance_descriptors()->GetFieldType(number),
- isolate()),
+ transition_map->instance_descriptors()->GetFieldType(number)->Convert(
zone()),
Type::TaggedPointer(), zone());
if (field_type->Is(Type::None())) {
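The InternalizeName call added above matters because descriptor-array lookups compare names by pointer identity: internalized strings are unique per content, so identity equality implies content equality. Illustrative sketch (not the real lookup code):

    // A non-internalized "foo" is a different heap object than the
    // internalized "foo" stored in the map's descriptors, so it could
    // miss despite equal contents. Internalizing first canonicalizes it.
    Handle<Name> key = isolate()->factory()->InternalizeName(name);
    int entry = descriptors->SearchWithCache(isolate(), *key, *map);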
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 9b074b05cc..bdf4c47165 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -206,6 +206,19 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OutOfLineCode(gen),
object_(object),
index_(index),
+ index_immediate_(0),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t index,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ index_(no_reg),
+ index_immediate_(index),
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
@@ -215,24 +228,36 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
- if (mode_ > RecordWriteMode::kValueIsMap) {
- __ CheckPageFlag(value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingMask, eq,
- exit());
- }
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+ : OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- // TODO(turbofan): Once we get frame elision working, we need to save
- // and restore lr properly here if the frame was elided.
+ if (!frame()->needs_frame()) {
+ // We need to save and restore lr if the frame was elided.
+ __ Push(lr);
+ }
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- EMIT_REMEMBERED_SET, save_fp_mode);
- __ add(scratch1_, object_, index_);
+ remembered_set_action, save_fp_mode);
+ if (index_.is(no_reg)) {
+ __ add(scratch1_, object_, Operand(index_immediate_));
+ } else {
+ DCHECK_EQ(0, index_immediate_);
+ __ add(scratch1_, object_, Operand(index_));
+ }
__ CallStub(&stub);
+ if (!frame()->needs_frame()) {
+ __ Pop(lr);
+ }
}
private:
Register const object_;
Register const index_;
+ int32_t const index_immediate_; // Valid if index_.is(no_reg).
Register const value_;
Register const scratch0_;
Register const scratch1_;
@@ -449,11 +474,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
- case kArchLazyBailout: {
- EnsureSpaceForLazyDeopt();
- RecordCallPosition(instr);
- break;
- }
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -514,6 +534,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ mov(i.OutputRegister(), fp);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->frame()->needs_frame()) {
+ __ ldr(i.OutputRegister(), MemOperand(fp, 0));
+ } else {
+ __ mov(i.OutputRegister(), fp);
+ }
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -522,19 +549,43 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register scratch0 = i.TempRegister(0);
Register scratch1 = i.TempRegister(1);
- auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
- scratch0, scratch1, mode);
- __ str(value, MemOperand(object, index));
+ OutOfLineRecordWrite* ool;
+
+ AddressingMode addressing_mode =
+ AddressingModeField::decode(instr->opcode());
+ if (addressing_mode == kMode_Offset_RI) {
+ int32_t index = i.InputInt32(1);
+ ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ str(value, MemOperand(object, index));
+ } else {
+ DCHECK_EQ(kMode_Offset_RR, addressing_mode);
+ Register index(i.InputRegister(1));
+ ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
+ scratch0, scratch1, mode);
+ __ str(value, MemOperand(object, index));
+ }
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
__ bind(ool->exit());
break;
}
+ case kArchStackSlot: {
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ Register base;
+ if (offset.from_stack_pointer()) {
+ base = sp;
+ } else {
+ base = fp;
+ }
+ __ add(i.OutputRegister(0), base, Operand(offset.offset()));
+ break;
+ }
case kArmAdd:
__ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
i.OutputSBit());
@@ -622,6 +673,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmSbfx: {
+ CpuFeatureScope scope(masm(), ARMv7);
+ __ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
case kArmSxtb:
__ sxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -658,6 +716,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputInt32(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArmRbit: {
+ CpuFeatureScope scope(masm(), ARMv7);
+ __ rbit(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
case kArmClz:
__ clz(i.OutputRegister(), i.InputRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -831,6 +895,20 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmVcvtF32S32: {
+ SwVfpRegister scratch = kScratchDoubleReg.low();
+ __ vmov(scratch, i.InputRegister(0));
+ __ vcvt_f32_s32(i.OutputFloat32Register(), scratch);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVcvtF32U32: {
+ SwVfpRegister scratch = kScratchDoubleReg.low();
+ __ vmov(scratch, i.InputRegister(0));
+ __ vcvt_f32_u32(i.OutputFloat32Register(), scratch);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
case kArmVcvtF64S32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
@@ -845,6 +923,20 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmVcvtS32F32: {
+ SwVfpRegister scratch = kScratchDoubleReg.low();
+ __ vcvt_s32_f32(scratch, i.InputFloat32Register(0));
+ __ vmov(i.OutputRegister(), scratch);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
+ case kArmVcvtU32F32: {
+ SwVfpRegister scratch = kScratchDoubleReg.low();
+ __ vcvt_u32_f32(scratch, i.InputFloat32Register(0));
+ __ vmov(i.OutputRegister(), scratch);
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ }
case kArmVcvtS32F64: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
@@ -1098,8 +1190,6 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- // TODO(titzer): cannot address target function == local #-1
- __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
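Two of the ARM hunks above hinge on frame elision. kArchParentFramePointer can only load the caller's fp from the frame base if a frame was actually pushed, and the record-write stub call must spill lr by hand when it wasn't. Sketch, with has_frame and dst as hypothetical stand-ins:

    // push {fp, lr} leaves the caller's fp at [fp, #0] and lr at [fp, #4].
    if (has_frame) {
      __ ldr(dst, MemOperand(fp, 0));  // saved caller fp at the frame base
    } else {
      __ mov(dst, fp);                 // frame elided: fp is the parent's
    }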
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index 401100be75..50fa555eb5 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -36,6 +36,7 @@ namespace compiler {
V(ArmMvn) \
V(ArmBfc) \
V(ArmUbfx) \
+ V(ArmSbfx) \
V(ArmSxtb) \
V(ArmSxth) \
V(ArmSxtab) \
@@ -43,6 +44,7 @@ namespace compiler {
V(ArmUxtb) \
V(ArmUxth) \
V(ArmUxtab) \
+ V(ArmRbit) \
V(ArmUxtah) \
V(ArmVcmpF32) \
V(ArmVaddF32) \
@@ -76,8 +78,12 @@ namespace compiler {
V(ArmVrintnF64) \
V(ArmVcvtF32F64) \
V(ArmVcvtF64F32) \
+ V(ArmVcvtF32S32) \
+ V(ArmVcvtF32U32) \
V(ArmVcvtF64S32) \
V(ArmVcvtF64U32) \
+ V(ArmVcvtS32F32) \
+ V(ArmVcvtU32F32) \
V(ArmVcvtS32F64) \
V(ArmVcvtU32F64) \
V(ArmVmovLowU32F64) \
@@ -100,7 +106,6 @@ namespace compiler {
V(ArmPush) \
V(ArmPoke)
-
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index f36802ceb3..d950e8c97d 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -38,6 +38,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmMvn:
case kArmBfc:
case kArmUbfx:
+ case kArmSbfx:
case kArmSxtb:
case kArmSxth:
case kArmSxtab:
@@ -46,6 +47,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmUxth:
case kArmUxtab:
case kArmUxtah:
+ case kArmRbit:
case kArmVcmpF32:
case kArmVaddF32:
case kArmVsubF32:
@@ -78,8 +80,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmVrintnF64:
case kArmVcvtF32F64:
case kArmVcvtF64F32:
+ case kArmVcvtF32S32:
+ case kArmVcvtF32U32:
case kArmVcvtF64S32:
case kArmVcvtF64U32:
+ case kArmVcvtS32F32:
+ case kArmVcvtU32F32:
case kArmVcvtS32F64:
case kArmVcvtU32F64:
case kArmVmovLowU32F64:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index f3deae7d75..14b30b1af0 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -327,8 +327,9 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kArmLdr;
break;
- case MachineRepresentation::kNone: // Fall through.
- case MachineRepresentation::kWord64:
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -355,10 +356,19 @@ void InstructionSelector::VisitStore(Node* node) {
if (write_barrier_kind != kNoWriteBarrier) {
DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
+ // OutOfLineRecordWrite uses the index in an 'add' instruction as well as
+ // for the store itself, so we must check compatibility with both.
+ if (g.CanBeImmediate(index, kArmAdd) && g.CanBeImmediate(index, kArmStr)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_Offset_RI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_Offset_RR;
+ }
inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
? g.UseRegister(value)
: g.UseUniqueRegister(value);
@@ -380,6 +390,7 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t const temp_count = arraysize(temps);
InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
@@ -402,8 +413,9 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kArmStr;
break;
- case MachineRepresentation::kNone: // Fall through.
- case MachineRepresentation::kWord64:
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
UNREACHABLE();
return;
}
@@ -442,9 +454,10 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -483,9 +496,10 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -551,43 +565,67 @@ void InstructionSelector::VisitWord32And(Node* node) {
if (m.right().HasValue()) {
uint32_t const value = m.right().Value();
uint32_t width = base::bits::CountPopulation32(value);
- uint32_t msb = base::bits::CountLeadingZeros32(value);
- // Try to interpret this AND as UBFX.
- if (IsSupported(ARMv7) && width != 0 && msb + width == 32) {
- DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
- if (m.left().IsWord32Shr()) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().IsInRange(0, 31)) {
- // UBFX cannot extract bits past the register size, however since
- // shifting the original value would have introduced some zeros we can
- // still use UBFX with a smaller mask and the remaining bits will be
- // zeros.
- uint32_t const lsb = mleft.right().Value();
- return EmitUbfx(this, node, mleft.left().node(), lsb,
- std::min(width, 32 - lsb));
+ uint32_t leading_zeros = base::bits::CountLeadingZeros32(value);
+
+ // Try to merge SHR operations on the left hand input into this AND.
+ if (m.left().IsWord32Shr()) {
+ Int32BinopMatcher mshr(m.left().node());
+ if (mshr.right().HasValue()) {
+ uint32_t const shift = mshr.right().Value();
+
+ if (((shift == 8) || (shift == 16) || (shift == 24)) &&
+ ((value == 0xff) || (value == 0xffff))) {
+ // Merge SHR into AND by emitting a UXTB or UXTH instruction with a
+ // bytewise rotation.
+ Emit((value == 0xff) ? kArmUxtb : kArmUxth,
+ g.DefineAsRegister(m.node()), g.UseRegister(mshr.left().node()),
+ g.TempImmediate(mshr.right().Value()));
+ return;
+ } else if (IsSupported(ARMv7) && (width != 0) &&
+ ((leading_zeros + width) == 32)) {
+ // Merge Shr into And by emitting a UBFX instruction.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
+ if ((1 <= shift) && (shift <= 31)) {
+ // UBFX cannot extract bits past the register size, however since
+ // shifting the original value would have introduced some zeros we
+ // can still use UBFX with a smaller mask and the remaining bits
+ // will be zeros.
+ EmitUbfx(this, node, mshr.left().node(), shift,
+ std::min(width, 32 - shift));
+ return;
+ }
}
}
- return EmitUbfx(this, node, m.left().node(), 0, width);
+ } else if (value == 0xffff) {
+ // Emit UXTH for this AND. We don't bother testing for UXTB, as it's no
+ // better than AND 0xff for this operation.
+ Emit(kArmUxth, g.DefineAsRegister(m.node()),
+ g.UseRegister(m.left().node()), g.TempImmediate(0));
+ return;
}
- // Try to interpret this AND as BIC.
if (g.CanBeImmediate(~value)) {
+ // Emit BIC for this AND by inverting the immediate value first.
Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.TempImmediate(~value));
return;
}
- // Try to interpret this AND as UXTH.
- if (value == 0xffff) {
- Emit(kArmUxth, g.DefineAsRegister(m.node()),
- g.UseRegister(m.left().node()), g.TempImmediate(0));
- return;
- }
- // Try to interpret this AND as BFC.
- if (IsSupported(ARMv7)) {
+ if (!g.CanBeImmediate(value) && IsSupported(ARMv7)) {
+ // If value has 9 to 23 contiguous set bits, and has the lsb set, we can
+ // replace this AND with UBFX. Other contiguous bit patterns have already
+ // been handled by BIC or will be handled by AND.
+ if ((width != 0) && ((leading_zeros + width) == 32) &&
+ (9 <= leading_zeros) && (leading_zeros <= 23)) {
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(value));
+ EmitUbfx(this, node, m.left().node(), 0, width);
+ return;
+ }
+
width = 32 - width;
- msb = base::bits::CountLeadingZeros32(~value);
+ leading_zeros = base::bits::CountLeadingZeros32(~value);
uint32_t lsb = base::bits::CountTrailingZeros32(~value);
- if (msb + width + lsb == 32) {
+ if ((leading_zeros + width + lsb) == 32) {
+ // This AND can be replaced with BFC.
Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
g.TempImmediate(lsb), g.TempImmediate(width));
return;
@@ -699,14 +737,23 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
Int32BinopMatcher m(node);
if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().Is(16) && m.right().Is(16)) {
- Emit(kArmSxth, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(0));
- return;
- } else if (mleft.right().Is(24) && m.right().Is(24)) {
- Emit(kArmSxtb, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(0));
- return;
+ if (m.right().HasValue() && mleft.right().HasValue()) {
+ uint32_t sar = m.right().Value();
+ uint32_t shl = mleft.right().Value();
+ if ((sar == shl) && (sar == 16)) {
+ Emit(kArmSxth, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+ return;
+ } else if ((sar == shl) && (sar == 24)) {
+ Emit(kArmSxtb, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+ return;
+ } else if (IsSupported(ARMv7) && (sar >= shl)) {
+ Emit(kArmSbfx, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(sar - shl),
+ g.TempImmediate(32 - sar));
+ return;
+ }
}
}
VisitShift(this, node, TryMatchASR);
@@ -726,6 +773,12 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBits(Node* node) {
+ DCHECK(IsSupported(ARMv7));
+ VisitRR(this, kArmRbit, node);
+}
+
+
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
@@ -921,6 +974,16 @@ void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
}
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ VisitRR(this, kArmVcvtF32S32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ VisitRR(this, kArmVcvtF32U32, node);
+}
+
+
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
VisitRR(this, kArmVcvtF64S32, node);
}
@@ -931,6 +994,16 @@ void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
}
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ VisitRR(this, kArmVcvtS32F32, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ VisitRR(this, kArmVcvtU32F32, node);
+}
+
+
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
VisitRR(this, kArmVcvtS32F64, node);
}
@@ -1591,6 +1664,9 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe;
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ flags |= MachineOperatorBuilder::kWord32ReverseBits;
+ }
if (CpuFeatures::IsSupported(ARMv8)) {
flags |= MachineOperatorBuilder::kFloat32RoundDown |
MachineOperatorBuilder::kFloat64RoundDown |
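The reworked VisitWord32And picks between UXTB/UXTH, UBFX, BIC and BFC purely from the shape of the mask. A worked example of the new UBFX-at-bit-0 case, with a value chosen to satisfy the hunk's conditions:

    // x & 0x000fffff on ARMv7:
    //   width         = CountPopulation32(0x000fffff) = 20
    //   leading_zeros = CountLeadingZeros32(0x000fffff) = 12
    //   leading_zeros + width == 32, 9 <= leading_zeros <= 23, and
    //   0x000fffff is not an ARM rotated immediate, so the AND lowers to:
    //     ubfx dst, src, #0, #20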
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index d356195ecf..e45c677619 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -270,7 +270,7 @@ class OutOfLineLoadZero final : public OutOfLineCode {
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
- OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand index,
Register value, Register scratch0, Register scratch1,
RecordWriteMode mode)
: OutOfLineCode(gen),
@@ -285,24 +285,30 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
- if (mode_ > RecordWriteMode::kValueIsMap) {
- __ CheckPageFlagClear(value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingMask,
- exit());
- }
+ __ CheckPageFlagClear(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ exit());
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+ : OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- // TODO(turbofan): Once we get frame elision working, we need to save
- // and restore lr properly here if the frame was elided.
+ if (!frame()->needs_frame()) {
+ // We need to save and restore lr if the frame was elided.
+ __ Push(lr);
+ }
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- EMIT_REMEMBERED_SET, save_fp_mode);
+ remembered_set_action, save_fp_mode);
__ Add(scratch1_, object_, index_);
__ CallStub(&stub);
+ if (!frame()->needs_frame()) {
+ __ Pop(lr);
+ }
}
private:
Register const object_;
- Register const index_;
+ Operand const index_;
Register const value_;
Register const scratch0_;
Register const scratch1_;
@@ -488,7 +494,8 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Arm64OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
- switch (ArchOpcodeField::decode(opcode)) {
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+ switch (arch_opcode) {
case kArchCallCodeObject: {
EnsureSpaceForLazyDeopt();
if (instr->InputAt(0)->IsImmediate()) {
@@ -499,6 +506,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Call(target);
}
+ // TODO(titzer): this is ugly. JSSP should be a caller-save register
+ // in this case, but it is not possible to express in the register
+ // allocator.
+ CallDescriptor::Flags flags =
+ static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
+ if (flags & CallDescriptor::kRestoreJSSP) {
+ __ mov(jssp, csp);
+ }
frame_access_state()->ClearSPDelta();
RecordCallPosition(instr);
break;
@@ -530,6 +545,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(x10);
+ // TODO(titzer): this is ugly. JSSP should be a caller-save register
+ // in this case, but it is not possible to express in the register
+ // allocator.
+ CallDescriptor::Flags flags =
+ static_cast<CallDescriptor::Flags>(MiscField::decode(opcode));
+ if (flags & CallDescriptor::kRestoreJSSP) {
+ __ mov(jssp, csp);
+ }
frame_access_state()->ClearSPDelta();
RecordCallPosition(instr);
break;
@@ -551,11 +574,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
- case kArchLazyBailout: {
- EnsureSpaceForLazyDeopt();
- RecordCallPosition(instr);
- break;
- }
case kArchPrepareCallCFunction:
// We don't need kArchPrepareCallCFunction on arm64 as the instruction
// selector already performs a Claim to reserve space on the stack and
@@ -609,14 +627,29 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchFramePointer:
__ mov(i.OutputRegister(), fp);
break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->frame()->needs_frame()) {
+ __ ldr(i.OutputRegister(), MemOperand(fp, 0));
+ } else {
+ __ mov(i.OutputRegister(), fp);
+ }
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ AddressingMode addressing_mode =
+ AddressingModeField::decode(instr->opcode());
Register object = i.InputRegister(0);
- Register index = i.InputRegister(1);
+ Operand index(0);
+ if (addressing_mode == kMode_MRI) {
+ index = Operand(i.InputInt64(1));
+ } else {
+ DCHECK_EQ(addressing_mode, kMode_MRR);
+ index = Operand(i.InputRegister(1));
+ }
Register value = i.InputRegister(2);
Register scratch0 = i.TempRegister(0);
Register scratch1 = i.TempRegister(1);
@@ -629,6 +662,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Bind(ool->exit());
break;
}
+ case kArchStackSlot: {
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ Register base;
+ if (offset.from_stack_pointer()) {
+ base = __ StackPointer();
+ } else {
+ base = fp;
+ }
+ __ Add(i.OutputRegister(0), base, Operand(offset.offset()));
+ break;
+ }
case kArm64Float32RoundDown:
__ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
@@ -885,18 +930,41 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64CompareAndBranch32:
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
- case kArm64ClaimForCallArguments: {
- __ Claim(i.InputInt32(0));
- frame_access_state()->IncreaseSPDelta(i.InputInt32(0));
+ case kArm64ClaimCSP: {
+ int count = i.InputInt32(0);
+ Register prev = __ StackPointer();
+ __ SetStackPointer(csp);
+ __ Claim(count);
+ __ SetStackPointer(prev);
+ frame_access_state()->IncreaseSPDelta(count);
+ break;
+ }
+ case kArm64ClaimJSSP: {
+ int count = i.InputInt32(0);
+ if (csp.Is(__ StackPointer())) {
+ // No JSP is set up. Compute it from the CSP.
+ int even = RoundUp(count, 2);
+ __ Sub(jssp, csp, count * kPointerSize);
+ __ Sub(csp, csp, even * kPointerSize); // Must always be aligned.
+ frame_access_state()->IncreaseSPDelta(even);
+ } else {
+ // JSSP is the current stack pointer, just use regular Claim().
+ __ Claim(count);
+ frame_access_state()->IncreaseSPDelta(count);
+ }
break;
}
- case kArm64Poke: {
+ case kArm64PokeCSP: // fall through
+ case kArm64PokeJSSP: {
+ Register prev = __ StackPointer();
+ __ SetStackPointer(arch_opcode == kArm64PokeCSP ? csp : jssp);
Operand operand(i.InputInt32(1) * kPointerSize);
if (instr->InputAt(0)->IsDoubleRegister()) {
__ Poke(i.InputFloat64Register(0), operand);
} else {
__ Poke(i.InputRegister(0), operand);
}
+ __ SetStackPointer(prev);
break;
}
case kArm64PokePair: {
@@ -916,6 +984,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Clz32:
__ Clz(i.OutputRegister32(), i.InputRegister32(0));
break;
+ case kArm64Rbit:
+ __ Rbit(i.OutputRegister64(), i.InputRegister64(0));
+ break;
+ case kArm64Rbit32:
+ __ Rbit(i.OutputRegister32(), i.InputRegister32(0));
+ break;
case kArm64Cmp:
__ Cmp(i.InputOrZeroRegister64(0), i.InputOperand(1));
break;
@@ -1042,9 +1116,15 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Float64ToFloat32:
__ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
break;
+ case kArm64Float32ToInt32:
+ __ Fcvtzs(i.OutputRegister32(), i.InputFloat32Register(0));
+ break;
case kArm64Float64ToInt32:
__ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
break;
+ case kArm64Float32ToUint32:
+ __ Fcvtzu(i.OutputRegister32(), i.InputFloat32Register(0));
+ break;
case kArm64Float64ToUint32:
__ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
break;
@@ -1093,6 +1173,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Cset(i.OutputRegister(1), ne);
}
break;
+ case kArm64Int32ToFloat32:
+ __ Scvtf(i.OutputFloat32Register(), i.InputRegister32(0));
+ break;
case kArm64Int32ToFloat64:
__ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
break;
@@ -1102,6 +1185,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Int64ToFloat64:
__ Scvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
break;
+ case kArm64Uint32ToFloat32:
+ __ Ucvtf(i.OutputFloat32Register(), i.InputRegister32(0));
+ break;
case kArm64Uint32ToFloat64:
__ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
break;
@@ -1376,8 +1462,6 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- // TODO(titzer): cannot address target function == local #-1
- __ ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
@@ -1445,13 +1529,14 @@ void CodeGenerator::AssembleReturn() {
__ Bind(&return_label_);
if (descriptor->UseNativeStack()) {
__ Mov(csp, fp);
+ pop_count += (pop_count & 1); // align
} else {
__ Mov(jssp, fp);
}
__ Pop(fp, lr);
}
} else if (descriptor->UseNativeStack()) {
- pop_count += (pop_count & 1);
+ pop_count += (pop_count & 1); // align
}
__ Drop(pop_count);
__ Ret();
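kArm64ClaimJSSP has to keep csp 16-byte aligned even when the call needs an odd number of slots, hence the RoundUp to an even slot count while jssp tracks the exact amount. Worked numbers (kPointerSize is 8 on arm64):

    // count = 3 slots:
    //   even = RoundUp(3, 2) = 4
    //   jssp = csp - 3 * 8    // 24 bytes: the slots actually addressed
    //   csp  = csp - 4 * 8    // 32 bytes: csp stays 16-byte aligned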
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index ef333480e3..f03c2fb436 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -73,11 +73,15 @@ namespace compiler {
V(Arm64Ubfx32) \
V(Arm64Ubfiz32) \
V(Arm64Bfi) \
+ V(Arm64Rbit) \
+ V(Arm64Rbit32) \
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
- V(Arm64ClaimForCallArguments) \
- V(Arm64Poke) \
+ V(Arm64ClaimCSP) \
+ V(Arm64ClaimJSSP) \
+ V(Arm64PokeCSP) \
+ V(Arm64PokeJSSP) \
V(Arm64PokePair) \
V(Arm64Float32Cmp) \
V(Arm64Float32Add) \
@@ -110,15 +114,19 @@ namespace compiler {
V(Arm64Float64RoundTiesEven) \
V(Arm64Float32ToFloat64) \
V(Arm64Float64ToFloat32) \
+ V(Arm64Float32ToInt32) \
V(Arm64Float64ToInt32) \
+ V(Arm64Float32ToUint32) \
V(Arm64Float64ToUint32) \
V(Arm64Float32ToInt64) \
V(Arm64Float64ToInt64) \
V(Arm64Float32ToUint64) \
V(Arm64Float64ToUint64) \
+ V(Arm64Int32ToFloat32) \
V(Arm64Int32ToFloat64) \
V(Arm64Int64ToFloat32) \
V(Arm64Int64ToFloat64) \
+ V(Arm64Uint32ToFloat32) \
V(Arm64Uint32ToFloat64) \
V(Arm64Uint64ToFloat32) \
V(Arm64Uint64ToFloat64) \
@@ -143,7 +151,6 @@ namespace compiler {
V(Arm64Ldr) \
V(Arm64Str)
-
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
@@ -169,6 +176,8 @@ namespace compiler {
V(Operand2_R_SXTB) /* %r0 SXTB (signed extend byte) */ \
V(Operand2_R_SXTH) /* %r0 SXTH (signed extend halfword) */
+enum ResetJSSPAfterCall { kNoResetJSSP, kResetJSSP };
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
index eb358dd8c4..ca372993b8 100644
--- a/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-scheduler-arm64.cc
@@ -75,6 +75,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Ubfx32:
case kArm64Ubfiz32:
case kArm64Bfi:
+ case kArm64Rbit:
+ case kArm64Rbit32:
case kArm64Float32Cmp:
case kArm64Float32Add:
case kArm64Float32Sub:
@@ -106,15 +108,19 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float32RoundUp:
case kArm64Float32ToFloat64:
case kArm64Float64ToFloat32:
+ case kArm64Float32ToInt32:
case kArm64Float64ToInt32:
+ case kArm64Float32ToUint32:
case kArm64Float64ToUint32:
case kArm64Float32ToInt64:
case kArm64Float64ToInt64:
case kArm64Float32ToUint64:
case kArm64Float64ToUint64:
+ case kArm64Int32ToFloat32:
case kArm64Int32ToFloat64:
case kArm64Int64ToFloat32:
case kArm64Int64ToFloat64:
+ case kArm64Uint32ToFloat32:
case kArm64Uint32ToFloat64:
case kArm64Uint64ToFloat32:
case kArm64Uint64ToFloat64:
@@ -141,8 +147,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Ldr:
return kIsLoadOperation;
- case kArm64ClaimForCallArguments:
- case kArm64Poke:
+ case kArm64ClaimCSP:
+ case kArm64ClaimJSSP:
+ case kArm64PokeCSP:
+ case kArm64PokeJSSP:
case kArm64PokePair:
case kArm64StrS:
case kArm64StrD:
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 1ec5ab4c41..26a2896134 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -371,6 +371,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kArm64Ldr;
immediate_mode = kLoadStoreImm64;
break;
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -398,10 +399,20 @@ void InstructionSelector::VisitStore(Node* node) {
// TODO(arm64): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier) {
DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
+ // OutOfLineRecordWrite uses the index in an arithmetic instruction, so we
+ // must check kArithmeticImm as well as kLoadStoreImm64.
+ if (g.CanBeImmediate(index, kArithmeticImm) &&
+ g.CanBeImmediate(index, kLoadStoreImm64)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MRR;
+ }
inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
? g.UseRegister(value)
: g.UseUniqueRegister(value);
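
The new immediate check above folds the index into the store only when every consumer can encode it: OutOfLineRecordWrite reuses the index in an arithmetic instruction, so kArithmeticImm must hold in addition to kLoadStoreImm64. A standalone sketch of the two-predicate test; the encoding ranges below are simplified stand-ins, not arm64's actual rules:

#include <cassert>
#include <cstdint>

// Simplified, hypothetical encodings: load/store immediates are non-negative
// multiples of 8 below 32768, arithmetic immediates fit in 12 bits. The real
// arm64 encodings are richer; only the combined check matters here.
bool IsLoadStoreImm64(int64_t v) { return v >= 0 && v < 32768 && v % 8 == 0; }
bool IsArithmeticImm(int64_t v) { return v >= 0 && v < (1 << 12); }

// Fold the index only if every instruction that consumes it accepts it.
bool CanFoldIndex(int64_t v) {
  return IsLoadStoreImm64(v) && IsArithmeticImm(v);
}

int main() {
  assert(CanFoldIndex(8));      // both forms accept a small aligned offset
  assert(!CanFoldIndex(8192));  // fits the load/store form but not 12 bits
}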
@@ -423,6 +434,7 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t const temp_count = arraysize(temps);
InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
@@ -455,6 +467,7 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kArm64Str;
immediate_mode = kLoadStoreImm64;
break;
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -496,8 +509,9 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -534,8 +548,9 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -963,6 +978,16 @@ void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBits(Node* node) {
+ VisitRR(this, kArm64Rbit32, node);
+}
+
+
+void InstructionSelector::VisitWord64ReverseBits(Node* node) {
+ VisitRR(this, kArm64Rbit, node);
+}
+
+
void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
@@ -1219,6 +1244,16 @@ void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
}
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ VisitRR(this, kArm64Int32ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ VisitRR(this, kArm64Uint32ToFloat32, node);
+}
+
+
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
VisitRR(this, kArm64Int32ToFloat64, node);
}
@@ -1229,11 +1264,21 @@ void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
}
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ VisitRR(this, kArm64Float32ToInt32, node);
+}
+
+
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
VisitRR(this, kArm64Float64ToInt32, node);
}
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ VisitRR(this, kArm64Float32ToUint32, node);
+}
+
+
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
VisitRR(this, kArm64Float64ToUint32, node);
}
@@ -1583,30 +1628,27 @@ void InstructionSelector::EmitPrepareArguments(
Node* node) {
Arm64OperandGenerator g(this);
- // Push the arguments to the stack.
- int aligned_push_count = static_cast<int>(arguments->size());
+ bool to_native_stack = descriptor->UseNativeStack();
- bool pushed_count_uneven = aligned_push_count & 1;
- int claim_count = aligned_push_count;
- if (pushed_count_uneven && descriptor->UseNativeStack()) {
- // We can only claim for an even number of call arguments when we use the
- // native stack.
- claim_count++;
+ int claim_count = static_cast<int>(arguments->size());
+ int slot = claim_count - 1;
+ if (to_native_stack) {
+ // Native stack must always be aligned to 16 (2 words).
+ claim_count = RoundUp(claim_count, 2);
}
- // TODO(dcarney): claim and poke probably take small immediates,
- // loop here or whatever.
+ // TODO(titzer): claim and poke probably take small immediates.
// Bump the stack pointer(s).
- if (aligned_push_count > 0) {
- // TODO(dcarney): it would be better to bump the csp here only
+ if (claim_count > 0) {
+ // TODO(titzer): it would be better to bump the csp here only
// and emit paired stores with increment for non c frames.
- Emit(kArm64ClaimForCallArguments, g.NoOutput(),
- g.TempImmediate(claim_count));
+ ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
+ Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
}
- // Move arguments to the stack.
- int slot = aligned_push_count - 1;
+ // Poke the arguments into the stack.
+ ArchOpcode poke = to_native_stack ? kArm64PokeCSP : kArm64PokeJSSP;
while (slot >= 0) {
- Emit(kArm64Poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
+ Emit(poke, g.NoOutput(), g.UseRegister((*arguments)[slot].node()),
g.TempImmediate(slot));
slot--;
// TODO(ahaas): Poke arguments in pairs if two subsequent arguments have the
@@ -2191,7 +2233,9 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTiesEven |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kInt32DivIsSafe |
- MachineOperatorBuilder::kUint32DivIsSafe;
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kWord32ReverseBits |
+ MachineOperatorBuilder::kWord64ReverseBits;
}
} // namespace compiler
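
EmitPrepareArguments above now claims all argument slots first, rounding up to an even count on the native stack, and then pokes each argument into its slot from the top down. A small model of that two-phase layout, assuming a plain vector stands in for the stack:

#include <cassert>
#include <vector>

// Illustrative model of the claim/poke scheme: reserve ("claim") the slots,
// padding to an even count for the native stack, then write ("poke") each
// argument into its slot.
std::vector<int> PrepareArguments(const std::vector<int>& args,
                                  bool to_native_stack) {
  int claim_count = static_cast<int>(args.size());
  int slot = claim_count - 1;
  if (to_native_stack) claim_count += claim_count & 1;  // RoundUp(count, 2)
  std::vector<int> stack(claim_count, 0);  // the claim
  while (slot >= 0) {                      // the pokes
    stack[slot] = args[slot];
    slot--;
  }
  return stack;
}

int main() {
  std::vector<int> s = PrepareArguments({1, 2, 3}, true);
  assert(s.size() == 4);                     // one alignment slot was added
  assert(s[0] == 1 && s[2] == 3 && s[3] == 0);
}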
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index c70dfbf650..abcf828c39 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -206,7 +206,6 @@ class AstGraphBuilder::ControlScope BASE_EMBEDDED {
int stack_height_;
};
-
// Helper class for a try-finally control scope. It can record intercepted
// control-flow commands that cause entry into a finally-block, and re-apply
// them after again leaving that block. Special tokens are used to identify
@@ -214,7 +213,10 @@ class AstGraphBuilder::ControlScope BASE_EMBEDDED {
class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
public:
explicit DeferredCommands(AstGraphBuilder* owner)
- : owner_(owner), deferred_(owner->local_zone()) {}
+ : owner_(owner),
+ deferred_(owner->local_zone()),
+ return_token_(nullptr),
+ throw_token_(nullptr) {}
// One recorded control-flow command.
struct Entry {
@@ -226,7 +228,24 @@ class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
// Records a control-flow command while entering the finally-block. This also
// generates a new dispatch token that identifies one particular path.
Node* RecordCommand(Command cmd, Statement* stmt, Node* value) {
- Node* token = NewPathTokenForDeferredCommand();
+ Node* token = nullptr;
+ switch (cmd) {
+ case CMD_BREAK:
+ case CMD_CONTINUE:
+ token = NewPathToken(dispenser_.GetBreakContinueToken());
+ break;
+ case CMD_THROW:
+ if (throw_token_) return throw_token_;
+ token = NewPathToken(TokenDispenserForFinally::kThrowToken);
+ throw_token_ = token;
+ break;
+ case CMD_RETURN:
+ if (return_token_) return return_token_;
+ token = NewPathToken(TokenDispenserForFinally::kReturnToken);
+ return_token_ = token;
+ break;
+ }
+ DCHECK_NOT_NULL(token);
deferred_.push_back({cmd, stmt, token});
return token;
}
@@ -255,11 +274,11 @@ class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
}
protected:
- Node* NewPathTokenForDeferredCommand() {
- return owner_->jsgraph()->Constant(static_cast<int>(deferred_.size()));
+ Node* NewPathToken(int token_id) {
+ return owner_->jsgraph()->Constant(token_id);
}
Node* NewPathTokenForImplicitFallThrough() {
- return owner_->jsgraph()->Constant(-1);
+ return NewPathToken(TokenDispenserForFinally::kFallThroughToken);
}
Node* NewPathDispatchCondition(Node* t1, Node* t2) {
// TODO(mstarzinger): This should be machine()->WordEqual(), but our Phi
@@ -268,8 +287,11 @@ class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
}
private:
+ TokenDispenserForFinally dispenser_;
AstGraphBuilder* owner_;
ZoneVector<Entry> deferred_;
+ Node* return_token_;
+ Node* throw_token_;
};
@@ -409,10 +431,13 @@ class AstGraphBuilder::FrameStateBeforeAndAfter {
DCHECK_EQ(IrOpcode::kDead,
NodeProperties::GetFrameStateInput(node, 0)->opcode());
+ bool node_has_exception = NodeProperties::IsExceptionalCall(node);
+
Node* frame_state_after =
id_after == BailoutId::None()
? builder_->jsgraph()->EmptyFrameState()
- : builder_->environment()->Checkpoint(id_after, combine);
+ : builder_->environment()->Checkpoint(id_after, combine,
+ node_has_exception);
NodeProperties::ReplaceFrameStateInput(node, 0, frame_state_after);
}
@@ -455,8 +480,7 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
local_zone),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kJavaScriptFunction, info->num_parameters() + 1,
- info->scope()->num_stack_slots(), info->shared_info(),
- CALL_MAINTAINS_NATIVE_CONTEXT)) {
+ info->scope()->num_stack_slots(), info->shared_info())) {
InitializeAstVisitor(info->isolate());
}
@@ -589,7 +613,7 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
// Emit tracing call if requested to do so.
if (FLAG_trace) {
- NewNode(javascript()->CallRuntime(Runtime::kTraceEnter, 0));
+ NewNode(javascript()->CallRuntime(Runtime::kTraceEnter));
}
// Visit illegal re-declaration and bail out if it exists.
@@ -610,13 +634,6 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
// Visit statements in the function body.
VisitStatements(info()->literal()->body());
- // Emit tracing call if requested to do so.
- if (FLAG_trace) {
- // TODO(mstarzinger): Only traces implicit return.
- Node* return_value = jsgraph()->UndefinedConstant();
- NewNode(javascript()->CallRuntime(Runtime::kTraceExit, 1), return_value);
- }
-
// Return 'undefined' in case we can fall off the end.
BuildReturn(jsgraph()->UndefinedConstant());
}
@@ -854,9 +871,9 @@ void AstGraphBuilder::Environment::UpdateStateValuesWithCache(
env_values, static_cast<size_t>(count));
}
-
-Node* AstGraphBuilder::Environment::Checkpoint(
- BailoutId ast_id, OutputFrameStateCombine combine) {
+Node* AstGraphBuilder::Environment::Checkpoint(BailoutId ast_id,
+ OutputFrameStateCombine combine,
+ bool owner_has_exception) {
if (!builder()->info()->is_deoptimization_enabled()) {
return builder()->jsgraph()->EmptyFrameState();
}
@@ -876,7 +893,15 @@ Node* AstGraphBuilder::Environment::Checkpoint(
DCHECK(IsLivenessBlockConsistent());
if (liveness_block() != nullptr) {
- liveness_block()->Checkpoint(result);
+ // If the owning node has an exception, register the checkpoint to the
+ // predecessor so that the checkpoint is used for both the normal and the
+ // exceptional paths. Yes, this is a terrible hack and we might want
+ // to use an explicit frame state for the exceptional path.
+ if (owner_has_exception) {
+ liveness_block()->GetPredecessor()->Checkpoint(result);
+ } else {
+ liveness_block()->Checkpoint(result);
+ }
}
return result;
}
@@ -1331,7 +1356,8 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
// Prepare for-in cache.
Node* prepare = NewNode(javascript()->ForInPrepare(), object);
- PrepareFrameState(prepare, stmt->EnumId(), OutputFrameStateCombine::Push());
+ PrepareFrameState(prepare, stmt->PrepareId(),
+ OutputFrameStateCombine::Push(3));
Node* cache_type = NewNode(common()->Projection(0), prepare);
Node* cache_array = NewNode(common()->Projection(1), prepare);
Node* cache_length = NewNode(common()->Projection(2), prepare);
@@ -1422,14 +1448,6 @@ void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
}
try_control.EndTry();
- // Insert lazy bailout point.
- // TODO(mstarzinger): We are only using a 'call' to get a lazy bailout
- // point. Ideally, we would not re-enter optimized code when deoptimized
- // lazily. Tracked by issue v8:4195.
- NewNode(common()->LazyBailout(),
- jsgraph()->ZeroConstant(), // dummy target.
- environment()->Checkpoint(stmt->HandlerId())); // frame state.
-
// Clear message object as we enter the catch block.
Node* the_hole = jsgraph()->TheHoleConstant();
NewNode(javascript()->StoreMessage(), the_hole);
@@ -1474,14 +1492,6 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
}
try_control.EndTry(commands->GetFallThroughToken(), fallthrough_result);
- // Insert lazy bailout point.
- // TODO(mstarzinger): We are only using a 'call' to get a lazy bailout
- // point. Ideally, we would not re-enter optimized code when deoptimized
- // lazily. Tracked by issue v8:4195.
- NewNode(common()->LazyBailout(),
- jsgraph()->ZeroConstant(), // dummy target.
- environment()->Checkpoint(stmt->HandlerId())); // frame state.
-
// The result value semantics depend on how the block was entered:
// - ReturnStatement: It represents the return value being returned.
// - ThrowStatement: It represents the exception being thrown.
@@ -1493,7 +1503,7 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// The result value, dispatch token and message are expected on the operand
// stack (this is in sync with FullCodeGenerator::EnterFinallyBlock).
Node* message = NewNode(javascript()->LoadMessage());
- environment()->Push(token); // TODO(mstarzinger): Cook token!
+ environment()->Push(token);
environment()->Push(result);
environment()->Push(message);
@@ -1509,20 +1519,17 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// stack (this is in sync with FullCodeGenerator::ExitFinallyBlock).
message = environment()->Pop();
result = environment()->Pop();
- token = environment()->Pop(); // TODO(mstarzinger): Uncook token!
+ token = environment()->Pop();
NewNode(javascript()->StoreMessage(), message);
// Dynamic dispatch after the finally-block.
commands->ApplyDeferredCommands(token, result);
-
- // TODO(mstarzinger): Remove bailout once everything works.
- if (!FLAG_turbo_try_finally) SetStackOverflow();
}
void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
Node* node =
- NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement, 0));
+ NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
PrepareFrameState(node, stmt->DebugBreakId());
environment()->MarkAllLocalsLive();
}
@@ -1557,33 +1564,27 @@ void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
- Node* class_name = expr->raw_name() ? jsgraph()->Constant(expr->name())
- : jsgraph()->UndefinedConstant();
-
- // The class name is expected on the operand stack.
- environment()->Push(class_name);
VisitForValueOrTheHole(expr->extends());
VisitForValue(expr->constructor());
// Create node to instantiate a new class.
Node* constructor = environment()->Pop();
Node* extends = environment()->Pop();
- Node* name = environment()->Pop();
Node* start = jsgraph()->Constant(expr->start_position());
Node* end = jsgraph()->Constant(expr->end_position());
- const Operator* opc = javascript()->CallRuntime(Runtime::kDefineClass, 5);
- Node* literal = NewNode(opc, name, extends, constructor, start, end);
+ const Operator* opc = javascript()->CallRuntime(Runtime::kDefineClass);
+ Node* literal = NewNode(opc, extends, constructor, start, end);
PrepareFrameState(literal, expr->CreateLiteralId(),
OutputFrameStateCombine::Push());
-
- // The prototype is ensured to exist by Runtime_DefineClass. No access check
- // is needed here since the constructor is created by the class literal.
- Node* prototype =
- BuildLoadObjectField(literal, JSFunction::kPrototypeOrInitialMapOffset);
-
- // The class literal and the prototype are both expected on the operand stack
- // during evaluation of the method values.
environment()->Push(literal);
+
+ // Load the "prototype" from the constructor.
+ FrameStateBeforeAndAfter states(this, expr->CreateLiteralId());
+ Handle<Name> name = isolate()->factory()->prototype_string();
+ VectorSlotPair pair = CreateVectorSlotPair(expr->PrototypeSlot());
+ Node* prototype = BuildNamedLoad(literal, name, pair);
+ states.AddToNode(prototype, expr->PrototypeId(),
+ OutputFrameStateCombine::Push());
environment()->Push(prototype);
// Create nodes to store method values into the literal.
@@ -1618,9 +1619,12 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED: {
+ Node* attr = jsgraph()->Constant(DONT_ENUM);
+ Node* set_function_name =
+ jsgraph()->Constant(property->NeedsSetFunctionName());
const Operator* op =
- javascript()->CallRuntime(Runtime::kDefineClassMethod, 3);
- NewNode(op, receiver, key, value);
+ javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
+ NewNode(op, receiver, key, value, attr, set_function_name);
break;
}
case ObjectLiteral::Property::GETTER: {
@@ -1645,7 +1649,7 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
prototype = environment()->Pop();
literal = environment()->Pop();
const Operator* op =
- javascript()->CallRuntime(Runtime::kFinalizeClassDefinition, 2);
+ javascript()->CallRuntime(Runtime::kFinalizeClassDefinition);
literal = NewNode(op, literal, prototype);
// Assign to class variable.
@@ -1774,8 +1778,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* receiver = environment()->Pop();
if (property->emit_store()) {
Node* language = jsgraph()->Constant(SLOPPY);
- const Operator* op =
- javascript()->CallRuntime(Runtime::kSetProperty, 4);
+ const Operator* op = javascript()->CallRuntime(Runtime::kSetProperty);
Node* set_property = NewNode(op, receiver, key, value, language);
// SetProperty should not lazy deopt on an object literal.
PrepareFrameState(set_property, BailoutId::None());
@@ -1790,7 +1793,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* receiver = environment()->Pop();
DCHECK(property->emit_store());
const Operator* op =
- javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
+ javascript()->CallRuntime(Runtime::kInternalSetPrototype);
Node* set_prototype = NewNode(op, receiver, value);
// SetPrototype should not lazy deopt on an object literal.
PrepareFrameState(set_prototype,
@@ -1823,7 +1826,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* name = environment()->Pop();
Node* attr = jsgraph()->Constant(NONE);
const Operator* op =
- javascript()->CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+ javascript()->CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
Node* call = NewNode(op, literal, name, getter, setter, attr);
// This should not lazy deopt on a new literal.
PrepareFrameState(call, BailoutId::None());
@@ -1847,7 +1850,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* value = environment()->Pop();
Node* receiver = environment()->Pop();
const Operator* op =
- javascript()->CallRuntime(Runtime::kInternalSetPrototype, 2);
+ javascript()->CallRuntime(Runtime::kInternalSetPrototype);
Node* call = NewNode(op, receiver, value);
PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
continue;
@@ -1868,10 +1871,11 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
Node* attr = jsgraph()->Constant(NONE);
+ Node* set_function_name =
+ jsgraph()->Constant(property->NeedsSetFunctionName());
const Operator* op =
- javascript()->CallRuntime(Runtime::kDefineDataPropertyUnchecked, 4);
- Node* call = NewNode(op, receiver, key, value, attr);
- PrepareFrameState(call, BailoutId::None());
+ javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
+ NewNode(op, receiver, key, value, attr, set_function_name);
break;
}
case ObjectLiteral::Property::PROTOTYPE:
@@ -1899,8 +1903,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// Transform literals that contain functions to fast properties.
literal = environment()->Top(); // Reload from operand stack.
if (expr->has_function()) {
- const Operator* op =
- javascript()->CallRuntime(Runtime::kToFastProperties, 1);
+ const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties);
NewNode(op, literal);
}
@@ -1939,7 +1942,7 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
int array_index = 0;
for (; array_index < expr->values()->length(); array_index++) {
Expression* subexpr = expr->values()->at(array_index);
- if (subexpr->IsSpread()) break;
+ DCHECK(!subexpr->IsSpread());
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
VisitForValue(subexpr);
@@ -1962,30 +1965,17 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// number elements an iterable produces is unknown ahead of time.
for (; array_index < expr->values()->length(); array_index++) {
Expression* subexpr = expr->values()->at(array_index);
- Node* result;
+ DCHECK(!subexpr->IsSpread());
- if (subexpr->IsSpread()) {
- VisitForValue(subexpr->AsSpread()->expression());
- FrameStateBeforeAndAfter states(this,
- subexpr->AsSpread()->expression()->id());
- Node* iterable = environment()->Pop();
- Node* array = environment()->Pop();
- Node* function = BuildLoadNativeContextField(
- Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX);
- result = NewNode(javascript()->CallFunction(3, language_mode()), function,
- array, iterable);
- states.AddToNode(result, expr->GetIdForElement(array_index));
- } else {
- VisitForValue(subexpr);
+ VisitForValue(subexpr);
+ {
Node* value = environment()->Pop();
Node* array = environment()->Pop();
- const Operator* op =
- javascript()->CallRuntime(Runtime::kAppendElement, 2);
- result = NewNode(op, array, value);
+ const Operator* op = javascript()->CallRuntime(Runtime::kAppendElement);
+ Node* result = NewNode(op, array, value);
PrepareFrameState(result, expr->GetIdForElement(array_index));
+ environment()->Push(result);
}
-
- environment()->Push(result);
}
ast_context()->ProduceValue(environment()->Pop());
@@ -2343,8 +2333,8 @@ void AstGraphBuilder::VisitCall(Call* expr) {
DCHECK(variable->location() == VariableLocation::LOOKUP);
Node* name = jsgraph()->Constant(variable->name());
const Operator* op =
- javascript()->CallRuntime(Runtime::kLoadLookupSlot, 2);
- Node* pair = NewNode(op, current_context(), name);
+ javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
+ Node* pair = NewNode(op, name);
callee_value = NewNode(common()->Projection(0), pair);
receiver_value = NewNode(common()->Projection(1), pair);
PrepareFrameState(pair, expr->LookupId(),
@@ -2439,8 +2429,8 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Variable* variable = callee->AsVariableProxy()->var();
Node* name = jsgraph()->Constant(variable->name());
const Operator* op =
- javascript()->CallRuntime(Runtime::kLoadLookupSlot, 2);
- Node* pair = NewNode(op, current_context(), name);
+ javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
+ Node* pair = NewNode(op, name);
callee_value = NewNode(common()->Projection(0), pair);
receiver_value = NewNode(common()->Projection(1), pair);
PrepareFrameState(pair, expr->LookupId(),
@@ -2480,7 +2470,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
Node* language = jsgraph()->Constant(language_mode());
Node* position = jsgraph()->Constant(current_scope()->start_position());
const Operator* op =
- javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval);
Node* new_callee =
NewNode(op, callee, source, function, language, position);
PrepareFrameState(new_callee, expr->EvalId(),
@@ -2493,7 +2483,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
// Create node to perform the function call.
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
const Operator* call = javascript()->CallFunction(
- args->length() + 2, language_mode(), feedback, receiver_hint);
+ args->length() + 2, feedback, receiver_hint, expr->tail_call_mode());
FrameStateBeforeAndAfter states(this, expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
environment()->Push(value->InputAt(0)); // The callee passed to the call.
@@ -2571,8 +2561,7 @@ void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
VisitForValues(args);
// Create node to perform the JS runtime call.
- const Operator* call =
- javascript()->CallFunction(args->length() + 2, language_mode());
+ const Operator* call = javascript()->CallFunction(args->length() + 2);
FrameStateBeforeAndAfter states(this, expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
states.AddToNode(value, expr->id(), ast_context()->GetStateCombine());
@@ -2591,6 +2580,7 @@ void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
// TODO(mstarzinger): This bailout is a gigantic hack, the owner is ashamed.
if (function->function_id == Runtime::kInlineGeneratorNext ||
+ function->function_id == Runtime::kInlineGeneratorReturn ||
function->function_id == Runtime::kInlineGeneratorThrow) {
ast_context()->ProduceValue(jsgraph()->TheHoleConstant());
return SetStackOverflow();
@@ -2740,7 +2730,7 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
// TODO(bmeurer): Cleanup this feedback/bailout mess!
FrameStateBeforeAndAfter states(this, BailoutId::None());
value = BuildBinaryOp(old_value, jsgraph()->OneConstant(),
- expr->binary_op(), TypeFeedbackId::None());
+ expr->binary_op(), expr->CountBinOpFeedbackId());
// This should never deoptimize outside strong mode because otherwise we
// have converted to number before.
states.AddToNode(value, is_strong(language_mode()) ? expr->ToNumberId()
@@ -2848,16 +2838,16 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
op = javascript()->StrictNotEqual();
break;
case Token::LT:
- op = javascript()->LessThan(language_mode());
+ op = javascript()->LessThan();
break;
case Token::GT:
- op = javascript()->GreaterThan(language_mode());
+ op = javascript()->GreaterThan();
break;
case Token::LTE:
- op = javascript()->LessThanOrEqual(language_mode());
+ op = javascript()->LessThanOrEqual();
break;
case Token::GTE:
- op = javascript()->GreaterThanOrEqual(language_mode());
+ op = javascript()->GreaterThanOrEqual();
break;
case Token::INSTANCEOF:
op = javascript()->InstanceOf();
@@ -2930,7 +2920,7 @@ void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
DeclareGlobalsLanguageMode::encode(language_mode());
Node* flags = jsgraph()->Constant(encoded_flags);
Node* pairs = jsgraph()->Constant(data);
- const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals, 2);
+ const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals);
Node* call = NewNode(op, pairs, flags);
PrepareFrameState(call, BailoutId::Declarations());
globals()->clear();
@@ -3072,8 +3062,7 @@ VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
}
-void AstGraphBuilder::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* node) {
+void AstGraphBuilder::VisitRewritableExpression(RewritableExpression* node) {
Visit(node->expression());
}
@@ -3209,11 +3198,11 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
if (arguments == nullptr) return nullptr;
// Allocate and initialize a new arguments object.
- CreateArgumentsParameters::Type type =
+ CreateArgumentsType type =
is_strict(language_mode()) || !info()->has_simple_parameters()
- ? CreateArgumentsParameters::kUnmappedArguments
- : CreateArgumentsParameters::kMappedArguments;
- const Operator* op = javascript()->CreateArguments(type, 0);
+ ? CreateArgumentsType::kUnmappedArguments
+ : CreateArgumentsType::kMappedArguments;
+ const Operator* op = javascript()->CreateArguments(type);
Node* object = NewNode(op, GetFunctionClosure());
PrepareFrameState(object, BailoutId::None());
@@ -3231,8 +3220,8 @@ Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest, int index) {
if (rest == nullptr) return nullptr;
// Allocate and initialize a new arguments object.
- CreateArgumentsParameters::Type type = CreateArgumentsParameters::kRestArray;
- const Operator* op = javascript()->CreateArguments(type, index);
+ CreateArgumentsType type = CreateArgumentsType::kRestParameter;
+ const Operator* op = javascript()->CreateArguments(type);
Node* object = NewNode(op, GetFunctionClosure());
PrepareFrameState(object, BailoutId::None());
@@ -3405,8 +3394,7 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
feedback, combine, typeof_mode)) {
return node;
}
- const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
- Node* value = NewNode(op, BuildLoadFeedbackVector(), current_context());
+ Node* value = BuildDynamicLoad(name, typeof_mode);
states.AddToNode(value, bailout_id, combine);
return value;
}
@@ -3440,8 +3428,8 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
// Dynamic lookup of context variable (anywhere in the chain).
Node* name = jsgraph()->Constant(variable->name());
const Operator* op =
- javascript()->CallRuntime(Runtime::kDeleteLookupSlot, 2);
- Node* result = NewNode(op, current_context(), name);
+ javascript()->CallRuntime(Runtime::kDeleteLookupSlot);
+ Node* result = NewNode(op, name);
PrepareFrameState(result, bailout_id, combine);
return result;
}
@@ -3563,13 +3551,10 @@ Node* AstGraphBuilder::BuildVariableAssignment(
}
case VariableLocation::LOOKUP: {
// Dynamic lookup of context variable (anywhere in the chain).
- Node* name = jsgraph()->Constant(variable->name());
- Node* language = jsgraph()->Constant(language_mode());
+ Handle<Name> name = variable->name();
// TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for
// initializations of const declarations.
- const Operator* op =
- javascript()->CallRuntime(Runtime::kStoreLookupSlot, 4);
- Node* store = NewNode(op, value, current_context(), name, language);
+ Node* store = BuildDynamicStore(name, value);
PrepareFrameState(store, bailout_id, combine);
return store;
}
@@ -3581,16 +3566,16 @@ Node* AstGraphBuilder::BuildVariableAssignment(
Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
const VectorSlotPair& feedback) {
- const Operator* op = javascript()->LoadProperty(language_mode(), feedback);
- Node* node = NewNode(op, object, key, BuildLoadFeedbackVector());
+ const Operator* op = javascript()->LoadProperty(feedback);
+ Node* node = NewNode(op, object, key, GetFunctionClosure());
return node;
}
Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
const VectorSlotPair& feedback) {
- const Operator* op = javascript()->LoadNamed(language_mode(), name, feedback);
- Node* node = NewNode(op, object, BuildLoadFeedbackVector());
+ const Operator* op = javascript()->LoadNamed(name, feedback);
+ Node* node = NewNode(op, object, GetFunctionClosure());
return node;
}
@@ -3598,7 +3583,7 @@ Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
Node* AstGraphBuilder::BuildKeyedStore(Node* object, Node* key, Node* value,
const VectorSlotPair& feedback) {
const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
- Node* node = NewNode(op, object, key, value, BuildLoadFeedbackVector());
+ Node* node = NewNode(op, object, key, value, GetFunctionClosure());
return node;
}
@@ -3608,7 +3593,7 @@ Node* AstGraphBuilder::BuildNamedStore(Node* object, Handle<Name> name,
const VectorSlotPair& feedback) {
const Operator* op =
javascript()->StoreNamed(language_mode(), name, feedback);
- Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
+ Node* node = NewNode(op, object, value, GetFunctionClosure());
return node;
}
@@ -3617,9 +3602,8 @@ Node* AstGraphBuilder::BuildNamedSuperLoad(Node* receiver, Node* home_object,
Handle<Name> name,
const VectorSlotPair& feedback) {
Node* name_node = jsgraph()->Constant(name);
- Node* language = jsgraph()->Constant(language_mode());
- const Operator* op = javascript()->CallRuntime(Runtime::kLoadFromSuper, 4);
- Node* node = NewNode(op, receiver, home_object, name_node, language);
+ const Operator* op = javascript()->CallRuntime(Runtime::kLoadFromSuper);
+ Node* node = NewNode(op, receiver, home_object, name_node);
return node;
}
@@ -3627,10 +3611,8 @@ Node* AstGraphBuilder::BuildNamedSuperLoad(Node* receiver, Node* home_object,
Node* AstGraphBuilder::BuildKeyedSuperLoad(Node* receiver, Node* home_object,
Node* key,
const VectorSlotPair& feedback) {
- Node* language = jsgraph()->Constant(language_mode());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kLoadKeyedFromSuper, 4);
- Node* node = NewNode(op, receiver, home_object, key, language);
+ const Operator* op = javascript()->CallRuntime(Runtime::kLoadKeyedFromSuper);
+ Node* node = NewNode(op, receiver, home_object, key);
return node;
}
@@ -3662,7 +3644,7 @@ Node* AstGraphBuilder::BuildGlobalLoad(Handle<Name> name,
const VectorSlotPair& feedback,
TypeofMode typeof_mode) {
const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
- Node* node = NewNode(op, BuildLoadFeedbackVector());
+ Node* node = NewNode(op, GetFunctionClosure());
return node;
}
@@ -3671,22 +3653,30 @@ Node* AstGraphBuilder::BuildGlobalStore(Handle<Name> name, Node* value,
const VectorSlotPair& feedback) {
const Operator* op =
javascript()->StoreGlobal(language_mode(), name, feedback);
- Node* node = NewNode(op, value, BuildLoadFeedbackVector());
+ Node* node = NewNode(op, value, GetFunctionClosure());
return node;
}
-Node* AstGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
- return NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()), object,
- jsgraph()->IntPtrConstant(offset - kHeapObjectTag));
+Node* AstGraphBuilder::BuildDynamicLoad(Handle<Name> name,
+ TypeofMode typeof_mode) {
+ Node* name_node = jsgraph()->Constant(name);
+ const Operator* op =
+ javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotInsideTypeof);
+ Node* node = NewNode(op, name_node);
+ return node;
}
-Node* AstGraphBuilder::BuildLoadImmutableObjectField(Node* object, int offset) {
- return graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
- object,
- jsgraph()->IntPtrConstant(offset - kHeapObjectTag),
- graph()->start(), graph()->start());
+Node* AstGraphBuilder::BuildDynamicStore(Handle<Name> name, Node* value) {
+ Node* name_node = jsgraph()->Constant(name);
+ const Operator* op = javascript()->CallRuntime(
+ is_strict(language_mode()) ? Runtime::kStoreLookupSlot_Strict
+ : Runtime::kStoreLookupSlot_Sloppy);
+ Node* node = NewNode(op, name_node, value);
+ return node;
}
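
BuildDynamicLoad and BuildDynamicStore above fold the old explicit language-mode and typeof operands into the choice of runtime function itself. A minimal sketch of that selection, with hypothetical naming and the runtime function returned as a string purely for illustration:

#include <cassert>
#include <string>

// Pick the runtime function from the mode, mirroring the helpers above.
enum class TypeofMode { kNotInsideTypeof, kInsideTypeof };

std::string LoadRuntimeFor(TypeofMode mode) {
  return mode == TypeofMode::kNotInsideTypeof ? "LoadLookupSlot"
                                              : "LoadLookupSlotInsideTypeof";
}

std::string StoreRuntimeFor(bool is_strict) {
  return is_strict ? "StoreLookupSlot_Strict" : "StoreLookupSlot_Sloppy";
}

int main() {
  assert(LoadRuntimeFor(TypeofMode::kInsideTypeof) ==
         "LoadLookupSlotInsideTypeof");
  assert(StoreRuntimeFor(false) == "StoreLookupSlot_Sloppy");
}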
@@ -3703,19 +3693,6 @@ Node* AstGraphBuilder::BuildLoadNativeContextField(int index) {
}
-Node* AstGraphBuilder::BuildLoadFeedbackVector() {
- if (!feedback_vector_.is_set()) {
- Node* closure = GetFunctionClosure();
- Node* shared = BuildLoadImmutableObjectField(
- closure, JSFunction::kSharedFunctionInfoOffset);
- Node* vector = BuildLoadImmutableObjectField(
- shared, SharedFunctionInfo::kFeedbackVectorOffset);
- feedback_vector_.set(vector);
- }
- return feedback_vector_.get();
-}
-
-
Node* AstGraphBuilder::BuildToBoolean(Node* input, TypeFeedbackId feedback_id) {
if (Node* node = TryFastToBoolean(input)) return node;
ToBooleanHints hints;
@@ -3758,7 +3735,7 @@ Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
Node* AstGraphBuilder::BuildThrowError(Node* exception, BailoutId bailout_id) {
- const Operator* op = javascript()->CallRuntime(Runtime::kThrow, 1);
+ const Operator* op = javascript()->CallRuntime(Runtime::kThrow);
Node* call = NewNode(op, exception);
PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw(), call);
@@ -3770,8 +3747,7 @@ Node* AstGraphBuilder::BuildThrowError(Node* exception, BailoutId bailout_id) {
Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
BailoutId bailout_id) {
Node* variable_name = jsgraph()->Constant(variable->name());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kThrowReferenceError, 1);
+ const Operator* op = javascript()->CallRuntime(Runtime::kThrowReferenceError);
Node* call = NewNode(op, variable_name);
PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw(), call);
@@ -3782,7 +3758,7 @@ Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
const Operator* op =
- javascript()->CallRuntime(Runtime::kThrowConstAssignError, 0);
+ javascript()->CallRuntime(Runtime::kThrowConstAssignError);
Node* call = NewNode(op);
PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw(), call);
@@ -3793,7 +3769,7 @@ Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
Node* AstGraphBuilder::BuildThrowStaticPrototypeError(BailoutId bailout_id) {
const Operator* op =
- javascript()->CallRuntime(Runtime::kThrowStaticPrototypeError, 0);
+ javascript()->CallRuntime(Runtime::kThrowStaticPrototypeError);
Node* call = NewNode(op);
PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw(), call);
@@ -3804,7 +3780,7 @@ Node* AstGraphBuilder::BuildThrowStaticPrototypeError(BailoutId bailout_id) {
Node* AstGraphBuilder::BuildThrowUnsupportedSuperError(BailoutId bailout_id) {
const Operator* op =
- javascript()->CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
+ javascript()->CallRuntime(Runtime::kThrowUnsupportedSuperError);
Node* call = NewNode(op);
PrepareFrameState(call, bailout_id);
Node* control = NewNode(common()->Throw(), call);
@@ -3814,6 +3790,11 @@ Node* AstGraphBuilder::BuildThrowUnsupportedSuperError(BailoutId bailout_id) {
Node* AstGraphBuilder::BuildReturn(Node* return_value) {
+ // Emit tracing call if requested to do so.
+ if (FLAG_trace) {
+ return_value =
+ NewNode(javascript()->CallRuntime(Runtime::kTraceExit), return_value);
+ }
Node* control = NewNode(common()->Return(), return_value);
UpdateControlDependencyToLeaveFunction(control);
return control;
@@ -3821,7 +3802,7 @@ Node* AstGraphBuilder::BuildReturn(Node* return_value) {
Node* AstGraphBuilder::BuildThrow(Node* exception_value) {
- NewNode(javascript()->CallRuntime(Runtime::kReThrow, 1), exception_value);
+ NewNode(javascript()->CallRuntime(Runtime::kReThrow), exception_value);
Node* control = NewNode(common()->Throw(), exception_value);
UpdateControlDependencyToLeaveFunction(control);
return control;
@@ -3838,37 +3819,37 @@ Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op,
}
switch (op) {
case Token::BIT_OR:
- js_op = javascript()->BitwiseOr(language_mode(), hints);
+ js_op = javascript()->BitwiseOr(hints);
break;
case Token::BIT_AND:
- js_op = javascript()->BitwiseAnd(language_mode(), hints);
+ js_op = javascript()->BitwiseAnd(hints);
break;
case Token::BIT_XOR:
- js_op = javascript()->BitwiseXor(language_mode(), hints);
+ js_op = javascript()->BitwiseXor(hints);
break;
case Token::SHL:
- js_op = javascript()->ShiftLeft(language_mode(), hints);
+ js_op = javascript()->ShiftLeft(hints);
break;
case Token::SAR:
- js_op = javascript()->ShiftRight(language_mode(), hints);
+ js_op = javascript()->ShiftRight(hints);
break;
case Token::SHR:
- js_op = javascript()->ShiftRightLogical(language_mode(), hints);
+ js_op = javascript()->ShiftRightLogical(hints);
break;
case Token::ADD:
- js_op = javascript()->Add(language_mode(), hints);
+ js_op = javascript()->Add(hints);
break;
case Token::SUB:
- js_op = javascript()->Subtract(language_mode(), hints);
+ js_op = javascript()->Subtract(hints);
break;
case Token::MUL:
- js_op = javascript()->Multiply(language_mode(), hints);
+ js_op = javascript()->Multiply(hints);
break;
case Token::DIV:
- js_op = javascript()->Divide(language_mode(), hints);
+ js_op = javascript()->Divide(hints);
break;
case Token::MOD:
- js_op = javascript()->Modulus(language_mode(), hints);
+ js_op = javascript()->Modulus(hints);
break;
default:
UNREACHABLE();
@@ -3916,17 +3897,21 @@ Node* AstGraphBuilder::TryLoadDynamicVariable(
fast_block.BreakUnless(check, BranchHint::kTrue);
}
- // Fast case, because variable is not shadowed. Perform global slot load.
- Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
- states.AddToNode(fast, bailout_id, combine);
- environment()->Push(fast);
+ // Fast case, because variable is not shadowed.
+ if (Node* constant = TryLoadGlobalConstant(name)) {
+ environment()->Push(constant);
+ } else {
+ // Perform global slot load.
+ Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
+ states.AddToNode(fast, bailout_id, combine);
+ environment()->Push(fast);
+ }
slow_block.Break();
environment()->Pop();
fast_block.EndBlock();
// Slow case, because variable potentially shadowed. Perform dynamic lookup.
- const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
- Node* slow = NewNode(op, BuildLoadFeedbackVector(), current_context());
+ Node* slow = BuildDynamicLoad(name, typeof_mode);
states.AddToNode(slow, bailout_id, combine);
environment()->Push(slow);
slow_block.EndBlock();
@@ -3969,8 +3954,7 @@ Node* AstGraphBuilder::TryLoadDynamicVariable(
fast_block.EndBlock();
// Slow case, because variable potentially shadowed. Perform dynamic lookup.
- const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
- Node* slow = NewNode(op, BuildLoadFeedbackVector(), current_context());
+ Node* slow = BuildDynamicLoad(name, typeof_mode);
states.AddToNode(slow, bailout_id, combine);
environment()->Push(slow);
slow_block.EndBlock();
@@ -4047,8 +4031,10 @@ void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
DCHECK_EQ(IrOpcode::kDead,
NodeProperties::GetFrameStateInput(node, 0)->opcode());
+ bool node_has_exception = NodeProperties::IsExceptionalCall(node);
NodeProperties::ReplaceFrameStateInput(
- node, 0, environment()->Checkpoint(ast_id, combine));
+ node, 0,
+ environment()->Checkpoint(ast_id, combine, node_has_exception));
}
}
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 3b6302d3dd..6cff237c3c 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -314,14 +314,13 @@ class AstGraphBuilder : public AstVisitor {
Node* BuildGlobalStore(Handle<Name> name, Node* value,
const VectorSlotPair& feedback);
+ // Builders for dynamic variable loads and stores.
+ Node* BuildDynamicLoad(Handle<Name> name, TypeofMode typeof_mode);
+ Node* BuildDynamicStore(Handle<Name> name, Node* value);
+
// Builders for accessing the function context.
Node* BuildLoadGlobalObject();
Node* BuildLoadNativeContextField(int index);
- Node* BuildLoadFeedbackVector();
-
- // Builder for accessing a (potentially immutable) object field.
- Node* BuildLoadObjectField(Node* object, int offset);
- Node* BuildLoadImmutableObjectField(Node* object, int offset);
// Builders for automatic type conversion.
Node* BuildToBoolean(Node* input, TypeFeedbackId feedback_id);
@@ -519,7 +518,8 @@ class AstGraphBuilder::Environment : public ZoneObject {
// Preserve a checkpoint of the environment for the IR graph. Any
// further mutation of the environment will not affect checkpoints.
Node* Checkpoint(BailoutId ast_id, OutputFrameStateCombine combine =
- OutputFrameStateCombine::Ignore());
+ OutputFrameStateCombine::Ignore(),
+ bool node_has_exception = false);
// Control dependency tracked by this environment.
Node* GetControlDependency() { return control_dependency_; }
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index 2074c944e6..ac96399774 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -198,7 +198,7 @@ void ALAA::VisitCompareOperation(CompareOperation* e) {
}
-void ALAA::VisitSpread(Spread* e) { Visit(e->expression()); }
+void ALAA::VisitSpread(Spread* e) { UNREACHABLE(); }
void ALAA::VisitEmptyParentheses(EmptyParentheses* e) { UNREACHABLE(); }
@@ -266,7 +266,6 @@ void ALAA::VisitForOfStatement(ForOfStatement* loop) {
Visit(loop->assign_iterator());
Enter(loop);
Visit(loop->assign_each());
- Visit(loop->each());
Visit(loop->subject());
Visit(loop->body());
Exit(loop);
@@ -288,8 +287,7 @@ void ALAA::VisitCountOperation(CountOperation* e) {
}
-void ALAA::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* expr) {
+void ALAA::VisitRewritableExpression(RewritableExpression* expr) {
Visit(expr->expression());
}
diff --git a/deps/v8/src/compiler/bytecode-branch-analysis.cc b/deps/v8/src/compiler/bytecode-branch-analysis.cc
index 27699a1b9a..4e96a53aeb 100644
--- a/deps/v8/src/compiler/bytecode-branch-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-branch-analysis.cc
@@ -11,115 +11,33 @@ namespace v8 {
namespace internal {
namespace compiler {
-// The class contains all of the sites that contain
-// branches to a particular target (bytecode offset).
-class BytecodeBranchInfo final : public ZoneObject {
- public:
- explicit BytecodeBranchInfo(Zone* zone)
- : back_edge_offsets_(zone), fore_edge_offsets_(zone) {}
-
- void AddBranch(int source_offset, int target_offset);
-
- // The offsets of bytecodes that refer to this bytecode as
- // a back-edge predecessor.
- const ZoneVector<int>* back_edge_offsets() { return &back_edge_offsets_; }
-
- // The offsets of bytecodes that refer to this bytecode as
- // a forwards-edge predecessor.
- const ZoneVector<int>* fore_edge_offsets() { return &fore_edge_offsets_; }
-
- private:
- ZoneVector<int> back_edge_offsets_;
- ZoneVector<int> fore_edge_offsets_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeBranchInfo);
-};
-
-
-void BytecodeBranchInfo::AddBranch(int source_offset, int target_offset) {
- if (source_offset < target_offset) {
- fore_edge_offsets_.push_back(source_offset);
- } else {
- back_edge_offsets_.push_back(source_offset);
- }
-}
-
-
BytecodeBranchAnalysis::BytecodeBranchAnalysis(
Handle<BytecodeArray> bytecode_array, Zone* zone)
- : branch_infos_(zone),
- bytecode_array_(bytecode_array),
- reachable_(bytecode_array->length(), zone),
+ : bytecode_array_(bytecode_array),
+ is_backward_target_(bytecode_array->length(), zone),
+ is_forward_target_(bytecode_array->length(), zone),
zone_(zone) {}
-
void BytecodeBranchAnalysis::Analyze() {
interpreter::BytecodeArrayIterator iterator(bytecode_array());
- bool reachable = true;
while (!iterator.done()) {
interpreter::Bytecode bytecode = iterator.current_bytecode();
int current_offset = iterator.current_offset();
- // All bytecode basic blocks are generated to be forward reachable
- // and may also be backward reachable. Hence if there's a forward
- // branch targeting here the code becomes reachable.
- reachable = reachable || forward_branches_target(current_offset);
- if (reachable) {
- reachable_.Add(current_offset);
- if (interpreter::Bytecodes::IsConditionalJump(bytecode)) {
- // Only the branch is recorded, the forward path falls through
- // and is handled as normal bytecode data flow.
- AddBranch(current_offset, iterator.GetJumpTargetOffset());
- } else if (interpreter::Bytecodes::IsJump(bytecode)) {
- // Unless the branch targets the next bytecode it's not
- // reachable. If it targets the next bytecode the check at the
- // start of the loop will set the reachable flag.
- AddBranch(current_offset, iterator.GetJumpTargetOffset());
- reachable = false;
- } else if (interpreter::Bytecodes::IsJumpOrReturn(bytecode)) {
- DCHECK_EQ(bytecode, interpreter::Bytecode::kReturn);
- reachable = false;
- }
+ if (interpreter::Bytecodes::IsJump(bytecode)) {
+ AddBranch(current_offset, iterator.GetJumpTargetOffset());
}
iterator.Advance();
}
}
-
-const ZoneVector<int>* BytecodeBranchAnalysis::BackwardBranchesTargetting(
- int offset) const {
- auto iterator = branch_infos_.find(offset);
- if (branch_infos_.end() != iterator) {
- return iterator->second->back_edge_offsets();
- } else {
- return nullptr;
- }
-}
-
-
-const ZoneVector<int>* BytecodeBranchAnalysis::ForwardBranchesTargetting(
- int offset) const {
- auto iterator = branch_infos_.find(offset);
- if (branch_infos_.end() != iterator) {
- return iterator->second->fore_edge_offsets();
- } else {
- return nullptr;
- }
-}
-
-
void BytecodeBranchAnalysis::AddBranch(int source_offset, int target_offset) {
- BytecodeBranchInfo* branch_info = nullptr;
- auto iterator = branch_infos_.find(target_offset);
- if (branch_infos_.end() == iterator) {
- branch_info = new (zone()) BytecodeBranchInfo(zone());
- branch_infos_.insert(std::make_pair(target_offset, branch_info));
+ if (source_offset < target_offset) {
+ is_forward_target_.Add(target_offset);
} else {
- branch_info = iterator->second;
+ is_backward_target_.Add(target_offset);
}
- branch_info->AddBranch(source_offset, target_offset);
}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/bytecode-branch-analysis.h b/deps/v8/src/compiler/bytecode-branch-analysis.h
index 0ef33b640c..7d32da8281 100644
--- a/deps/v8/src/compiler/bytecode-branch-analysis.h
+++ b/deps/v8/src/compiler/bytecode-branch-analysis.h
@@ -7,7 +7,6 @@
#include "src/bit-vector.h"
#include "src/handles.h"
-#include "src/zone-containers.h"
namespace v8 {
namespace internal {
@@ -16,15 +15,13 @@ class BytecodeArray;
namespace compiler {
-class BytecodeBranchInfo;
-
-// A class for identifying the branch targets and their branch sites
-// within a bytecode array and also identifying which bytecodes are
-// reachable. This information can be used to construct the local
-// control flow logic for high-level IR graphs built from bytecode.
+// A class for identifying branch targets within a bytecode array.
+// This information can be used to construct the local control flow
+// logic for high-level IR graphs built from bytecode.
//
-// NB This class relies on the only backwards branches in bytecode
-// being jumps back to loop headers.
+// N.B. If this class is used to determine loop headers, then such a
+// usage relies on the only backwards branches in bytecode being jumps
+// back to loop headers.
class BytecodeBranchAnalysis BASE_EMBEDDED {
public:
BytecodeBranchAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone);
@@ -34,27 +31,16 @@ class BytecodeBranchAnalysis BASE_EMBEDDED {
// until this has been called.
void Analyze();
- // Offsets of bytecodes having a backward branch to the bytecode at |offset|.
- const ZoneVector<int>* BackwardBranchesTargetting(int offset) const;
-
- // Offsets of bytecodes having a forward branch to the bytecode at |offset|.
- const ZoneVector<int>* ForwardBranchesTargetting(int offset) const;
-
- // Returns true if the bytecode at |offset| is reachable.
- bool is_reachable(int offset) const { return reachable_.Contains(offset); }
-
// Returns true if there are any forward branches to the bytecode at
// |offset|.
bool forward_branches_target(int offset) const {
- const ZoneVector<int>* sites = ForwardBranchesTargetting(offset);
- return sites != nullptr && sites->size() > 0;
+ return is_forward_target_.Contains(offset);
}
// Returns true if there are any backward branches to the bytecode
// at |offset|.
bool backward_branches_target(int offset) const {
- const ZoneVector<int>* sites = BackwardBranchesTargetting(offset);
- return sites != nullptr && sites->size() > 0;
+ return is_backward_target_.Contains(offset);
}
private:
@@ -63,9 +49,9 @@ class BytecodeBranchAnalysis BASE_EMBEDDED {
Zone* zone() const { return zone_; }
Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
- ZoneMap<int, BytecodeBranchInfo*> branch_infos_;
Handle<BytecodeArray> bytecode_array_;
- BitVector reachable_;
+ BitVector is_backward_target_;
+ BitVector is_forward_target_;
Zone* zone_;
DISALLOW_COPY_AND_ASSIGN(BytecodeBranchAnalysis);
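
The rewrite replaces the per-target ZoneVector bookkeeping with two BitVectors: a branch whose source precedes its target marks the target as a forward target, otherwise as a backward one, and the analysis only ever asks those two membership questions. A standalone model of the same idea, with std::vector<bool> standing in for BitVector:

#include <cassert>
#include <vector>

// Illustrative model: record only whether an offset is a forward or a
// backward branch target, not which offsets branch to it.
class BranchTargets {
 public:
  explicit BranchTargets(int length)
      : is_forward_(length, false), is_backward_(length, false) {}
  void AddBranch(int source, int target) {
    if (source < target) is_forward_[target] = true;
    else is_backward_[target] = true;
  }
  bool forward_branches_target(int offset) const { return is_forward_[offset]; }
  bool backward_branches_target(int offset) const { return is_backward_[offset]; }
 private:
  std::vector<bool> is_forward_, is_backward_;
};

int main() {
  BranchTargets t(100);
  t.AddBranch(10, 40);  // forward jump
  t.AddBranch(80, 40);  // loop back-edge to the same target
  assert(t.forward_branches_target(40) && t.backward_branches_target(40));
}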
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index cf0b6ab438..e28c19d844 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -13,25 +13,108 @@ namespace v8 {
namespace internal {
namespace compiler {
+// The abstract execution environment simulates the content of the interpreter
+// register file. The environment performs SSA-renaming of all tracked nodes at
+// split and merge points in the control flow.
+class BytecodeGraphBuilder::Environment : public ZoneObject {
+ public:
+ Environment(BytecodeGraphBuilder* builder, int register_count,
+ int parameter_count, Node* control_dependency, Node* context);
+
+ int parameter_count() const { return parameter_count_; }
+ int register_count() const { return register_count_; }
+
+ Node* LookupAccumulator() const;
+ Node* LookupRegister(interpreter::Register the_register) const;
+
+ void BindAccumulator(Node* node, FrameStateBeforeAndAfter* states = nullptr);
+ void BindRegister(interpreter::Register the_register, Node* node,
+ FrameStateBeforeAndAfter* states = nullptr);
+ void BindRegistersToProjections(interpreter::Register first_reg, Node* node,
+ FrameStateBeforeAndAfter* states = nullptr);
+ void RecordAfterState(Node* node, FrameStateBeforeAndAfter* states);
+
+ // Effect dependency tracked by this environment.
+ Node* GetEffectDependency() { return effect_dependency_; }
+ void UpdateEffectDependency(Node* dependency) {
+ effect_dependency_ = dependency;
+ }
+
+ // Preserve a checkpoint of the environment for the IR graph. Any
+ // further mutation of the environment will not affect checkpoints.
+ Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine);
+
+ // Returns true if the state values are up to date with the current
+ // environment.
+ bool StateValuesAreUpToDate(int output_poke_offset, int output_poke_count);
+
+ // Control dependency tracked by this environment.
+ Node* GetControlDependency() const { return control_dependency_; }
+ void UpdateControlDependency(Node* dependency) {
+ control_dependency_ = dependency;
+ }
+
+ Node* Context() const { return context_; }
+ void SetContext(Node* new_context) { context_ = new_context; }
+
+ Environment* CopyForConditional() const;
+ Environment* CopyForLoop();
+ void Merge(Environment* other);
+
+ private:
+ explicit Environment(const Environment* copy);
+ void PrepareForLoop();
+ bool StateValuesAreUpToDate(Node** state_values, int offset, int count,
+ int output_poke_start, int output_poke_end);
+ bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
+ void UpdateStateValues(Node** state_values, int offset, int count);
+
+ int RegisterToValuesIndex(interpreter::Register the_register) const;
+
+ Zone* zone() const { return builder_->local_zone(); }
+ Graph* graph() const { return builder_->graph(); }
+ CommonOperatorBuilder* common() const { return builder_->common(); }
+ BytecodeGraphBuilder* builder() const { return builder_; }
+ const NodeVector* values() const { return &values_; }
+ NodeVector* values() { return &values_; }
+ int register_base() const { return register_base_; }
+ int accumulator_base() const { return accumulator_base_; }
+
+ BytecodeGraphBuilder* builder_;
+ int register_count_;
+ int parameter_count_;
+ Node* context_;
+ Node* control_dependency_;
+ Node* effect_dependency_;
+ NodeVector values_;
+ Node* parameters_state_values_;
+ Node* registers_state_values_;
+ Node* accumulator_state_values_;
+ int register_base_;
+ int accumulator_base_;
+};
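
The class above is the heart of the new builder: each control-flow arm gets its own view of the interpreter register file, and merge points reconcile the views. A minimal sketch of that SSA-renaming idea, with toy types standing in for V8's Node and graph machinery (the "phi" here is just a labelled value, not a real graph operator):

#include <cstdio>
#include <string>
#include <vector>

struct ToyNode {
  std::string label;  // e.g. "k2" or "phi(k2,k3)"
};

struct ToyEnvironment {
  std::vector<ToyNode*> values;  // one slot per interpreter register

  // Each branch arm mutates its own copy, like CopyForConditional().
  ToyEnvironment* CopyForBranch() const { return new ToyEnvironment(*this); }

  // At a merge point, any register the two arms disagree on gets a phi,
  // mirroring what Merge() does with real phi nodes.
  void Merge(const ToyEnvironment& other) {
    for (size_t i = 0; i < values.size(); ++i) {
      if (values[i] != other.values[i]) {
        values[i] = new ToyNode{"phi(" + values[i]->label + "," +
                                other.values[i]->label + ")"};
      }
    }
  }
};

int main() {
  ToyNode k1{"k1"}, k2{"k2"}, k3{"k3"};
  ToyEnvironment then_env;
  then_env.values = {&k1, &k2};
  ToyEnvironment* else_env = then_env.CopyForBranch();
  else_env->values[1] = &k3;  // the else arm redefines register r1
  then_env.Merge(*else_env);  // r0 is unchanged, r1 becomes a phi
  std::printf("r1 = %s\n", then_env.values[1]->label.c_str());
  return 0;
}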
+
// Helper for generating frame states for before and after a bytecode.
class BytecodeGraphBuilder::FrameStateBeforeAndAfter {
public:
- FrameStateBeforeAndAfter(BytecodeGraphBuilder* builder,
- const interpreter::BytecodeArrayIterator& iterator)
+ explicit FrameStateBeforeAndAfter(BytecodeGraphBuilder* builder)
: builder_(builder),
id_after_(BailoutId::None()),
added_to_node_(false),
+ frame_states_unused_(false),
output_poke_offset_(0),
output_poke_count_(0) {
- BailoutId id_before(iterator.current_offset());
+ BailoutId id_before(builder->bytecode_iterator().current_offset());
frame_state_before_ = builder_->environment()->Checkpoint(
id_before, OutputFrameStateCombine::Ignore());
- id_after_ = BailoutId(id_before.ToInt() + iterator.current_bytecode_size());
+ id_after_ = BailoutId(id_before.ToInt() +
+ builder->bytecode_iterator().current_bytecode_size());
}
~FrameStateBeforeAndAfter() {
DCHECK(added_to_node_);
- DCHECK(builder_->environment()->StateValuesAreUpToDate(output_poke_offset_,
+ DCHECK(frame_states_unused_ ||
+ builder_->environment()->StateValuesAreUpToDate(output_poke_offset_,
output_poke_count_));
}
@@ -62,6 +145,7 @@ class BytecodeGraphBuilder::FrameStateBeforeAndAfter {
output_poke_offset_ = static_cast<int>(combine.GetOffsetToPokeAt());
output_poke_count_ = node->op()->ValueOutputCount();
}
+ frame_states_unused_ = count == 0;
added_to_node_ = true;
}
@@ -70,6 +154,7 @@ class BytecodeGraphBuilder::FrameStateBeforeAndAfter {
BailoutId id_after_;
bool added_to_node_;
+ bool frame_states_unused_;
int output_poke_offset_;
int output_poke_count_;
};
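
FrameStateBeforeAndAfter is an RAII helper: construction checkpoints the environment before the bytecode's effects, and the destructor asserts that the captured states were attached to a node (or, with the new flag, provably unused). A compilable sketch of that shape, with an int standing in for the checkpoint:

#include <cassert>

class BeforeAndAfterSketch {
 public:
  explicit BeforeAndAfterSketch(int checkpoint_id)
      : id_before_(checkpoint_id), added_to_node_(false) {}

  // Fires when the visitor's scope ends, catching any bytecode handler
  // that forgot to attach its frame states.
  ~BeforeAndAfterSketch() { assert(added_to_node_); }

  void AddToNode() {
    (void)id_before_;  // the real helper wires before/after ids in here
    added_to_node_ = true;
  }

 private:
  int id_before_;
  bool added_to_node_;
};

int main() {
  BeforeAndAfterSketch states(/*checkpoint_id=*/13);
  states.AddToNode();  // omit this call and the destructor assert fires
  return 0;
}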
@@ -155,8 +240,8 @@ Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
Node* BytecodeGraphBuilder::Environment::LookupRegister(
interpreter::Register the_register) const {
- if (the_register.is_function_context()) {
- return builder()->GetFunctionContext();
+ if (the_register.is_current_context()) {
+ return Context();
} else if (the_register.is_function_closure()) {
return builder()->GetFunctionClosure();
} else if (the_register.is_new_target()) {
@@ -168,16 +253,6 @@ Node* BytecodeGraphBuilder::Environment::LookupRegister(
}
-void BytecodeGraphBuilder::Environment::ExchangeRegisters(
- interpreter::Register reg0, interpreter::Register reg1) {
- int reg0_index = RegisterToValuesIndex(reg0);
- int reg1_index = RegisterToValuesIndex(reg1);
- Node* saved_reg0_value = values()->at(reg0_index);
- values()->at(reg0_index) = values()->at(reg1_index);
- values()->at(reg1_index) = saved_reg0_value;
-}
-
-
void BytecodeGraphBuilder::Environment::BindAccumulator(
Node* node, FrameStateBeforeAndAfter* states) {
if (states) {
@@ -220,16 +295,6 @@ void BytecodeGraphBuilder::Environment::RecordAfterState(
}
-bool BytecodeGraphBuilder::Environment::IsMarkedAsUnreachable() const {
- return GetControlDependency()->opcode() == IrOpcode::kDead;
-}
-
-
-void BytecodeGraphBuilder::Environment::MarkAsUnreachable() {
- UpdateControlDependency(builder()->jsgraph()->Dead());
-}
-
-
BytecodeGraphBuilder::Environment*
BytecodeGraphBuilder::Environment::CopyForLoop() {
PrepareForLoop();
@@ -245,11 +310,6 @@ BytecodeGraphBuilder::Environment::CopyForConditional() const {
void BytecodeGraphBuilder::Environment::Merge(
BytecodeGraphBuilder::Environment* other) {
- // Nothing to do if the other environment is dead.
- if (other->IsMarkedAsUnreachable()) {
- return;
- }
-
// Create a merge of the control dependencies of both environments and update
// the current environment's control dependency accordingly.
Node* control = builder()->MergeControl(GetControlDependency(),
@@ -295,7 +355,7 @@ void BytecodeGraphBuilder::Environment::PrepareForLoop() {
bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
Node** state_values, int offset, int count) {
- if (!builder()->info()->is_deoptimization_enabled()) {
+ if (!builder()->deoptimization_enabled_) {
return false;
}
if (*state_values == nullptr) {
@@ -325,7 +385,7 @@ void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
Node* BytecodeGraphBuilder::Environment::Checkpoint(
BailoutId bailout_id, OutputFrameStateCombine combine) {
- if (!builder()->info()->is_deoptimization_enabled()) {
+ if (!builder()->deoptimization_enabled_) {
return builder()->jsgraph()->EmptyFrameState();
}
@@ -363,6 +423,7 @@ bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
int output_poke_offset, int output_poke_count) {
+ if (!builder()->deoptimization_enabled_) return true;
// Poke offset is relative to the top of the stack (i.e., the accumulator).
int output_poke_start = accumulator_base() - output_poke_offset;
int output_poke_end = output_poke_start + output_poke_count;
@@ -375,26 +436,27 @@ bool BytecodeGraphBuilder::Environment::StateValuesAreUpToDate(
1, output_poke_start, output_poke_end);
}
-
BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
- CompilationInfo* compilation_info,
+ CompilationInfo* info,
JSGraph* jsgraph)
: local_zone_(local_zone),
- info_(compilation_info),
jsgraph_(jsgraph),
- bytecode_array_(handle(info()->shared_info()->bytecode_array())),
+ bytecode_array_(handle(info->shared_info()->bytecode_array())),
+ exception_handler_table_(
+ handle(HandlerTable::cast(bytecode_array()->handler_table()))),
+ feedback_vector_(info->feedback_vector()),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kInterpretedFunction,
bytecode_array()->parameter_count(),
- bytecode_array()->register_count(), info()->shared_info(),
- CALL_MAINTAINS_NATIVE_CONTEXT)),
+ bytecode_array()->register_count(), info->shared_info())),
+ deoptimization_enabled_(info->is_deoptimization_enabled()),
merge_environments_(local_zone),
- loop_header_environments_(local_zone),
+ exception_handlers_(local_zone),
+ current_exception_handler_(0),
input_buffer_size_(0),
input_buffer_(nullptr),
exit_controls_(local_zone) {}
-
Node* BytecodeGraphBuilder::GetNewTarget() {
if (!new_target_.is_set()) {
int params = bytecode_array()->parameter_count();
@@ -430,21 +492,6 @@ Node* BytecodeGraphBuilder::GetFunctionClosure() {
}
-Node* BytecodeGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
- return NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()), object,
- jsgraph()->IntPtrConstant(offset - kHeapObjectTag));
-}
-
-
-Node* BytecodeGraphBuilder::BuildLoadImmutableObjectField(Node* object,
- int offset) {
- return graph()->NewNode(jsgraph()->machine()->Load(MachineType::AnyTagged()),
- object,
- jsgraph()->IntPtrConstant(offset - kHeapObjectTag),
- graph()->start(), graph()->start());
-}
-
-
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
const Operator* op =
javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
@@ -453,30 +500,15 @@ Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
}
-Node* BytecodeGraphBuilder::BuildLoadFeedbackVector() {
- if (!feedback_vector_.is_set()) {
- Node* closure = GetFunctionClosure();
- Node* shared = BuildLoadImmutableObjectField(
- closure, JSFunction::kSharedFunctionInfoOffset);
- Node* vector = BuildLoadImmutableObjectField(
- shared, SharedFunctionInfo::kFeedbackVectorOffset);
- feedback_vector_.set(vector);
- }
- return feedback_vector_.get();
-}
-
-
VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
- Handle<TypeFeedbackVector> feedback_vector = info()->feedback_vector();
FeedbackVectorSlot slot;
if (slot_id >= TypeFeedbackVector::kReservedIndexCount) {
- slot = feedback_vector->ToSlot(slot_id);
+ slot = feedback_vector()->ToSlot(slot_id);
}
- return VectorSlotPair(feedback_vector, slot);
+ return VectorSlotPair(feedback_vector(), slot);
}
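
The guard above exists because the first indices of a feedback vector are reserved bookkeeping entries rather than real feedback slots; only ids past that boundary translate to slots. A sketch of the guard with an invented reserved count (V8's actual kReservedIndexCount and slot encoding may differ):

#include <cstdio>

constexpr int kToyReservedIndexCount = 1;  // invented; not V8's constant

// Returns a usable slot index, or -1 for the "invalid slot" that the real
// code expresses as a default-constructed FeedbackVectorSlot.
int ToToySlot(int slot_id) {
  return slot_id >= kToyReservedIndexCount ? slot_id - kToyReservedIndexCount
                                           : -1;
}

int main() {
  std::printf("%d %d\n", ToToySlot(0), ToToySlot(3));  // prints: -1 2
  return 0;
}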
-
-bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
+bool BytecodeGraphBuilder::CreateGraph() {
// Set up the basic structure of the graph. Outputs for {Start} are
// the formal parameters (including the receiver) plus context and
// closure.
@@ -492,7 +524,7 @@ bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
GetFunctionContext());
set_environment(&env);
- CreateGraphBody(stack_check);
+ VisitBytecodes();
// Finish the basic structure of the graph.
DCHECK_NE(0u, exit_controls_.size());
@@ -504,20 +536,6 @@ bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
return true;
}
-
-void BytecodeGraphBuilder::CreateGraphBody(bool stack_check) {
- // TODO(oth): Review ast-graph-builder equivalent, i.e. arguments
- // object setup, this function variable if used, tracing hooks.
-
- if (stack_check) {
- Node* node = NewNode(javascript()->StackCheck());
- PrepareEntryFrameState(node);
- }
-
- VisitBytecodes();
-}
-
-
void BytecodeGraphBuilder::VisitBytecodes() {
BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
analysis.Analyze();
@@ -526,14 +544,15 @@ void BytecodeGraphBuilder::VisitBytecodes() {
set_bytecode_iterator(&iterator);
while (!iterator.done()) {
int current_offset = iterator.current_offset();
- if (analysis.is_reachable(current_offset)) {
- MergeEnvironmentsOfForwardBranches(current_offset);
- BuildLoopHeaderForBackwardBranches(current_offset);
+ EnterAndExitExceptionHandlers(current_offset);
+ SwitchToMergeEnvironment(current_offset);
+ if (environment() != nullptr) {
+ BuildLoopHeaderEnvironment(current_offset);
switch (iterator.current_bytecode()) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
- Visit##name(iterator); \
+ Visit##name(); \
break;
BYTECODE_LIST(BYTECODE_CASE)
#undef BYTECODE_CASE
@@ -543,635 +562,417 @@ void BytecodeGraphBuilder::VisitBytecodes() {
}
set_branch_analysis(nullptr);
set_bytecode_iterator(nullptr);
+ DCHECK(exception_handlers_.empty());
}
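
The dispatch above is an X-macro: BYTECODE_LIST expands to one switch case per bytecode, each forwarding to the matching Visit##name() member, so adding a bytecode to the list automatically extends the switch. A self-contained sketch of the pattern with an invented three-entry list:

#include <cstdio>

#define TOY_BYTECODE_LIST(V) \
  V(LdaZero)                 \
  V(Star)                    \
  V(Return)

enum class ToyBytecode {
#define DECLARE_ENUM(name) k##name,
  TOY_BYTECODE_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
};

struct ToyBuilder {
#define DECLARE_VISIT(name) \
  void Visit##name() { std::printf("visit %s\n", #name); }
  TOY_BYTECODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT

  void Dispatch(ToyBytecode bytecode) {
    switch (bytecode) {
#define TOY_BYTECODE_CASE(name) \
  case ToyBytecode::k##name:    \
    Visit##name();              \
    break;
      TOY_BYTECODE_LIST(TOY_BYTECODE_CASE)
#undef TOY_BYTECODE_CASE
    }
  }
};

int main() {
  ToyBuilder builder;
  builder.Dispatch(ToyBytecode::kStar);  // prints "visit Star"
  return 0;
}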
-
-void BytecodeGraphBuilder::VisitLdaZero(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaZero() {
Node* node = jsgraph()->ZeroConstant();
environment()->BindAccumulator(node);
}
-
-void BytecodeGraphBuilder::VisitLdaSmi8(
- const interpreter::BytecodeArrayIterator& iterator) {
- Node* node = jsgraph()->Constant(iterator.GetImmediateOperand(0));
+void BytecodeGraphBuilder::VisitLdaSmi8() {
+ Node* node = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
environment()->BindAccumulator(node);
}
-
-void BytecodeGraphBuilder::VisitLdaConstantWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- Node* node = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
+void BytecodeGraphBuilder::VisitLdaConstantWide() {
+ Node* node =
+ jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
environment()->BindAccumulator(node);
}
-
-void BytecodeGraphBuilder::VisitLdaConstant(
- const interpreter::BytecodeArrayIterator& iterator) {
- Node* node = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
+void BytecodeGraphBuilder::VisitLdaConstant() {
+ Node* node =
+ jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
environment()->BindAccumulator(node);
}
-
-void BytecodeGraphBuilder::VisitLdaUndefined(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaUndefined() {
Node* node = jsgraph()->UndefinedConstant();
environment()->BindAccumulator(node);
}
-
-void BytecodeGraphBuilder::VisitLdaNull(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaNull() {
Node* node = jsgraph()->NullConstant();
environment()->BindAccumulator(node);
}
-
-void BytecodeGraphBuilder::VisitLdaTheHole(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaTheHole() {
Node* node = jsgraph()->TheHoleConstant();
environment()->BindAccumulator(node);
}
-
-void BytecodeGraphBuilder::VisitLdaTrue(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaTrue() {
Node* node = jsgraph()->TrueConstant();
environment()->BindAccumulator(node);
}
-
-void BytecodeGraphBuilder::VisitLdaFalse(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaFalse() {
Node* node = jsgraph()->FalseConstant();
environment()->BindAccumulator(node);
}
-
-void BytecodeGraphBuilder::VisitLdar(
- const interpreter::BytecodeArrayIterator& iterator) {
- Node* value = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::VisitLdar() {
+ Node* value =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
environment()->BindAccumulator(value);
}
-
-void BytecodeGraphBuilder::VisitStar(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitStar() {
Node* value = environment()->LookupAccumulator();
- environment()->BindRegister(iterator.GetRegisterOperand(0), value);
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), value);
}
-
-void BytecodeGraphBuilder::VisitMov(
- const interpreter::BytecodeArrayIterator& iterator) {
- Node* value = environment()->LookupRegister(iterator.GetRegisterOperand(0));
- environment()->BindRegister(iterator.GetRegisterOperand(1), value);
-}
-
-
-void BytecodeGraphBuilder::VisitExchange(
- const interpreter::BytecodeArrayIterator& iterator) {
- environment()->ExchangeRegisters(iterator.GetRegisterOperand(0),
- iterator.GetRegisterOperand(1));
-}
-
-
-void BytecodeGraphBuilder::VisitExchangeWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- environment()->ExchangeRegisters(iterator.GetRegisterOperand(0),
- iterator.GetRegisterOperand(1));
+void BytecodeGraphBuilder::VisitMov() {
+ Node* value =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
}
+void BytecodeGraphBuilder::VisitMovWide() { VisitMov(); }
void BytecodeGraphBuilder::BuildLoadGlobal(
- const interpreter::BytecodeArrayIterator& iterator,
TypeofMode typeof_mode) {
- FrameStateBeforeAndAfter states(this, iterator);
+ FrameStateBeforeAndAfter states(this);
Handle<Name> name =
- Handle<Name>::cast(iterator.GetConstantForIndexOperand(0));
- VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+ Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
- Node* node = NewNode(op, BuildLoadFeedbackVector());
+ Node* node = NewNode(op, GetFunctionClosure());
environment()->BindAccumulator(node, &states);
}
-
-void BytecodeGraphBuilder::VisitLdaGlobalSloppy(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
-}
-
-
-void BytecodeGraphBuilder::VisitLdaGlobalStrict(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
-}
-
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofSloppy(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
-}
-
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofStrict(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
-}
-
-
-void BytecodeGraphBuilder::VisitLdaGlobalSloppyWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
+void BytecodeGraphBuilder::VisitLdaGlobal() {
+ BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
}
-
-void BytecodeGraphBuilder::VisitLdaGlobalStrictWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildLoadGlobal(iterator, TypeofMode::NOT_INSIDE_TYPEOF);
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
+ BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
}
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofSloppyWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+void BytecodeGraphBuilder::VisitLdaGlobalWide() {
+ BuildLoadGlobal(TypeofMode::NOT_INSIDE_TYPEOF);
}
-
-void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofStrictWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildLoadGlobal(iterator, TypeofMode::INSIDE_TYPEOF);
+void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeofWide() {
+ BuildLoadGlobal(TypeofMode::INSIDE_TYPEOF);
}
-
-void BytecodeGraphBuilder::BuildStoreGlobal(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
+ FrameStateBeforeAndAfter states(this);
Handle<Name> name =
- Handle<Name>::cast(iterator.GetConstantForIndexOperand(0));
- VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+ Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
Node* value = environment()->LookupAccumulator();
- const Operator* op =
- javascript()->StoreGlobal(language_mode(), name, feedback);
- Node* node = NewNode(op, value, BuildLoadFeedbackVector());
+ const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback);
+ Node* node = NewNode(op, value, GetFunctionClosure());
environment()->RecordAfterState(node, &states);
}
-
-void BytecodeGraphBuilder::VisitStaGlobalSloppy(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildStoreGlobal(iterator);
+void BytecodeGraphBuilder::VisitStaGlobalSloppy() {
+ BuildStoreGlobal(LanguageMode::SLOPPY);
}
-
-void BytecodeGraphBuilder::VisitStaGlobalStrict(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildStoreGlobal(iterator);
+void BytecodeGraphBuilder::VisitStaGlobalStrict() {
+ BuildStoreGlobal(LanguageMode::STRICT);
}
-void BytecodeGraphBuilder::VisitStaGlobalSloppyWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildStoreGlobal(iterator);
+void BytecodeGraphBuilder::VisitStaGlobalSloppyWide() {
+ BuildStoreGlobal(LanguageMode::SLOPPY);
}
-
-void BytecodeGraphBuilder::VisitStaGlobalStrictWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildStoreGlobal(iterator);
+void BytecodeGraphBuilder::VisitStaGlobalStrictWide() {
+ BuildStoreGlobal(LanguageMode::STRICT);
}
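
A pattern repeated throughout this hunk: instead of asserting the mode recovered from the CompilationInfo (the old DCHECK(is_sloppy(...)) / DCHECK(is_strict(...)) pairs), each visitor now passes its statically known language mode to one shared builder. Reduced to a sketch:

enum class ToyLanguageMode { kSloppy, kStrict };

struct ToyGraphBuilder {
  // One shared lowering, parameterized on the statically known mode.
  void BuildStoreGlobal(ToyLanguageMode mode) { (void)mode; /* shared body */ }

  // Four one-line visitors; the *Wide variants reuse the narrow handlers.
  void VisitStaGlobalSloppy() { BuildStoreGlobal(ToyLanguageMode::kSloppy); }
  void VisitStaGlobalStrict() { BuildStoreGlobal(ToyLanguageMode::kStrict); }
  void VisitStaGlobalSloppyWide() { VisitStaGlobalSloppy(); }
  void VisitStaGlobalStrictWide() { VisitStaGlobalStrict(); }
};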
-
-void BytecodeGraphBuilder::VisitLdaContextSlot(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLdaContextSlot() {
  // TODO(mythria): LoadContextSlots are unrolled by the required depth when
  // generating bytecode, so the value of depth is always 0. Update this code
  // when the implementation changes.
  // TODO(mythria): The immutable flag is also set to false because this
  // information is not available in the bytecode array. Update this code when
  // the implementation changes.
- const Operator* op =
- javascript()->LoadContext(0, iterator.GetIndexOperand(1), false);
- Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ const Operator* op = javascript()->LoadContext(
+ 0, bytecode_iterator().GetIndexOperand(1), false);
+ Node* context =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* node = NewNode(op, context);
environment()->BindAccumulator(node);
}
+void BytecodeGraphBuilder::VisitLdaContextSlotWide() { VisitLdaContextSlot(); }
-void BytecodeGraphBuilder::VisitLdaContextSlotWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- VisitLdaContextSlot(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitStaContextSlot(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitStaContextSlot() {
  // TODO(mythria): LoadContextSlots are unrolled by the required depth when
  // generating bytecode, so the value of depth is always 0. Update this code
  // when the implementation changes.
const Operator* op =
- javascript()->StoreContext(0, iterator.GetIndexOperand(1));
- Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ javascript()->StoreContext(0, bytecode_iterator().GetIndexOperand(1));
+ Node* context =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* value = environment()->LookupAccumulator();
NewNode(op, context, value);
}
+void BytecodeGraphBuilder::VisitStaContextSlotWide() { VisitStaContextSlot(); }
-void BytecodeGraphBuilder::VisitStaContextSlotWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- VisitStaContextSlot(iterator);
-}
-
-
-void BytecodeGraphBuilder::BuildLdaLookupSlot(
- TypeofMode typeof_mode,
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
- Handle<String> name =
- Handle<String>::cast(iterator.GetConstantForIndexOperand(0));
- const Operator* op = javascript()->LoadDynamic(name, typeof_mode);
- Node* value =
- NewNode(op, BuildLoadFeedbackVector(), environment()->Context());
+void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
+ FrameStateBeforeAndAfter states(this);
+ Node* name =
+ jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
+ const Operator* op =
+ javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
+ ? Runtime::kLoadLookupSlot
+ : Runtime::kLoadLookupSlotInsideTypeof);
+ Node* value = NewNode(op, name);
environment()->BindAccumulator(value, &states);
}
-
-void BytecodeGraphBuilder::VisitLdaLookupSlot(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildLdaLookupSlot(TypeofMode::NOT_INSIDE_TYPEOF, iterator);
+void BytecodeGraphBuilder::VisitLdaLookupSlot() {
+ BuildLdaLookupSlot(TypeofMode::NOT_INSIDE_TYPEOF);
}
-
-void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeof(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildLdaLookupSlot(TypeofMode::INSIDE_TYPEOF, iterator);
+void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeof() {
+ BuildLdaLookupSlot(TypeofMode::INSIDE_TYPEOF);
}
-
-void BytecodeGraphBuilder::BuildStaLookupSlot(
- LanguageMode language_mode,
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildStaLookupSlot(LanguageMode language_mode) {
+ FrameStateBeforeAndAfter states(this);
Node* value = environment()->LookupAccumulator();
- Node* name = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
- Node* language = jsgraph()->Constant(language_mode);
- const Operator* op = javascript()->CallRuntime(Runtime::kStoreLookupSlot, 4);
- Node* store = NewNode(op, value, environment()->Context(), name, language);
+ Node* name =
+ jsgraph()->Constant(bytecode_iterator().GetConstantForIndexOperand(0));
+ const Operator* op = javascript()->CallRuntime(
+ is_strict(language_mode) ? Runtime::kStoreLookupSlot_Strict
+ : Runtime::kStoreLookupSlot_Sloppy);
+ Node* store = NewNode(op, name, value);
environment()->BindAccumulator(store, &states);
}
+void BytecodeGraphBuilder::VisitLdaLookupSlotWide() { VisitLdaLookupSlot(); }
-void BytecodeGraphBuilder::VisitLdaLookupSlotWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- VisitLdaLookupSlot(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeofWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- VisitLdaLookupSlotInsideTypeof(iterator);
+void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeofWide() {
+ VisitLdaLookupSlotInsideTypeof();
}
-
-void BytecodeGraphBuilder::VisitStaLookupSlotSloppy(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildStaLookupSlot(LanguageMode::SLOPPY, iterator);
+void BytecodeGraphBuilder::VisitStaLookupSlotSloppy() {
+ BuildStaLookupSlot(LanguageMode::SLOPPY);
}
-
-void BytecodeGraphBuilder::VisitStaLookupSlotStrict(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildStaLookupSlot(LanguageMode::STRICT, iterator);
+void BytecodeGraphBuilder::VisitStaLookupSlotStrict() {
+ BuildStaLookupSlot(LanguageMode::STRICT);
}
-
-void BytecodeGraphBuilder::VisitStaLookupSlotSloppyWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- VisitStaLookupSlotSloppy(iterator);
+void BytecodeGraphBuilder::VisitStaLookupSlotSloppyWide() {
+ VisitStaLookupSlotSloppy();
}
-
-void BytecodeGraphBuilder::VisitStaLookupSlotStrictWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- VisitStaLookupSlotStrict(iterator);
+void BytecodeGraphBuilder::VisitStaLookupSlotStrictWide() {
+ VisitStaLookupSlotStrict();
}
-
-void BytecodeGraphBuilder::BuildNamedLoad(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
- Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::BuildNamedLoad() {
+ FrameStateBeforeAndAfter states(this);
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Handle<Name> name =
- Handle<Name>::cast(iterator.GetConstantForIndexOperand(1));
- VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+ Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(1));
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
- const Operator* op = javascript()->LoadNamed(language_mode(), name, feedback);
- Node* node = NewNode(op, object, BuildLoadFeedbackVector());
+ const Operator* op = javascript()->LoadNamed(name, feedback);
+ Node* node = NewNode(op, object, GetFunctionClosure());
environment()->BindAccumulator(node, &states);
}
+void BytecodeGraphBuilder::VisitLoadIC() { BuildNamedLoad(); }
-void BytecodeGraphBuilder::VisitLoadICSloppy(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildNamedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitLoadICStrict(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildNamedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitLoadICSloppyWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildNamedLoad(iterator);
-}
-
+void BytecodeGraphBuilder::VisitLoadICWide() { BuildNamedLoad(); }
-void BytecodeGraphBuilder::VisitLoadICStrictWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildNamedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::BuildKeyedLoad(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildKeyedLoad() {
+ FrameStateBeforeAndAfter states(this);
Node* key = environment()->LookupAccumulator();
- Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
- VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(1));
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
- const Operator* op = javascript()->LoadProperty(language_mode(), feedback);
- Node* node = NewNode(op, object, key, BuildLoadFeedbackVector());
+ const Operator* op = javascript()->LoadProperty(feedback);
+ Node* node = NewNode(op, object, key, GetFunctionClosure());
environment()->BindAccumulator(node, &states);
}
+void BytecodeGraphBuilder::VisitKeyedLoadIC() { BuildKeyedLoad(); }
-void BytecodeGraphBuilder::VisitKeyedLoadICSloppy(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildKeyedLoad(iterator);
-}
+void BytecodeGraphBuilder::VisitKeyedLoadICWide() { BuildKeyedLoad(); }
-
-void BytecodeGraphBuilder::VisitKeyedLoadICStrict(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildKeyedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitKeyedLoadICSloppyWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildKeyedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitKeyedLoadICStrictWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildKeyedLoad(iterator);
-}
-
-
-void BytecodeGraphBuilder::BuildNamedStore(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
+ FrameStateBeforeAndAfter states(this);
Node* value = environment()->LookupAccumulator();
- Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Handle<Name> name =
- Handle<Name>::cast(iterator.GetConstantForIndexOperand(1));
- VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
+ Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(1));
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
- const Operator* op =
- javascript()->StoreNamed(language_mode(), name, feedback);
- Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
+ const Operator* op = javascript()->StoreNamed(language_mode, name, feedback);
+ Node* node = NewNode(op, object, value, GetFunctionClosure());
environment()->RecordAfterState(node, &states);
}
-
-void BytecodeGraphBuilder::VisitStoreICSloppy(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildNamedStore(iterator);
+void BytecodeGraphBuilder::VisitStoreICSloppy() {
+ BuildNamedStore(LanguageMode::SLOPPY);
}
-
-void BytecodeGraphBuilder::VisitStoreICStrict(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildNamedStore(iterator);
+void BytecodeGraphBuilder::VisitStoreICStrict() {
+ BuildNamedStore(LanguageMode::STRICT);
}
-
-void BytecodeGraphBuilder::VisitStoreICSloppyWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildNamedStore(iterator);
+void BytecodeGraphBuilder::VisitStoreICSloppyWide() {
+ BuildNamedStore(LanguageMode::SLOPPY);
}
-
-void BytecodeGraphBuilder::VisitStoreICStrictWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildNamedStore(iterator);
+void BytecodeGraphBuilder::VisitStoreICStrictWide() {
+ BuildNamedStore(LanguageMode::STRICT);
}
-
-void BytecodeGraphBuilder::BuildKeyedStore(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
+ FrameStateBeforeAndAfter states(this);
Node* value = environment()->LookupAccumulator();
- Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
- Node* key = environment()->LookupRegister(iterator.GetRegisterOperand(1));
- VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(2));
-
- const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
- Node* node = NewNode(op, object, key, value, BuildLoadFeedbackVector());
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* key =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
+
+ const Operator* op = javascript()->StoreProperty(language_mode, feedback);
+ Node* node = NewNode(op, object, key, value, GetFunctionClosure());
environment()->RecordAfterState(node, &states);
}
-
-void BytecodeGraphBuilder::VisitKeyedStoreICSloppy(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildKeyedStore(iterator);
+void BytecodeGraphBuilder::VisitKeyedStoreICSloppy() {
+ BuildKeyedStore(LanguageMode::SLOPPY);
}
-
-void BytecodeGraphBuilder::VisitKeyedStoreICStrict(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildKeyedStore(iterator);
+void BytecodeGraphBuilder::VisitKeyedStoreICStrict() {
+ BuildKeyedStore(LanguageMode::STRICT);
}
-
-void BytecodeGraphBuilder::VisitKeyedStoreICSloppyWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildKeyedStore(iterator);
+void BytecodeGraphBuilder::VisitKeyedStoreICSloppyWide() {
+ BuildKeyedStore(LanguageMode::SLOPPY);
}
-
-void BytecodeGraphBuilder::VisitKeyedStoreICStrictWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildKeyedStore(iterator);
+void BytecodeGraphBuilder::VisitKeyedStoreICStrictWide() {
+ BuildKeyedStore(LanguageMode::STRICT);
}
-
-void BytecodeGraphBuilder::VisitPushContext(
- const interpreter::BytecodeArrayIterator& iterator) {
- Node* context = environment()->LookupAccumulator();
- environment()->BindRegister(iterator.GetRegisterOperand(0), context);
- environment()->SetContext(context);
+void BytecodeGraphBuilder::VisitPushContext() {
+ Node* new_context = environment()->LookupAccumulator();
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0),
+ environment()->Context());
+ environment()->SetContext(new_context);
}
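
Note the behavioural fix in VisitPushContext: the register operand now receives the outgoing context, and only then does the environment switch, so a matching PopContext can restore what was saved. A sketch of the pairing:

#include <cassert>

struct ToyContext {};

struct ToyContextEnv {
  ToyContext* context;
  ToyContext* saved_register;  // stands in for the register operand

  void PushContext(ToyContext* new_context) {
    saved_register = context;  // save the outgoing context first...
    context = new_context;     // ...then switch, as the fixed visitor does
  }
  void PopContext() { context = saved_register; }
};

int main() {
  ToyContext outer, inner;
  ToyContextEnv env{&outer, nullptr};
  env.PushContext(&inner);
  assert(env.context == &inner);
  env.PopContext();
  assert(env.context == &outer);
  return 0;
}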
-
-void BytecodeGraphBuilder::VisitPopContext(
- const interpreter::BytecodeArrayIterator& iterator) {
- Node* context = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::VisitPopContext() {
+ Node* context =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
environment()->SetContext(context);
}
-
-void BytecodeGraphBuilder::VisitCreateClosure(
- const interpreter::BytecodeArrayIterator& iterator) {
- Handle<SharedFunctionInfo> shared_info =
- Handle<SharedFunctionInfo>::cast(iterator.GetConstantForIndexOperand(0));
+void BytecodeGraphBuilder::VisitCreateClosure() {
+ Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0));
PretenureFlag tenured =
- iterator.GetImmediateOperand(1) ? TENURED : NOT_TENURED;
+ bytecode_iterator().GetImmediateOperand(1) ? TENURED : NOT_TENURED;
const Operator* op = javascript()->CreateClosure(shared_info, tenured);
Node* closure = NewNode(op);
environment()->BindAccumulator(closure);
}
+void BytecodeGraphBuilder::VisitCreateClosureWide() { VisitCreateClosure(); }
-void BytecodeGraphBuilder::VisitCreateClosureWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- VisitCreateClosure(iterator);
-}
-
-
-void BytecodeGraphBuilder::BuildCreateArguments(
- CreateArgumentsParameters::Type type,
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
- const Operator* op = javascript()->CreateArguments(type, 0);
+void BytecodeGraphBuilder::BuildCreateArguments(CreateArgumentsType type) {
+ FrameStateBeforeAndAfter states(this);
+ const Operator* op = javascript()->CreateArguments(type);
Node* object = NewNode(op, GetFunctionClosure());
environment()->BindAccumulator(object, &states);
}
-
-void BytecodeGraphBuilder::VisitCreateMappedArguments(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCreateArguments(CreateArgumentsParameters::kMappedArguments, iterator);
+void BytecodeGraphBuilder::VisitCreateMappedArguments() {
+ BuildCreateArguments(CreateArgumentsType::kMappedArguments);
}
-
-void BytecodeGraphBuilder::VisitCreateUnmappedArguments(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCreateArguments(CreateArgumentsParameters::kUnmappedArguments, iterator);
+void BytecodeGraphBuilder::VisitCreateUnmappedArguments() {
+ BuildCreateArguments(CreateArgumentsType::kUnmappedArguments);
}
+void BytecodeGraphBuilder::VisitCreateRestParameter() {
+ BuildCreateArguments(CreateArgumentsType::kRestParameter);
+}
-void BytecodeGraphBuilder::BuildCreateLiteral(
- const Operator* op, const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildCreateLiteral(const Operator* op) {
+ FrameStateBeforeAndAfter states(this);
Node* literal = NewNode(op, GetFunctionClosure());
environment()->BindAccumulator(literal, &states);
}
-
-void BytecodeGraphBuilder::BuildCreateRegExpLiteral(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::BuildCreateRegExpLiteral() {
Handle<String> constant_pattern =
- Handle<String>::cast(iterator.GetConstantForIndexOperand(0));
- int literal_index = iterator.GetIndexOperand(1);
- int literal_flags = iterator.GetImmediateOperand(2);
+ Handle<String>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+ int literal_index = bytecode_iterator().GetIndexOperand(1);
+ int literal_flags = bytecode_iterator().GetImmediateOperand(2);
const Operator* op = javascript()->CreateLiteralRegExp(
constant_pattern, literal_flags, literal_index);
- BuildCreateLiteral(op, iterator);
+ BuildCreateLiteral(op);
}
-
-void BytecodeGraphBuilder::VisitCreateRegExpLiteral(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCreateRegExpLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
+ BuildCreateRegExpLiteral();
}
-
-void BytecodeGraphBuilder::VisitCreateRegExpLiteralWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCreateRegExpLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateRegExpLiteralWide() {
+ BuildCreateRegExpLiteral();
}
-
-void BytecodeGraphBuilder::BuildCreateArrayLiteral(
- const interpreter::BytecodeArrayIterator& iterator) {
- Handle<FixedArray> constant_elements =
- Handle<FixedArray>::cast(iterator.GetConstantForIndexOperand(0));
- int literal_index = iterator.GetIndexOperand(1);
- int literal_flags = iterator.GetImmediateOperand(2);
+void BytecodeGraphBuilder::BuildCreateArrayLiteral() {
+ Handle<FixedArray> constant_elements = Handle<FixedArray>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0));
+ int literal_index = bytecode_iterator().GetIndexOperand(1);
+ int literal_flags = bytecode_iterator().GetImmediateOperand(2);
const Operator* op = javascript()->CreateLiteralArray(
constant_elements, literal_flags, literal_index);
- BuildCreateLiteral(op, iterator);
+ BuildCreateLiteral(op);
}
-
-void BytecodeGraphBuilder::VisitCreateArrayLiteral(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCreateArrayLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
+ BuildCreateArrayLiteral();
}
-
-void BytecodeGraphBuilder::VisitCreateArrayLiteralWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCreateArrayLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateArrayLiteralWide() {
+ BuildCreateArrayLiteral();
}
-
-void BytecodeGraphBuilder::BuildCreateObjectLiteral(
- const interpreter::BytecodeArrayIterator& iterator) {
- Handle<FixedArray> constant_properties =
- Handle<FixedArray>::cast(iterator.GetConstantForIndexOperand(0));
- int literal_index = iterator.GetIndexOperand(1);
- int literal_flags = iterator.GetImmediateOperand(2);
+void BytecodeGraphBuilder::BuildCreateObjectLiteral() {
+ Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0));
+ int literal_index = bytecode_iterator().GetIndexOperand(1);
+ int literal_flags = bytecode_iterator().GetImmediateOperand(2);
const Operator* op = javascript()->CreateLiteralObject(
constant_properties, literal_flags, literal_index);
- BuildCreateLiteral(op, iterator);
+ BuildCreateLiteral(op);
}
-
-void BytecodeGraphBuilder::VisitCreateObjectLiteral(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCreateObjectLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
+ BuildCreateObjectLiteral();
}
-
-void BytecodeGraphBuilder::VisitCreateObjectLiteralWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCreateObjectLiteral(iterator);
+void BytecodeGraphBuilder::VisitCreateObjectLiteralWide() {
+ BuildCreateObjectLiteral();
}
@@ -1179,7 +980,7 @@ Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
Node* callee,
interpreter::Register receiver,
size_t arity) {
- Node** all = info()->zone()->NewArray<Node*>(static_cast<int>(arity));
+ Node** all = local_zone()->NewArray<Node*>(static_cast<int>(arity));
all[0] = callee;
all[1] = environment()->LookupRegister(receiver);
int receiver_index = receiver.index();
@@ -1191,57 +992,58 @@ Node* BytecodeGraphBuilder::ProcessCallArguments(const Operator* call_op,
return value;
}
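
ProcessCallArguments packs the call node's inputs as [callee, receiver, following argument registers]. The arity passed by callers drops from arg_count + 2 to arg_count + 1 below, which suggests the renamed register-count operand now counts the receiver along with the arguments. A sketch of the packing, with strings standing in for nodes:

#include <cstdio>
#include <string>
#include <vector>

// Packs [callee, receiver, trailing argument registers...]; reg_count is the
// number of consecutive registers starting at the receiver (receiver included).
std::vector<std::string> PackCallInputs(const std::string& callee,
                                        int receiver_index, int reg_count) {
  std::vector<std::string> all;
  all.push_back(callee);
  for (int i = 0; i < reg_count; ++i) {
    all.push_back("r" + std::to_string(receiver_index + i));
  }
  return all;
}

int main() {
  // A call with the receiver in r4 and two arguments in r5..r6:
  for (const std::string& input : PackCallInputs("closure", 4, 3)) {
    std::printf("%s ", input.c_str());  // closure r4 r5 r6
  }
  std::printf("\n");
  return 0;
}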
-
-void BytecodeGraphBuilder::BuildCall(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode) {
+ FrameStateBeforeAndAfter states(this);
// TODO(rmcilroy): Set receiver_hint correctly based on whether the receiver
// register has been loaded with null / undefined explicitly or we are sure it
// is not null / undefined.
ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
- Node* callee = environment()->LookupRegister(iterator.GetRegisterOperand(0));
- interpreter::Register receiver = iterator.GetRegisterOperand(1);
- size_t arg_count = iterator.GetCountOperand(2);
- VectorSlotPair feedback = CreateVectorSlotPair(iterator.GetIndexOperand(3));
+ Node* callee =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+ size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(3));
const Operator* call = javascript()->CallFunction(
- arg_count + 2, language_mode(), feedback, receiver_hint);
- Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 2);
+ arg_count + 1, feedback, receiver_hint, tail_call_mode);
+ Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
environment()->BindAccumulator(value, &states);
}
+void BytecodeGraphBuilder::VisitCall() { BuildCall(TailCallMode::kDisallow); }
-void BytecodeGraphBuilder::VisitCall(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCall(iterator);
+void BytecodeGraphBuilder::VisitCallWide() {
+ BuildCall(TailCallMode::kDisallow);
}
+void BytecodeGraphBuilder::VisitTailCall() { BuildCall(TailCallMode::kAllow); }
-void BytecodeGraphBuilder::VisitCallWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCall(iterator);
+void BytecodeGraphBuilder::VisitTailCallWide() {
+ BuildCall(TailCallMode::kAllow);
}
-
-void BytecodeGraphBuilder::VisitCallJSRuntime(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
- Node* callee = BuildLoadNativeContextField(iterator.GetIndexOperand(0));
- interpreter::Register receiver = iterator.GetRegisterOperand(1);
- size_t arg_count = iterator.GetCountOperand(2);
+void BytecodeGraphBuilder::BuildCallJSRuntime() {
+ FrameStateBeforeAndAfter states(this);
+ Node* callee =
+ BuildLoadNativeContextField(bytecode_iterator().GetIndexOperand(0));
+ interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+ size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
// Create node to perform the JS runtime call.
- const Operator* call =
- javascript()->CallFunction(arg_count + 2, language_mode());
- Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 2);
+ const Operator* call = javascript()->CallFunction(arg_count + 1);
+ Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
environment()->BindAccumulator(value, &states);
}
+void BytecodeGraphBuilder::VisitCallJSRuntime() { BuildCallJSRuntime(); }
+
+void BytecodeGraphBuilder::VisitCallJSRuntimeWide() { BuildCallJSRuntime(); }
Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
const Operator* call_runtime_op, interpreter::Register first_arg,
size_t arity) {
- Node** all = info()->zone()->NewArray<Node*>(arity);
+ Node** all = local_zone()->NewArray<Node*>(arity);
int first_arg_index = first_arg.index();
for (int i = 0; i < static_cast<int>(arity); ++i) {
all[i] = environment()->LookupRegister(
@@ -1251,14 +1053,12 @@ Node* BytecodeGraphBuilder::ProcessCallRuntimeArguments(
return value;
}
-
-void BytecodeGraphBuilder::VisitCallRuntime(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildCallRuntime() {
+ FrameStateBeforeAndAfter states(this);
Runtime::FunctionId functionId =
- static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0));
- interpreter::Register first_arg = iterator.GetRegisterOperand(1);
- size_t arg_count = iterator.GetCountOperand(2);
+ static_cast<Runtime::FunctionId>(bytecode_iterator().GetIndexOperand(0));
+ interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+ size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
// Create node to perform the runtime call.
const Operator* call = javascript()->CallRuntime(functionId, arg_count);
@@ -1266,15 +1066,18 @@ void BytecodeGraphBuilder::VisitCallRuntime(
environment()->BindAccumulator(value, &states);
}
+void BytecodeGraphBuilder::VisitCallRuntime() { BuildCallRuntime(); }
-void BytecodeGraphBuilder::VisitCallRuntimeForPair(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::VisitCallRuntimeWide() { BuildCallRuntime(); }
+
+void BytecodeGraphBuilder::BuildCallRuntimeForPair() {
+ FrameStateBeforeAndAfter states(this);
Runtime::FunctionId functionId =
- static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0));
- interpreter::Register first_arg = iterator.GetRegisterOperand(1);
- size_t arg_count = iterator.GetCountOperand(2);
- interpreter::Register first_return = iterator.GetRegisterOperand(3);
+ static_cast<Runtime::FunctionId>(bytecode_iterator().GetIndexOperand(0));
+ interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+ size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+ interpreter::Register first_return =
+ bytecode_iterator().GetRegisterOperand(3);
// Create node to perform the runtime call.
const Operator* call = javascript()->CallRuntime(functionId, arg_count);
@@ -1282,164 +1085,151 @@ void BytecodeGraphBuilder::VisitCallRuntimeForPair(
environment()->BindRegistersToProjections(first_return, return_pair, &states);
}
+void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
+ BuildCallRuntimeForPair();
+}
+
+void BytecodeGraphBuilder::VisitCallRuntimeForPairWide() {
+ BuildCallRuntimeForPair();
+}
Node* BytecodeGraphBuilder::ProcessCallNewArguments(
- const Operator* call_new_op, interpreter::Register callee,
+ const Operator* call_new_op, Node* callee, Node* new_target,
interpreter::Register first_arg, size_t arity) {
- Node** all = info()->zone()->NewArray<Node*>(arity);
- all[0] = environment()->LookupRegister(callee);
+ Node** all = local_zone()->NewArray<Node*>(arity);
+ all[0] = new_target;
int first_arg_index = first_arg.index();
for (int i = 1; i < static_cast<int>(arity) - 1; ++i) {
all[i] = environment()->LookupRegister(
interpreter::Register(first_arg_index + i - 1));
}
- // Original constructor is the same as the callee.
- all[arity - 1] = environment()->LookupRegister(callee);
+ all[arity - 1] = callee;
Node* value = MakeNode(call_new_op, static_cast<int>(arity), all, false);
return value;
}
+void BytecodeGraphBuilder::BuildCallConstruct() {
+ FrameStateBeforeAndAfter states(this);
+ interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
+ interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+ size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
-void BytecodeGraphBuilder::VisitNew(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
- interpreter::Register callee = iterator.GetRegisterOperand(0);
- interpreter::Register first_arg = iterator.GetRegisterOperand(1);
- size_t arg_count = iterator.GetCountOperand(2);
-
+ Node* new_target = environment()->LookupAccumulator();
+ Node* callee = environment()->LookupRegister(callee_reg);
// TODO(turbofan): Pass the feedback here.
const Operator* call = javascript()->CallConstruct(
static_cast<int>(arg_count) + 2, VectorSlotPair());
- Node* value = ProcessCallNewArguments(call, callee, first_arg, arg_count + 2);
+ Node* value = ProcessCallNewArguments(call, callee, new_target, first_arg,
+ arg_count + 2);
environment()->BindAccumulator(value, &states);
}
+void BytecodeGraphBuilder::VisitNew() { BuildCallConstruct(); }
+
+void BytecodeGraphBuilder::VisitNewWide() { BuildCallConstruct(); }
-void BytecodeGraphBuilder::VisitThrow(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildThrow() {
+ FrameStateBeforeAndAfter states(this);
Node* value = environment()->LookupAccumulator();
- // TODO(mythria): Change to Runtime::kThrow when we have deoptimization
- // information support in the interpreter.
- NewNode(javascript()->CallRuntime(Runtime::kReThrow, 1), value);
- Node* control = NewNode(common()->Throw(), value);
- environment()->RecordAfterState(control, &states);
- UpdateControlDependencyToLeaveFunction(control);
+ Node* call = NewNode(javascript()->CallRuntime(Runtime::kThrow), value);
+ environment()->BindAccumulator(call, &states);
+}
+
+void BytecodeGraphBuilder::VisitThrow() {
+ BuildThrow();
+ Node* call = environment()->LookupAccumulator();
+ Node* control = NewNode(common()->Throw(), call);
+ MergeControlToLeaveFunction(control);
}
+void BytecodeGraphBuilder::VisitReThrow() {
+ Node* value = environment()->LookupAccumulator();
+ Node* call = NewNode(javascript()->CallRuntime(Runtime::kReThrow), value);
+ Node* control = NewNode(common()->Throw(), call);
+ MergeControlToLeaveFunction(control);
+}
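
Throw handling is now split: BuildThrow emits the Runtime::kThrow call and binds its value, and the visitors append a Throw control node whose edge leaves the function (ReThrow takes the same shape via Runtime::kReThrow). A sketch of collecting such exit controls for later merging into the graph's End:

#include <vector>

struct SketchNode {};

struct SketchBuilder {
  std::vector<SketchNode*> exit_controls_;  // later merged into the End node

  void LowerThrow(SketchNode* value) {
    (void)value;
    // Stands in for NewNode(javascript()->CallRuntime(Runtime::kThrow), value).
    SketchNode* call = new SketchNode();
    (void)call;
    // Stands in for NewNode(common()->Throw(), call): a control edge that
    // terminates this block and is collected with the other function exits.
    SketchNode* control = new SketchNode();
    exit_controls_.push_back(control);
  }
};

int main() {
  SketchBuilder builder;
  SketchNode value;
  builder.LowerThrow(&value);
  return static_cast<int>(builder.exit_controls_.size()) - 1;  // 0 on success
}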
-void BytecodeGraphBuilder::BuildBinaryOp(
- const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
- Node* left = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::BuildBinaryOp(const Operator* js_op) {
+ FrameStateBeforeAndAfter states(this);
+ Node* left =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* right = environment()->LookupAccumulator();
Node* node = NewNode(js_op, left, right);
environment()->BindAccumulator(node, &states);
}
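
All of the binary visitors share one register-machine convention: the left operand comes from an explicit register operand, the right operand is whatever currently sits in the accumulator, and the result is bound back into the accumulator. A toy machine making that concrete:

#include <cstdio>

struct ToyMachine {
  int registers[8] = {0};
  int accumulator = 0;

  // Mirrors VisitAdd: left from a register, right from the accumulator,
  // result back into the accumulator.
  void BinaryAdd(int lhs_register) {
    accumulator = registers[lhs_register] + accumulator;
  }
};

int main() {
  ToyMachine m;
  m.registers[2] = 40;
  m.accumulator = 2;
  m.BinaryAdd(2);
  std::printf("%d\n", m.accumulator);  // prints: 42
  return 0;
}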
-
-void BytecodeGraphBuilder::VisitAdd(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitAdd() {
BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->Add(language_mode(), hints), iterator);
+ BuildBinaryOp(javascript()->Add(hints));
}
-
-void BytecodeGraphBuilder::VisitSub(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitSub() {
BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->Subtract(language_mode(), hints), iterator);
+ BuildBinaryOp(javascript()->Subtract(hints));
}
-
-void BytecodeGraphBuilder::VisitMul(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitMul() {
BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->Multiply(language_mode(), hints), iterator);
+ BuildBinaryOp(javascript()->Multiply(hints));
}
-
-void BytecodeGraphBuilder::VisitDiv(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitDiv() {
BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->Divide(language_mode(), hints), iterator);
+ BuildBinaryOp(javascript()->Divide(hints));
}
-
-void BytecodeGraphBuilder::VisitMod(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitMod() {
BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->Modulus(language_mode(), hints), iterator);
+ BuildBinaryOp(javascript()->Modulus(hints));
}
-
-void BytecodeGraphBuilder::VisitBitwiseOr(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitBitwiseOr() {
BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->BitwiseOr(language_mode(), hints), iterator);
+ BuildBinaryOp(javascript()->BitwiseOr(hints));
}
-
-void BytecodeGraphBuilder::VisitBitwiseXor(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitBitwiseXor() {
BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->BitwiseXor(language_mode(), hints), iterator);
+ BuildBinaryOp(javascript()->BitwiseXor(hints));
}
-
-void BytecodeGraphBuilder::VisitBitwiseAnd(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitBitwiseAnd() {
BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->BitwiseAnd(language_mode(), hints), iterator);
+ BuildBinaryOp(javascript()->BitwiseAnd(hints));
}
-
-void BytecodeGraphBuilder::VisitShiftLeft(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitShiftLeft() {
BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->ShiftLeft(language_mode(), hints), iterator);
+ BuildBinaryOp(javascript()->ShiftLeft(hints));
}
-
-void BytecodeGraphBuilder::VisitShiftRight(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitShiftRight() {
BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->ShiftRight(language_mode(), hints), iterator);
+ BuildBinaryOp(javascript()->ShiftRight(hints));
}
-
-void BytecodeGraphBuilder::VisitShiftRightLogical(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitShiftRightLogical() {
BinaryOperationHints hints = BinaryOperationHints::Any();
- BuildBinaryOp(javascript()->ShiftRightLogical(language_mode(), hints),
- iterator);
+ BuildBinaryOp(javascript()->ShiftRightLogical(hints));
}
-
-void BytecodeGraphBuilder::VisitInc(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
- const Operator* js_op =
- javascript()->Add(language_mode(), BinaryOperationHints::Any());
+void BytecodeGraphBuilder::VisitInc() {
+ FrameStateBeforeAndAfter states(this);
+ const Operator* js_op = javascript()->Add(BinaryOperationHints::Any());
Node* node = NewNode(js_op, environment()->LookupAccumulator(),
jsgraph()->OneConstant());
environment()->BindAccumulator(node, &states);
}
-
-void BytecodeGraphBuilder::VisitDec(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
- const Operator* js_op =
- javascript()->Subtract(language_mode(), BinaryOperationHints::Any());
+void BytecodeGraphBuilder::VisitDec() {
+ FrameStateBeforeAndAfter states(this);
+ const Operator* js_op = javascript()->Subtract(BinaryOperationHints::Any());
Node* node = NewNode(js_op, environment()->LookupAccumulator(),
jsgraph()->OneConstant());
environment()->BindAccumulator(node, &states);
}
-
-void BytecodeGraphBuilder::VisitLogicalNot(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitLogicalNot() {
Node* value = NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
environment()->LookupAccumulator());
Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
@@ -1447,408 +1237,307 @@ void BytecodeGraphBuilder::VisitLogicalNot(
environment()->BindAccumulator(node);
}
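
VisitLogicalNot is lowered without control flow: a ToBoolean conversion feeds a Select node that picks between the false and true constants. The same dataflow in plain C++ (the stand-in ToBoolean is much narrower than JS's real coercion rules):

#include <cstdio>

// Stand-in for the ToBooleanHint::kAny conversion; real JS coercion also
// handles undefined, NaN, empty strings, and so on.
bool ToyToBoolean(int value) { return value != 0; }

// Select(condition, if_true, if_false) as pure dataflow, no branch in the IR.
int Select(bool condition, int if_true, int if_false) {
  return condition ? if_true : if_false;
}

int LogicalNot(int value) {
  return Select(ToyToBoolean(value), /*if_true=*/0, /*if_false=*/1);
}

int main() {
  std::printf("%d %d\n", LogicalNot(0), LogicalNot(7));  // prints: 1 0
  return 0;
}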
-
-void BytecodeGraphBuilder::VisitTypeOf(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitTypeOf() {
Node* node =
NewNode(javascript()->TypeOf(), environment()->LookupAccumulator());
environment()->BindAccumulator(node);
}
-
-void BytecodeGraphBuilder::BuildDelete(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildDelete(LanguageMode language_mode) {
+ FrameStateBeforeAndAfter states(this);
Node* key = environment()->LookupAccumulator();
- Node* object = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* node =
- NewNode(javascript()->DeleteProperty(language_mode()), object, key);
+ NewNode(javascript()->DeleteProperty(language_mode), object, key);
environment()->BindAccumulator(node, &states);
}
-
-void BytecodeGraphBuilder::VisitDeletePropertyStrict(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_strict(language_mode()));
- BuildDelete(iterator);
+void BytecodeGraphBuilder::VisitDeletePropertyStrict() {
+ BuildDelete(LanguageMode::STRICT);
}
-
-void BytecodeGraphBuilder::VisitDeletePropertySloppy(
- const interpreter::BytecodeArrayIterator& iterator) {
- DCHECK(is_sloppy(language_mode()));
- BuildDelete(iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitDeleteLookupSlot(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
- Node* name = environment()->LookupAccumulator();
- const Operator* op = javascript()->CallRuntime(Runtime::kDeleteLookupSlot, 2);
- Node* result = NewNode(op, environment()->Context(), name);
- environment()->BindAccumulator(result, &states);
+void BytecodeGraphBuilder::VisitDeletePropertySloppy() {
+ BuildDelete(LanguageMode::SLOPPY);
}
-
-void BytecodeGraphBuilder::BuildCompareOp(
- const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
- Node* left = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::BuildCompareOp(const Operator* js_op) {
+ FrameStateBeforeAndAfter states(this);
+ Node* left =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* right = environment()->LookupAccumulator();
Node* node = NewNode(js_op, left, right);
environment()->BindAccumulator(node, &states);
}
-
-void BytecodeGraphBuilder::VisitTestEqual(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCompareOp(javascript()->Equal(), iterator);
+void BytecodeGraphBuilder::VisitTestEqual() {
+ BuildCompareOp(javascript()->Equal());
}
-
-void BytecodeGraphBuilder::VisitTestNotEqual(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCompareOp(javascript()->NotEqual(), iterator);
+void BytecodeGraphBuilder::VisitTestNotEqual() {
+ BuildCompareOp(javascript()->NotEqual());
}
-
-void BytecodeGraphBuilder::VisitTestEqualStrict(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCompareOp(javascript()->StrictEqual(), iterator);
+void BytecodeGraphBuilder::VisitTestEqualStrict() {
+ BuildCompareOp(javascript()->StrictEqual());
}
-
-void BytecodeGraphBuilder::VisitTestNotEqualStrict(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCompareOp(javascript()->StrictNotEqual(), iterator);
+void BytecodeGraphBuilder::VisitTestNotEqualStrict() {
+ BuildCompareOp(javascript()->StrictNotEqual());
}
-
-void BytecodeGraphBuilder::VisitTestLessThan(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCompareOp(javascript()->LessThan(language_mode()), iterator);
+void BytecodeGraphBuilder::VisitTestLessThan() {
+ BuildCompareOp(javascript()->LessThan());
}
-
-void BytecodeGraphBuilder::VisitTestGreaterThan(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCompareOp(javascript()->GreaterThan(language_mode()), iterator);
+void BytecodeGraphBuilder::VisitTestGreaterThan() {
+ BuildCompareOp(javascript()->GreaterThan());
}
-
-void BytecodeGraphBuilder::VisitTestLessThanOrEqual(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCompareOp(javascript()->LessThanOrEqual(language_mode()), iterator);
+void BytecodeGraphBuilder::VisitTestLessThanOrEqual() {
+ BuildCompareOp(javascript()->LessThanOrEqual());
}
-
-void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCompareOp(javascript()->GreaterThanOrEqual(language_mode()), iterator);
+void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual() {
+ BuildCompareOp(javascript()->GreaterThanOrEqual());
}
-
-void BytecodeGraphBuilder::VisitTestIn(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCompareOp(javascript()->HasProperty(), iterator);
+void BytecodeGraphBuilder::VisitTestIn() {
+ BuildCompareOp(javascript()->HasProperty());
}
-
-void BytecodeGraphBuilder::VisitTestInstanceOf(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCompareOp(javascript()->InstanceOf(), iterator);
+void BytecodeGraphBuilder::VisitTestInstanceOf() {
+ BuildCompareOp(javascript()->InstanceOf());
}
-
-void BytecodeGraphBuilder::BuildCastOperator(
- const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildCastOperator(const Operator* js_op) {
+ FrameStateBeforeAndAfter states(this);
Node* node = NewNode(js_op, environment()->LookupAccumulator());
environment()->BindAccumulator(node, &states);
}
-
-void BytecodeGraphBuilder::VisitToName(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCastOperator(javascript()->ToName(), iterator);
+void BytecodeGraphBuilder::VisitToName() {
+ BuildCastOperator(javascript()->ToName());
}
-
-void BytecodeGraphBuilder::VisitToObject(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCastOperator(javascript()->ToObject(), iterator);
-}
-
-
-void BytecodeGraphBuilder::VisitToNumber(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildCastOperator(javascript()->ToNumber(), iterator);
+void BytecodeGraphBuilder::VisitToObject() {
+ BuildCastOperator(javascript()->ToObject());
}
-
-void BytecodeGraphBuilder::VisitJump(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildJump();
-}
-
-
-void BytecodeGraphBuilder::VisitJumpConstant(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildJump();
+void BytecodeGraphBuilder::VisitToNumber() {
+ BuildCastOperator(javascript()->ToNumber());
}
+void BytecodeGraphBuilder::VisitJump() { BuildJump(); }
-void BytecodeGraphBuilder::VisitJumpConstantWide(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildJump();
-}
+void BytecodeGraphBuilder::VisitJumpConstant() { BuildJump(); }
+void BytecodeGraphBuilder::VisitJumpConstantWide() { BuildJump(); }
-void BytecodeGraphBuilder::VisitJumpIfTrue(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfTrue() {
BuildJumpIfEqual(jsgraph()->TrueConstant());
}
-
-void BytecodeGraphBuilder::VisitJumpIfTrueConstant(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfTrueConstant() {
BuildJumpIfEqual(jsgraph()->TrueConstant());
}
-
-void BytecodeGraphBuilder::VisitJumpIfTrueConstantWide(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfTrueConstantWide() {
BuildJumpIfEqual(jsgraph()->TrueConstant());
}
-
-void BytecodeGraphBuilder::VisitJumpIfFalse(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfFalse() {
BuildJumpIfEqual(jsgraph()->FalseConstant());
}
-
-void BytecodeGraphBuilder::VisitJumpIfFalseConstant(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfFalseConstant() {
BuildJumpIfEqual(jsgraph()->FalseConstant());
}
-
-void BytecodeGraphBuilder::VisitJumpIfFalseConstantWide(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfFalseConstantWide() {
BuildJumpIfEqual(jsgraph()->FalseConstant());
}
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrue() {
BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
}
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstant(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstant() {
BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
}
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstantWide(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanTrueConstantWide() {
BuildJumpIfToBooleanEqual(jsgraph()->TrueConstant());
}
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalse() {
BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
}
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstant() {
BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
}
-
-void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstantWide(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfToBooleanFalseConstantWide() {
BuildJumpIfToBooleanEqual(jsgraph()->FalseConstant());
}
+void BytecodeGraphBuilder::VisitJumpIfNotHole() { BuildJumpIfNotHole(); }
-void BytecodeGraphBuilder::VisitJumpIfNull(
- const interpreter::BytecodeArrayIterator& iterator) {
- BuildJumpIfEqual(jsgraph()->NullConstant());
+void BytecodeGraphBuilder::VisitJumpIfNotHoleConstant() {
+ BuildJumpIfNotHole();
}
+void BytecodeGraphBuilder::VisitJumpIfNotHoleConstantWide() {
+ BuildJumpIfNotHole();
+}
-void BytecodeGraphBuilder::VisitJumpIfNullConstant(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfNull() {
BuildJumpIfEqual(jsgraph()->NullConstant());
}
-
-void BytecodeGraphBuilder::VisitJumpIfNullConstantWide(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfNullConstant() {
BuildJumpIfEqual(jsgraph()->NullConstant());
}
+void BytecodeGraphBuilder::VisitJumpIfNullConstantWide() {
+ BuildJumpIfEqual(jsgraph()->NullConstant());
+}
-void BytecodeGraphBuilder::VisitJumpIfUndefined(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfUndefined() {
BuildJumpIfEqual(jsgraph()->UndefinedConstant());
}
-
-void BytecodeGraphBuilder::VisitJumpIfUndefinedConstant(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfUndefinedConstant() {
BuildJumpIfEqual(jsgraph()->UndefinedConstant());
}
-
-void BytecodeGraphBuilder::VisitJumpIfUndefinedConstantWide(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitJumpIfUndefinedConstantWide() {
BuildJumpIfEqual(jsgraph()->UndefinedConstant());
}
+void BytecodeGraphBuilder::VisitStackCheck() {
+ FrameStateBeforeAndAfter states(this);
+ Node* node = NewNode(javascript()->StackCheck());
+ environment()->RecordAfterState(node, &states);
+}
-void BytecodeGraphBuilder::VisitReturn(
- const interpreter::BytecodeArrayIterator& iterator) {
+void BytecodeGraphBuilder::VisitReturn() {
Node* control =
NewNode(common()->Return(), environment()->LookupAccumulator());
- UpdateControlDependencyToLeaveFunction(control);
- set_environment(nullptr);
+ MergeControlToLeaveFunction(control);
}
+void BytecodeGraphBuilder::VisitDebugger() {
+ FrameStateBeforeAndAfter states(this);
+ Node* call =
+ NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
+ environment()->BindAccumulator(call, &states);
+}
-void BytecodeGraphBuilder::VisitForInPrepare(
- const interpreter::BytecodeArrayIterator& iterator) {
- Node* prepare = nullptr;
- {
- FrameStateBeforeAndAfter states(this, iterator);
- Node* receiver = environment()->LookupAccumulator();
- prepare = NewNode(javascript()->ForInPrepare(), receiver);
- environment()->RecordAfterState(prepare, &states);
- }
- // Project cache_type, cache_array, cache_length into register
- // operands 1, 2, 3.
- for (int i = 0; i < 3; i++) {
- environment()->BindRegister(iterator.GetRegisterOperand(i),
- NewNode(common()->Projection(i), prepare));
- }
+// We cannot create a graph from the debugger copy of the bytecode array.
+#define DEBUG_BREAK(Name, ...) \
+ void BytecodeGraphBuilder::Visit##Name() { UNREACHABLE(); }
+DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
+#undef DEBUG_BREAK
+
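
A minimal standalone sketch of the X-macro pattern used here, with an invented two-entry list standing in for V8's actual DEBUG_BREAK_BYTECODE_LIST:

    #include <cstdio>

    #define MY_DEBUG_BREAK_LIST(V) V(DebugBreak0) V(DebugBreak1)

    // Stamp out one stub visitor per listed bytecode, mirroring the macro above.
    #define DEBUG_BREAK(Name) \
      void Visit##Name() { std::printf("Visit%s: unreachable\n", #Name); }
    MY_DEBUG_BREAK_LIST(DEBUG_BREAK)
    #undef DEBUG_BREAK

    int main() {
      VisitDebugBreak0();
      VisitDebugBreak1();
    }
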
+void BytecodeGraphBuilder::BuildForInPrepare() {
+ FrameStateBeforeAndAfter states(this);
+ Node* receiver = environment()->LookupAccumulator();
+ Node* prepare = NewNode(javascript()->ForInPrepare(), receiver);
+ environment()->BindRegistersToProjections(
+ bytecode_iterator().GetRegisterOperand(0), prepare, &states);
}
+void BytecodeGraphBuilder::VisitForInPrepare() { BuildForInPrepare(); }
-void BytecodeGraphBuilder::VisitForInDone(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
- Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::VisitForInPrepareWide() { BuildForInPrepare(); }
+
+void BytecodeGraphBuilder::VisitForInDone() {
+ FrameStateBeforeAndAfter states(this);
+ Node* index =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* cache_length =
- environment()->LookupRegister(iterator.GetRegisterOperand(1));
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
Node* exit_cond = NewNode(javascript()->ForInDone(), index, cache_length);
environment()->BindAccumulator(exit_cond, &states);
}
-
-void BytecodeGraphBuilder::VisitForInNext(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
+void BytecodeGraphBuilder::BuildForInNext() {
+ FrameStateBeforeAndAfter states(this);
Node* receiver =
- environment()->LookupRegister(iterator.GetRegisterOperand(0));
- Node* cache_type =
- environment()->LookupRegister(iterator.GetRegisterOperand(1));
- Node* cache_array =
- environment()->LookupRegister(iterator.GetRegisterOperand(2));
- Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(3));
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* index =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+ int catch_reg_pair_index = bytecode_iterator().GetRegisterOperand(2).index();
+ Node* cache_type = environment()->LookupRegister(
+ interpreter::Register(catch_reg_pair_index));
+ Node* cache_array = environment()->LookupRegister(
+ interpreter::Register(catch_reg_pair_index + 1));
+
Node* value = NewNode(javascript()->ForInNext(), receiver, cache_array,
cache_type, index);
environment()->BindAccumulator(value, &states);
}
+void BytecodeGraphBuilder::VisitForInNext() { BuildForInNext(); }
-void BytecodeGraphBuilder::VisitForInStep(
- const interpreter::BytecodeArrayIterator& iterator) {
- FrameStateBeforeAndAfter states(this, iterator);
- Node* index = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+void BytecodeGraphBuilder::VisitForInNextWide() { BuildForInNext(); }
+
+void BytecodeGraphBuilder::VisitForInStep() {
+ FrameStateBeforeAndAfter states(this);
+ Node* index =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
index = NewNode(javascript()->ForInStep(), index);
environment()->BindAccumulator(index, &states);
}
-
-void BytecodeGraphBuilder::MergeEnvironmentsOfBackwardBranches(
- int source_offset, int target_offset) {
- DCHECK_GE(source_offset, target_offset);
- const ZoneVector<int>* branch_sites =
- branch_analysis()->BackwardBranchesTargetting(target_offset);
- if (branch_sites->back() == source_offset) {
- // The set of back branches is complete, merge them.
- DCHECK_GE(branch_sites->at(0), target_offset);
- Environment* merged = merge_environments_[branch_sites->at(0)];
- for (size_t i = 1; i < branch_sites->size(); i++) {
- DCHECK_GE(branch_sites->at(i), target_offset);
- merged->Merge(merge_environments_[branch_sites->at(i)]);
+void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
+ if (merge_environments_[current_offset] != nullptr) {
+ if (environment() != nullptr) {
+ merge_environments_[current_offset]->Merge(environment());
}
- // And now merge with loop header environment created when loop
- // header was visited.
- loop_header_environments_[target_offset]->Merge(merged);
+ set_environment(merge_environments_[current_offset]);
}
}
-
-void BytecodeGraphBuilder::MergeEnvironmentsOfForwardBranches(
- int source_offset) {
- if (branch_analysis()->forward_branches_target(source_offset)) {
- // Merge environments of branches that reach this bytecode.
- auto branch_sites =
- branch_analysis()->ForwardBranchesTargetting(source_offset);
- DCHECK_LT(branch_sites->at(0), source_offset);
- Environment* merged = merge_environments_[branch_sites->at(0)];
- for (size_t i = 1; i < branch_sites->size(); i++) {
- DCHECK_LT(branch_sites->at(i), source_offset);
- merged->Merge(merge_environments_[branch_sites->at(i)]);
- }
- if (environment()) {
- merged->Merge(environment());
- }
- set_environment(merged);
- }
-}
-
-
-void BytecodeGraphBuilder::BuildLoopHeaderForBackwardBranches(
- int source_offset) {
- if (branch_analysis()->backward_branches_target(source_offset)) {
+void BytecodeGraphBuilder::BuildLoopHeaderEnvironment(int current_offset) {
+ if (branch_analysis()->backward_branches_target(current_offset)) {
// Add loop header and store a copy so we can connect merged back
// edge inputs to the loop header.
- loop_header_environments_[source_offset] = environment()->CopyForLoop();
+ merge_environments_[current_offset] = environment()->CopyForLoop();
}
}
-
-void BytecodeGraphBuilder::BuildJump(int source_offset, int target_offset) {
- DCHECK_NULL(merge_environments_[source_offset]);
- merge_environments_[source_offset] = environment();
- if (source_offset >= target_offset) {
- MergeEnvironmentsOfBackwardBranches(source_offset, target_offset);
+void BytecodeGraphBuilder::MergeIntoSuccessorEnvironment(int target_offset) {
+ if (merge_environments_[target_offset] == nullptr) {
+    // Append merge nodes to the environment, since another environment may
+    // merge here later. Add placeholders for the merge nodes now; any
+    // redundant ones will be eliminated in a later pass.
+    // TODO(mstarzinger): Be smarter about this!
+ NewMerge();
+ merge_environments_[target_offset] = environment();
+ } else {
+ merge_environments_[target_offset]->Merge(environment());
}
set_environment(nullptr);
}
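
The merge logic above can be pictured with a toy model (plain C++ with invented types, not V8 code): the first branch to reach a target deposits its environment, and each later branch merges in, turning disagreeing slots into phi placeholders:

    #include <cassert>
    #include <map>
    #include <vector>

    using Environment = std::vector<int>;
    std::map<int, Environment> merge_environments;

    void MergeIntoSuccessor(int target_offset, const Environment& env) {
      auto it = merge_environments.find(target_offset);
      if (it == merge_environments.end()) {
        merge_environments[target_offset] = env;  // first predecessor: just copy
      } else {
        Environment& merged = it->second;
        for (size_t i = 0; i < merged.size(); i++) {
          if (merged[i] != env[i]) merged[i] = -1;  // disagreement: needs a phi
        }
      }
    }

    int main() {
      MergeIntoSuccessor(100, {1, 2, 3});
      MergeIntoSuccessor(100, {1, 9, 3});
      assert(merge_environments[100] == (Environment{1, -1, 3}));
    }
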
+void BytecodeGraphBuilder::MergeControlToLeaveFunction(Node* exit) {
+ exit_controls_.push_back(exit);
+ set_environment(nullptr);
+}
void BytecodeGraphBuilder::BuildJump() {
- int source_offset = bytecode_iterator()->current_offset();
- int target_offset = bytecode_iterator()->GetJumpTargetOffset();
- BuildJump(source_offset, target_offset);
+ MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
}
void BytecodeGraphBuilder::BuildConditionalJump(Node* condition) {
- int source_offset = bytecode_iterator()->current_offset();
NewBranch(condition);
Environment* if_false_environment = environment()->CopyForConditional();
NewIfTrue();
- BuildJump(source_offset, bytecode_iterator()->GetJumpTargetOffset());
+ MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
set_environment(if_false_environment);
NewIfFalse();
}
@@ -1870,6 +1559,15 @@ void BytecodeGraphBuilder::BuildJumpIfToBooleanEqual(Node* comperand) {
BuildConditionalJump(condition);
}
+void BytecodeGraphBuilder::BuildJumpIfNotHole() {
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* condition = NewNode(javascript()->StrictEqual(), accumulator,
+ jsgraph()->TheHoleConstant());
+ Node* node =
+ NewNode(common()->Select(MachineRepresentation::kTagged), condition,
+ jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+ BuildConditionalJump(node);
+}
Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
if (size > input_buffer_size_) {
@@ -1880,16 +1578,31 @@ Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
return input_buffer_;
}
+void BytecodeGraphBuilder::EnterAndExitExceptionHandlers(int current_offset) {
+ Handle<HandlerTable> table = exception_handler_table();
+ int num_entries = table->NumberOfRangeEntries();
-void BytecodeGraphBuilder::PrepareEntryFrameState(Node* node) {
- DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
- DCHECK_EQ(IrOpcode::kDead,
- NodeProperties::GetFrameStateInput(node, 0)->opcode());
- NodeProperties::ReplaceFrameStateInput(
- node, 0, environment()->Checkpoint(BailoutId(0),
- OutputFrameStateCombine::Ignore()));
-}
+ // Potentially exit exception handlers.
+ while (!exception_handlers_.empty()) {
+ int current_end = exception_handlers_.top().end_offset_;
+ if (current_offset < current_end) break; // Still covered by range.
+ exception_handlers_.pop();
+ }
+ // Potentially enter exception handlers.
+ while (current_exception_handler_ < num_entries) {
+ int next_start = table->GetRangeStart(current_exception_handler_);
+ if (current_offset < next_start) break; // Not yet covered by range.
+ int next_end = table->GetRangeEnd(current_exception_handler_);
+ int next_handler = table->GetRangeHandler(current_exception_handler_);
+ int context_register = table->GetRangeData(current_exception_handler_);
+ CatchPrediction pred =
+ table->GetRangePrediction(current_exception_handler_);
+ exception_handlers_.push(
+ {next_start, next_end, next_handler, context_register, pred});
+ current_exception_handler_++;
+ }
+}
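
The range walk above amounts to the following standalone sketch (invented table data; V8's real HandlerTable carries more fields): expired ranges are popped and newly started ranges pushed as the offset advances, so well-scoped handlers form a stack:

    #include <cstdio>
    #include <stack>
    #include <vector>

    struct HandlerRange { int start, end; };

    int main() {
      // Sorted by start offset; ranges nest, as in well-scoped bytecode.
      std::vector<HandlerRange> table = {{10, 50}, {20, 30}};
      std::stack<HandlerRange> active;
      size_t next = 0;
      for (int offset = 0; offset <= 60; offset += 10) {
        while (!active.empty() && offset >= active.top().end) active.pop();
        while (next < table.size() && offset >= table[next].start) {
          active.push(table[next++]);
        }
        std::printf("offset %2d: %zu active handler(s)\n", offset, active.size());
      }
    }
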
Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
Node** value_inputs, bool incomplete) {
@@ -1907,6 +1620,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
} else {
+ bool inside_handler = !exception_handlers_.empty();
int input_count_with_deps = value_input_count;
if (has_context) ++input_count_with_deps;
input_count_with_deps += frame_state_count;
@@ -1931,21 +1645,40 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
*current_input++ = environment()->GetControlDependency();
}
result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
- if (!environment()->IsMarkedAsUnreachable()) {
- // Update the current control dependency for control-producing nodes.
- if (NodeProperties::IsControl(result)) {
- environment()->UpdateControlDependency(result);
- }
- // Update the current effect dependency for effect-producing nodes.
- if (result->op()->EffectOutputCount() > 0) {
- environment()->UpdateEffectDependency(result);
- }
- // Add implicit success continuation for throwing nodes.
- if (!result->op()->HasProperty(Operator::kNoThrow)) {
- const Operator* if_success = common()->IfSuccess();
- Node* on_success = graph()->NewNode(if_success, result);
- environment_->UpdateControlDependency(on_success);
- }
+ // Update the current control dependency for control-producing nodes.
+ if (NodeProperties::IsControl(result)) {
+ environment()->UpdateControlDependency(result);
+ }
+ // Update the current effect dependency for effect-producing nodes.
+ if (result->op()->EffectOutputCount() > 0) {
+ environment()->UpdateEffectDependency(result);
+ }
+ // Add implicit exception continuation for throwing nodes.
+ if (!result->op()->HasProperty(Operator::kNoThrow) && inside_handler) {
+ int handler_offset = exception_handlers_.top().handler_offset_;
+ int context_index = exception_handlers_.top().context_register_;
+ CatchPrediction prediction = exception_handlers_.top().pred_;
+ interpreter::Register context_register(context_index);
+ IfExceptionHint hint = prediction == CatchPrediction::CAUGHT
+ ? IfExceptionHint::kLocallyCaught
+ : IfExceptionHint::kLocallyUncaught;
+ Environment* success_env = environment()->CopyForConditional();
+ const Operator* op = common()->IfException(hint);
+ Node* effect = environment()->GetEffectDependency();
+ Node* on_exception = graph()->NewNode(op, effect, result);
+ Node* context = environment()->LookupRegister(context_register);
+ environment()->UpdateControlDependency(on_exception);
+ environment()->UpdateEffectDependency(on_exception);
+ environment()->BindAccumulator(on_exception);
+ environment()->SetContext(context);
+ MergeIntoSuccessorEnvironment(handler_offset);
+ set_environment(success_env);
+ }
+ // Add implicit success continuation for throwing nodes.
+ if (!result->op()->HasProperty(Operator::kNoThrow)) {
+ const Operator* if_success = common()->IfSuccess();
+ Node* on_success = graph()->NewNode(if_success, result);
+ environment()->UpdateControlDependency(on_success);
}
}
@@ -2028,13 +1761,6 @@ Node* BytecodeGraphBuilder::MergeValue(Node* value, Node* other,
return value;
}
-
-void BytecodeGraphBuilder::UpdateControlDependencyToLeaveFunction(Node* exit) {
- if (environment()->IsMarkedAsUnreachable()) return;
- environment()->MarkAsUnreachable();
- exit_controls_.push_back(exit);
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 94a278c3cf..2fa5967c86 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -23,19 +23,14 @@ class BytecodeGraphBuilder {
JSGraph* jsgraph);
// Creates a graph by visiting bytecodes.
- bool CreateGraph(bool stack_check = true);
-
- Graph* graph() const { return jsgraph_->graph(); }
+ bool CreateGraph();
private:
class Environment;
class FrameStateBeforeAndAfter;
- void CreateGraphBody(bool stack_check);
void VisitBytecodes();
- Node* LoadAccumulator(Node* value);
-
// Get or create the node that represents the outer function closure.
Node* GetFunctionClosure();
@@ -45,13 +40,6 @@ class BytecodeGraphBuilder {
// Get or create the node that represents the incoming new target value.
Node* GetNewTarget();
- // Builder for accessing a (potentially immutable) object field.
- Node* BuildLoadObjectField(Node* object, int offset);
- Node* BuildLoadImmutableObjectField(Node* object, int offset);
-
- // Builder for accessing type feedback vector.
- Node* BuildLoadFeedbackVector();
-
  // Builder for loading a native context field.
Node* BuildLoadNativeContextField(int index);
@@ -111,91 +99,102 @@ class BytecodeGraphBuilder {
Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
bool incomplete);
- // Helper to indicate a node exits the function body.
- void UpdateControlDependencyToLeaveFunction(Node* exit);
-
Node** EnsureInputBufferSize(int size);
Node* ProcessCallArguments(const Operator* call_op, Node* callee,
interpreter::Register receiver, size_t arity);
- Node* ProcessCallNewArguments(const Operator* call_new_op,
- interpreter::Register callee,
+ Node* ProcessCallNewArguments(const Operator* call_new_op, Node* callee,
+ Node* new_target,
interpreter::Register first_arg, size_t arity);
Node* ProcessCallRuntimeArguments(const Operator* call_runtime_op,
interpreter::Register first_arg,
size_t arity);
- void BuildCreateLiteral(const Operator* op,
- const interpreter::BytecodeArrayIterator& iterator);
- void BuildCreateRegExpLiteral(
- const interpreter::BytecodeArrayIterator& iterator);
- void BuildCreateArrayLiteral(
- const interpreter::BytecodeArrayIterator& iterator);
- void BuildCreateObjectLiteral(
- const interpreter::BytecodeArrayIterator& iterator);
- void BuildCreateArguments(CreateArgumentsParameters::Type type,
- const interpreter::BytecodeArrayIterator& iterator);
- void BuildLoadGlobal(const interpreter::BytecodeArrayIterator& iterator,
- TypeofMode typeof_mode);
- void BuildStoreGlobal(const interpreter::BytecodeArrayIterator& iterator);
- void BuildNamedLoad(const interpreter::BytecodeArrayIterator& iterator);
- void BuildKeyedLoad(const interpreter::BytecodeArrayIterator& iterator);
- void BuildNamedStore(const interpreter::BytecodeArrayIterator& iterator);
- void BuildKeyedStore(const interpreter::BytecodeArrayIterator& iterator);
- void BuildLdaLookupSlot(TypeofMode typeof_mode,
- const interpreter::BytecodeArrayIterator& iterator);
- void BuildStaLookupSlot(LanguageMode language_mode,
- const interpreter::BytecodeArrayIterator& iterator);
- void BuildCall(const interpreter::BytecodeArrayIterator& iterator);
- void BuildBinaryOp(const Operator* op,
- const interpreter::BytecodeArrayIterator& iterator);
- void BuildCompareOp(const Operator* op,
- const interpreter::BytecodeArrayIterator& iterator);
- void BuildDelete(const interpreter::BytecodeArrayIterator& iterator);
- void BuildCastOperator(const Operator* js_op,
- const interpreter::BytecodeArrayIterator& iterator);
+ void BuildCreateLiteral(const Operator* op);
+ void BuildCreateRegExpLiteral();
+ void BuildCreateArrayLiteral();
+ void BuildCreateObjectLiteral();
+ void BuildCreateArguments(CreateArgumentsType type);
+ void BuildLoadGlobal(TypeofMode typeof_mode);
+ void BuildStoreGlobal(LanguageMode language_mode);
+ void BuildNamedLoad();
+ void BuildKeyedLoad();
+ void BuildNamedStore(LanguageMode language_mode);
+ void BuildKeyedStore(LanguageMode language_mode);
+ void BuildLdaLookupSlot(TypeofMode typeof_mode);
+ void BuildStaLookupSlot(LanguageMode language_mode);
+ void BuildCall(TailCallMode tail_call_mode);
+ void BuildCallJSRuntime();
+ void BuildCallRuntime();
+ void BuildCallRuntimeForPair();
+ void BuildCallConstruct();
+ void BuildThrow();
+ void BuildBinaryOp(const Operator* op);
+ void BuildCompareOp(const Operator* op);
+ void BuildDelete(LanguageMode language_mode);
+ void BuildCastOperator(const Operator* op);
+ void BuildForInPrepare();
+ void BuildForInNext();
// Control flow plumbing.
- void BuildJump(int source_offset, int target_offset);
void BuildJump();
void BuildConditionalJump(Node* condition);
void BuildJumpIfEqual(Node* comperand);
void BuildJumpIfToBooleanEqual(Node* boolean_comperand);
+ void BuildJumpIfNotHole();
+
+ // Simulates control flow by forward-propagating environments.
+ void MergeIntoSuccessorEnvironment(int target_offset);
+ void BuildLoopHeaderEnvironment(int current_offset);
+ void SwitchToMergeEnvironment(int current_offset);
- // Constructing merge and loop headers.
- void MergeEnvironmentsOfBackwardBranches(int source_offset,
- int target_offset);
- void MergeEnvironmentsOfForwardBranches(int source_offset);
- void BuildLoopHeaderForBackwardBranches(int source_offset);
+ // Simulates control flow that exits the function body.
+ void MergeControlToLeaveFunction(Node* exit);
- // Attaches a frame state to |node| for the entry to the function.
- void PrepareEntryFrameState(Node* node);
+ // Simulates entry and exit of exception handlers.
+ void EnterAndExitExceptionHandlers(int current_offset);
// Growth increment for the temporary buffer used to construct input lists to
// new nodes.
static const int kInputBufferSizeIncrement = 64;
+ // The catch prediction from the handler table is reused.
+ typedef HandlerTable::CatchPrediction CatchPrediction;
+
+ // An abstract representation for an exception handler that is being
+ // entered and exited while the graph builder is iterating over the
+ // underlying bytecode. The exception handlers within the bytecode are
+  // well scoped, hence they form a stack during iteration.
+ struct ExceptionHandler {
+ int start_offset_; // Start offset of the handled area in the bytecode.
+ int end_offset_; // End offset of the handled area in the bytecode.
+ int handler_offset_; // Handler entry offset within the bytecode.
+ int context_register_; // Index of register holding handler context.
+ CatchPrediction pred_; // Prediction of whether handler is catching.
+ };
+
// Field accessors
+ Graph* graph() const { return jsgraph_->graph(); }
CommonOperatorBuilder* common() const { return jsgraph_->common(); }
Zone* graph_zone() const { return graph()->zone(); }
- CompilationInfo* info() const { return info_; }
JSGraph* jsgraph() const { return jsgraph_; }
JSOperatorBuilder* javascript() const { return jsgraph_->javascript(); }
Zone* local_zone() const { return local_zone_; }
const Handle<BytecodeArray>& bytecode_array() const {
return bytecode_array_;
}
+ const Handle<HandlerTable>& exception_handler_table() const {
+ return exception_handler_table_;
+ }
+ const Handle<TypeFeedbackVector>& feedback_vector() const {
+ return feedback_vector_;
+ }
const FrameStateFunctionInfo* frame_state_function_info() const {
return frame_state_function_info_;
}
- LanguageMode language_mode() const {
- // TODO(mythria): Don't rely on parse information to get language mode.
- return info()->language_mode();
- }
-
- const interpreter::BytecodeArrayIterator* bytecode_iterator() const {
- return bytecode_iterator_;
+ const interpreter::BytecodeArrayIterator& bytecode_iterator() const {
+ return *bytecode_iterator_;
}
void set_bytecode_iterator(
@@ -211,28 +210,32 @@ class BytecodeGraphBuilder {
branch_analysis_ = branch_analysis;
}
-#define DECLARE_VISIT_BYTECODE(name, ...) \
- void Visit##name(const interpreter::BytecodeArrayIterator& iterator);
+#define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
#undef DECLARE_VISIT_BYTECODE
Zone* local_zone_;
- CompilationInfo* info_;
JSGraph* jsgraph_;
Handle<BytecodeArray> bytecode_array_;
+ Handle<HandlerTable> exception_handler_table_;
+ Handle<TypeFeedbackVector> feedback_vector_;
const FrameStateFunctionInfo* frame_state_function_info_;
const interpreter::BytecodeArrayIterator* bytecode_iterator_;
const BytecodeBranchAnalysis* branch_analysis_;
Environment* environment_;
+ // Indicates whether deoptimization support is enabled for this compilation
+ // and whether valid frame states need to be attached to deoptimizing nodes.
+ bool deoptimization_enabled_;
- // Merge environments are snapshots of the environment at a particular
- // bytecode offset to be merged into a later environment.
+ // Merge environments are snapshots of the environment at points where the
+ // control flow merges. This models a forward data flow propagation of all
+ // values from all predecessors of the merge in question.
ZoneMap<int, Environment*> merge_environments_;
- // Loop header environments are environments created for bytecodes
- // where it is known there are back branches, ie a loop header.
- ZoneMap<int, Environment*> loop_header_environments_;
+ // Exception handlers currently entered by the iteration.
+ ZoneStack<ExceptionHandler> exception_handlers_;
+ int current_exception_handler_;
// Temporary storage for building node input lists.
int input_buffer_size_;
@@ -243,100 +246,12 @@ class BytecodeGraphBuilder {
SetOncePointer<Node> function_closure_;
SetOncePointer<Node> new_target_;
- // Optimization to cache loaded feedback vector.
- SetOncePointer<Node> feedback_vector_;
-
// Control nodes that exit the function body.
ZoneVector<Node*> exit_controls_;
DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
};
-
-class BytecodeGraphBuilder::Environment : public ZoneObject {
- public:
- Environment(BytecodeGraphBuilder* builder, int register_count,
- int parameter_count, Node* control_dependency, Node* context);
-
- int parameter_count() const { return parameter_count_; }
- int register_count() const { return register_count_; }
-
- Node* LookupAccumulator() const;
- Node* LookupRegister(interpreter::Register the_register) const;
-
- void ExchangeRegisters(interpreter::Register reg0,
- interpreter::Register reg1);
-
- void BindAccumulator(Node* node, FrameStateBeforeAndAfter* states = nullptr);
- void BindRegister(interpreter::Register the_register, Node* node,
- FrameStateBeforeAndAfter* states = nullptr);
- void BindRegistersToProjections(interpreter::Register first_reg, Node* node,
- FrameStateBeforeAndAfter* states = nullptr);
- void RecordAfterState(Node* node, FrameStateBeforeAndAfter* states);
-
- bool IsMarkedAsUnreachable() const;
- void MarkAsUnreachable();
-
- // Effect dependency tracked by this environment.
- Node* GetEffectDependency() { return effect_dependency_; }
- void UpdateEffectDependency(Node* dependency) {
- effect_dependency_ = dependency;
- }
-
- // Preserve a checkpoint of the environment for the IR graph. Any
- // further mutation of the environment will not affect checkpoints.
- Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine);
-
- // Returns true if the state values are up to date with the current
- // environment.
- bool StateValuesAreUpToDate(int output_poke_offset, int output_poke_count);
-
- // Control dependency tracked by this environment.
- Node* GetControlDependency() const { return control_dependency_; }
- void UpdateControlDependency(Node* dependency) {
- control_dependency_ = dependency;
- }
-
- Node* Context() const { return context_; }
- void SetContext(Node* new_context) { context_ = new_context; }
-
- Environment* CopyForConditional() const;
- Environment* CopyForLoop();
- void Merge(Environment* other);
-
- private:
- explicit Environment(const Environment* copy);
- void PrepareForLoop();
- bool StateValuesAreUpToDate(Node** state_values, int offset, int count,
- int output_poke_start, int output_poke_end);
- bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
- void UpdateStateValues(Node** state_values, int offset, int count);
-
- int RegisterToValuesIndex(interpreter::Register the_register) const;
-
- Zone* zone() const { return builder_->local_zone(); }
- Graph* graph() const { return builder_->graph(); }
- CommonOperatorBuilder* common() const { return builder_->common(); }
- BytecodeGraphBuilder* builder() const { return builder_; }
- const NodeVector* values() const { return &values_; }
- NodeVector* values() { return &values_; }
- int register_base() const { return register_base_; }
- int accumulator_base() const { return accumulator_base_; }
-
- BytecodeGraphBuilder* builder_;
- int register_count_;
- int parameter_count_;
- Node* context_;
- Node* control_dependency_;
- Node* effect_dependency_;
- NodeVector values_;
- Node* parameters_state_values_;
- Node* registers_state_values_;
- Node* accumulator_state_values_;
- int register_base_;
- int accumulator_base_;
-};
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 44e0bf1672..783d9d6da3 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -90,6 +90,7 @@ LinkageLocation regloc(Register reg) {
// ===========================================================================
// == mips ===================================================================
// ===========================================================================
+#define STACK_SHADOW_WORDS 4
#define PARAM_REGISTERS a0, a1, a2, a3
#define CALLEE_SAVE_REGISTERS \
s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
@@ -133,23 +134,22 @@ LinkageLocation regloc(Register reg) {
// General code uses the above configuration data.
CallDescriptor* Linkage::GetSimplifiedCDescriptor(
- Zone* zone, const MachineSignature* msig) {
+ Zone* zone, const MachineSignature* msig, bool set_initialize_root_flag) {
LocationSignature::Builder locations(zone, msig->return_count(),
msig->parameter_count());
-#if 0 // TODO(titzer): instruction selector tests break here.
// Check the types of the signature.
// Currently no floating point parameters or returns are allowed because
// on x87 and ia32, the FP top of stack is involved.
-
for (size_t i = 0; i < msig->return_count(); i++) {
- MachineType type = RepresentationOf(msig->GetReturn(i));
- CHECK(type != kRepFloat32 && type != kRepFloat64);
+ MachineRepresentation rep = msig->GetReturn(i).representation();
+ CHECK_NE(MachineRepresentation::kFloat32, rep);
+ CHECK_NE(MachineRepresentation::kFloat64, rep);
}
for (size_t i = 0; i < msig->parameter_count(); i++) {
- MachineType type = RepresentationOf(msig->GetParam(i));
- CHECK(type != kRepFloat32 && type != kRepFloat64);
+ MachineRepresentation rep = msig->GetParam(i).representation();
+ CHECK_NE(MachineRepresentation::kFloat32, rep);
+ CHECK_NE(MachineRepresentation::kFloat64, rep);
}
-#endif
#ifdef UNSUPPORTED_C_LINKAGE
// This method should not be called on unknown architectures.
@@ -220,7 +220,9 @@ CallDescriptor* Linkage::GetSimplifiedCDescriptor(
Operator::kNoProperties, // properties
kCalleeSaveRegisters, // callee-saved registers
kCalleeSaveFPRegisters, // callee-saved fp regs
- CallDescriptor::kNoFlags, // flags
+ set_initialize_root_flag ? // flags
+ CallDescriptor::kInitializeRootRegister
+ : CallDescriptor::kNoFlags,
"c-call");
}
diff --git a/deps/v8/src/compiler/change-lowering.cc b/deps/v8/src/compiler/change-lowering.cc
index f791db1fdc..e217f3786b 100644
--- a/deps/v8/src/compiler/change-lowering.cc
+++ b/deps/v8/src/compiler/change-lowering.cc
@@ -49,6 +49,12 @@ Reduction ChangeLowering::Reduce(Node* node) {
return StoreElement(node);
case IrOpcode::kAllocate:
return Allocate(node);
+ case IrOpcode::kObjectIsReceiver:
+ return ObjectIsReceiver(node);
+ case IrOpcode::kObjectIsSmi:
+ return ObjectIsSmi(node);
+ case IrOpcode::kObjectIsNumber:
+ return ObjectIsNumber(node);
default:
return NoChange();
}
@@ -582,6 +588,76 @@ Reduction ChangeLowering::Allocate(Node* node) {
return Changed(node);
}
+Node* ChangeLowering::IsSmi(Node* value) {
+ return graph()->NewNode(
+ machine()->WordEqual(),
+ graph()->NewNode(machine()->WordAnd(), value,
+ jsgraph()->IntPtrConstant(kSmiTagMask)),
+ jsgraph()->IntPtrConstant(kSmiTag));
+}
+
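
In plain C++ terms, the graph built here computes the classic Smi check below, assuming V8's tagging scheme where kSmiTag == 0 and kSmiTagMask == 1 (Smis have a clear low bit):

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTag = 0;
    const intptr_t kSmiTagMask = 1;

    // (word & kSmiTagMask) == kSmiTag, exactly as the WordAnd/WordEqual nodes above.
    bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == kSmiTag; }

    int main() {
      assert(IsSmi(42 << 1));   // tagged small integer: low bit clear
      assert(!IsSmi(0x1003));   // heap-object pointer: low bit set
    }
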
+Node* ChangeLowering::LoadHeapObjectMap(Node* object, Node* control) {
+ return graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), object,
+ jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
+ graph()->start(), control);
+}
+
+Node* ChangeLowering::LoadMapInstanceType(Node* map) {
+ return graph()->NewNode(
+ machine()->Load(MachineType::Uint8()), map,
+ jsgraph()->IntPtrConstant(Map::kInstanceTypeOffset - kHeapObjectTag),
+ graph()->start(), graph()->start());
+}
+
+Reduction ChangeLowering::ObjectIsNumber(Node* node) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ // TODO(bmeurer): Optimize somewhat based on input type.
+ Node* check = IsSmi(input);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = jsgraph()->Int32Constant(1);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(
+ machine()->WordEqual(), LoadHeapObjectMap(input, if_false),
+ jsgraph()->HeapConstant(isolate()->factory()->heap_number_map()));
+ Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ node->ReplaceInput(0, vtrue);
+ node->AppendInput(graph()->zone(), vfalse);
+ node->AppendInput(graph()->zone(), control);
+ NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
+ return Changed(node);
+}
+
+Reduction ChangeLowering::ObjectIsReceiver(Node* node) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ // TODO(bmeurer): Optimize somewhat based on input type.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Node* check = IsSmi(input);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = jsgraph()->Int32Constant(0);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse =
+ graph()->NewNode(machine()->Uint32LessThanOrEqual(),
+ jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
+ LoadMapInstanceType(LoadHeapObjectMap(input, if_false)));
+ Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ node->ReplaceInput(0, vtrue);
+ node->AppendInput(graph()->zone(), vfalse);
+ node->AppendInput(graph()->zone(), control);
+ NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
+ return Changed(node);
+}
+
+Reduction ChangeLowering::ObjectIsSmi(Node* node) {
+ node->ReplaceInput(0,
+ graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
+ jsgraph()->IntPtrConstant(kSmiTagMask)));
+ node->AppendInput(graph()->zone(), jsgraph()->IntPtrConstant(kSmiTag));
+ NodeProperties::ChangeOp(node, machine()->WordEqual());
+ return Changed(node);
+}
Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); }
diff --git a/deps/v8/src/compiler/change-lowering.h b/deps/v8/src/compiler/change-lowering.h
index 6d607768d9..defadd95fd 100644
--- a/deps/v8/src/compiler/change-lowering.h
+++ b/deps/v8/src/compiler/change-lowering.h
@@ -56,6 +56,14 @@ class ChangeLowering final : public Reducer {
Reduction StoreElement(Node* node);
Reduction Allocate(Node* node);
+ Node* IsSmi(Node* value);
+ Node* LoadHeapObjectMap(Node* object, Node* control);
+ Node* LoadMapInstanceType(Node* map);
+
+ Reduction ObjectIsNumber(Node* node);
+ Reduction ObjectIsReceiver(Node* node);
+ Reduction ObjectIsSmi(Node* node);
+
Node* ComputeIndex(const ElementAccess& access, Node* const key);
Graph* graph() const;
Isolate* isolate() const;
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 313567ed87..712cfe0b2d 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -78,10 +78,12 @@ Handle<Code> CodeGenerator::GenerateCode() {
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm());
}
-
// Architecture-specific, linkage-specific prologue.
info->set_prologue_offset(masm()->pc_offset());
AssemblePrologue();
+ if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
+ masm()->InitializeRootRegister();
+ }
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
@@ -175,12 +177,12 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
}
- safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
+ safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
Handle<Code> result =
v8::internal::CodeGenerator::MakeCodeEpilogue(masm(), info);
result->set_is_turbofanned(true);
- result->set_stack_slots(frame()->GetSpillSlotCount());
+ result->set_stack_slots(frame()->GetTotalFrameSlotCount());
result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
// Emit exception handler table.
@@ -234,9 +236,12 @@ void CodeGenerator::RecordSafepoint(ReferenceMap* references,
if (operand.IsStackSlot()) {
int index = LocationOperand::cast(operand).index();
DCHECK(index >= 0);
- // Safepoint table indices are 0-based from the beginning of the spill
- // slot area, adjust appropriately.
- index -= stackSlotToSpillSlotDelta;
+ // We might index values in the fixed part of the frame (i.e. the
+ // closure pointer or the context pointer); these are not spill slots
+ // and therefore don't work with the SafepointTable currently, but
+ // we also don't need to worry about them, since the GC has special
+ // knowledge about those fields anyway.
+ if (index < stackSlotToSpillSlotDelta) continue;
safepoint.DefinePointerSlot(index, zone());
} else if (operand.IsRegister() && (kind & Safepoint::kWithRegisters)) {
Register reg = LocationOperand::cast(operand).GetRegister();
@@ -583,7 +588,7 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
case FrameStateType::kInterpretedFunction:
translation->BeginInterpretedFrame(
descriptor->bailout_id(), shared_info_id,
- static_cast<unsigned int>(descriptor->locals_count()));
+ static_cast<unsigned int>(descriptor->locals_count() + 1));
break;
case FrameStateType::kArgumentsAdaptor:
translation->BeginArgumentsAdaptorFrame(
diff --git a/deps/v8/src/compiler/code-stub-assembler.cc b/deps/v8/src/compiler/code-stub-assembler.cc
index b2a05b64f8..45f47d3310 100644
--- a/deps/v8/src/compiler/code-stub-assembler.cc
+++ b/deps/v8/src/compiler/code-stub-assembler.cc
@@ -24,28 +24,33 @@ namespace v8 {
namespace internal {
namespace compiler {
-
CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor,
- Code::Kind kind, const char* name)
+ Code::Flags flags, const char* name,
+ size_t result_size)
: raw_assembler_(new RawMachineAssembler(
isolate, new (zone) Graph(zone),
- Linkage::GetStubCallDescriptor(isolate, zone, descriptor, 0,
- CallDescriptor::kNoFlags))),
- kind_(kind),
+ Linkage::GetStubCallDescriptor(
+ isolate, zone, descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size))),
+ flags_(flags),
name_(name),
- code_generated_(false) {}
-
+ code_generated_(false),
+ variables_(zone) {}
CodeStubAssembler::~CodeStubAssembler() {}
+void CodeStubAssembler::CallPrologue() {}
+
+void CodeStubAssembler::CallEpilogue() {}
Handle<Code> CodeStubAssembler::GenerateCode() {
DCHECK(!code_generated_);
Schedule* schedule = raw_assembler_->Export();
Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
- isolate(), raw_assembler_->call_descriptor(), graph(), schedule, kind_,
+ isolate(), raw_assembler_->call_descriptor(), graph(), schedule, flags_,
name_);
code_generated_ = true;
@@ -77,6 +82,9 @@ Node* CodeStubAssembler::BooleanConstant(bool value) {
return raw_assembler_->BooleanConstant(value);
}
+Node* CodeStubAssembler::ExternalConstant(ExternalReference address) {
+ return raw_assembler_->ExternalConstant(address);
+}
Node* CodeStubAssembler::Parameter(int value) {
return raw_assembler_->Parameter(value);
@@ -87,6 +95,21 @@ void CodeStubAssembler::Return(Node* value) {
return raw_assembler_->Return(value);
}
+void CodeStubAssembler::Bind(CodeStubAssembler::Label* label) {
+ return label->Bind();
+}
+
+Node* CodeStubAssembler::LoadFramePointer() {
+ return raw_assembler_->LoadFramePointer();
+}
+
+Node* CodeStubAssembler::LoadParentFramePointer() {
+ return raw_assembler_->LoadParentFramePointer();
+}
+
+Node* CodeStubAssembler::LoadStackPointer() {
+ return raw_assembler_->LoadStackPointer();
+}
Node* CodeStubAssembler::SmiShiftBitsConstant() {
return Int32Constant(kSmiShiftSize + kSmiTagSize);
@@ -102,31 +125,117 @@ Node* CodeStubAssembler::SmiUntag(Node* value) {
return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
}
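
The shift amount kSmiShiftSize + kSmiTagSize equals 32 on a 64-bit build (assuming kSmiShiftSize == 31 and kSmiTagSize == 1, with the 32-bit payload in the upper word), so tagging and untagging reduce to this sketch:

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;
    const int kSmiShiftSize = 31;
    const int kSmiShiftBits = kSmiShiftSize + kSmiTagSize;  // 32

    int64_t SmiTag(int32_t value) {
      return static_cast<int64_t>(value) << kSmiShiftBits;
    }
    // Arithmetic right shift, matching the WordSar above.
    int32_t SmiUntag(int64_t word) {
      return static_cast<int32_t>(word >> kSmiShiftBits);
    }

    int main() {
      assert(SmiUntag(SmiTag(-7)) == -7);
      assert(SmiTag(1) == (int64_t{1} << 32));
    }
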
+#define DEFINE_CODE_STUB_ASSEMBLER_BINARY_OP(name)  \
+  Node* CodeStubAssembler::name(Node* a, Node* b) { \
+    return raw_assembler_->name(a, b);              \
+  }
+CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_STUB_ASSEMBLER_BINARY_OP)
+#undef DEFINE_CODE_STUB_ASSEMBLER_BINARY_OP
-Node* CodeStubAssembler::IntPtrAdd(Node* a, Node* b) {
- return raw_assembler_->IntPtrAdd(a, b);
-}
-
-
-Node* CodeStubAssembler::IntPtrSub(Node* a, Node* b) {
- return raw_assembler_->IntPtrSub(a, b);
+Node* CodeStubAssembler::ChangeInt32ToInt64(Node* value) {
+ return raw_assembler_->ChangeInt32ToInt64(value);
}
-
Node* CodeStubAssembler::WordShl(Node* value, int shift) {
return raw_assembler_->WordShl(value, Int32Constant(shift));
}
+Node* CodeStubAssembler::WordIsSmi(Node* a) {
+ return WordEqual(raw_assembler_->WordAnd(a, Int32Constant(kSmiTagMask)),
+ Int32Constant(0));
+}
+
+Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset) {
+ return raw_assembler_->Load(MachineType::AnyTagged(), buffer,
+ IntPtrConstant(offset));
+}
Node* CodeStubAssembler::LoadObjectField(Node* object, int offset) {
return raw_assembler_->Load(MachineType::AnyTagged(), object,
IntPtrConstant(offset - kHeapObjectTag));
}
+Node* CodeStubAssembler::LoadFixedArrayElementSmiIndex(Node* object,
+ Node* smi_index,
+ int additional_offset) {
+ Node* header_size = raw_assembler_->Int32Constant(
+ additional_offset + FixedArray::kHeaderSize - kHeapObjectTag);
+ Node* scaled_index =
+ (kSmiShiftSize == 0)
+ ? raw_assembler_->Word32Shl(
+ smi_index, Int32Constant(kPointerSizeLog2 - kSmiTagSize))
+ : raw_assembler_->Word32Shl(SmiUntag(smi_index),
+ Int32Constant(kPointerSizeLog2));
+ Node* offset = raw_assembler_->Int32Add(scaled_index, header_size);
+ return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
+}
+
+Node* CodeStubAssembler::LoadFixedArrayElementConstantIndex(Node* object,
+ int index) {
+ Node* offset = raw_assembler_->Int32Constant(
+ FixedArray::kHeaderSize - kHeapObjectTag + index * kPointerSize);
+ return raw_assembler_->Load(MachineType::AnyTagged(), object, offset);
+}
+
+Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) {
+ if (isolate()->heap()->RootCanBeTreatedAsConstant(root_index)) {
+ Handle<Object> root = isolate()->heap()->root_handle(root_index);
+ if (root->IsSmi()) {
+ return Int32Constant(Handle<Smi>::cast(root)->value());
+ } else {
+ return HeapConstant(Handle<HeapObject>::cast(root));
+ }
+ }
+
+ compiler::Node* roots_array_start =
+ ExternalConstant(ExternalReference::roots_array_start(isolate()));
+ USE(roots_array_start);
+
+  // TODO(danno): Implement the root-access case where the root is not constant
+ // and must be loaded from the root array.
+ UNIMPLEMENTED();
+ return nullptr;
+}
+
+Node* CodeStubAssembler::Load(MachineType rep, Node* base) {
+ return raw_assembler_->Load(rep, base);
+}
+
+Node* CodeStubAssembler::Load(MachineType rep, Node* base, Node* index) {
+ return raw_assembler_->Load(rep, base, index);
+}
+
+Node* CodeStubAssembler::Store(MachineRepresentation rep, Node* base,
+ Node* value) {
+ return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
+}
+
+Node* CodeStubAssembler::Store(MachineRepresentation rep, Node* base,
+ Node* index, Node* value) {
+ return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
+}
+
+Node* CodeStubAssembler::StoreNoWriteBarrier(MachineRepresentation rep,
+ Node* base, Node* value) {
+ return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
+}
+
+Node* CodeStubAssembler::StoreNoWriteBarrier(MachineRepresentation rep,
+ Node* base, Node* index,
+ Node* value) {
+ return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
+}
+
+Node* CodeStubAssembler::Projection(int index, Node* value) {
+ return raw_assembler_->Projection(index, value);
+}
Node* CodeStubAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
Node** args) {
- return raw_assembler_->CallN(descriptor, code_target, args);
+ CallPrologue();
+ Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
+ CallEpilogue();
+ return return_value;
}
@@ -135,41 +244,371 @@ Node* CodeStubAssembler::TailCallN(CallDescriptor* descriptor,
return raw_assembler_->TailCallN(descriptor, code_target, args);
}
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* context) {
+ CallPrologue();
+ Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
+ CallEpilogue();
+ return return_value;
+}
Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* context, Node* arg1) {
- return raw_assembler_->CallRuntime1(function_id, arg1, context);
+ CallPrologue();
+ Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
+ CallEpilogue();
+ return return_value;
}
-
Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
Node* context, Node* arg1, Node* arg2) {
- return raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
+ CallPrologue();
+ Node* return_value =
+ raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
+ CallEpilogue();
+ return return_value;
+}
+
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2,
+ Node* arg3) {
+ CallPrologue();
+ Node* return_value =
+ raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
+ CallEpilogue();
+ return return_value;
}
+Node* CodeStubAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4) {
+ CallPrologue();
+ Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
+ arg3, arg4, context);
+ CallEpilogue();
+ return return_value;
+}
Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
Node* context, Node* arg1) {
return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
}
-
Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
Node* context, Node* arg1,
Node* arg2) {
return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
}
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2,
+ Node* arg3) {
+ return raw_assembler_->TailCallRuntime3(function_id, arg1, arg2, arg3,
+ context);
+}
+
+Node* CodeStubAssembler::TailCallRuntime(Runtime::FunctionId function_id,
+ Node* context, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4) {
+ return raw_assembler_->TailCallRuntime4(function_id, arg1, arg2, arg3, arg4,
+ context);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(2);
+ args[0] = arg1;
+ args[1] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(3);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(4);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4,
+ size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(5);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ args[4] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
+ Node* target, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5, size_t result_size) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+
+ Node** args = zone()->NewArray<Node*>(6);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ args[4] = arg5;
+ args[5] = context;
+
+ return CallN(call_descriptor, target, args);
+}
+
+Node* CodeStubAssembler::TailCallStub(CodeStub& stub, Node** args) {
+ Node* code_target = HeapConstant(stub.GetCode());
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), stub.GetCallInterfaceDescriptor(),
+ stub.GetStackParameterCount(), CallDescriptor::kSupportsTailCalls);
+ return raw_assembler_->TailCallN(descriptor, code_target, args);
+}
+
+Node* CodeStubAssembler::TailCall(
+ const CallInterfaceDescriptor& interface_descriptor, Node* code_target,
+ Node** args, size_t result_size) {
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), interface_descriptor,
+ interface_descriptor.GetStackParameterCount(),
+ CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
+ MachineType::AnyTagged(), result_size);
+ return raw_assembler_->TailCallN(descriptor, code_target, args);
+}
+
+void CodeStubAssembler::Goto(CodeStubAssembler::Label* label) {
+ label->MergeVariables();
+ raw_assembler_->Goto(label->label_);
+}
+
+void CodeStubAssembler::Branch(Node* condition,
+ CodeStubAssembler::Label* true_label,
+ CodeStubAssembler::Label* false_label) {
+ true_label->MergeVariables();
+ false_label->MergeVariables();
+ return raw_assembler_->Branch(condition, true_label->label_,
+ false_label->label_);
+}
+
+void CodeStubAssembler::Switch(Node* index, Label* default_label,
+ int32_t* case_values, Label** case_labels,
+ size_t case_count) {
+ RawMachineLabel** labels =
+ new (zone()->New(sizeof(RawMachineLabel*) * case_count))
+ RawMachineLabel*[case_count];
+  for (size_t i = 0; i < case_count; ++i) {
+    labels[i] = case_labels[i]->label_;
+    case_labels[i]->MergeVariables();
+  }
+  // The default label is reached along a single edge, so merge its
+  // variables once rather than once per case.
+  default_label->MergeVariables();
+ return raw_assembler_->Switch(index, default_label->label_, case_values,
+ labels, case_count);
+}
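
As a usage sketch only (the assembler `a` and the `index` node are assumed here, not part of this patch), Switch dispatches over parallel arrays of case values and case labels, with every label bound afterwards:

    CodeStubAssembler::Label case0(&a), case1(&a), fallthrough(&a);
    CodeStubAssembler::Label* case_labels[] = {&case0, &case1};
    int32_t case_values[] = {0, 1};
    a.Switch(index, &fallthrough, case_values, case_labels, 2);
    a.Bind(&case0);        // ... case 0 body, ending in a Goto or Return
    a.Bind(&case1);        // ... case 1 body
    a.Bind(&fallthrough);  // ... default body
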
// RawMachineAssembler delegate helpers:
Isolate* CodeStubAssembler::isolate() { return raw_assembler_->isolate(); }
-
Graph* CodeStubAssembler::graph() { return raw_assembler_->graph(); }
-
Zone* CodeStubAssembler::zone() { return raw_assembler_->zone(); }
+// The core implementation of Variable is stored through an indirection so
+// that it can outlive the often block-scoped Variable declarations. This is
+// needed to ensure that variable binding and merging through phis can
+// properly be verified.
+class CodeStubAssembler::Variable::Impl : public ZoneObject {
+ public:
+ explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}
+ Node* value_;
+ MachineRepresentation rep_;
+};
+
+CodeStubAssembler::Variable::Variable(CodeStubAssembler* assembler,
+ MachineRepresentation rep)
+ : impl_(new (assembler->zone()) Impl(rep)) {
+ assembler->variables_.push_back(impl_);
+}
+
+void CodeStubAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
+
+Node* CodeStubAssembler::Variable::value() const {
+ DCHECK_NOT_NULL(impl_->value_);
+ return impl_->value_;
+}
+
+MachineRepresentation CodeStubAssembler::Variable::rep() const {
+ return impl_->rep_;
+}
+
+bool CodeStubAssembler::Variable::IsBound() const {
+ return impl_->value_ != nullptr;
+}
+
+CodeStubAssembler::Label::Label(CodeStubAssembler* assembler)
+ : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
+ void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
+ label_ = new (buffer) RawMachineLabel();
+}
+
+CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
+ int merged_value_count,
+ CodeStubAssembler::Variable** merged_variables)
+ : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
+ void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
+ label_ = new (buffer) RawMachineLabel();
+ for (int i = 0; i < merged_value_count; ++i) {
+ variable_phis_[merged_variables[i]->impl_] = nullptr;
+ }
+}
+
+CodeStubAssembler::Label::Label(CodeStubAssembler* assembler,
+ CodeStubAssembler::Variable* merged_variable)
+ : CodeStubAssembler::Label(assembler, 1, &merged_variable) {}
+
+void CodeStubAssembler::Label::MergeVariables() {
+ ++merge_count_;
+ for (auto var : assembler_->variables_) {
+ size_t count = 0;
+ Node* node = var->value_;
+ if (node != nullptr) {
+ auto i = variable_merges_.find(var);
+ if (i != variable_merges_.end()) {
+ i->second.push_back(node);
+ count = i->second.size();
+ } else {
+ count = 1;
+ variable_merges_[var] = std::vector<Node*>(1, node);
+ }
+ }
+    // If the following assert fires, then you've jumped to this label along
+    // a path that fails to bind a variable which the label expects to merge
+    // into a phi.
+ DCHECK(variable_phis_.find(var) == variable_phis_.end() ||
+ count == merge_count_);
+ USE(count);
+
+ // If the label is already bound, we already know the set of variables to
+ // merge and phi nodes have already been created.
+ if (bound_) {
+ auto phi = variable_phis_.find(var);
+ if (phi != variable_phis_.end()) {
+ DCHECK_NOT_NULL(phi->second);
+ assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
+ } else {
+ auto i = variable_merges_.find(var);
+ USE(i);
+        // If the following assert fires, then you've declared a variable
+        // that had the same bound value along all paths up to the point
+        // where you bound this label, but then merged in a path with a new
+        // value for the variable after the label was bound. Phis cannot be
+        // added to a bound label after the fact; list the variable in the
+        // label constructor's set of merged variables instead.
+ DCHECK(find_if(i->second.begin(), i->second.end(),
+ [node](Node* e) -> bool { return node != e; }) ==
+ i->second.end());
+ }
+ }
+ }
+}
+
+void CodeStubAssembler::Label::Bind() {
+ DCHECK(!bound_);
+ assembler_->raw_assembler_->Bind(label_);
+
+ // Make sure that all variables that have changed along any path up to this
+ // point are marked as merge variables.
+ for (auto var : assembler_->variables_) {
+ Node* shared_value = nullptr;
+ auto i = variable_merges_.find(var);
+ if (i != variable_merges_.end()) {
+ for (auto value : i->second) {
+ DCHECK(value != nullptr);
+ if (value != shared_value) {
+ if (shared_value == nullptr) {
+ shared_value = value;
+ } else {
+ variable_phis_[var] = nullptr;
+ }
+ }
+ }
+ }
+ }
+
+ for (auto var : variable_phis_) {
+ CodeStubAssembler::Variable::Impl* var_impl = var.first;
+ auto i = variable_merges_.find(var_impl);
+ // If the following assert fires, then a variable that has been marked as
+ // being merged at the label--either by explicitly marking it so in the
+ // label constructor or by having seen different bound values at branches
+ // into the label--doesn't have a bound value along all of the paths that
+ // have been merged into the label up to this point.
+ DCHECK(i != variable_merges_.end() && i->second.size() == merge_count_);
+ Node* phi = assembler_->raw_assembler_->Phi(
+ var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
+ variable_phis_[var_impl] = phi;
+ }
+
+  // Bind each variable either to its merge phi, to the common value along
+  // all paths, or to null.
+ for (auto var : assembler_->variables_) {
+ auto i = variable_phis_.find(var);
+ if (i != variable_phis_.end()) {
+ var->value_ = i->second;
+ } else {
+ auto j = variable_merges_.find(var);
+ if (j != variable_merges_.end() && j->second.size() == merge_count_) {
+ var->value_ = j->second.back();
+ } else {
+ var->value_ = nullptr;
+ }
+ }
+ }
+
+ bound_ = true;
+}
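
To make the merge semantics above concrete, here is a hypothetical sketch (assuming a CodeStubAssembler `a`; only API introduced in this patch is used) of a diamond that binds a variable to different values on two paths. Bind() on the merge label detects the disagreement and routes var.value() through a freshly created phi:

    CodeStubAssembler::Variable var(&a, MachineRepresentation::kTagged);
    CodeStubAssembler::Label if_true(&a), if_false(&a);
    CodeStubAssembler::Label merge(&a, &var);  // declare var as merged
    Node* cond = a.WordEqual(a.Parameter(0), a.IntPtrConstant(0));
    a.Branch(cond, &if_true, &if_false);
    a.Bind(&if_true);
    var.Bind(a.NumberConstant(1.0));
    a.Goto(&merge);  // Goto records var's value for this incoming edge
    a.Bind(&if_false);
    var.Bind(a.NumberConstant(2.0));
    a.Goto(&merge);  // a second, different value
    a.Bind(&merge);
    a.Return(var.value());  // now the phi of the two constants
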
} // namespace compiler
} // namespace internal
diff --git a/deps/v8/src/compiler/code-stub-assembler.h b/deps/v8/src/compiler/code-stub-assembler.h
index 3c4ae05eaa..2ab13764c4 100644
--- a/deps/v8/src/compiler/code-stub-assembler.h
+++ b/deps/v8/src/compiler/code-stub-assembler.h
@@ -5,11 +5,16 @@
#ifndef V8_COMPILER_CODE_STUB_ASSEMBLER_H_
#define V8_COMPILER_CODE_STUB_ASSEMBLER_H_
+#include <map>
+
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
#include "src/allocation.h"
#include "src/builtins.h"
+#include "src/heap/heap.h"
+#include "src/machine-type.h"
#include "src/runtime/runtime.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
@@ -25,48 +30,196 @@ class Graph;
class Node;
class Operator;
class RawMachineAssembler;
+class RawMachineLabel;
class Schedule;
+#define CODE_STUB_ASSEMBLER_BINARY_OP_LIST(V) \
+ V(IntPtrAdd) \
+ V(IntPtrSub) \
+ V(Int32Add) \
+ V(Int32Sub) \
+ V(Int32Mul) \
+ V(Int32GreaterThanOrEqual) \
+ V(WordEqual) \
+ V(WordNotEqual) \
+ V(WordOr) \
+ V(WordAnd) \
+ V(WordXor) \
+ V(WordShl) \
+ V(WordShr) \
+ V(WordSar) \
+ V(WordRor) \
+ V(Word32Equal) \
+ V(Word32NotEqual) \
+ V(Word32Or) \
+ V(Word32And) \
+ V(Word32Xor) \
+ V(Word32Shl) \
+ V(Word32Shr) \
+ V(Word32Sar) \
+ V(Word32Ror) \
+ V(Word64Equal) \
+ V(Word64NotEqual) \
+ V(Word64Or) \
+ V(Word64And) \
+ V(Word64Xor) \
+ V(Word64Shr) \
+ V(Word64Sar) \
+ V(Word64Ror) \
+ V(UintPtrGreaterThanOrEqual)
+
class CodeStubAssembler {
public:
+ // |result_size| specifies the number of results returned by the stub.
+ // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
CodeStubAssembler(Isolate* isolate, Zone* zone,
- const CallInterfaceDescriptor& descriptor, Code::Kind kind,
- const char* name);
+ const CallInterfaceDescriptor& descriptor,
+ Code::Flags flags, const char* name,
+ size_t result_size = 1);
virtual ~CodeStubAssembler();
Handle<Code> GenerateCode();
+ class Label;
+ class Variable {
+ public:
+ explicit Variable(CodeStubAssembler* assembler, MachineRepresentation rep);
+ void Bind(Node* value);
+ Node* value() const;
+ MachineRepresentation rep() const;
+ bool IsBound() const;
+
+ private:
+ friend class CodeStubAssembler;
+ class Impl;
+ Impl* impl_;
+ };
+
+ // ===========================================================================
+ // Base Assembler
+ // ===========================================================================
+
// Constants.
Node* Int32Constant(int value);
Node* IntPtrConstant(intptr_t value);
Node* NumberConstant(double value);
Node* HeapConstant(Handle<HeapObject> object);
Node* BooleanConstant(bool value);
+ Node* ExternalConstant(ExternalReference address);
Node* Parameter(int value);
void Return(Node* value);
- // Tag and untag Smi values.
- Node* SmiTag(Node* value);
- Node* SmiUntag(Node* value);
+ void Bind(Label* label);
+ void Goto(Label* label);
+ void Branch(Node* condition, Label* true_label, Label* false_label);
+
+ void Switch(Node* index, Label* default_label, int32_t* case_values,
+ Label** case_labels, size_t case_count);
+
+ // Access to the frame pointer
+ Node* LoadFramePointer();
+ Node* LoadParentFramePointer();
+
+ // Access to the stack pointer
+ Node* LoadStackPointer();
+
+ // Load raw memory location.
+ Node* Load(MachineType rep, Node* base);
+ Node* Load(MachineType rep, Node* base, Node* index);
+
+ // Store value to raw memory location.
+ Node* Store(MachineRepresentation rep, Node* base, Node* value);
+ Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
+ Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
+ Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
+ Node* value);
+
+// Basic arithmetic operations.
+#define DECLARE_CODE_STUB_ASSEMBLER_BINARY_OP(name) Node* name(Node* a, Node* b);
+  CODE_STUB_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_STUB_ASSEMBLER_BINARY_OP)
+#undef DECLARE_CODE_STUB_ASSEMBLER_BINARY_OP
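
For each entry in the list, the X-macro above stamps out one member declaration, so after preprocessing the class body contains, for example:

    Node* IntPtrAdd(Node* a, Node* b);
    Node* IntPtrSub(Node* a, Node* b);
    Node* Word32Equal(Node* a, Node* b);
    // ... one declaration per entry in CODE_STUB_ASSEMBLER_BINARY_OP_LIST.
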
- // Basic arithmetic operations.
- Node* IntPtrAdd(Node* a, Node* b);
- Node* IntPtrSub(Node* a, Node* b);
Node* WordShl(Node* value, int shift);
- // Load a field from an object on the heap.
- Node* LoadObjectField(Node* object, int offset);
+ // Conversions
+ Node* ChangeInt32ToInt64(Node* value);
+
+ // Projections
+ Node* Projection(int index, Node* value);
- // Call runtime function.
+ // Calls
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
Node* arg2);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+ Node* arg2, Node* arg3);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4, Node* arg5);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1);
Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
Node* arg1, Node* arg2);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2, Node* arg3);
+ Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
+ Node* arg1, Node* arg2, Node* arg3, Node* arg4);
+
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3,
+ size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ size_t result_size = 1);
+ Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5, size_t result_size = 1);
+
+ Node* TailCallStub(CodeStub& stub, Node** args);
+ Node* TailCall(const CallInterfaceDescriptor& descriptor, Node* target,
+ Node** args, size_t result_size = 1);
+
+ // ===========================================================================
+ // Macros
+ // ===========================================================================
+
+ // Tag and untag Smi values.
+ Node* SmiTag(Node* value);
+ Node* SmiUntag(Node* value);
+
+ // Load a value from the root array.
+ Node* LoadRoot(Heap::RootListIndex root_index);
+
+ // Check a value for smi-ness
+ Node* WordIsSmi(Node* a);
+
+ // Load an object pointer from a buffer that isn't in the heap.
+ Node* LoadBufferObject(Node* buffer, int offset);
+ // Load a field from an object on the heap.
+ Node* LoadObjectField(Node* object, int offset);
+
+ // Load an array element from a FixedArray.
+ Node* LoadFixedArrayElementSmiIndex(Node* object, Node* smi_index,
+ int additional_offset = 0);
+ Node* LoadFixedArrayElementConstantIndex(Node* object, int index);
+
+ protected:
+ // Protected helpers which delegate to RawMachineAssembler.
+ Graph* graph();
+ Isolate* isolate();
+ Zone* zone();
+
+ // Enables subclasses to perform operations before and after a call.
+ virtual void CallPrologue();
+ virtual void CallEpilogue();
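
These hooks are what allows CallN and the CallRuntime overloads in the .cc file to bracket every call. A hypothetical subclass (illustrative only, not code from this patch) would override them like so:

    class MyStubAssembler : public CodeStubAssembler {
     public:
      using CodeStubAssembler::CodeStubAssembler;

     protected:
      // Runs before each CallN/CallRuntime, e.g. to spill cached state.
      void CallPrologue() override { /* ... */ }
      // Runs after the call returns, e.g. to reload that state.
      void CallEpilogue() override { /* ... */ }
    };
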
private:
friend class CodeStubAssemblerTester;
@@ -76,19 +229,42 @@ class CodeStubAssembler {
Node* SmiShiftBitsConstant();
- // Private helpers which delegate to RawMachineAssembler.
- Graph* graph();
- Isolate* isolate();
- Zone* zone();
-
base::SmartPointer<RawMachineAssembler> raw_assembler_;
- Code::Kind kind_;
+ Code::Flags flags_;
const char* name_;
bool code_generated_;
+ ZoneVector<Variable::Impl*> variables_;
DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler);
};
+class CodeStubAssembler::Label {
+ public:
+ explicit Label(CodeStubAssembler* assembler);
+ Label(CodeStubAssembler* assembler, int merged_variable_count,
+ CodeStubAssembler::Variable** merged_variables);
+ Label(CodeStubAssembler* assembler,
+ CodeStubAssembler::Variable* merged_variable);
+ ~Label() {}
+
+ private:
+ friend class CodeStubAssembler;
+
+ void Bind();
+ void MergeVariables();
+
+ bool bound_;
+ size_t merge_count_;
+ CodeStubAssembler* assembler_;
+ RawMachineLabel* label_;
+ // Map of variables that need to be merged to their phi nodes (or placeholders
+ // for those phis).
+ std::map<Variable::Impl*, Node*> variable_phis_;
+ // Map of variables to the list of value nodes that have been added from each
+ // merge path in their order of merging.
+ std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
+};
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index be7730962f..c92bae9b19 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -803,11 +803,6 @@ const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
}
-const Operator* CommonOperatorBuilder::LazyBailout() {
- return Call(Linkage::GetLazyBailoutDescriptor(zone()));
-}
-
-
const Operator* CommonOperatorBuilder::TailCall(
const CallDescriptor* descriptor) {
class TailCallOperator final : public Operator1<const CallDescriptor*> {
@@ -866,11 +861,9 @@ const Operator* CommonOperatorBuilder::ResizeMergeOrPhi(const Operator* op,
const FrameStateFunctionInfo*
CommonOperatorBuilder::CreateFrameStateFunctionInfo(
FrameStateType type, int parameter_count, int local_count,
- Handle<SharedFunctionInfo> shared_info,
- ContextCallingMode context_calling_mode) {
+ Handle<SharedFunctionInfo> shared_info) {
return new (zone()->New(sizeof(FrameStateFunctionInfo)))
- FrameStateFunctionInfo(type, parameter_count, local_count, shared_info,
- context_calling_mode);
+ FrameStateFunctionInfo(type, parameter_count, local_count, shared_info);
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 83cb5b2c66..7c3f3dae86 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -14,11 +14,7 @@ namespace internal {
// Forward declarations.
class ExternalReference;
-template <class>
-class TypeImpl;
-struct ZoneTypeConfig;
-typedef TypeImpl<ZoneTypeConfig> Type;
-
+class Type;
namespace compiler {
@@ -174,7 +170,6 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* Call(const CallDescriptor* descriptor);
const Operator* TailCall(const CallDescriptor* descriptor);
const Operator* Projection(size_t index);
- const Operator* LazyBailout();
// Constructs a new merge or phi operator with the same opcode as {op}, but
// with {size} inputs.
@@ -183,8 +178,7 @@ class CommonOperatorBuilder final : public ZoneObject {
// Constructs function info for frame state construction.
const FrameStateFunctionInfo* CreateFrameStateFunctionInfo(
FrameStateType type, int parameter_count, int local_count,
- Handle<SharedFunctionInfo> shared_info,
- ContextCallingMode context_calling_mode);
+ Handle<SharedFunctionInfo> shared_info);
private:
Zone* zone() const { return zone_; }
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index df8b65dab2..313b6396dd 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -11,6 +11,15 @@ namespace v8 {
namespace internal {
namespace compiler {
+#ifdef DEBUG
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_turbo_escape) PrintF(__VA_ARGS__); \
+ } while (false)
+#else
+#define TRACE(...)
+#endif // DEBUG
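
The do { ... } while (false) wrapper is the standard trick for making a multi-statement macro expand to a single statement. Without it, a guarded use such as

    if (done) TRACE("done\n"); else Abort();

would mis-parse: the else would attach to the macro's internal if (FLAG_trace_turbo_escape). With the wrapper the expansion is one statement and the trailing semicolon is consumed cleanly. (Abort() here is a hypothetical stand-in.)
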
+
EscapeAnalysisReducer::EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
EscapeAnalysis* escape_analysis,
Zone* zone)
@@ -18,10 +27,16 @@ EscapeAnalysisReducer::EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
jsgraph_(jsgraph),
escape_analysis_(escape_analysis),
zone_(zone),
- visited_(static_cast<int>(jsgraph->graph()->NodeCount()), zone) {}
+ fully_reduced_(static_cast<int>(jsgraph->graph()->NodeCount() * 2), zone),
+ exists_virtual_allocate_(true) {}
Reduction EscapeAnalysisReducer::Reduce(Node* node) {
+ if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
+ fully_reduced_.Contains(node->id())) {
+ return NoChange();
+ }
+
switch (node->opcode()) {
case IrOpcode::kLoadField:
case IrOpcode::kLoadElement:
@@ -37,11 +52,44 @@ Reduction EscapeAnalysisReducer::Reduce(Node* node) {
return ReduceReferenceEqual(node);
case IrOpcode::kObjectIsSmi:
return ReduceObjectIsSmi(node);
+      // FrameState and StateValues nodes are preprocessed here, and are
+      // visited via ReduceFrameStateUses from their user nodes.
+ case IrOpcode::kFrameState:
+ case IrOpcode::kStateValues: {
+ if (node->id() >= static_cast<NodeId>(fully_reduced_.length()) ||
+ fully_reduced_.Contains(node->id())) {
+ break;
+ }
+ bool depends_on_object_state = false;
+ for (int i = 0; i < node->InputCount(); i++) {
+ Node* input = node->InputAt(i);
+ switch (input->opcode()) {
+ case IrOpcode::kAllocate:
+ case IrOpcode::kFinishRegion:
+ depends_on_object_state =
+ depends_on_object_state || escape_analysis()->IsVirtual(input);
+ break;
+ case IrOpcode::kFrameState:
+ case IrOpcode::kStateValues:
+ depends_on_object_state =
+ depends_on_object_state ||
+ input->id() >= static_cast<NodeId>(fully_reduced_.length()) ||
+ !fully_reduced_.Contains(input->id());
+ break;
+ default:
+ break;
+ }
+ }
+ if (!depends_on_object_state) {
+ fully_reduced_.Add(node->id());
+ }
+ return NoChange();
+ }
default:
// TODO(sigurds): Change this to GetFrameStateInputCount once
// it is working. For now we use EffectInputCount > 0 to determine
// whether a node might have a frame state input.
- if (node->op()->EffectInputCount() > 0) {
+ if (exists_virtual_allocate_ && node->op()->EffectInputCount() > 0) {
return ReduceFrameStateUses(node);
}
break;
@@ -53,17 +101,15 @@ Reduction EscapeAnalysisReducer::Reduce(Node* node) {
Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
DCHECK(node->opcode() == IrOpcode::kLoadField ||
node->opcode() == IrOpcode::kLoadElement);
- if (visited_.Contains(node->id())) return NoChange();
- visited_.Add(node->id());
+ if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+ fully_reduced_.Add(node->id());
+ }
if (Node* rep = escape_analysis()->GetReplacement(node)) {
- visited_.Add(node->id());
counters()->turbo_escape_loads_replaced()->Increment();
- if (FLAG_trace_turbo_escape) {
- PrintF("Replaced #%d (%s) with #%d (%s)\n", node->id(),
- node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
- }
+ TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
+ node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
ReplaceWithValue(node, rep);
- return Changed(rep);
+ return Replace(rep);
}
return NoChange();
}
@@ -72,13 +118,12 @@ Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
Reduction EscapeAnalysisReducer::ReduceStore(Node* node) {
DCHECK(node->opcode() == IrOpcode::kStoreField ||
node->opcode() == IrOpcode::kStoreElement);
- if (visited_.Contains(node->id())) return NoChange();
- visited_.Add(node->id());
+ if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+ fully_reduced_.Add(node->id());
+ }
if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
- if (FLAG_trace_turbo_escape) {
- PrintF("Removed #%d (%s) from effect chain\n", node->id(),
- node->op()->mnemonic());
- }
+ TRACE("Removed #%d (%s) from effect chain\n", node->id(),
+ node->op()->mnemonic());
RelaxEffectsAndControls(node);
return Changed(node);
}
@@ -88,14 +133,13 @@ Reduction EscapeAnalysisReducer::ReduceStore(Node* node) {
Reduction EscapeAnalysisReducer::ReduceAllocate(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
- if (visited_.Contains(node->id())) return NoChange();
- visited_.Add(node->id());
+ if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+ fully_reduced_.Add(node->id());
+ }
if (escape_analysis()->IsVirtual(node)) {
RelaxEffectsAndControls(node);
counters()->turbo_escape_allocs_replaced()->Increment();
- if (FLAG_trace_turbo_escape) {
- PrintF("Removed allocate #%d from effect chain\n", node->id());
- }
+ TRACE("Removed allocate #%d from effect chain\n", node->id());
return Changed(node);
}
return NoChange();
@@ -106,8 +150,14 @@ Reduction EscapeAnalysisReducer::ReduceFinishRegion(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
Node* effect = NodeProperties::GetEffectInput(node, 0);
if (effect->opcode() == IrOpcode::kBeginRegion) {
+      // The node is only marked as fully reduced now, so that empty
+      // Begin/Finish region pairs are still removed in the process.
+ if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+ fully_reduced_.Add(node->id());
+ }
RelaxEffectsAndControls(effect);
RelaxEffectsAndControls(node);
+#ifdef DEBUG
if (FLAG_trace_turbo_escape) {
PrintF("Removed region #%d / #%d from effect chain,", effect->id(),
node->id());
@@ -117,6 +167,7 @@ Reduction EscapeAnalysisReducer::ReduceFinishRegion(Node* node) {
}
PrintF("\n");
}
+#endif // DEBUG
return Changed(node);
}
return NoChange();
@@ -131,22 +182,18 @@ Reduction EscapeAnalysisReducer::ReduceReferenceEqual(Node* node) {
if (escape_analysis()->IsVirtual(right) &&
escape_analysis()->CompareVirtualObjects(left, right)) {
ReplaceWithValue(node, jsgraph()->TrueConstant());
- if (FLAG_trace_turbo_escape) {
- PrintF("Replaced ref eq #%d with true\n", node->id());
- }
+ TRACE("Replaced ref eq #%d with true\n", node->id());
+      return Replace(jsgraph()->TrueConstant());
}
// Right-hand side is not a virtual object, or a different one.
ReplaceWithValue(node, jsgraph()->FalseConstant());
- if (FLAG_trace_turbo_escape) {
- PrintF("Replaced ref eq #%d with false\n", node->id());
- }
- return Replace(node);
+ TRACE("Replaced ref eq #%d with false\n", node->id());
+ return Replace(jsgraph()->FalseConstant());
} else if (escape_analysis()->IsVirtual(right)) {
// Left-hand side is not a virtual object.
ReplaceWithValue(node, jsgraph()->FalseConstant());
- if (FLAG_trace_turbo_escape) {
- PrintF("Replaced ref eq #%d with false\n", node->id());
- }
+ TRACE("Replaced ref eq #%d with false\n", node->id());
+ return Replace(jsgraph()->FalseConstant());
}
return NoChange();
}
@@ -157,24 +204,23 @@ Reduction EscapeAnalysisReducer::ReduceObjectIsSmi(Node* node) {
Node* input = NodeProperties::GetValueInput(node, 0);
if (escape_analysis()->IsVirtual(input)) {
ReplaceWithValue(node, jsgraph()->FalseConstant());
- if (FLAG_trace_turbo_escape) {
- PrintF("Replaced ObjectIsSmi #%d with false\n", node->id());
- }
- return Replace(node);
+ TRACE("Replaced ObjectIsSmi #%d with false\n", node->id());
+ return Replace(jsgraph()->FalseConstant());
}
return NoChange();
}
Reduction EscapeAnalysisReducer::ReduceFrameStateUses(Node* node) {
- if (visited_.Contains(node->id())) return NoChange();
- visited_.Add(node->id());
DCHECK_GE(node->op()->EffectInputCount(), 1);
+ if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+ fully_reduced_.Add(node->id());
+ }
bool changed = false;
for (int i = 0; i < node->InputCount(); ++i) {
Node* input = node->InputAt(i);
if (input->opcode() == IrOpcode::kFrameState) {
- if (Node* ret = ReduceFrameState(input, node, false)) {
+ if (Node* ret = ReduceDeoptState(input, node, false)) {
node->ReplaceInput(i, ret);
changed = true;
}
@@ -188,77 +234,55 @@ Reduction EscapeAnalysisReducer::ReduceFrameStateUses(Node* node) {
// Returns the clone if it duplicated the node, and null otherwise.
-Node* EscapeAnalysisReducer::ReduceFrameState(Node* node, Node* effect,
+Node* EscapeAnalysisReducer::ReduceDeoptState(Node* node, Node* effect,
bool multiple_users) {
- DCHECK(node->opcode() == IrOpcode::kFrameState);
- if (FLAG_trace_turbo_escape) {
- PrintF("Reducing FrameState %d\n", node->id());
+ DCHECK(node->opcode() == IrOpcode::kFrameState ||
+ node->opcode() == IrOpcode::kStateValues);
+ if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
+ fully_reduced_.Contains(node->id())) {
+ return nullptr;
}
+ TRACE("Reducing %s %d\n", node->op()->mnemonic(), node->id());
Node* clone = nullptr;
+ bool node_multiused = node->UseCount() > 1;
+ bool multiple_users_rec = multiple_users || node_multiused;
for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
Node* input = NodeProperties::GetValueInput(node, i);
- Node* ret =
- input->opcode() == IrOpcode::kStateValues
- ? ReduceStateValueInputs(input, effect, node->UseCount() > 1)
- : ReduceStateValueInput(node, i, effect, node->UseCount() > 1);
- if (ret) {
- if (node->UseCount() > 1 || multiple_users) {
- if (FLAG_trace_turbo_escape) {
- PrintF(" Cloning #%d", node->id());
- }
- node = clone = jsgraph()->graph()->CloneNode(node);
- if (FLAG_trace_turbo_escape) {
- PrintF(" to #%d\n", node->id());
+ if (input->opcode() == IrOpcode::kStateValues) {
+ if (Node* ret = ReduceDeoptState(input, effect, multiple_users_rec)) {
+ if (node_multiused || (multiple_users && !clone)) {
+ TRACE(" Cloning #%d", node->id());
+ node = clone = jsgraph()->graph()->CloneNode(node);
+ TRACE(" to #%d\n", node->id());
+ node_multiused = false;
}
- multiple_users = false; // Don't clone anymore.
+ NodeProperties::ReplaceValueInput(node, ret, i);
+ }
+ } else {
+ if (Node* ret = ReduceStateValueInput(node, i, effect, node_multiused,
+ clone, multiple_users)) {
+ DCHECK_NULL(clone);
+ node_multiused = false; // Don't clone anymore.
+ node = clone = ret;
}
- NodeProperties::ReplaceValueInput(node, ret, i);
}
}
- Node* outer_frame_state = NodeProperties::GetFrameStateInput(node, 0);
- if (outer_frame_state->opcode() == IrOpcode::kFrameState) {
- if (Node* ret =
- ReduceFrameState(outer_frame_state, effect, node->UseCount() > 1)) {
- if (node->UseCount() > 1 || multiple_users) {
- if (FLAG_trace_turbo_escape) {
- PrintF(" Cloning #%d", node->id());
- }
- node = clone = jsgraph()->graph()->CloneNode(node);
- if (FLAG_trace_turbo_escape) {
- PrintF(" to #%d\n", node->id());
+ if (node->opcode() == IrOpcode::kFrameState) {
+ Node* outer_frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ if (outer_frame_state->opcode() == IrOpcode::kFrameState) {
+ if (Node* ret =
+ ReduceDeoptState(outer_frame_state, effect, multiple_users_rec)) {
+ if (node_multiused || (multiple_users && !clone)) {
+ TRACE(" Cloning #%d", node->id());
+ node = clone = jsgraph()->graph()->CloneNode(node);
+ TRACE(" to #%d\n", node->id());
}
- multiple_users = false;
+ NodeProperties::ReplaceFrameStateInput(node, 0, ret);
}
- NodeProperties::ReplaceFrameStateInput(node, 0, ret);
}
}
- return clone;
-}
-
-
-// Returns the clone if it duplicated the node, and null otherwise.
-Node* EscapeAnalysisReducer::ReduceStateValueInputs(Node* node, Node* effect,
- bool multiple_users) {
- if (FLAG_trace_turbo_escape) {
- PrintF("Reducing StateValue #%d\n", node->id());
- }
- DCHECK(node->opcode() == IrOpcode::kStateValues);
- DCHECK_NOT_NULL(effect);
- Node* clone = nullptr;
- for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
- Node* input = NodeProperties::GetValueInput(node, i);
- Node* ret = nullptr;
- if (input->opcode() == IrOpcode::kStateValues) {
- ret = ReduceStateValueInputs(input, effect, multiple_users);
- } else {
- ret = ReduceStateValueInput(node, i, effect, multiple_users);
- }
- if (ret) {
- node = ret;
- DCHECK_NULL(clone);
- clone = ret;
- multiple_users = false;
- }
+ if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
+ fully_reduced_.Add(node->id());
}
return clone;
}
@@ -267,36 +291,36 @@ Node* EscapeAnalysisReducer::ReduceStateValueInputs(Node* node, Node* effect,
// Returns the clone if it duplicated the node, and null otherwise.
Node* EscapeAnalysisReducer::ReduceStateValueInput(Node* node, int node_index,
Node* effect,
+ bool node_multiused,
+ bool already_cloned,
bool multiple_users) {
Node* input = NodeProperties::GetValueInput(node, node_index);
- if (FLAG_trace_turbo_escape) {
- PrintF("Reducing State Input #%d (%s)\n", input->id(),
- input->op()->mnemonic());
+ if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
+ fully_reduced_.Contains(node->id())) {
+ return nullptr;
}
+ TRACE("Reducing State Input #%d (%s)\n", input->id(),
+ input->op()->mnemonic());
Node* clone = nullptr;
if (input->opcode() == IrOpcode::kFinishRegion ||
input->opcode() == IrOpcode::kAllocate) {
if (escape_analysis()->IsVirtual(input)) {
if (Node* object_state =
escape_analysis()->GetOrCreateObjectState(effect, input)) {
- if (node->UseCount() > 1 || multiple_users) {
- if (FLAG_trace_turbo_escape) {
- PrintF("Cloning #%d", node->id());
- }
+ if (node_multiused || (multiple_users && !already_cloned)) {
+ TRACE("Cloning #%d", node->id());
node = clone = jsgraph()->graph()->CloneNode(node);
- if (FLAG_trace_turbo_escape) {
- PrintF(" to #%d\n", node->id());
- }
+ TRACE(" to #%d\n", node->id());
+ node_multiused = false;
+ already_cloned = true;
}
NodeProperties::ReplaceValueInput(node, object_state, node_index);
- if (FLAG_trace_turbo_escape) {
- PrintF("Replaced state #%d input #%d with object state #%d\n",
- node->id(), input->id(), object_state->id());
- }
+ TRACE("Replaced state #%d input #%d with object state #%d\n",
+ node->id(), input->id(), object_state->id());
} else {
- if (FLAG_trace_turbo_escape) {
- PrintF("No object state replacement available.\n");
- }
+ TRACE("No object state replacement for #%d at effect #%d available.\n",
+ input->id(), effect->id());
+ UNREACHABLE();
}
}
}
@@ -308,6 +332,36 @@ Counters* EscapeAnalysisReducer::counters() const {
return jsgraph_->isolate()->counters();
}
+
+class EscapeAnalysisVerifier final : public AdvancedReducer {
+ public:
+ EscapeAnalysisVerifier(Editor* editor, EscapeAnalysis* escape_analysis)
+ : AdvancedReducer(editor), escape_analysis_(escape_analysis) {}
+
+ Reduction Reduce(Node* node) final {
+ switch (node->opcode()) {
+ case IrOpcode::kAllocate:
+ CHECK(!escape_analysis_->IsVirtual(node));
+ break;
+ default:
+ break;
+ }
+ return NoChange();
+ }
+
+ private:
+ EscapeAnalysis* escape_analysis_;
+};
+
+void EscapeAnalysisReducer::VerifyReplacement() const {
+#ifdef DEBUG
+ GraphReducer graph_reducer(zone(), jsgraph()->graph());
+ EscapeAnalysisVerifier verifier(&graph_reducer, escape_analysis());
+ graph_reducer.AddReducer(&verifier);
+ graph_reducer.ReduceGraph();
+#endif // DEBUG
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index 1c0da165fb..12487b1dcf 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -29,6 +29,10 @@ class EscapeAnalysisReducer final : public AdvancedReducer {
EscapeAnalysis* escape_analysis, Zone* zone);
Reduction Reduce(Node* node) final;
+ void SetExistsVirtualAllocate(bool exists) {
+ exists_virtual_allocate_ = exists;
+ }
+ void VerifyReplacement() const;
private:
Reduction ReduceLoad(Node* node);
@@ -38,9 +42,9 @@ class EscapeAnalysisReducer final : public AdvancedReducer {
Reduction ReduceReferenceEqual(Node* node);
Reduction ReduceObjectIsSmi(Node* node);
Reduction ReduceFrameStateUses(Node* node);
- Node* ReduceFrameState(Node* node, Node* effect, bool multiple_users);
- Node* ReduceStateValueInputs(Node* node, Node* effect, bool multiple_users);
+ Node* ReduceDeoptState(Node* node, Node* effect, bool multiple_users);
Node* ReduceStateValueInput(Node* node, int node_index, Node* effect,
+ bool node_multiused, bool already_cloned,
bool multiple_users);
JSGraph* jsgraph() const { return jsgraph_; }
@@ -51,7 +55,10 @@ class EscapeAnalysisReducer final : public AdvancedReducer {
JSGraph* const jsgraph_;
EscapeAnalysis* escape_analysis_;
Zone* const zone_;
- BitVector visited_;
+  // fully_reduced_ marks nodes that we have already processed (allocs,
+  // loads, stores) and nodes that do not need a visit from ReduceDeoptState
+  // etc.
+ BitVector fully_reduced_;
+ bool exists_virtual_allocate_;
DISALLOW_COPY_AND_ASSIGN(EscapeAnalysisReducer);
};
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index af0ba6a639..b1a12b201e 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -24,106 +24,134 @@ namespace v8 {
namespace internal {
namespace compiler {
-const EscapeAnalysis::Alias EscapeAnalysis::kNotReachable =
+using Alias = EscapeStatusAnalysis::Alias;
+
+#ifdef DEBUG
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_turbo_escape) PrintF(__VA_ARGS__); \
+ } while (false)
+#else
+#define TRACE(...)
+#endif
+
+const Alias EscapeStatusAnalysis::kNotReachable =
std::numeric_limits<Alias>::max();
-const EscapeAnalysis::Alias EscapeAnalysis::kUntrackable =
+const Alias EscapeStatusAnalysis::kUntrackable =
std::numeric_limits<Alias>::max() - 1;
-
class VirtualObject : public ZoneObject {
public:
- enum Status { kUntracked = 0, kTracked = 1 };
- VirtualObject(NodeId id, Zone* zone)
+ enum Status {
+ kInitial = 0,
+ kTracked = 1u << 0,
+ kInitialized = 1u << 1,
+ kCopyRequired = 1u << 2,
+ };
+ typedef base::Flags<Status, unsigned char> StatusFlags;
+
+ VirtualObject(NodeId id, VirtualState* owner, Zone* zone)
: id_(id),
- status_(kUntracked),
+ status_(kInitial),
fields_(zone),
phi_(zone),
- object_state_(nullptr) {}
+ object_state_(nullptr),
+ owner_(owner) {}
- VirtualObject(const VirtualObject& other)
+ VirtualObject(VirtualState* owner, const VirtualObject& other)
: id_(other.id_),
- status_(other.status_),
+ status_(other.status_ & ~kCopyRequired),
fields_(other.fields_),
phi_(other.phi_),
- object_state_(other.object_state_) {}
+ object_state_(other.object_state_),
+ owner_(owner) {}
- VirtualObject(NodeId id, Zone* zone, size_t field_number)
+ VirtualObject(NodeId id, VirtualState* owner, Zone* zone, size_t field_number,
+ bool initialized)
: id_(id),
- status_(kTracked),
+ status_(kTracked | (initialized ? kInitialized : kInitial)),
fields_(zone),
phi_(zone),
- object_state_(nullptr) {
+ object_state_(nullptr),
+ owner_(owner) {
fields_.resize(field_number);
phi_.resize(field_number, false);
}
- Node* GetField(size_t offset) {
- if (offset < fields_.size()) {
- return fields_[offset];
- }
- return nullptr;
- }
+ Node* GetField(size_t offset) { return fields_[offset]; }
- bool IsCreatedPhi(size_t offset) {
- if (offset < phi_.size()) {
- return phi_[offset];
- }
- return false;
- }
+ bool IsCreatedPhi(size_t offset) { return phi_[offset]; }
- bool SetField(size_t offset, Node* node, bool created_phi = false) {
- bool changed = fields_[offset] != node || phi_[offset] != created_phi;
+ void SetField(size_t offset, Node* node, bool created_phi = false) {
fields_[offset] = node;
phi_[offset] = created_phi;
- if (changed && FLAG_trace_turbo_escape && node) {
- PrintF("Setting field %zu of #%d to #%d (%s)\n", offset, id(), node->id(),
- node->op()->mnemonic());
- }
- return changed;
}
- bool IsVirtual() const { return status_ == kTracked; }
- bool IsTracked() const { return status_ != kUntracked; }
+ bool IsTracked() const { return status_ & kTracked; }
+ bool IsInitialized() const { return status_ & kInitialized; }
+ bool SetInitialized() { return status_ |= kInitialized; }
+ VirtualState* owner() const { return owner_; }
Node** fields_array() { return &fields_.front(); }
size_t field_count() { return fields_.size(); }
bool ResizeFields(size_t field_count) {
- if (field_count != fields_.size()) {
+ if (field_count > fields_.size()) {
fields_.resize(field_count);
phi_.resize(field_count);
return true;
}
return false;
}
- bool ClearAllFields() {
- bool changed = false;
+ void ClearAllFields() {
+ for (size_t i = 0; i < fields_.size(); ++i) {
+ fields_[i] = nullptr;
+ phi_[i] = false;
+ }
+ }
+ bool AllFieldsClear() {
for (size_t i = 0; i < fields_.size(); ++i) {
if (fields_[i] != nullptr) {
- fields_[i] = nullptr;
- changed = true;
+ return false;
}
- phi_[i] = false;
}
- return changed;
+ return true;
}
bool UpdateFrom(const VirtualObject& other);
+ bool MergeFrom(MergeCache* cache, Node* at, Graph* graph,
+ CommonOperatorBuilder* common);
void SetObjectState(Node* node) { object_state_ = node; }
Node* GetObjectState() const { return object_state_; }
+ bool IsCopyRequired() const { return status_ & kCopyRequired; }
+ void SetCopyRequired() { status_ |= kCopyRequired; }
+ bool NeedCopyForModification() {
+ if (!IsCopyRequired() || !IsInitialized()) {
+ return false;
+ }
+ return true;
+ }
NodeId id() const { return id_; }
void id(NodeId id) { id_ = id; }
private:
+ bool MergeFields(size_t i, Node* at, MergeCache* cache, Graph* graph,
+ CommonOperatorBuilder* common);
+
NodeId id_;
- Status status_;
+ StatusFlags status_;
ZoneVector<Node*> fields_;
ZoneVector<bool> phi_;
Node* object_state_;
+ VirtualState* owner_;
+
+ DISALLOW_COPY_AND_ASSIGN(VirtualObject);
};
+DEFINE_OPERATORS_FOR_FLAGS(VirtualObject::StatusFlags)
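
base::Flags wraps the Status enum in a type-safe bit set backed by an unsigned char, and DEFINE_OPERATORS_FOR_FLAGS generates the bitwise operators used on status_ below. Conceptually (a simplified sketch of the semantics, not the actual base::Flags implementation):

    VirtualObject::StatusFlags s =
        VirtualObject::kTracked | VirtualObject::kInitialized;
    bool tracked = s & VirtualObject::kTracked;  // test a flag
    s |= VirtualObject::kCopyRequired;           // set a flag
    s &= ~VirtualObject::kCopyRequired;          // clear it again
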
bool VirtualObject::UpdateFrom(const VirtualObject& other) {
bool changed = status_ != other.status_;
status_ = other.status_;
+ phi_ = other.phi_;
if (fields_.size() != other.fields_.size()) {
fields_ = other.fields_;
return true;
@@ -137,36 +165,49 @@ bool VirtualObject::UpdateFrom(const VirtualObject& other) {
return changed;
}
-
class VirtualState : public ZoneObject {
public:
- VirtualState(Zone* zone, size_t size);
- VirtualState(const VirtualState& states);
+ VirtualState(Node* owner, Zone* zone, size_t size)
+ : info_(size, nullptr, zone), owner_(owner) {}
+
+ VirtualState(Node* owner, const VirtualState& state)
+ : info_(state.info_.size(), nullptr, state.info_.get_allocator().zone()),
+ owner_(owner) {
+ for (size_t i = 0; i < info_.size(); ++i) {
+ if (state.info_[i]) {
+ info_[i] = state.info_[i];
+ }
+ }
+ }
VirtualObject* VirtualObjectFromAlias(size_t alias);
- VirtualObject* GetOrCreateTrackedVirtualObject(EscapeAnalysis::Alias alias,
- NodeId id, Zone* zone);
- void SetVirtualObject(EscapeAnalysis::Alias alias, VirtualObject* state);
- void LastChangedAt(Node* node) { last_changed_ = node; }
- Node* GetLastChanged() { return last_changed_; }
+ void SetVirtualObject(Alias alias, VirtualObject* state);
bool UpdateFrom(VirtualState* state, Zone* zone);
bool MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
- CommonOperatorBuilder* common, Node* control);
+ CommonOperatorBuilder* common, Node* at);
size_t size() const { return info_.size(); }
+ Node* owner() const { return owner_; }
+ VirtualObject* Copy(VirtualObject* obj, Alias alias);
+ void SetCopyRequired() {
+ for (VirtualObject* obj : info_) {
+ if (obj) obj->SetCopyRequired();
+ }
+ }
private:
ZoneVector<VirtualObject*> info_;
- Node* last_changed_;
-};
+ Node* owner_;
+ DISALLOW_COPY_AND_ASSIGN(VirtualState);
+};
class MergeCache : public ZoneObject {
public:
explicit MergeCache(Zone* zone)
: states_(zone), objects_(zone), fields_(zone) {
- states_.reserve(4);
- objects_.reserve(4);
- fields_.reserve(4);
+ states_.reserve(5);
+ objects_.reserve(5);
+ fields_.reserve(5);
}
ZoneVector<VirtualState*>& states() { return states_; }
ZoneVector<VirtualObject*>& objects() { return objects_; }
@@ -176,20 +217,20 @@ class MergeCache : public ZoneObject {
objects_.clear();
fields_.clear();
}
- size_t LoadVirtualObjectsFromStatesFor(EscapeAnalysis::Alias alias);
- void LoadVirtualObjectsForFieldsFrom(
- VirtualState* state, const ZoneVector<EscapeAnalysis::Alias>& aliases);
+ size_t LoadVirtualObjectsFromStatesFor(Alias alias);
+ void LoadVirtualObjectsForFieldsFrom(VirtualState* state,
+ const ZoneVector<Alias>& aliases);
Node* GetFields(size_t pos);
private:
ZoneVector<VirtualState*> states_;
ZoneVector<VirtualObject*> objects_;
ZoneVector<Node*> fields_;
-};
+ DISALLOW_COPY_AND_ASSIGN(MergeCache);
+};
-size_t MergeCache::LoadVirtualObjectsFromStatesFor(
- EscapeAnalysis::Alias alias) {
+size_t MergeCache::LoadVirtualObjectsFromStatesFor(Alias alias) {
objects_.clear();
DCHECK_GT(states_.size(), 0u);
size_t min = std::numeric_limits<size_t>::max();
@@ -202,13 +243,12 @@ size_t MergeCache::LoadVirtualObjectsFromStatesFor(
return min;
}
-
void MergeCache::LoadVirtualObjectsForFieldsFrom(
- VirtualState* state, const ZoneVector<EscapeAnalysis::Alias>& aliases) {
+ VirtualState* state, const ZoneVector<Alias>& aliases) {
objects_.clear();
size_t max_alias = state->size();
for (Node* field : fields_) {
- EscapeAnalysis::Alias alias = aliases[field->id()];
+ Alias alias = aliases[field->id()];
if (alias >= max_alias) continue;
if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
objects_.push_back(obj);
@@ -216,11 +256,13 @@ void MergeCache::LoadVirtualObjectsForFieldsFrom(
}
}
-
Node* MergeCache::GetFields(size_t pos) {
fields_.clear();
- Node* rep = objects_.front()->GetField(pos);
+ Node* rep = pos >= objects_.front()->field_count()
+ ? nullptr
+ : objects_.front()->GetField(pos);
for (VirtualObject* obj : objects_) {
+ if (pos >= obj->field_count()) continue;
Node* field = obj->GetField(pos);
if (field) {
fields_.push_back(field);
@@ -232,72 +274,48 @@ Node* MergeCache::GetFields(size_t pos) {
return rep;
}
-
-VirtualState::VirtualState(Zone* zone, size_t size)
- : info_(size, nullptr, zone), last_changed_(nullptr) {}
-
-
-VirtualState::VirtualState(const VirtualState& state)
- : info_(state.info_.size(), nullptr, state.info_.get_allocator().zone()),
- last_changed_(state.last_changed_) {
- for (size_t i = 0; i < state.info_.size(); ++i) {
- if (state.info_[i]) {
- info_[i] =
- new (info_.get_allocator().zone()) VirtualObject(*state.info_[i]);
- }
- }
+VirtualObject* VirtualState::Copy(VirtualObject* obj, Alias alias) {
+ if (obj->owner() == this) return obj;
+ VirtualObject* new_obj =
+ new (info_.get_allocator().zone()) VirtualObject(this, *obj);
+ TRACE("At state %p, alias @%d (#%d), copying virtual object from %p to %p\n",
+ static_cast<void*>(this), alias, obj->id(), static_cast<void*>(obj),
+ static_cast<void*>(new_obj));
+ info_[alias] = new_obj;
+ return new_obj;
}
-
VirtualObject* VirtualState::VirtualObjectFromAlias(size_t alias) {
return info_[alias];
}
-
-VirtualObject* VirtualState::GetOrCreateTrackedVirtualObject(
- EscapeAnalysis::Alias alias, NodeId id, Zone* zone) {
- if (VirtualObject* obj = VirtualObjectFromAlias(alias)) {
- return obj;
- }
- VirtualObject* obj = new (zone) VirtualObject(id, zone, 0);
- SetVirtualObject(alias, obj);
- return obj;
-}
-
-
-void VirtualState::SetVirtualObject(EscapeAnalysis::Alias alias,
- VirtualObject* obj) {
+void VirtualState::SetVirtualObject(Alias alias, VirtualObject* obj) {
info_[alias] = obj;
}
-
bool VirtualState::UpdateFrom(VirtualState* from, Zone* zone) {
+ if (from == this) return false;
bool changed = false;
- for (EscapeAnalysis::Alias alias = 0; alias < size(); ++alias) {
+ for (Alias alias = 0; alias < size(); ++alias) {
VirtualObject* ls = VirtualObjectFromAlias(alias);
VirtualObject* rs = from->VirtualObjectFromAlias(alias);
- if (rs == nullptr) {
- continue;
- }
+ if (ls == rs || rs == nullptr) continue;
if (ls == nullptr) {
- ls = new (zone) VirtualObject(*rs);
+ ls = new (zone) VirtualObject(this, *rs);
SetVirtualObject(alias, ls);
changed = true;
continue;
}
- if (FLAG_trace_turbo_escape) {
- PrintF(" Updating fields of @%d\n", alias);
- }
+ TRACE(" Updating fields of @%d\n", alias);
changed = ls->UpdateFrom(*rs) || changed;
}
   return changed;
}
-
namespace {
bool IsEquivalentPhi(Node* node1, Node* node2) {
@@ -316,7 +334,6 @@ bool IsEquivalentPhi(Node* node1, Node* node2) {
return true;
}
-
bool IsEquivalentPhi(Node* phi, ZoneVector<Node*>& inputs) {
if (phi->opcode() != IrOpcode::kPhi) return false;
if (phi->op()->ValueInputCount() != inputs.size()) {
@@ -333,186 +350,225 @@ bool IsEquivalentPhi(Node* phi, ZoneVector<Node*>& inputs) {
} // namespace
-
-Node* EscapeAnalysis::GetReplacementIfSame(ZoneVector<VirtualObject*>& objs) {
- Node* rep = GetReplacement(objs.front()->id());
- for (VirtualObject* obj : objs) {
- if (GetReplacement(obj->id()) != rep) {
- return nullptr;
+bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
+ Graph* graph, CommonOperatorBuilder* common) {
+ bool changed = false;
+ int value_input_count = static_cast<int>(cache->fields().size());
+ Node* rep = GetField(i);
+ if (!rep || !IsCreatedPhi(i)) {
+ Node* control = NodeProperties::GetControlInput(at);
+ cache->fields().push_back(control);
+ Node* phi = graph->NewNode(
+ common->Phi(MachineRepresentation::kTagged, value_input_count),
+ value_input_count + 1, &cache->fields().front());
+ SetField(i, phi, true);
+#ifdef DEBUG
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Creating Phi #%d as merge of", phi->id());
+ for (int i = 0; i < value_input_count; i++) {
+ PrintF(" #%d (%s)", cache->fields()[i]->id(),
+ cache->fields()[i]->op()->mnemonic());
+ }
+ PrintF("\n");
+ }
+#endif
+ changed = true;
+ } else {
+ DCHECK(rep->opcode() == IrOpcode::kPhi);
+ for (int n = 0; n < value_input_count; ++n) {
+ Node* old = NodeProperties::GetValueInput(rep, n);
+ if (old != cache->fields()[n]) {
+ changed = true;
+ NodeProperties::ReplaceValueInput(rep, cache->fields()[n], n);
+ }
}
}
- return rep;
+ return changed;
}
+bool VirtualObject::MergeFrom(MergeCache* cache, Node* at, Graph* graph,
+ CommonOperatorBuilder* common) {
+ DCHECK(at->opcode() == IrOpcode::kEffectPhi ||
+ at->opcode() == IrOpcode::kPhi);
+ bool changed = false;
+ for (size_t i = 0; i < field_count(); ++i) {
+ if (Node* field = cache->GetFields(i)) {
+ changed = changed || GetField(i) != field;
+ SetField(i, field);
+ TRACE(" Field %zu agree on rep #%d\n", i, field->id());
+ } else {
+ int arity = at->opcode() == IrOpcode::kEffectPhi
+ ? at->op()->EffectInputCount()
+ : at->op()->ValueInputCount();
+ if (cache->fields().size() == arity) {
+ changed = MergeFields(i, at, cache, graph, common) || changed;
+ } else {
+ if (GetField(i) != nullptr) {
+ TRACE(" Field %zu cleared\n", i);
+ changed = true;
+ }
+ SetField(i, nullptr);
+ }
+ }
+ }
+ return changed;
+}
bool VirtualState::MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
- CommonOperatorBuilder* common, Node* control) {
+ CommonOperatorBuilder* common, Node* at) {
DCHECK_GT(cache->states().size(), 0u);
bool changed = false;
- for (EscapeAnalysis::Alias alias = 0; alias < size(); ++alias) {
- size_t fields = cache->LoadVirtualObjectsFromStatesFor(alias);
+ for (Alias alias = 0; alias < size(); ++alias) {
+ cache->objects().clear();
+ VirtualObject* mergeObject = VirtualObjectFromAlias(alias);
+ bool copy_merge_object = false;
+ size_t fields = std::numeric_limits<size_t>::max();
+ for (VirtualState* state : cache->states()) {
+ if (VirtualObject* obj = state->VirtualObjectFromAlias(alias)) {
+ cache->objects().push_back(obj);
+ if (mergeObject == obj) {
+ copy_merge_object = true;
+ }
+ fields = std::min(obj->field_count(), fields);
+ }
+ }
if (cache->objects().size() == cache->states().size()) {
- if (FLAG_trace_turbo_escape) {
- PrintF(" Merging virtual objects of @%d\n", alias);
+ if (!mergeObject) {
+ VirtualObject* obj = new (zone)
+ VirtualObject(cache->objects().front()->id(), this, zone, fields,
+ cache->objects().front()->IsInitialized());
+ SetVirtualObject(alias, obj);
+ mergeObject = obj;
+ changed = true;
+ } else if (copy_merge_object) {
+ VirtualObject* obj = new (zone) VirtualObject(this, *mergeObject);
+ SetVirtualObject(alias, obj);
+ mergeObject = obj;
+ changed = true;
+ } else {
+ changed = mergeObject->ResizeFields(fields) || changed;
}
- VirtualObject* mergeObject = GetOrCreateTrackedVirtualObject(
- alias, cache->objects().front()->id(), zone);
- changed = mergeObject->ResizeFields(fields) || changed;
- for (size_t i = 0; i < fields; ++i) {
- if (Node* field = cache->GetFields(i)) {
- changed = mergeObject->SetField(i, field) || changed;
- if (FLAG_trace_turbo_escape) {
- PrintF(" Field %zu agree on rep #%d\n", i, field->id());
- }
- } else {
- int value_input_count = static_cast<int>(cache->fields().size());
- if (cache->fields().size() == cache->objects().size()) {
- Node* rep = mergeObject->GetField(i);
- if (!rep || !mergeObject->IsCreatedPhi(i)) {
- cache->fields().push_back(control);
- Node* phi = graph->NewNode(
- common->Phi(MachineRepresentation::kTagged,
- value_input_count),
- value_input_count + 1, &cache->fields().front());
- mergeObject->SetField(i, phi, true);
- if (FLAG_trace_turbo_escape) {
- PrintF(" Creating Phi #%d as merge of", phi->id());
- for (int i = 0; i < value_input_count; i++) {
- PrintF(" #%d (%s)", cache->fields()[i]->id(),
- cache->fields()[i]->op()->mnemonic());
- }
- PrintF("\n");
- }
- changed = true;
- } else {
- DCHECK(rep->opcode() == IrOpcode::kPhi);
- for (int n = 0; n < value_input_count; ++n) {
- if (n < rep->op()->ValueInputCount()) {
- Node* old = NodeProperties::GetValueInput(rep, n);
- if (old != cache->fields()[n]) {
- changed = true;
- NodeProperties::ReplaceValueInput(rep, cache->fields()[n],
- n);
- }
- } else {
- changed = true;
- rep->InsertInput(graph->zone(), n, cache->fields()[n]);
- }
- }
- if (rep->op()->ValueInputCount() != value_input_count) {
- if (FLAG_trace_turbo_escape) {
- PrintF(" Widening Phi #%d of arity %d to %d", rep->id(),
- rep->op()->ValueInputCount(), value_input_count);
- }
- NodeProperties::ChangeOp(
- rep, common->Phi(MachineRepresentation::kTagged,
- value_input_count));
- }
- }
- } else {
- changed = mergeObject->SetField(i, nullptr) || changed;
- }
+#ifdef DEBUG
+ if (FLAG_trace_turbo_escape) {
+ PrintF(" Alias @%d, merging into %p virtual objects", alias,
+ static_cast<void*>(mergeObject));
+ for (size_t i = 0; i < cache->objects().size(); i++) {
+ PrintF(" %p", static_cast<void*>(cache->objects()[i]));
}
+ PrintF("\n");
}
+#endif // DEBUG
+ changed = mergeObject->MergeFrom(cache, at, graph, common) || changed;
} else {
+ if (mergeObject) {
+ TRACE(" Alias %d, virtual object removed\n", alias);
+ changed = true;
+ }
SetVirtualObject(alias, nullptr);
}
}
return changed;
}
-
EscapeStatusAnalysis::EscapeStatusAnalysis(EscapeAnalysis* object_analysis,
Graph* graph, Zone* zone)
- : object_analysis_(object_analysis),
+ : stack_(zone),
+ object_analysis_(object_analysis),
graph_(graph),
zone_(zone),
- status_(graph->NodeCount(), kUnknown, zone),
- queue_(zone) {}
-
+ status_(zone),
+ next_free_alias_(0),
+ status_stack_(zone),
+ aliases_(zone) {}
EscapeStatusAnalysis::~EscapeStatusAnalysis() {}
-
bool EscapeStatusAnalysis::HasEntry(Node* node) {
return status_[node->id()] & (kTracked | kEscaped);
}
-
bool EscapeStatusAnalysis::IsVirtual(Node* node) {
- return (status_[node->id()] & kTracked) && !(status_[node->id()] & kEscaped);
+ return IsVirtual(node->id());
}
+bool EscapeStatusAnalysis::IsVirtual(NodeId id) {
+ return (status_[id] & kTracked) && !(status_[id] & kEscaped);
+}
bool EscapeStatusAnalysis::IsEscaped(Node* node) {
return status_[node->id()] & kEscaped;
}
-
bool EscapeStatusAnalysis::IsAllocation(Node* node) {
return node->opcode() == IrOpcode::kAllocate ||
node->opcode() == IrOpcode::kFinishRegion;
}
-
bool EscapeStatusAnalysis::SetEscaped(Node* node) {
bool changed = !(status_[node->id()] & kEscaped);
status_[node->id()] |= kEscaped | kTracked;
return changed;
}
-
-void EscapeStatusAnalysis::Resize() {
- status_.resize(graph()->NodeCount(), kUnknown);
+bool EscapeStatusAnalysis::IsInQueue(NodeId id) {
+ return status_[id] & kInQueue;
}
+void EscapeStatusAnalysis::SetInQueue(NodeId id, bool on_stack) {
+ if (on_stack) {
+ status_[id] |= kInQueue;
+ } else {
+ status_[id] &= ~kInQueue;
+ }
+}
-size_t EscapeStatusAnalysis::size() { return status_.size(); }
+void EscapeStatusAnalysis::ResizeStatusVector() {
+ if (status_.size() <= graph()->NodeCount()) {
+ status_.resize(graph()->NodeCount() * 1.1, kUnknown);
+ }
+}
+size_t EscapeStatusAnalysis::GetStatusVectorSize() { return status_.size(); }
-void EscapeStatusAnalysis::Run() {
- Resize();
- queue_.push_back(graph()->end());
- status_[graph()->end()->id()] |= kOnStack;
- while (!queue_.empty()) {
- Node* node = queue_.front();
- queue_.pop_front();
+void EscapeStatusAnalysis::RunStatusAnalysis() {
+ ResizeStatusVector();
+ while (!status_stack_.empty()) {
+ Node* node = status_stack_.back();
+ status_stack_.pop_back();
status_[node->id()] &= ~kOnStack;
Process(node);
status_[node->id()] |= kVisited;
- for (Edge edge : node->input_edges()) {
- Node* input = edge.to();
- if (!(status_[input->id()] & (kVisited | kOnStack))) {
- queue_.push_back(input);
- status_[input->id()] |= kOnStack;
- }
- }
}
}
+void EscapeStatusAnalysis::EnqueueForStatusAnalysis(Node* node) {
+ DCHECK_NOT_NULL(node);
+ if (!(status_[node->id()] & kOnStack)) {
+ status_stack_.push_back(node);
+ status_[node->id()] |= kOnStack;
+ }
+}
void EscapeStatusAnalysis::RevisitInputs(Node* node) {
for (Edge edge : node->input_edges()) {
Node* input = edge.to();
if (!(status_[input->id()] & kOnStack)) {
- queue_.push_back(input);
+ status_stack_.push_back(input);
status_[input->id()] |= kOnStack;
}
}
}
-
void EscapeStatusAnalysis::RevisitUses(Node* node) {
for (Edge edge : node->use_edges()) {
Node* use = edge.from();
- if (!(status_[use->id()] & kOnStack)) {
- queue_.push_back(use);
+ if (!(status_[use->id()] & kOnStack) && !IsNotReachable(use)) {
+ status_stack_.push_back(use);
status_[use->id()] |= kOnStack;
}
}
}
-
void EscapeStatusAnalysis::Process(Node* node) {
switch (node->opcode()) {
case IrOpcode::kAllocate:
@@ -535,15 +591,17 @@ void EscapeStatusAnalysis::Process(Node* node) {
RevisitUses(rep);
}
}
+ RevisitUses(node);
break;
}
case IrOpcode::kPhi:
if (!HasEntry(node)) {
status_[node->id()] |= kTracked;
- if (!IsAllocationPhi(node)) {
- SetEscaped(node);
- RevisitUses(node);
- }
+ RevisitUses(node);
+ }
+ if (!IsAllocationPhi(node) && SetEscaped(node)) {
+ RevisitInputs(node);
+ RevisitUses(node);
}
CheckUsesForEscape(node);
default:
@@ -551,7 +609,6 @@ void EscapeStatusAnalysis::Process(Node* node) {
}
}
-
bool EscapeStatusAnalysis::IsAllocationPhi(Node* node) {
for (Edge edge : node->input_edges()) {
Node* input = edge.to();
@@ -562,7 +619,6 @@ bool EscapeStatusAnalysis::IsAllocationPhi(Node* node) {
return true;
}
-
void EscapeStatusAnalysis::ProcessStoreField(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kStoreField);
Node* to = NodeProperties::GetValueInput(node, 0);
@@ -570,14 +626,11 @@ void EscapeStatusAnalysis::ProcessStoreField(Node* node) {
if ((IsEscaped(to) || !IsAllocation(to)) && SetEscaped(val)) {
RevisitUses(val);
RevisitInputs(val);
- if (FLAG_trace_turbo_escape) {
- PrintF("Setting #%d (%s) to escaped because of store to field of #%d\n",
- val->id(), val->op()->mnemonic(), to->id());
- }
+ TRACE("Setting #%d (%s) to escaped because of store to field of #%d\n",
+ val->id(), val->op()->mnemonic(), to->id());
}
}
-
void EscapeStatusAnalysis::ProcessStoreElement(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kStoreElement);
Node* to = NodeProperties::GetValueInput(node, 0);
@@ -585,34 +638,27 @@ void EscapeStatusAnalysis::ProcessStoreElement(Node* node) {
if ((IsEscaped(to) || !IsAllocation(to)) && SetEscaped(val)) {
RevisitUses(val);
RevisitInputs(val);
- if (FLAG_trace_turbo_escape) {
- PrintF("Setting #%d (%s) to escaped because of store to field of #%d\n",
- val->id(), val->op()->mnemonic(), to->id());
- }
+ TRACE("Setting #%d (%s) to escaped because of store to field of #%d\n",
+ val->id(), val->op()->mnemonic(), to->id());
}
}
-
void EscapeStatusAnalysis::ProcessAllocate(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
if (!HasEntry(node)) {
status_[node->id()] |= kTracked;
- if (FLAG_trace_turbo_escape) {
- PrintF("Created status entry for node #%d (%s)\n", node->id(),
- node->op()->mnemonic());
- }
+ TRACE("Created status entry for node #%d (%s)\n", node->id(),
+ node->op()->mnemonic());
NumberMatcher size(node->InputAt(0));
DCHECK(node->InputAt(0)->opcode() != IrOpcode::kInt32Constant &&
node->InputAt(0)->opcode() != IrOpcode::kInt64Constant &&
node->InputAt(0)->opcode() != IrOpcode::kFloat32Constant &&
node->InputAt(0)->opcode() != IrOpcode::kFloat64Constant);
+ RevisitUses(node);
if (!size.HasValue() && SetEscaped(node)) {
- RevisitUses(node);
- if (FLAG_trace_turbo_escape) {
- PrintF("Setting #%d to escaped because of non-const alloc\n",
- node->id());
- }
- // This node is known to escape, uses do not have to be checked.
+ TRACE("Setting #%d to escaped because of non-const alloc\n", node->id());
+          // This node is already known to escape; its uses do not have to be
+          // checked for escape.
return;
}
}
@@ -621,24 +667,22 @@ void EscapeStatusAnalysis::ProcessAllocate(Node* node) {
}
}
-
bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
bool phi_escaping) {
for (Edge edge : uses->use_edges()) {
Node* use = edge.from();
+ if (IsNotReachable(use)) continue;
if (edge.index() >= use->op()->ValueInputCount() +
OperatorProperties::GetContextInputCount(use->op()))
continue;
switch (use->opcode()) {
case IrOpcode::kPhi:
if (phi_escaping && SetEscaped(rep)) {
- if (FLAG_trace_turbo_escape) {
- PrintF(
- "Setting #%d (%s) to escaped because of use by phi node "
- "#%d (%s)\n",
- rep->id(), rep->op()->mnemonic(), use->id(),
- use->op()->mnemonic());
- }
+ TRACE(
+ "Setting #%d (%s) to escaped because of use by phi node "
+ "#%d (%s)\n",
+ rep->id(), rep->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
return true;
}
// Fallthrough.
@@ -651,37 +695,41 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
case IrOpcode::kReferenceEqual:
case IrOpcode::kFinishRegion:
if (IsEscaped(use) && SetEscaped(rep)) {
- if (FLAG_trace_turbo_escape) {
- PrintF(
- "Setting #%d (%s) to escaped because of use by escaping node "
- "#%d (%s)\n",
- rep->id(), rep->op()->mnemonic(), use->id(),
- use->op()->mnemonic());
- }
+ TRACE(
+ "Setting #%d (%s) to escaped because of use by escaping node "
+ "#%d (%s)\n",
+ rep->id(), rep->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
return true;
}
break;
case IrOpcode::kObjectIsSmi:
if (!IsAllocation(rep) && SetEscaped(rep)) {
- PrintF("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
- rep->id(), rep->op()->mnemonic(), use->id(),
- use->op()->mnemonic());
+ TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+ rep->id(), rep->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
+ return true;
+ }
+ break;
+ case IrOpcode::kSelect:
+ if (SetEscaped(rep)) {
+ TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+ rep->id(), rep->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
return true;
}
break;
default:
if (use->op()->EffectInputCount() == 0 &&
uses->op()->EffectInputCount() > 0) {
- PrintF("Encountered unaccounted use by #%d (%s)\n", use->id(),
- use->op()->mnemonic());
+ TRACE("Encountered unaccounted use by #%d (%s)\n", use->id(),
+ use->op()->mnemonic());
UNREACHABLE();
}
if (SetEscaped(rep)) {
- if (FLAG_trace_turbo_escape) {
- PrintF("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
- rep->id(), rep->op()->mnemonic(), use->id(),
- use->op()->mnemonic());
- }
+ TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
+ rep->id(), rep->op()->mnemonic(), use->id(),
+ use->op()->mnemonic());
return true;
}
}
@@ -689,7 +737,6 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
return false;
}
-
void EscapeStatusAnalysis::ProcessFinishRegion(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
if (!HasEntry(node)) {
@@ -701,7 +748,6 @@ void EscapeStatusAnalysis::ProcessFinishRegion(Node* node) {
}
}
-
void EscapeStatusAnalysis::DebugPrint() {
for (NodeId id = 0; id < status_.size(); id++) {
if (status_[id] & kTracked) {
@@ -711,58 +757,69 @@ void EscapeStatusAnalysis::DebugPrint() {
}
}
-
EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
Zone* zone)
- : graph_(graph),
+ : status_analysis_(this, graph, zone),
common_(common),
- zone_(zone),
virtual_states_(zone),
replacements_(zone),
- escape_status_(this, graph, zone),
- cache_(new (zone) MergeCache(zone)),
- aliases_(zone),
- next_free_alias_(0) {}
-
+ cache_(nullptr) {}
EscapeAnalysis::~EscapeAnalysis() {}
-
void EscapeAnalysis::Run() {
replacements_.resize(graph()->NodeCount());
- AssignAliases();
- RunObjectAnalysis();
- escape_status_.Run();
-}
-
-
-void EscapeAnalysis::AssignAliases() {
- ZoneVector<Node*> stack(zone());
- stack.push_back(graph()->end());
+ status_analysis_.AssignAliases();
+ if (status_analysis_.AliasCount() > 0) {
+ cache_ = new (zone()) MergeCache(zone());
+ replacements_.resize(graph()->NodeCount());
+ status_analysis_.ResizeStatusVector();
+ RunObjectAnalysis();
+ status_analysis_.RunStatusAnalysis();
+ }
+}
+
+void EscapeStatusAnalysis::AssignAliases() {
+ size_t max_size = 1024;
+ size_t min_size = 32;
+ size_t stack_size =
+ std::min(std::max(graph()->NodeCount() / 5, min_size), max_size);
+ stack_.reserve(stack_size);
+ ResizeStatusVector();
+ stack_.push_back(graph()->end());
CHECK_LT(graph()->NodeCount(), kUntrackable);
aliases_.resize(graph()->NodeCount(), kNotReachable);
aliases_[graph()->end()->id()] = kUntrackable;
- while (!stack.empty()) {
- Node* node = stack.back();
- stack.pop_back();
+ status_stack_.reserve(8);
+ TRACE("Discovering trackable nodes");
+ while (!stack_.empty()) {
+ Node* node = stack_.back();
+ stack_.pop_back();
switch (node->opcode()) {
case IrOpcode::kAllocate:
if (aliases_[node->id()] >= kUntrackable) {
aliases_[node->id()] = NextAlias();
+ TRACE(" @%d:%s#%u", aliases_[node->id()], node->op()->mnemonic(),
+ node->id());
+ EnqueueForStatusAnalysis(node);
}
break;
case IrOpcode::kFinishRegion: {
Node* allocate = NodeProperties::GetValueInput(node, 0);
+ DCHECK_NOT_NULL(allocate);
if (allocate->opcode() == IrOpcode::kAllocate) {
if (aliases_[allocate->id()] >= kUntrackable) {
if (aliases_[allocate->id()] == kNotReachable) {
- stack.push_back(allocate);
+ stack_.push_back(allocate);
}
aliases_[allocate->id()] = NextAlias();
+ TRACE(" @%d:%s#%u", aliases_[allocate->id()],
+ allocate->op()->mnemonic(), allocate->id());
+ EnqueueForStatusAnalysis(allocate);
}
aliases_[node->id()] = aliases_[allocate->id()];
- } else {
- aliases_[node->id()] = NextAlias();
+ TRACE(" @%d:%s#%u", aliases_[node->id()], node->op()->mnemonic(),
+ node->id());
}
break;
}
@@ -773,81 +830,119 @@ void EscapeAnalysis::AssignAliases() {
for (Edge edge : node->input_edges()) {
Node* input = edge.to();
if (aliases_[input->id()] == kNotReachable) {
- stack.push_back(input);
+ stack_.push_back(input);
aliases_[input->id()] = kUntrackable;
}
}
}
+ TRACE("\n");
+}
- if (FLAG_trace_turbo_escape) {
- PrintF("Discovered trackable nodes");
- for (EscapeAnalysis::Alias id = 0; id < graph()->NodeCount(); ++id) {
- if (aliases_[id] < kUntrackable) {
- if (FLAG_trace_turbo_escape) {
- PrintF(" #%u", id);
- }
- }
- }
- PrintF("\n");
+bool EscapeStatusAnalysis::IsNotReachable(Node* node) {
+ if (node->id() >= aliases_.size()) {
+ return false;
}
+ return aliases_[node->id()] == kNotReachable;
}
-
void EscapeAnalysis::RunObjectAnalysis() {
virtual_states_.resize(graph()->NodeCount());
- ZoneVector<Node*> stack(zone());
- stack.push_back(graph()->start());
- while (!stack.empty()) {
- Node* node = stack.back();
- stack.pop_back();
- if (aliases_[node->id()] != kNotReachable && Process(node)) {
+ ZoneDeque<Node*> queue(zone());
+ queue.push_back(graph()->start());
+ ZoneVector<Node*> danglers(zone());
+ while (!queue.empty()) {
+ Node* node = queue.back();
+ queue.pop_back();
+ status_analysis_.SetInQueue(node->id(), false);
+ if (Process(node)) {
for (Edge edge : node->use_edges()) {
- if (NodeProperties::IsEffectEdge(edge)) {
- Node* use = edge.from();
- if ((use->opcode() != IrOpcode::kLoadField &&
- use->opcode() != IrOpcode::kLoadElement) ||
- !IsDanglingEffectNode(use)) {
- stack.push_back(use);
- }
+ Node* use = edge.from();
+ if (IsNotReachable(use)) {
+ continue;
}
- }
- // First process loads: dangling loads are a problem otherwise.
- for (Edge edge : node->use_edges()) {
if (NodeProperties::IsEffectEdge(edge)) {
- Node* use = edge.from();
- if ((use->opcode() == IrOpcode::kLoadField ||
- use->opcode() == IrOpcode::kLoadElement) &&
- IsDanglingEffectNode(use)) {
- stack.push_back(use);
+ // Iteration order: depth first, but delay phis.
+          // We need DFS to avoid some duplication of VirtualStates and
+ // VirtualObjects, and we want to delay phis to improve performance.
+ if (use->opcode() == IrOpcode::kEffectPhi) {
+ if (!status_analysis_.IsInQueue(use->id())) {
+ queue.push_front(use);
+ }
+ } else if ((use->opcode() != IrOpcode::kLoadField &&
+ use->opcode() != IrOpcode::kLoadElement) ||
+ !IsDanglingEffectNode(use)) {
+ if (!status_analysis_.IsInQueue(use->id())) {
+ status_analysis_.SetInQueue(use->id(), true);
+ queue.push_back(use);
+ }
+ } else {
+ danglers.push_back(use);
}
}
}
+      // Danglers need to be processed immediately, even if they are
+      // already in the queue. Since they do not have effect outputs,
+      // we don't have to track whether they are in the queue.
+ queue.insert(queue.end(), danglers.begin(), danglers.end());
+ danglers.clear();
}
}
+#ifdef DEBUG
if (FLAG_trace_turbo_escape) {
DebugPrint();
}
+#endif
}
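To make the queue discipline of RunObjectAnalysis above concrete, here is a self-contained sketch with plain integers standing in for nodes. Kind, FakeNode, and Drain are invented stand-ins, not compiler types; the point is only the ordering: effect phis go to the front of the deque so they drain last, ordinary effect uses go to the back guarded by an in-queue bit, and dangling loads run right after their producer.

#include <cstdio>
#include <deque>
#include <vector>

enum class Kind { kEffectPhi, kDanglingLoad, kOther };

struct FakeNode {
  Kind kind;
  std::vector<int> effect_uses;  // ids of effect users of this node
};

void Drain(const std::vector<FakeNode>& nodes, int start) {
  std::deque<int> queue;
  std::vector<bool> in_queue(nodes.size(), false);
  std::vector<int> danglers;
  queue.push_back(start);
  while (!queue.empty()) {
    int id = queue.back();  // depth first: take from the back
    queue.pop_back();
    in_queue[id] = false;
    std::printf("process #%d\n", id);
    for (int use : nodes[id].effect_uses) {
      if (nodes[use].kind == Kind::kEffectPhi) {
        // Delay phis: the front of the deque is drained last.
        if (!in_queue[use]) queue.push_front(use);
      } else if (nodes[use].kind == Kind::kDanglingLoad) {
        danglers.push_back(use);  // handled immediately after the producer
      } else if (!in_queue[use]) {
        in_queue[use] = true;
        queue.push_back(use);
      }
    }
    queue.insert(queue.end(), danglers.begin(), danglers.end());
    danglers.clear();
  }
}

int main() {
  std::vector<FakeNode> nodes = {
      {Kind::kOther, {1, 2}},     // start: used by a phi and a dangling load
      {Kind::kEffectPhi, {}},     // delayed until everything else has run
      {Kind::kDanglingLoad, {}},  // processed right after node 0
  };
  Drain(nodes, 0);  // prints #0, #2, #1
}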
-
-bool EscapeAnalysis::IsDanglingEffectNode(Node* node) {
- if (node->op()->EffectInputCount() == 0) return false;
- if (node->op()->EffectOutputCount() == 0) return false;
- if (node->op()->EffectInputCount() == 1 &&
- NodeProperties::GetEffectInput(node)->opcode() == IrOpcode::kStart) {
+bool EscapeStatusAnalysis::IsDanglingEffectNode(Node* node) {
+ if (status_[node->id()] & kDanglingComputed) {
+ return status_[node->id()] & kDangling;
+ }
+ if (node->op()->EffectInputCount() == 0 ||
+ node->op()->EffectOutputCount() == 0 ||
+ (node->op()->EffectInputCount() == 1 &&
+ NodeProperties::GetEffectInput(node)->opcode() == IrOpcode::kStart)) {
// The start node is used as sentinel for nodes that are in general
// effectful, but of which an analysis has determined that they do not
// produce effects in this instance. We don't consider these nodes dangling.
+ status_[node->id()] |= kDanglingComputed;
return false;
}
for (Edge edge : node->use_edges()) {
+ Node* use = edge.from();
+ if (aliases_[use->id()] == kNotReachable) continue;
if (NodeProperties::IsEffectEdge(edge)) {
+ status_[node->id()] |= kDanglingComputed;
return false;
}
}
+ status_[node->id()] |= kDanglingComputed | kDangling;
return true;
}
+bool EscapeStatusAnalysis::IsEffectBranchPoint(Node* node) {
+ if (status_[node->id()] & kBranchPointComputed) {
+ return status_[node->id()] & kBranchPoint;
+ }
+ int count = 0;
+ for (Edge edge : node->use_edges()) {
+ Node* use = edge.from();
+ if (aliases_[use->id()] == kNotReachable) continue;
+ if (NodeProperties::IsEffectEdge(edge)) {
+ if ((use->opcode() == IrOpcode::kLoadField ||
+ use->opcode() == IrOpcode::kLoadElement ||
+ use->opcode() == IrOpcode::kLoad) &&
+ IsDanglingEffectNode(use))
+ continue;
+ if (++count > 1) {
+ status_[node->id()] |= kBranchPointComputed | kBranchPoint;
+ return true;
+ }
+ }
+ }
+ status_[node->id()] |= kBranchPointComputed;
+ return false;
+}
bool EscapeAnalysis::Process(Node* node) {
switch (node->opcode()) {
@@ -888,12 +983,12 @@ bool EscapeAnalysis::Process(Node* node) {
return true;
}
-
void EscapeAnalysis::ProcessAllocationUsers(Node* node) {
for (Edge edge : node->input_edges()) {
Node* input = edge.to();
- if (!NodeProperties::IsValueEdge(edge) &&
- !NodeProperties::IsContextEdge(edge))
+ Node* use = edge.from();
+ if (edge.index() >= use->op()->ValueInputCount() +
+ OperatorProperties::GetContextInputCount(use->op()))
continue;
switch (node->opcode()) {
case IrOpcode::kStoreField:
@@ -904,13 +999,17 @@ void EscapeAnalysis::ProcessAllocationUsers(Node* node) {
case IrOpcode::kStateValues:
case IrOpcode::kReferenceEqual:
case IrOpcode::kFinishRegion:
- case IrOpcode::kPhi:
+ case IrOpcode::kObjectIsSmi:
break;
default:
VirtualState* state = virtual_states_[node->id()];
- if (VirtualObject* obj = ResolveVirtualObject(state, input)) {
- if (obj->ClearAllFields()) {
- state->LastChangedAt(node);
+ if (VirtualObject* obj =
+ GetVirtualObject(state, ResolveReplacement(input))) {
+ if (!obj->AllFieldsClear()) {
+ obj = CopyForModificationAt(obj, state, node);
+ obj->ClearAllFields();
+ TRACE("Cleared all fields of @%d:#%d\n", GetAlias(obj->id()),
+ obj->id());
}
}
break;
@@ -918,22 +1017,32 @@ void EscapeAnalysis::ProcessAllocationUsers(Node* node) {
}
}
-
-bool EscapeAnalysis::IsEffectBranchPoint(Node* node) {
- int count = 0;
- for (Edge edge : node->use_edges()) {
- if (NodeProperties::IsEffectEdge(edge)) {
- if (++count > 1) {
- return true;
- }
- }
+VirtualState* EscapeAnalysis::CopyForModificationAt(VirtualState* state,
+ Node* node) {
+ if (state->owner() != node) {
+ VirtualState* new_state = new (zone()) VirtualState(node, *state);
+ virtual_states_[node->id()] = new_state;
+ TRACE("Copying virtual state %p to new state %p at node %s#%d\n",
+ static_cast<void*>(state), static_cast<void*>(new_state),
+ node->op()->mnemonic(), node->id());
+ return new_state;
}
- return false;
+ return state;
}
+VirtualObject* EscapeAnalysis::CopyForModificationAt(VirtualObject* obj,
+ VirtualState* state,
+ Node* node) {
+ if (obj->NeedCopyForModification()) {
+ state = CopyForModificationAt(state, node);
+ return state->Copy(obj, GetAlias(obj->id()));
+ }
+ return obj;
+}
void EscapeAnalysis::ForwardVirtualState(Node* node) {
DCHECK_EQ(node->op()->EffectInputCount(), 1);
+#ifdef DEBUG
if (node->opcode() != IrOpcode::kLoadField &&
node->opcode() != IrOpcode::kLoadElement &&
node->opcode() != IrOpcode::kLoad && IsDanglingEffectNode(node)) {
@@ -941,189 +1050,154 @@ void EscapeAnalysis::ForwardVirtualState(Node* node) {
node->op()->mnemonic());
UNREACHABLE();
}
+#endif // DEBUG
Node* effect = NodeProperties::GetEffectInput(node);
- // Break the cycle for effect phis.
- if (effect->opcode() == IrOpcode::kEffectPhi) {
- if (virtual_states_[effect->id()] == nullptr) {
- virtual_states_[effect->id()] =
- new (zone()) VirtualState(zone(), AliasCount());
- }
- }
DCHECK_NOT_NULL(virtual_states_[effect->id()]);
- if (IsEffectBranchPoint(effect)) {
- if (FLAG_trace_turbo_escape) {
- PrintF("Copying object state %p from #%d (%s) to #%d (%s)\n",
- static_cast<void*>(virtual_states_[effect->id()]), effect->id(),
- effect->op()->mnemonic(), node->id(), node->op()->mnemonic());
- }
- if (!virtual_states_[node->id()]) {
- virtual_states_[node->id()] =
- new (zone()) VirtualState(*virtual_states_[effect->id()]);
- } else {
- virtual_states_[node->id()]->UpdateFrom(virtual_states_[effect->id()],
- zone());
- }
+ if (virtual_states_[node->id()]) {
+ virtual_states_[node->id()]->UpdateFrom(virtual_states_[effect->id()],
+ zone());
} else {
virtual_states_[node->id()] = virtual_states_[effect->id()];
- if (FLAG_trace_turbo_escape) {
- PrintF("Forwarding object state %p from #%d (%s) to #%d (%s)\n",
- static_cast<void*>(virtual_states_[effect->id()]), effect->id(),
- effect->op()->mnemonic(), node->id(), node->op()->mnemonic());
+ TRACE("Forwarding object state %p from %s#%d to %s#%d",
+ static_cast<void*>(virtual_states_[effect->id()]),
+ effect->op()->mnemonic(), effect->id(), node->op()->mnemonic(),
+ node->id());
+ if (IsEffectBranchPoint(effect) ||
+ OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
+ virtual_states_[node->id()]->SetCopyRequired();
+ TRACE(", effect input %s#%d is branch point", effect->op()->mnemonic(),
+ effect->id());
}
+ TRACE("\n");
}
}
-
void EscapeAnalysis::ProcessStart(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kStart);
- virtual_states_[node->id()] = new (zone()) VirtualState(zone(), AliasCount());
+ virtual_states_[node->id()] =
+ new (zone()) VirtualState(node, zone(), AliasCount());
}
-
bool EscapeAnalysis::ProcessEffectPhi(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi);
bool changed = false;
VirtualState* mergeState = virtual_states_[node->id()];
if (!mergeState) {
- mergeState = new (zone()) VirtualState(zone(), AliasCount());
+ mergeState = new (zone()) VirtualState(node, zone(), AliasCount());
virtual_states_[node->id()] = mergeState;
changed = true;
- if (FLAG_trace_turbo_escape) {
- PrintF("Effect Phi #%d got new states map %p.\n", node->id(),
- static_cast<void*>(mergeState));
- }
- } else if (mergeState->GetLastChanged() != node) {
- changed = true;
+ TRACE("Effect Phi #%d got new virtual state %p.\n", node->id(),
+ static_cast<void*>(mergeState));
}
cache_->Clear();
- if (FLAG_trace_turbo_escape) {
- PrintF("At Effect Phi #%d, merging states into %p:", node->id(),
- static_cast<void*>(mergeState));
- }
+ TRACE("At Effect Phi #%d, merging states into %p:", node->id(),
+ static_cast<void*>(mergeState));
for (int i = 0; i < node->op()->EffectInputCount(); ++i) {
Node* input = NodeProperties::GetEffectInput(node, i);
VirtualState* state = virtual_states_[input->id()];
if (state) {
cache_->states().push_back(state);
+ if (state == mergeState) {
+ mergeState = new (zone()) VirtualState(node, zone(), AliasCount());
+ virtual_states_[node->id()] = mergeState;
+ changed = true;
+ }
}
- if (FLAG_trace_turbo_escape) {
- PrintF(" %p (from %d %s)", static_cast<void*>(state), input->id(),
- input->op()->mnemonic());
- }
- }
- if (FLAG_trace_turbo_escape) {
- PrintF("\n");
+ TRACE(" %p (from %d %s)", static_cast<void*>(state), input->id(),
+ input->op()->mnemonic());
}
+ TRACE("\n");
if (cache_->states().size() == 0) {
return changed;
}
- changed = mergeState->MergeFrom(cache_, zone(), graph(), common(),
- NodeProperties::GetControlInput(node)) ||
- changed;
+ changed =
+ mergeState->MergeFrom(cache_, zone(), graph(), common(), node) || changed;
- if (FLAG_trace_turbo_escape) {
- PrintF("Merge %s the node.\n", changed ? "changed" : "did not change");
- }
+ TRACE("Merge %s the node.\n", changed ? "changed" : "did not change");
if (changed) {
- mergeState->LastChangedAt(node);
- escape_status_.Resize();
+ status_analysis_.ResizeStatusVector();
}
return changed;
}
-
void EscapeAnalysis::ProcessAllocation(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kAllocate);
ForwardVirtualState(node);
+ VirtualState* state = virtual_states_[node->id()];
+ Alias alias = GetAlias(node->id());
// Check if we have already processed this node.
- if (virtual_states_[node->id()]->VirtualObjectFromAlias(
- aliases_[node->id()])) {
+ if (state->VirtualObjectFromAlias(alias)) {
return;
}
+ if (state->owner()->opcode() == IrOpcode::kEffectPhi) {
+ state = CopyForModificationAt(state, node);
+ }
+
NumberMatcher size(node->InputAt(0));
DCHECK(node->InputAt(0)->opcode() != IrOpcode::kInt32Constant &&
node->InputAt(0)->opcode() != IrOpcode::kInt64Constant &&
node->InputAt(0)->opcode() != IrOpcode::kFloat32Constant &&
node->InputAt(0)->opcode() != IrOpcode::kFloat64Constant);
if (size.HasValue()) {
- virtual_states_[node->id()]->SetVirtualObject(
- aliases_[node->id()],
- new (zone())
- VirtualObject(node->id(), zone(), size.Value() / kPointerSize));
+ VirtualObject* obj = new (zone()) VirtualObject(
+ node->id(), state, zone(), size.Value() / kPointerSize, false);
+ state->SetVirtualObject(alias, obj);
} else {
- virtual_states_[node->id()]->SetVirtualObject(
- aliases_[node->id()], new (zone()) VirtualObject(node->id(), zone()));
+ state->SetVirtualObject(
+ alias, new (zone()) VirtualObject(node->id(), state, zone()));
}
- virtual_states_[node->id()]->LastChangedAt(node);
}
-
void EscapeAnalysis::ProcessFinishRegion(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kFinishRegion);
ForwardVirtualState(node);
Node* allocation = NodeProperties::GetValueInput(node, 0);
if (allocation->opcode() == IrOpcode::kAllocate) {
VirtualState* state = virtual_states_[node->id()];
- if (!state->VirtualObjectFromAlias(aliases_[node->id()])) {
- VirtualObject* vobj_alloc =
- state->VirtualObjectFromAlias(aliases_[allocation->id()]);
- DCHECK_NOT_NULL(vobj_alloc);
- state->SetVirtualObject(aliases_[node->id()], vobj_alloc);
- if (FLAG_trace_turbo_escape) {
- PrintF("Linked finish region node #%d to node #%d\n", node->id(),
- allocation->id());
- }
- state->LastChangedAt(node);
- }
+ VirtualObject* obj = state->VirtualObjectFromAlias(GetAlias(node->id()));
+ DCHECK_NOT_NULL(obj);
+ obj->SetInitialized();
}
}
-
Node* EscapeAnalysis::replacement(NodeId id) {
if (id >= replacements_.size()) return nullptr;
return replacements_[id];
}
-
Node* EscapeAnalysis::replacement(Node* node) {
return replacement(node->id());
}
-
bool EscapeAnalysis::SetReplacement(Node* node, Node* rep) {
bool changed = replacements_[node->id()] != rep;
replacements_[node->id()] = rep;
return changed;
}
-
bool EscapeAnalysis::UpdateReplacement(VirtualState* state, Node* node,
Node* rep) {
if (SetReplacement(node, rep)) {
- state->LastChangedAt(node);
- if (FLAG_trace_turbo_escape) {
- if (rep) {
- PrintF("Replacement of #%d is #%d (%s)\n", node->id(), rep->id(),
- rep->op()->mnemonic());
- } else {
- PrintF("Replacement of #%d cleared\n", node->id());
- }
+ if (rep) {
+ TRACE("Replacement of #%d is #%d (%s)\n", node->id(), rep->id(),
+ rep->op()->mnemonic());
+ } else {
+ TRACE("Replacement of #%d cleared\n", node->id());
}
return true;
}
return false;
}
-
Node* EscapeAnalysis::ResolveReplacement(Node* node) {
while (replacement(node)) {
node = replacement(node);
@@ -1131,12 +1205,10 @@ Node* EscapeAnalysis::ResolveReplacement(Node* node) {
return node;
}
-
Node* EscapeAnalysis::GetReplacement(Node* node) {
return GetReplacement(node->id());
}
-
Node* EscapeAnalysis::GetReplacement(NodeId id) {
Node* node = nullptr;
while (replacement(id)) {
@@ -1146,50 +1218,31 @@ Node* EscapeAnalysis::GetReplacement(NodeId id) {
return node;
}
-
bool EscapeAnalysis::IsVirtual(Node* node) {
- if (node->id() >= escape_status_.size()) {
+ if (node->id() >= status_analysis_.GetStatusVectorSize()) {
return false;
}
- return escape_status_.IsVirtual(node);
+ return status_analysis_.IsVirtual(node);
}
-
bool EscapeAnalysis::IsEscaped(Node* node) {
- if (node->id() >= escape_status_.size()) {
+ if (node->id() >= status_analysis_.GetStatusVectorSize()) {
return false;
}
- return escape_status_.IsEscaped(node);
+ return status_analysis_.IsEscaped(node);
}
-
bool EscapeAnalysis::SetEscaped(Node* node) {
- return escape_status_.SetEscaped(node);
+ return status_analysis_.SetEscaped(node);
}
-
VirtualObject* EscapeAnalysis::GetVirtualObject(Node* at, NodeId id) {
if (VirtualState* states = virtual_states_[at->id()]) {
- return states->VirtualObjectFromAlias(aliases_[id]);
+ return states->VirtualObjectFromAlias(GetAlias(id));
}
return nullptr;
}
-
-VirtualObject* EscapeAnalysis::ResolveVirtualObject(VirtualState* state,
- Node* node) {
- VirtualObject* obj = GetVirtualObject(state, ResolveReplacement(node));
- while (obj && replacement(obj->id())) {
- if (VirtualObject* next = GetVirtualObject(state, replacement(obj->id()))) {
- obj = next;
- } else {
- break;
- }
- }
- return obj;
-}
-
-
bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
DCHECK(IsVirtual(left) && IsVirtual(right));
left = ResolveReplacement(left);
@@ -1200,83 +1253,78 @@ bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
return false;
}
-
int EscapeAnalysis::OffsetFromAccess(Node* node) {
DCHECK(OpParameter<FieldAccess>(node).offset % kPointerSize == 0);
return OpParameter<FieldAccess>(node).offset / kPointerSize;
}
-
-void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* node,
+void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* load,
VirtualState* state) {
- if (FLAG_trace_turbo_escape) {
- PrintF("Load #%d from phi #%d", node->id(), from->id());
- }
+ TRACE("Load #%d from phi #%d", load->id(), from->id());
cache_->fields().clear();
- for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
- Node* input = NodeProperties::GetValueInput(node, i);
+ for (int i = 0; i < load->op()->ValueInputCount(); ++i) {
+ Node* input = NodeProperties::GetValueInput(load, i);
cache_->fields().push_back(input);
}
- cache_->LoadVirtualObjectsForFieldsFrom(state, aliases_);
+ cache_->LoadVirtualObjectsForFieldsFrom(state,
+ status_analysis_.GetAliasMap());
if (cache_->objects().size() == cache_->fields().size()) {
cache_->GetFields(offset);
if (cache_->fields().size() == cache_->objects().size()) {
- Node* rep = replacement(node);
+ Node* rep = replacement(load);
if (!rep || !IsEquivalentPhi(rep, cache_->fields())) {
int value_input_count = static_cast<int>(cache_->fields().size());
cache_->fields().push_back(NodeProperties::GetControlInput(from));
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, value_input_count),
value_input_count + 1, &cache_->fields().front());
- escape_status_.Resize();
- SetReplacement(node, phi);
- state->LastChangedAt(node);
- if (FLAG_trace_turbo_escape) {
- PrintF(" got phi created.\n");
- }
- } else if (FLAG_trace_turbo_escape) {
- PrintF(" has already phi #%d.\n", rep->id());
+ status_analysis_.ResizeStatusVector();
+ SetReplacement(load, phi);
+ TRACE(" got phi created.\n");
+ } else {
+ TRACE(" has already phi #%d.\n", rep->id());
}
- } else if (FLAG_trace_turbo_escape) {
- PrintF(" has incomplete field info.\n");
+ } else {
+ TRACE(" has incomplete field info.\n");
}
- } else if (FLAG_trace_turbo_escape) {
- PrintF(" has incomplete virtual object info.\n");
+ } else {
+ TRACE(" has incomplete virtual object info.\n");
}
}
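ProcessLoadFromPhi above gathers one field value per predecessor virtual object and only builds a replacement phi if every one of them is known. A hedged sketch of that gathering step, with Value and Fields as invented stand-ins for Node* and a virtual object's field vector; the real code then appends the phi's control input and materializes common()->Phi(kTagged, n) via graph()->NewNode.

#include <cstddef>
#include <cstdio>
#include <optional>
#include <vector>

struct Value { int id; };                       // stand-in for a value node
using Fields = std::vector<std::optional<Value>>;  // one object's fields

// Gather the field at `offset` from every predecessor; give up if any of
// them is unknown (the "incomplete field info" case in the trace output).
std::optional<std::vector<Value>> PhiInputsForField(
    const std::vector<Fields>& predecessors, size_t offset) {
  std::vector<Value> inputs;
  for (const Fields& fields : predecessors) {
    if (offset >= fields.size() || !fields[offset].has_value()) {
      return std::nullopt;  // no replacement phi for this load
    }
    inputs.push_back(*fields[offset]);
  }
  return inputs;
}

int main() {
  std::vector<Fields> preds = {{Value{10}}, {Value{20}}};
  auto inputs = PhiInputsForField(preds, 0);
  std::printf("complete=%d count=%zu\n", inputs.has_value(),
              inputs ? inputs->size() : 0u);
}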
-
void EscapeAnalysis::ProcessLoadField(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kLoadField);
ForwardVirtualState(node);
- Node* from = NodeProperties::GetValueInput(node, 0);
+ Node* from = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
VirtualState* state = virtual_states_[node->id()];
- if (VirtualObject* object = ResolveVirtualObject(state, from)) {
+ if (VirtualObject* object = GetVirtualObject(state, from)) {
int offset = OffsetFromAccess(node);
- if (!object->IsTracked()) return;
+ if (!object->IsTracked() ||
+ static_cast<size_t>(offset) >= object->field_count()) {
+ return;
+ }
Node* value = object->GetField(offset);
if (value) {
value = ResolveReplacement(value);
}
// Record that the load has this alias.
UpdateReplacement(state, node, value);
+ } else if (from->opcode() == IrOpcode::kPhi &&
+ OpParameter<FieldAccess>(node).offset % kPointerSize == 0) {
+ int offset = OffsetFromAccess(node);
+ // Only binary phis are supported for now.
+ ProcessLoadFromPhi(offset, from, node, state);
} else {
- if (from->opcode() == IrOpcode::kPhi &&
- OpParameter<FieldAccess>(node).offset % kPointerSize == 0) {
- int offset = OffsetFromAccess(node);
- // Only binary phis are supported for now.
- ProcessLoadFromPhi(offset, from, node, state);
- }
+ UpdateReplacement(state, node, nullptr);
}
}
-
void EscapeAnalysis::ProcessLoadElement(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kLoadElement);
ForwardVirtualState(node);
- Node* from = NodeProperties::GetValueInput(node, 0);
+ Node* from = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
VirtualState* state = virtual_states_[node->id()];
Node* index_node = node->InputAt(1);
NumberMatcher index(index_node);
@@ -1287,12 +1335,16 @@ void EscapeAnalysis::ProcessLoadElement(Node* node) {
ElementAccess access = OpParameter<ElementAccess>(node);
if (index.HasValue()) {
int offset = index.Value() + access.header_size / kPointerSize;
- if (VirtualObject* object = ResolveVirtualObject(state, from)) {
+ if (VirtualObject* object = GetVirtualObject(state, from)) {
CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
kPointerSizeLog2);
CHECK_EQ(access.header_size % kPointerSize, 0);
- if (!object->IsTracked()) return;
+ if (!object->IsTracked() ||
+ static_cast<size_t>(offset) >= object->field_count()) {
+ return;
+ }
+
Node* value = object->GetField(offset);
if (value) {
value = ResolveReplacement(value);
@@ -1303,43 +1355,42 @@ void EscapeAnalysis::ProcessLoadElement(Node* node) {
ElementAccess access = OpParameter<ElementAccess>(node);
int offset = index.Value() + access.header_size / kPointerSize;
ProcessLoadFromPhi(offset, from, node, state);
+ } else {
+ UpdateReplacement(state, node, nullptr);
}
} else {
// We have a load from a non-const index, cannot eliminate object.
if (SetEscaped(from)) {
- if (FLAG_trace_turbo_escape) {
- PrintF(
- "Setting #%d (%s) to escaped because store element #%d to "
- "non-const "
- "index #%d (%s)\n",
- from->id(), from->op()->mnemonic(), node->id(), index_node->id(),
- index_node->op()->mnemonic());
- }
+ TRACE(
+ "Setting #%d (%s) to escaped because load element #%d from non-const "
+ "index #%d (%s)\n",
+ from->id(), from->op()->mnemonic(), node->id(), index_node->id(),
+ index_node->op()->mnemonic());
}
}
}
-
void EscapeAnalysis::ProcessStoreField(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kStoreField);
ForwardVirtualState(node);
- Node* to = NodeProperties::GetValueInput(node, 0);
- Node* val = NodeProperties::GetValueInput(node, 1);
+ Node* to = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
VirtualState* state = virtual_states_[node->id()];
- if (VirtualObject* obj = ResolveVirtualObject(state, to)) {
- if (!obj->IsTracked()) return;
- int offset = OffsetFromAccess(node);
- if (obj->SetField(offset, ResolveReplacement(val))) {
- state->LastChangedAt(node);
+ VirtualObject* obj = GetVirtualObject(state, to);
+ int offset = OffsetFromAccess(node);
+ if (obj && obj->IsTracked() &&
+ static_cast<size_t>(offset) < obj->field_count()) {
+ Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 1));
+ if (obj->GetField(offset) != val) {
+ obj = CopyForModificationAt(obj, state, node);
+ obj->SetField(offset, val);
}
}
}
-
void EscapeAnalysis::ProcessStoreElement(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kStoreElement);
ForwardVirtualState(node);
- Node* to = NodeProperties::GetValueInput(node, 0);
+ Node* to = ResolveReplacement(NodeProperties::GetValueInput(node, 0));
Node* index_node = node->InputAt(1);
NumberMatcher index(index_node);
DCHECK(index_node->opcode() != IrOpcode::kInt32Constant &&
@@ -1347,41 +1398,47 @@ void EscapeAnalysis::ProcessStoreElement(Node* node) {
index_node->opcode() != IrOpcode::kFloat32Constant &&
index_node->opcode() != IrOpcode::kFloat64Constant);
ElementAccess access = OpParameter<ElementAccess>(node);
- Node* val = NodeProperties::GetValueInput(node, 2);
+ VirtualState* state = virtual_states_[node->id()];
+ VirtualObject* obj = GetVirtualObject(state, to);
if (index.HasValue()) {
int offset = index.Value() + access.header_size / kPointerSize;
- VirtualState* states = virtual_states_[node->id()];
- if (VirtualObject* obj = ResolveVirtualObject(states, to)) {
- if (!obj->IsTracked()) return;
+ if (obj && obj->IsTracked() &&
+ static_cast<size_t>(offset) < obj->field_count()) {
CHECK_GE(ElementSizeLog2Of(access.machine_type.representation()),
kPointerSizeLog2);
CHECK_EQ(access.header_size % kPointerSize, 0);
- if (obj->SetField(offset, ResolveReplacement(val))) {
- states->LastChangedAt(node);
+ Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 2));
+ if (obj->GetField(offset) != val) {
+ obj = CopyForModificationAt(obj, state, node);
+ obj->SetField(offset, val);
}
}
} else {
// We have a store to a non-const index, cannot eliminate object.
if (SetEscaped(to)) {
- if (FLAG_trace_turbo_escape) {
- PrintF(
- "Setting #%d (%s) to escaped because store element #%d to "
- "non-const "
- "index #%d (%s)\n",
- to->id(), to->op()->mnemonic(), node->id(), index_node->id(),
- index_node->op()->mnemonic());
+ TRACE(
+ "Setting #%d (%s) to escaped because store element #%d to non-const "
+ "index #%d (%s)\n",
+ to->id(), to->op()->mnemonic(), node->id(), index_node->id(),
+ index_node->op()->mnemonic());
+ }
+ if (obj && obj->IsTracked()) {
+ if (!obj->AllFieldsClear()) {
+ obj = CopyForModificationAt(obj, state, node);
+ obj->ClearAllFields();
+ TRACE("Cleared all fields of @%d:#%d\n", GetAlias(obj->id()),
+ obj->id());
}
}
}
}
-
Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
if ((node->opcode() == IrOpcode::kFinishRegion ||
node->opcode() == IrOpcode::kAllocate) &&
IsVirtual(node)) {
- if (VirtualObject* vobj =
- ResolveVirtualObject(virtual_states_[effect->id()], node)) {
+ if (VirtualObject* vobj = GetVirtualObject(virtual_states_[effect->id()],
+ ResolveReplacement(node))) {
if (Node* object_state = vobj->GetObjectState()) {
return object_state;
} else {
@@ -1396,13 +1453,11 @@ Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
graph()->NewNode(common()->ObjectState(input_count, vobj->id()),
input_count, &cache_->fields().front());
vobj->SetObjectState(new_object_state);
- if (FLAG_trace_turbo_escape) {
- PrintF(
- "Creating object state #%d for vobj %p (from node #%d) at effect "
- "#%d\n",
- new_object_state->id(), static_cast<void*>(vobj), node->id(),
- effect->id());
- }
+ TRACE(
+ "Creating object state #%d for vobj %p (from node #%d) at effect "
+ "#%d\n",
+ new_object_state->id(), static_cast<void*>(vobj), node->id(),
+ effect->id());
// Now fix uses of other objects.
for (size_t i = 0; i < vobj->field_count(); ++i) {
if (Node* field = vobj->GetField(i)) {
@@ -1420,7 +1475,6 @@ Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
return nullptr;
}
-
void EscapeAnalysis::DebugPrintObject(VirtualObject* object, Alias alias) {
PrintF(" Alias @%d: Object #%d with %zu fields\n", alias, object->id(),
object->field_count());
@@ -1431,9 +1485,8 @@ void EscapeAnalysis::DebugPrintObject(VirtualObject* object, Alias alias) {
}
}
-
void EscapeAnalysis::DebugPrintState(VirtualState* state) {
- PrintF("Dumping object state %p\n", static_cast<void*>(state));
+ PrintF("Dumping virtual state %p\n", static_cast<void*>(state));
for (Alias alias = 0; alias < AliasCount(); ++alias) {
if (VirtualObject* object = state->VirtualObjectFromAlias(alias)) {
DebugPrintObject(object, alias);
@@ -1441,7 +1494,6 @@ void EscapeAnalysis::DebugPrintState(VirtualState* state) {
}
}
-
void EscapeAnalysis::DebugPrint() {
ZoneVector<VirtualState*> object_states(zone());
for (NodeId id = 0; id < virtual_states_.size(); id++) {
@@ -1457,15 +1509,26 @@ void EscapeAnalysis::DebugPrint() {
}
}
-
VirtualObject* EscapeAnalysis::GetVirtualObject(VirtualState* state,
Node* node) {
- if (node->id() >= aliases_.size()) return nullptr;
- Alias alias = aliases_[node->id()];
+ if (node->id() >= status_analysis_.GetAliasMap().size()) return nullptr;
+ Alias alias = GetAlias(node->id());
if (alias >= state->size()) return nullptr;
return state->VirtualObjectFromAlias(alias);
}
+bool EscapeAnalysis::ExistsVirtualAllocate() {
+ for (size_t id = 0; id < status_analysis_.GetAliasMap().size(); ++id) {
+ Alias alias = GetAlias(static_cast<NodeId>(id));
+ if (alias < EscapeStatusAnalysis::kUntrackable) {
+ if (status_analysis_.IsVirtual(static_cast<int>(id))) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
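The CopyForModificationAt pair above amounts to a copy-on-write discipline: virtual states are shared along straight-line effect chains and duplicated only at the first mutation by a node that does not own them. A minimal stand-alone sketch of the idea, with State as an invented stand-in for VirtualState and an integer owner tag in place of the owning Node*.

#include <cstdio>
#include <memory>
#include <vector>

struct State {
  int owner;                // id of the node allowed to mutate in place
  std::vector<int> fields;
};

// Mirrors the shape of EscapeAnalysis::CopyForModificationAt: copy only
// if the state is owned by some other node.
State* ForModificationAt(std::vector<std::unique_ptr<State>>& arena,
                         State* state, int node_id) {
  if (state->owner != node_id) {
    arena.push_back(std::make_unique<State>(*state));  // duplicate on write
    arena.back()->owner = node_id;
    return arena.back().get();
  }
  return state;  // already ours: mutate in place
}

int main() {
  std::vector<std::unique_ptr<State>> arena;
  arena.push_back(std::make_unique<State>(State{0, {1, 2, 3}}));
  State* shared = arena[0].get();
  State* mine = ForModificationAt(arena, shared, 7);  // first write at node 7
  mine->fields[0] = 42;
  std::printf("shared=%d mine=%d\n", shared->fields[0], mine->fields[0]);
}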
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index ea7b11ecdf..c3f236d556 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -18,34 +18,63 @@ class EscapeAnalysis;
class VirtualState;
class VirtualObject;
-
// EscapeStatusAnalysis determines for each allocation whether it escapes.
class EscapeStatusAnalysis {
public:
+ typedef NodeId Alias;
~EscapeStatusAnalysis();
- enum EscapeStatusFlag {
+ enum Status {
kUnknown = 0u,
kTracked = 1u << 0,
kEscaped = 1u << 1,
kOnStack = 1u << 2,
kVisited = 1u << 3,
+    // A node is dangling if it is a load of some kind and does not have
+    // an effect successor.
+ kDanglingComputed = 1u << 4,
+ kDangling = 1u << 5,
+    // A node is an effect branch point if it has more than one non-dangling
+    // effect successor.
+ kBranchPointComputed = 1u << 6,
+ kBranchPoint = 1u << 7,
+ kInQueue = 1u << 8
};
- typedef base::Flags<EscapeStatusFlag, unsigned char> EscapeStatusFlags;
+ typedef base::Flags<Status, uint16_t> StatusFlags;
- void Run();
+ void RunStatusAnalysis();
bool IsVirtual(Node* node);
bool IsEscaped(Node* node);
bool IsAllocation(Node* node);
- void DebugPrint();
+ bool IsInQueue(NodeId id);
+ void SetInQueue(NodeId id, bool on_stack);
- friend class EscapeAnalysis;
+ void DebugPrint();
- private:
EscapeStatusAnalysis(EscapeAnalysis* object_analysis, Graph* graph,
Zone* zone);
+ void EnqueueForStatusAnalysis(Node* node);
+ bool SetEscaped(Node* node);
+ bool IsEffectBranchPoint(Node* node);
+ bool IsDanglingEffectNode(Node* node);
+ void ResizeStatusVector();
+ size_t GetStatusVectorSize();
+ bool IsVirtual(NodeId id);
+
+ Graph* graph() const { return graph_; }
+ Zone* zone() const { return zone_; }
+ void AssignAliases();
+ Alias GetAlias(NodeId id) const { return aliases_[id]; }
+ const ZoneVector<Alias>& GetAliasMap() const { return aliases_; }
+ Alias AliasCount() const { return next_free_alias_; }
+ static const Alias kNotReachable;
+ static const Alias kUntrackable;
+
+ bool IsNotReachable(Node* node);
+
+ private:
void Process(Node* node);
void ProcessAllocate(Node* node);
void ProcessFinishRegion(Node* node);
@@ -57,38 +86,35 @@ class EscapeStatusAnalysis {
bool CheckUsesForEscape(Node* node, Node* rep, bool phi_escaping = false);
void RevisitUses(Node* node);
void RevisitInputs(Node* node);
- bool SetEscaped(Node* node);
+
+ Alias NextAlias() { return next_free_alias_++; }
+
bool HasEntry(Node* node);
- void Resize();
- size_t size();
- bool IsAllocationPhi(Node* node);
- Graph* graph() const { return graph_; }
- Zone* zone() const { return zone_; }
+ bool IsAllocationPhi(Node* node);
+ ZoneVector<Node*> stack_;
EscapeAnalysis* object_analysis_;
Graph* const graph_;
Zone* const zone_;
- ZoneVector<EscapeStatusFlags> status_;
- ZoneDeque<Node*> queue_;
+ ZoneVector<StatusFlags> status_;
+ Alias next_free_alias_;
+ ZoneVector<Node*> status_stack_;
+ ZoneVector<Alias> aliases_;
DISALLOW_COPY_AND_ASSIGN(EscapeStatusAnalysis);
};
-
-DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::EscapeStatusFlags)
-
+DEFINE_OPERATORS_FOR_FLAGS(EscapeStatusAnalysis::StatusFlags)
// Forward Declaration.
class MergeCache;
-
// EscapeAnalysis simulates stores to determine the values of loads if
// an object is virtual and can be eliminated.
class EscapeAnalysis {
public:
- typedef NodeId Alias;
-
+ using Alias = EscapeStatusAnalysis::Alias;
EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
~EscapeAnalysis();
@@ -99,10 +125,10 @@ class EscapeAnalysis {
bool IsEscaped(Node* node);
bool CompareVirtualObjects(Node* left, Node* right);
Node* GetOrCreateObjectState(Node* effect, Node* node);
+ bool ExistsVirtualAllocate();
private:
void RunObjectAnalysis();
- void AssignAliases();
bool Process(Node* node);
void ProcessLoadField(Node* node);
void ProcessStoreField(Node* node);
@@ -118,13 +144,11 @@ class EscapeAnalysis {
VirtualState* states);
void ForwardVirtualState(Node* node);
- bool IsEffectBranchPoint(Node* node);
- bool IsDanglingEffectNode(Node* node);
int OffsetFromAccess(Node* node);
-
+ VirtualState* CopyForModificationAt(VirtualState* state, Node* node);
+ VirtualObject* CopyForModificationAt(VirtualObject* obj, VirtualState* state,
+ Node* node);
VirtualObject* GetVirtualObject(Node* at, NodeId id);
- VirtualObject* ResolveVirtualObject(VirtualState* state, Node* node);
- Node* GetReplacementIfSame(ZoneVector<VirtualObject*>& objs);
bool SetEscaped(Node* node);
Node* replacement(NodeId id);
@@ -140,24 +164,26 @@ class EscapeAnalysis {
void DebugPrintState(VirtualState* state);
void DebugPrintObject(VirtualObject* state, Alias id);
- Alias NextAlias() { return next_free_alias_++; }
- Alias AliasCount() const { return next_free_alias_; }
-
- Graph* graph() const { return graph_; }
+ Graph* graph() const { return status_analysis_.graph(); }
+ Zone* zone() const { return status_analysis_.zone(); }
CommonOperatorBuilder* common() const { return common_; }
- Zone* zone() const { return zone_; }
+ bool IsEffectBranchPoint(Node* node) {
+ return status_analysis_.IsEffectBranchPoint(node);
+ }
+ bool IsDanglingEffectNode(Node* node) {
+ return status_analysis_.IsDanglingEffectNode(node);
+ }
+ bool IsNotReachable(Node* node) {
+ return status_analysis_.IsNotReachable(node);
+ }
+ Alias GetAlias(NodeId id) const { return status_analysis_.GetAlias(id); }
+ Alias AliasCount() const { return status_analysis_.AliasCount(); }
- static const Alias kNotReachable;
- static const Alias kUntrackable;
- Graph* const graph_;
+ EscapeStatusAnalysis status_analysis_;
CommonOperatorBuilder* const common_;
- Zone* const zone_;
ZoneVector<VirtualState*> virtual_states_;
ZoneVector<Node*> replacements_;
- EscapeStatusAnalysis escape_status_;
MergeCache* cache_;
- ZoneVector<Alias> aliases_;
- Alias next_free_alias_;
DISALLOW_COPY_AND_ASSIGN(EscapeAnalysis);
};
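The enlarged StatusFlags above double as a memo table: each *Computed bit records that a property has been evaluated, and the paired bit stores the answer, so IsDanglingEffectNode and IsEffectBranchPoint run their use-edge loops at most once per node. A minimal illustration of the pattern, using a raw uint16_t vector rather than the real base::Flags wrapper.

#include <cstdint>
#include <cstdio>
#include <vector>

// Two bits per property: one says "computed", the other holds the answer.
constexpr uint16_t kDanglingComputed = 1u << 4;
constexpr uint16_t kDangling = 1u << 5;

bool IsDangling(std::vector<uint16_t>& status, int id, bool (*compute)(int)) {
  if (status[id] & kDanglingComputed) return (status[id] & kDangling) != 0;
  bool result = compute(id);
  status[id] |= kDanglingComputed | (result ? kDangling : 0u);
  return result;
}

int main() {
  std::vector<uint16_t> status(4, 0);
  bool (*expensive)(int) = [](int id) {
    std::printf("computing #%d\n", id);
    return id % 2 == 1;
  };
  std::printf("%d\n", IsDangling(status, 1, expensive));  // computes once
  std::printf("%d\n", IsDangling(status, 1, expensive));  // answered from bits
}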
diff --git a/deps/v8/src/compiler/fast-accessor-assembler.cc b/deps/v8/src/compiler/fast-accessor-assembler.cc
index 09d513fdc6..518003b2ee 100644
--- a/deps/v8/src/compiler/fast-accessor-assembler.cc
+++ b/deps/v8/src/compiler/fast-accessor-assembler.cc
@@ -5,6 +5,7 @@
#include "src/compiler/fast-accessor-assembler.h"
#include "src/base/logging.h"
+#include "src/code-stubs.h" // For CallApiFunctionStub.
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
@@ -166,6 +167,46 @@ void FastAccessorAssembler::CheckNotZeroOrJump(ValueId value_id,
assembler_->Bind(&pass);
}
+FastAccessorAssembler::ValueId FastAccessorAssembler::Call(
+ FunctionCallback callback_function, ValueId arg) {
+ CHECK_EQ(kBuilding, state_);
+
+ // Create API function stub.
+ CallApiFunctionStub stub(assembler_->isolate(), true);
+
+ // Wrap the FunctionCallback in an ExternalReference.
+ ApiFunction callback_api_function(FUNCTION_ADDR(callback_function));
+ ExternalReference callback(&callback_api_function,
+ ExternalReference::DIRECT_API_CALL,
+ assembler_->isolate());
+
+  // The stub has 5 parameters, plus kStackParam (here: 1) parameters to pass
+ // through to the callback.
+ // See: ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType
+ static const int kStackParam = 1;
+ Node* args[] = {
+ // Stub/register parameters:
+ assembler_->Parameter(0), /* receiver (use accessor's) */
+ assembler_->UndefinedConstant(), /* call_data (undefined) */
+ assembler_->NullConstant(), /* holder (null) */
+ assembler_->ExternalConstant(callback), /* API callback function */
+ assembler_->IntPtrConstant(kStackParam), /* # JS arguments */
+
+ // kStackParam stack parameter(s):
+ FromId(arg),
+
+ // Context parameter. (See Linkage::GetStubCallDescriptor.)
+ assembler_->UndefinedConstant()};
+ CHECK_EQ(5 + kStackParam + 1, arraysize(args));
+
+ Node* call = assembler_->CallN(
+ Linkage::GetStubCallDescriptor(
+ assembler_->isolate(), zone(), stub.GetCallInterfaceDescriptor(),
+ kStackParam + stub.GetStackParameterCount(),
+ CallDescriptor::kNoFlags),
+ assembler_->HeapConstant(stub.GetCode()), args);
+ return FromRaw(call);
+}
MaybeHandle<Code> FastAccessorAssembler::Build() {
CHECK_EQ(kBuilding, state_);
@@ -176,9 +217,10 @@ MaybeHandle<Code> FastAccessorAssembler::Build() {
// Export the schedule and call the compiler.
Schedule* schedule = assembler_->Export();
+ Code::Flags flags = Code::ComputeFlags(Code::STUB);
MaybeHandle<Code> code = Pipeline::GenerateCodeForCodeStub(
assembler_->isolate(), assembler_->call_descriptor(), assembler_->graph(),
- schedule, Code::STUB, "FastAccessorAssembler");
+ schedule, flags, "FastAccessorAssembler");
// Update state & return.
state_ = !code.is_null() ? kBuilt : kError;
diff --git a/deps/v8/src/compiler/fast-accessor-assembler.h b/deps/v8/src/compiler/fast-accessor-assembler.h
index a9df3f0749..1cb751d026 100644
--- a/deps/v8/src/compiler/fast-accessor-assembler.h
+++ b/deps/v8/src/compiler/fast-accessor-assembler.h
@@ -48,6 +48,7 @@ class FastAccessorAssembler {
public:
typedef v8::experimental::FastAccessorBuilder::ValueId ValueId;
typedef v8::experimental::FastAccessorBuilder::LabelId LabelId;
+ typedef v8::FunctionCallback FunctionCallback;
explicit FastAccessorAssembler(Isolate* isolate);
~FastAccessorAssembler();
@@ -63,15 +64,13 @@ class FastAccessorAssembler {
void ReturnValue(ValueId value_id);
void CheckFlagSetOrReturnNull(ValueId value_id, int mask);
void CheckNotZeroOrReturnNull(ValueId value_id);
-
- // TODO(vogelheim): Implement a C++ callback.
- // void CheckNotNullOrCallback(ValueId value_id, ..c++-callback type...,
- // ValueId arg1, ValueId arg2, ...);
-
LabelId MakeLabel();
void SetLabel(LabelId label_id);
void CheckNotZeroOrJump(ValueId value_id, LabelId label_id);
+ // C++ callback.
+ ValueId Call(FunctionCallback callback, ValueId arg);
+
// Assemble the code.
MaybeHandle<Code> Build();
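For orientation, v8::FunctionCallback is the ordinary API-callback type, void (*)(const FunctionCallbackInfo<Value>&), so a callback handed to Call looks like any other embedder callback. The sketch below is hypothetical embedder code: MyAccessorCallback is an invented name, and wiring it up through the experimental builder API is not shown in this diff.

#include "v8.h"  // the public V8 API header

void MyAccessorCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  // Exactly one JS argument is passed through (kStackParam == 1 above).
  if (info.Length() >= 1 && info[0]->IsInt32()) {
    int32_t value = info[0].As<v8::Int32>()->Value();
    info.GetReturnValue().Set(v8::Integer::New(isolate, value + 1));
  }
}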
diff --git a/deps/v8/src/compiler/frame-states.h b/deps/v8/src/compiler/frame-states.h
index ddb55c35d2..60ff9b55fa 100644
--- a/deps/v8/src/compiler/frame-states.h
+++ b/deps/v8/src/compiler/frame-states.h
@@ -83,31 +83,20 @@ enum class FrameStateType {
};
-enum ContextCallingMode {
- CALL_MAINTAINS_NATIVE_CONTEXT,
- CALL_CHANGES_NATIVE_CONTEXT
-};
-
-
class FrameStateFunctionInfo {
public:
FrameStateFunctionInfo(FrameStateType type, int parameter_count,
int local_count,
- Handle<SharedFunctionInfo> shared_info,
- ContextCallingMode context_calling_mode)
+ Handle<SharedFunctionInfo> shared_info)
: type_(type),
parameter_count_(parameter_count),
local_count_(local_count),
- shared_info_(shared_info),
- context_calling_mode_(context_calling_mode) {}
+ shared_info_(shared_info) {}
int local_count() const { return local_count_; }
int parameter_count() const { return parameter_count_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
FrameStateType type() const { return type_; }
- ContextCallingMode context_calling_mode() const {
- return context_calling_mode_;
- }
static bool IsJSFunctionType(FrameStateType type) {
return type == FrameStateType::kJavaScriptFunction ||
@@ -119,7 +108,6 @@ class FrameStateFunctionInfo {
int const parameter_count_;
int const local_count_;
Handle<SharedFunctionInfo> const shared_info_;
- ContextCallingMode context_calling_mode_;
};
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 72f756b0dc..011a0f02d5 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -34,19 +34,10 @@ class CallDescriptor;
// determined after register allocation once the number of used callee-saved
// register is certain.
//
-// Every pointer in a frame has a slot id. On 32-bit platforms, doubles consume
-// two slots.
-//
-// Stack slot indices >= 0 access the callee stack with slot 0 corresponding to
-// the callee's saved return address and 1 corresponding to the saved frame
-// pointer. Some frames have additional information stored in the fixed header,
-// for example JSFunctions store the function context and marker in the fixed
-// header, with slot index 2 corresponding to the current function context and 3
-// corresponding to the frame marker/JSFunction. The frame region immediately
-// below the fixed header contains spill slots starting at 4 for JsFunctions.
-// The callee-saved frame region below that starts at 4+spill_slot_count_.
-// Callee stack slots corresponding to parameters are accessible through
-// negative slot ids.
+// The frame region immediately below the fixed header contains spill slots
+// starting at slot 4 for JSFunctions. The callee-saved frame region below that
+// starts at 4+spill_slot_count_. Callee stack slots corresponding to
+// parameters are accessible through negative slot ids.
//
// Every slot of a caller or callee frame is accessible by the register
// allocator and gap resolver with a SpillSlotOperand containing its
@@ -76,13 +67,13 @@ class CallDescriptor;
// |- - - - - - - - -| | frame slots
// ... | ... | Spill slots (slot >= 0)
// |- - - - - - - - -| | |
-// m+4 | spill m | v |
+// m+3 | spill m | v |
// +-----------------+---- |
-// m+5 | callee-saved 1 | ^ |
+// m+4 | callee-saved 1 | ^ |
// |- - - - - - - - -| | |
// | ... | Callee-saved |
// |- - - - - - - - -| | |
-// m+r+4 | callee-saved r | v v
+// m+r+3 | callee-saved r | v v
// -----+-----------------+----- <-- stack ptr -------------
//
class Frame : public ZoneObject {
@@ -90,16 +81,6 @@ class Frame : public ZoneObject {
explicit Frame(int fixed_frame_size_in_slots,
const CallDescriptor* descriptor);
- static int FPOffsetToSlot(int frame_offset) {
- return StandardFrameConstants::kFixedSlotCountAboveFp - 1 -
- frame_offset / kPointerSize;
- }
-
- static int SlotToFPOffset(int slot) {
- return (StandardFrameConstants::kFixedSlotCountAboveFp - 1 - slot) *
- kPointerSize;
- }
-
inline bool needs_frame() const { return needs_frame_; }
inline void MarkNeedsFrame() { needs_frame_ = true; }
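The two removed helpers were pure arithmetic between FP-relative byte offsets and slot ids; judging by the include added in code-generator-ia32.cc further below, the conversion now lives in src/frames.h as FPOffsetToFrameSlot. A sketch of the same arithmetic, with illustrative stand-in constants (two fixed slots above FP, 8-byte pointers).

#include <cstdio>

constexpr int kFixedSlotCountAboveFp = 2;  // saved FP + return address
constexpr int kPointerSize = 8;            // stand-in for a 64-bit target

constexpr int FPOffsetToSlot(int frame_offset) {
  return kFixedSlotCountAboveFp - 1 - frame_offset / kPointerSize;
}

constexpr int SlotToFPOffset(int slot) {
  return (kFixedSlotCountAboveFp - 1 - slot) * kPointerSize;
}

int main() {
  // Slot 0 is the saved return address, slot 1 the saved frame pointer.
  std::printf("offset of slot 0: %d\n", SlotToFPOffset(0));     // +8
  std::printf("slot at [fp - 16]: %d\n", FPOffsetToSlot(-16));  // 3
  static_assert(FPOffsetToSlot(SlotToFPOffset(5)) == 5, "round trip");
}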
diff --git a/deps/v8/src/compiler/graph-trimmer.cc b/deps/v8/src/compiler/graph-trimmer.cc
index 5fae425e1e..75071c68b3 100644
--- a/deps/v8/src/compiler/graph-trimmer.cc
+++ b/deps/v8/src/compiler/graph-trimmer.cc
@@ -24,7 +24,8 @@ void GraphTrimmer::TrimGraph() {
MarkAsLive(graph()->end());
// Compute transitive closure of live nodes.
for (size_t i = 0; i < live_.size(); ++i) {
- for (Node* const input : live_[i]->inputs()) MarkAsLive(input);
+ Node* const live = live_[i];
+ for (Node* const input : live->inputs()) MarkAsLive(input);
}
// Remove dead->live edges.
for (Node* const live : live_) {
diff --git a/deps/v8/src/compiler/graph-trimmer.h b/deps/v8/src/compiler/graph-trimmer.h
index d8258becc8..98d335a44d 100644
--- a/deps/v8/src/compiler/graph-trimmer.h
+++ b/deps/v8/src/compiler/graph-trimmer.h
@@ -28,14 +28,18 @@ class GraphTrimmer final {
// or any of the roots in the sequence [{begin},{end}[.
template <typename ForwardIterator>
void TrimGraph(ForwardIterator begin, ForwardIterator end) {
- while (begin != end) MarkAsLive(*begin++);
+ while (begin != end) {
+ Node* const node = *begin++;
+ if (!node->IsDead()) MarkAsLive(node);
+ }
TrimGraph();
}
private:
V8_INLINE bool IsLive(Node* const node) { return is_live_.Get(node); }
V8_INLINE void MarkAsLive(Node* const node) {
- if (!node->IsDead() && !IsLive(node)) {
+ DCHECK(!node->IsDead());
+ if (!IsLive(node)) {
is_live_.Set(node, true);
live_.push_back(node);
}
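Taken together, the two trimmer hunks move the dead-node filter to the entry point: TrimGraph(begin, end) skips dead roots once, and MarkAsLive now asserts liveness instead of silently ignoring dead nodes. A compact sketch of the resulting shape over an integer adjacency list, with TestGraph and ComputeLive as invented stand-ins; note the explicit copy out of the growing live vector, mirroring the graph-trimmer.cc fix above.

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

struct TestGraph {
  std::vector<std::vector<int>> inputs;  // inputs[n]: nodes n depends on
  std::vector<bool> dead;
};

std::vector<int> ComputeLive(const TestGraph& g, const std::vector<int>& roots) {
  std::vector<bool> is_live(g.inputs.size(), false);
  std::vector<int> live;
  auto mark_as_live = [&](int n) {
    assert(!g.dead[n]);  // the check that moved into a DCHECK
    if (!is_live[n]) {
      is_live[n] = true;
      live.push_back(n);
    }
  };
  for (int root : roots) {
    if (!g.dead[root]) mark_as_live(root);  // roots are filtered once, here
  }
  // Transitive closure; `live` grows while we iterate, hence the index loop.
  for (size_t i = 0; i < live.size(); ++i) {
    int node = live[i];  // copy first: push_back may reallocate the vector
    for (int input : g.inputs[node]) mark_as_live(input);
  }
  return live;
}

int main() {
  TestGraph g{{{}, {0}, {1}, {}}, {false, false, false, true}};
  std::vector<int> live = ComputeLive(g, {2, 3});  // dead root 3 is skipped
  std::printf("%zu live nodes\n", live.size());    // 3: nodes 2, 1, 0
}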
diff --git a/deps/v8/src/compiler/graph.cc b/deps/v8/src/compiler/graph.cc
index 3d4d6da89c..ba69617bd2 100644
--- a/deps/v8/src/compiler/graph.cc
+++ b/deps/v8/src/compiler/graph.cc
@@ -42,17 +42,15 @@ void Graph::RemoveDecorator(GraphDecorator* decorator) {
decorators_.erase(it);
}
-
-Node* Graph::NewNode(const Operator* op, int input_count, Node** inputs,
+Node* Graph::NewNode(const Operator* op, int input_count, Node* const* inputs,
bool incomplete) {
Node* node = NewNodeUnchecked(op, input_count, inputs, incomplete);
Verifier::VerifyNode(node);
return node;
}
-
Node* Graph::NewNodeUnchecked(const Operator* op, int input_count,
- Node** inputs, bool incomplete) {
+ Node* const* inputs, bool incomplete) {
Node* const node =
Node::New(zone(), NextNodeId(), op, input_count, inputs, incomplete);
Decorate(node);
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index b53c7fd308..958a15d282 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -34,16 +34,16 @@ class Graph : public ZoneObject {
explicit Graph(Zone* zone);
// Base implementation used by all factory methods.
- Node* NewNodeUnchecked(const Operator* op, int input_count, Node** inputs,
- bool incomplete = false);
+ Node* NewNodeUnchecked(const Operator* op, int input_count,
+ Node* const* inputs, bool incomplete = false);
// Factory that checks the input count.
- Node* NewNode(const Operator* op, int input_count, Node** inputs,
+ Node* NewNode(const Operator* op, int input_count, Node* const* inputs,
bool incomplete = false);
// Factories for nodes with static input counts.
Node* NewNode(const Operator* op) {
- return NewNode(op, 0, static_cast<Node**>(nullptr));
+ return NewNode(op, 0, static_cast<Node* const*>(nullptr));
}
Node* NewNode(const Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
Node* NewNode(const Operator* op, Node* n1, Node* n2) {
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index f63bc22e43..1f61af8abf 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -9,6 +9,7 @@
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/frames.h"
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/frames-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"
@@ -56,7 +57,7 @@ class IA32OperandConverter : public InstructionOperandConverter {
Operand ToMaterializableOperand(int materializable_offset) {
FrameOffset offset = frame_access_state()->GetFrameOffset(
- Frame::FPOffsetToSlot(materializable_offset));
+ FPOffsetToFrameSlot(materializable_offset));
return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
}
@@ -241,15 +242,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
- if (mode_ > RecordWriteMode::kValueIsMap) {
- __ CheckPageFlag(value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingMask, zero,
- exit());
- }
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ exit());
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+ : OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- EMIT_REMEMBERED_SET, save_fp_mode);
+ remembered_set_action, save_fp_mode);
__ lea(scratch1_, operand_);
__ CallStub(&stub);
}
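The hunk above decouples the two checks: the page-flag test on the value is now unconditional, while the remembered-set update becomes conditional on the write mode. A hedged restatement of that decision in plain C++ (the enum values mirror the ones in the diff; the mapping is the point, not the names):

    enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
    enum RememberedSetAction { OMIT_REMEMBERED_SET, EMIT_REMEMBERED_SET };

    // Only writes of values that may be arbitrary heap pointers need a
    // remembered-set entry; map writes can skip it.
    RememberedSetAction ActionFor(RecordWriteMode mode) {
      return mode > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                                 : OMIT_REMEMBERED_SET;
    }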
@@ -413,11 +415,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
- case kArchLazyBailout: {
- EnsureSpaceForLazyDeopt();
- RecordCallPosition(instr);
- break;
- }
case kArchPrepareCallCFunction: {
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
@@ -471,6 +468,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchFramePointer:
__ mov(i.OutputRegister(), ebp);
break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->frame()->needs_frame()) {
+ __ mov(i.OutputRegister(), Operand(ebp, 0));
+ } else {
+ __ mov(i.OutputRegister(), ebp);
+ }
+ break;
case kArchTruncateDoubleToI: {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
@@ -499,6 +503,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ bind(ool->exit());
break;
}
+ case kArchStackSlot: {
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ Register base;
+ if (offset.from_stack_pointer()) {
+ base = esp;
+ } else {
+ base = ebp;
+ }
+ __ lea(i.OutputRegister(), Operand(base, offset.offset()));
+ break;
+ }
case kIA32Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
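The kArchParentFramePointer case added above reads either the saved frame pointer or ebp itself, depending on whether the current activation built a frame. The same decision in plain C++, purely as illustration (dereferencing an arbitrary ebp value like this is only meaningful inside a real frame):

    #include <cstdint>

    // With a frame, the caller's fp is the saved value at [ebp + 0];
    // frameless code still holds the caller's fp in ebp directly.
    uintptr_t ParentFramePointer(uintptr_t ebp, bool needs_frame) {
      return needs_frame ? *reinterpret_cast<uintptr_t*>(ebp) : ebp;
    }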
@@ -514,17 +530,37 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kIA32Cmp:
- if (HasImmediateInput(instr, 1)) {
- __ cmp(i.InputOperand(0), i.InputImmediate(1));
+ if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ cmp(operand, i.InputImmediate(index));
+ } else {
+ __ cmp(operand, i.InputRegister(index));
+ }
} else {
- __ cmp(i.InputRegister(0), i.InputOperand(1));
+ if (HasImmediateInput(instr, 1)) {
+ __ cmp(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ cmp(i.InputRegister(0), i.InputOperand(1));
+ }
}
break;
case kIA32Test:
- if (HasImmediateInput(instr, 1)) {
- __ test(i.InputOperand(0), i.InputImmediate(1));
+ if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ test(operand, i.InputImmediate(index));
+ } else {
+ __ test(i.InputRegister(index), operand);
+ }
} else {
- __ test(i.InputRegister(0), i.InputOperand(1));
+ if (HasImmediateInput(instr, 1)) {
+ __ test(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ test(i.InputRegister(0), i.InputOperand(1));
+ }
}
break;
case kIA32Imul:
@@ -739,6 +775,21 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kSSEFloat64ToFloat32:
__ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
+ case kSSEFloat32ToInt32:
+ __ cvttss2si(i.OutputRegister(), i.InputOperand(0));
+ break;
+ case kSSEFloat32ToUint32: {
+ Label success;
+ __ cvttss2si(i.OutputRegister(), i.InputOperand(0));
+ __ test(i.OutputRegister(), i.OutputRegister());
+ __ j(positive, &success);
+ __ Move(kScratchDoubleReg, static_cast<float>(INT32_MIN));
+ __ addss(kScratchDoubleReg, i.InputOperand(0));
+ __ cvttss2si(i.OutputRegister(), kScratchDoubleReg);
+ __ or_(i.OutputRegister(), Immediate(0x80000000));
+ __ bind(&success);
+ break;
+ }
case kSSEFloat64ToInt32:
__ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
break;
@@ -749,6 +800,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ add(i.OutputRegister(), Immediate(0x80000000));
break;
}
+ case kSSEInt32ToFloat32:
+ __ cvtsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+ break;
+ case kSSEUint32ToFloat32: {
+ Register scratch0 = i.TempRegister(0);
+ Register scratch1 = i.TempRegister(1);
+ __ mov(scratch0, i.InputOperand(0));
+ __ Cvtui2ss(i.OutputDoubleRegister(), scratch0, scratch1);
+ break;
+ }
case kSSEInt32ToFloat64:
__ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
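The kSSEFloat32ToUint32 sequence above works around cvttss2si producing only signed 32-bit results: inputs below 2^31 convert directly, while larger inputs are first biased down by 2^31, converted, and then have the high bit restored with an OR. A portable sketch of the same arithmetic, assuming a finite input in [0, 2^32) since out-of-range behavior is not modeled here:

    #include <cstdint>

    uint32_t TruncateFloat32ToUint32(float input) {
      // Fast path: the value fits in a signed int32 (the "j positive" case).
      if (input < 2147483648.0f) {  // 2^31, exactly representable as float
        return static_cast<uint32_t>(static_cast<int32_t>(input));
      }
      // Slow path: bias by -2^31, convert, then restore the high bit.
      float biased = input + static_cast<float>(INT32_MIN);
      return static_cast<uint32_t>(static_cast<int32_t>(biased)) | 0x80000000u;
    }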
@@ -1441,8 +1502,6 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- // TODO(titzer): cannot address target function == local #-1
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 816487db8c..61fd035403 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -58,8 +58,12 @@ namespace compiler {
V(SSEFloat64Round) \
V(SSEFloat32ToFloat64) \
V(SSEFloat64ToFloat32) \
+ V(SSEFloat32ToInt32) \
+ V(SSEFloat32ToUint32) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
+ V(SSEInt32ToFloat32) \
+ V(SSEUint32ToFloat32) \
V(SSEInt32ToFloat64) \
V(SSEUint32ToFloat64) \
V(SSEFloat64ExtractLowWord32) \
diff --git a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
index 0a8fcac59a..093bc22268 100644
--- a/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-scheduler-ia32.cc
@@ -61,8 +61,12 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat64Round:
case kSSEFloat32ToFloat64:
case kSSEFloat64ToFloat32:
+ case kSSEFloat32ToInt32:
+ case kSSEFloat32ToUint32:
case kSSEFloat64ToInt32:
case kSSEFloat64ToUint32:
+ case kSSEInt32ToFloat32:
+ case kSSEUint32ToFloat32:
case kSSEInt32ToFloat64:
case kSSEUint32ToFloat64:
case kSSEFloat64ExtractLowWord32:
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 090645212e..d821462526 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -190,7 +190,8 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -275,7 +276,8 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kIA32Movl;
break;
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -327,9 +329,10 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -373,9 +376,10 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -508,9 +512,10 @@ namespace {
void VisitMulHigh(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsFixed(node, edx),
- g.UseFixed(node->InputAt(0), eax),
- g.UseUniqueRegister(node->InputAt(1)));
+ InstructionOperand temps[] = {g.TempRegister(eax)};
+ selector->Emit(
+ opcode, g.DefineAsFixed(node, edx), g.UseFixed(node->InputAt(0), eax),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
@@ -591,6 +596,9 @@ void InstructionSelector::VisitWord32Ctz(Node* node) {
}
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitWord32Popcnt(Node* node) {
IA32OperandGenerator g(this);
Emit(kIA32Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -695,6 +703,19 @@ void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
}
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ VisitRO(this, node, kSSEInt32ToFloat32);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)),
+ arraysize(temps), temps);
+}
+
+
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
VisitRO(this, node, kSSEInt32ToFloat64);
}
@@ -705,6 +726,16 @@ void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
}
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ VisitRO(this, node, kSSEFloat32ToInt32);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ VisitRO(this, node, kSSEFloat32ToUint32);
+}
+
+
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
VisitRO(this, node, kSSEFloat64ToInt32);
}
@@ -958,6 +989,46 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
namespace {
+void VisitCompareWithMemoryOperand(InstructionSelector* selector,
+ InstructionCode opcode, Node* left,
+ InstructionOperand right,
+ FlagsContinuation* cont) {
+ DCHECK(left->opcode() == IrOpcode::kLoad);
+ IA32OperandGenerator g(selector);
+ size_t input_count = 0;
+ InstructionOperand inputs[6];
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
+ opcode |= AddressingModeField::encode(addressing_mode);
+ opcode = cont->Encode(opcode);
+ inputs[input_count++] = right;
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ selector->Emit(opcode, 0, nullptr, input_count, inputs);
+ } else {
+ DCHECK(cont->IsSet());
+ InstructionOperand output = g.DefineAsRegister(cont->result());
+ selector->Emit(opcode, 1, &output, input_count, inputs);
+ }
+}
+
+// Determines if {input} of {node} can be replaced by a memory operand.
+bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
+ Node* node, Node* input) {
+ if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
+ return false;
+ }
+ MachineRepresentation load_representation =
+ LoadRepresentationOf(input->op()).representation();
+ if (load_representation == MachineRepresentation::kWord32 ||
+ load_representation == MachineRepresentation::kTagged) {
+ return opcode == kIA32Cmp || opcode == kIA32Test;
+ }
+ return false;
+}
+
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
@@ -1003,26 +1074,41 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
}
-
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
IA32OperandGenerator g(selector);
- Node* const left = node->InputAt(0);
- Node* const right = node->InputAt(1);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
- // Match immediates on left or right side of comparison.
+ // If one of the two inputs is an immediate, make sure it's on the right.
+ if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ std::swap(left, right);
+ }
+
+ // Match immediates on right side of comparison.
if (g.CanBeImmediate(right)) {
- VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
- } else if (g.CanBeImmediate(left)) {
+ if (CanUseMemoryOperand(selector, opcode, node, left)) {
+ return VisitCompareWithMemoryOperand(selector, opcode, left,
+ g.UseImmediate(right), cont);
+ }
+ return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
+ cont);
+ }
+
+ if (g.CanBeBetterLeftOperand(right)) {
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
- } else {
- VisitCompare(selector, opcode, left, right, cont,
- node->op()->HasProperty(Operator::kCommutative));
+ std::swap(left, right);
}
-}
+ if (CanUseMemoryOperand(selector, opcode, node, left)) {
+ return VisitCompareWithMemoryOperand(selector, opcode, left,
+ g.UseRegister(right), cont);
+ }
+ return VisitCompare(selector, opcode, left, right, cont,
+ node->op()->HasProperty(Operator::kCommutative));
+}
void VisitWordCompare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
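The rewritten VisitWordCompare normalizes operand order before looking for fusion opportunities: immediates move to the right, and when the swap happens on a non-commutative comparison the continuation's condition is commuted so the branch still tests the same predicate (5 < x becomes x > 5). A toy illustration of that invariant, with illustrative names only:

    #include <utility>

    enum class Cond { kLessThan, kGreaterThan, kEqual };

    Cond Commute(Cond c) {
      switch (c) {
        case Cond::kLessThan: return Cond::kGreaterThan;
        case Cond::kGreaterThan: return Cond::kLessThan;
        case Cond::kEqual: return Cond::kEqual;  // symmetric anyway
      }
      return c;
    }

    struct Operand { bool is_immediate; };

    // Mirrors the first step of VisitWordCompare: the immediate goes right,
    // and the condition is commuted unless the operator is commutative.
    void Canonicalize(Operand* left, Operand* right, Cond* cond,
                      bool commutative) {
      if (!right->is_immediate && left->is_immediate) {
        if (!commutative) *cond = Commute(*cond);
        std::swap(*left, *right);
      }
    }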
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index 6c31ac8f9d..d2144cf638 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -47,7 +47,6 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchPrepareCallCFunction) \
V(ArchCallCFunction) \
V(ArchPrepareTailCall) \
- V(ArchLazyBailout) \
V(ArchJmp) \
V(ArchLookupSwitch) \
V(ArchTableSwitch) \
@@ -57,6 +56,7 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(ArchRet) \
V(ArchStackPointer) \
V(ArchFramePointer) \
+ V(ArchParentFramePointer) \
V(ArchTruncateDoubleToI) \
V(ArchStoreWithWriteBarrier) \
V(CheckedLoadInt8) \
@@ -72,7 +72,8 @@ enum class RecordWriteMode { kValueIsMap, kValueIsPointer, kValueIsAny };
V(CheckedStoreWord32) \
V(CheckedStoreWord64) \
V(CheckedStoreFloat32) \
- V(CheckedStoreFloat64)
+ V(CheckedStoreFloat64) \
+ V(ArchStackSlot)
#define ARCH_OPCODE_LIST(V) \
COMMON_ARCH_OPCODE_LIST(V) \
diff --git a/deps/v8/src/compiler/instruction-scheduler.cc b/deps/v8/src/compiler/instruction-scheduler.cc
index 2f329ead41..adbfd5d10d 100644
--- a/deps/v8/src/compiler/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/instruction-scheduler.cc
@@ -5,11 +5,57 @@
#include "src/compiler/instruction-scheduler.h"
#include "src/base/adapters.h"
+#include "src/base/utils/random-number-generator.h"
namespace v8 {
namespace internal {
namespace compiler {
+// Compare the two nodes and return true if node1 is a better candidate than
+// node2 (i.e. node1 should be scheduled before node2).
+bool InstructionScheduler::CriticalPathFirstQueue::CompareNodes(
+ ScheduleGraphNode *node1, ScheduleGraphNode *node2) const {
+ return node1->total_latency() > node2->total_latency();
+}
+
+
+InstructionScheduler::ScheduleGraphNode*
+InstructionScheduler::CriticalPathFirstQueue::PopBestCandidate(int cycle) {
+ DCHECK(!IsEmpty());
+ auto candidate = nodes_.end();
+ for (auto iterator = nodes_.begin(); iterator != nodes_.end(); ++iterator) {
+ // We only consider instructions that have all their operands ready and
+ // we try to schedule the critical path first.
+ if (cycle >= (*iterator)->start_cycle()) {
+ if ((candidate == nodes_.end()) || CompareNodes(*iterator, *candidate)) {
+ candidate = iterator;
+ }
+ }
+ }
+
+ if (candidate != nodes_.end()) {
+ ScheduleGraphNode *result = *candidate;
+ nodes_.erase(candidate);
+ return result;
+ }
+
+ return nullptr;
+}
+
+
+InstructionScheduler::ScheduleGraphNode*
+InstructionScheduler::StressSchedulerQueue::PopBestCandidate(int cycle) {
+ DCHECK(!IsEmpty());
+ // Choose a random element from the ready list.
+ auto candidate = nodes_.begin();
+ std::advance(candidate, isolate()->random_number_generator()->NextInt(
+ static_cast<int>(nodes_.size())));
+ ScheduleGraphNode *result = *candidate;
+ nodes_.erase(candidate);
+ return result;
+}
+
+
InstructionScheduler::ScheduleGraphNode::ScheduleGraphNode(
Zone* zone,
Instruction* instr)
@@ -50,7 +96,11 @@ void InstructionScheduler::StartBlock(RpoNumber rpo) {
void InstructionScheduler::EndBlock(RpoNumber rpo) {
- ScheduleBlock();
+ if (FLAG_turbo_stress_instruction_scheduling) {
+ ScheduleBlock<StressSchedulerQueue>();
+ } else {
+ ScheduleBlock<CriticalPathFirstQueue>();
+ }
sequence()->EndBlock(rpo);
graph_.clear();
last_side_effect_instr_ = nullptr;
@@ -110,14 +160,9 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
}
-bool InstructionScheduler::CompareNodes(ScheduleGraphNode *node1,
- ScheduleGraphNode *node2) const {
- return node1->total_latency() > node2->total_latency();
-}
-
-
+template <typename QueueType>
void InstructionScheduler::ScheduleBlock() {
- ZoneLinkedList<ScheduleGraphNode*> ready_list(zone());
+ QueueType ready_list(this);
// Compute total latencies so that we can schedule the critical path first.
ComputeTotalLatencies();
@@ -125,43 +170,28 @@ void InstructionScheduler::ScheduleBlock() {
// Add nodes which don't have dependencies to the ready list.
for (auto node : graph_) {
if (!node->HasUnscheduledPredecessor()) {
- ready_list.push_back(node);
+ ready_list.AddNode(node);
}
}
// Go through the ready list and schedule the instructions.
int cycle = 0;
- while (!ready_list.empty()) {
- auto candidate = ready_list.end();
- for (auto iterator = ready_list.begin(); iterator != ready_list.end();
- ++iterator) {
- // Look for the best candidate to schedule.
- // We only consider instructions that have all their operands ready and
- // we try to schedule the critical path first (we look for the instruction
- // with the highest latency on the path to reach the end of the graph).
- if (cycle >= (*iterator)->start_cycle()) {
- if ((candidate == ready_list.end()) ||
- CompareNodes(*iterator, *candidate)) {
- candidate = iterator;
- }
- }
- }
+ while (!ready_list.IsEmpty()) {
+ auto candidate = ready_list.PopBestCandidate(cycle);
- if (candidate != ready_list.end()) {
- sequence()->AddInstruction((*candidate)->instruction());
+ if (candidate != nullptr) {
+ sequence()->AddInstruction(candidate->instruction());
- for (auto successor : (*candidate)->successors()) {
+ for (auto successor : candidate->successors()) {
successor->DropUnscheduledPredecessor();
successor->set_start_cycle(
std::max(successor->start_cycle(),
- cycle + (*candidate)->latency()));
+ cycle + candidate->latency()));
if (!successor->HasUnscheduledPredecessor()) {
- ready_list.push_back(successor);
+ ready_list.AddNode(successor);
}
}
-
- ready_list.erase(candidate);
}
cycle++;
@@ -172,17 +202,22 @@ void InstructionScheduler::ScheduleBlock() {
int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
switch (instr->arch_opcode()) {
case kArchNop:
- case kArchStackPointer:
case kArchFramePointer:
+ case kArchParentFramePointer:
case kArchTruncateDoubleToI:
+ case kArchStackSlot:
return kNoOpcodeFlags;
+ case kArchStackPointer:
+ // ArchStackPointer instruction loads the current stack pointer value and
+      // must not be reordered with instructions that have side effects.
+ return kIsLoadOperation;
+
case kArchPrepareCallCFunction:
case kArchPrepareTailCall:
case kArchCallCFunction:
case kArchCallCodeObject:
case kArchCallJSFunction:
- case kArchLazyBailout:
return kHasSideEffect;
case kArchTailCallCodeObject:
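The two queue policies introduced in this file share one interface, AddNode/IsEmpty/PopBestCandidate, which is why ScheduleBlock can be templated over the queue type. A self-contained sketch of the critical-path policy (GraphNode stands in for ScheduleGraphNode):

    #include <list>

    struct GraphNode {
      int start_cycle;    // earliest cycle at which all operands are ready
      int total_latency;  // latency along the longest path to the graph end
    };

    // Among the nodes that are ready at this cycle, pick the one whose
    // remaining path to the end of the graph is longest.
    GraphNode* PopBestCandidate(std::list<GraphNode*>* ready, int cycle) {
      auto best = ready->end();
      for (auto it = ready->begin(); it != ready->end(); ++it) {
        if (cycle < (*it)->start_cycle) continue;  // operands not ready yet
        if (best == ready->end() ||
            (*it)->total_latency > (*best)->total_latency) {
          best = it;
        }
      }
      if (best == ready->end()) return nullptr;  // stall this cycle
      GraphNode* result = *best;
      ready->erase(best);
      return result;
    }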
diff --git a/deps/v8/src/compiler/instruction-scheduler.h b/deps/v8/src/compiler/instruction-scheduler.h
index fafbe47908..104c0b97de 100644
--- a/deps/v8/src/compiler/instruction-scheduler.h
+++ b/deps/v8/src/compiler/instruction-scheduler.h
@@ -90,11 +90,66 @@ class InstructionScheduler final : public ZoneObject {
int start_cycle_;
};
- // Compare the two nodes and return true if node1 is a better candidate than
- // node2 (i.e. node1 should be scheduled before node2).
- bool CompareNodes(ScheduleGraphNode *node1, ScheduleGraphNode *node2) const;
+  // Keep track of all nodes ready to be scheduled (i.e. all their
+  // dependencies have been scheduled). Note that this class is intended to be
+  // extended by concrete implementations of the scheduling queue, which define
+  // the policy used to pop nodes from the queue.
+ class SchedulingQueueBase {
+ public:
+ explicit SchedulingQueueBase(InstructionScheduler* scheduler)
+ : scheduler_(scheduler),
+ nodes_(scheduler->zone()) {
+ }
+
+ void AddNode(ScheduleGraphNode* node) {
+ nodes_.push_back(node);
+ }
+
+ bool IsEmpty() const {
+ return nodes_.empty();
+ }
+
+ protected:
+ InstructionScheduler* scheduler_;
+ ZoneLinkedList<ScheduleGraphNode*> nodes_;
+ };
+
+  // A scheduling queue which prioritizes nodes on the critical path (we look
+ // for the instruction with the highest latency on the path to reach the end
+ // of the graph).
+ class CriticalPathFirstQueue : public SchedulingQueueBase {
+ public:
+ explicit CriticalPathFirstQueue(InstructionScheduler* scheduler)
+ : SchedulingQueueBase(scheduler) { }
+
+ // Look for the best candidate to schedule, remove it from the queue and
+ // return it.
+ ScheduleGraphNode* PopBestCandidate(int cycle);
+
+ private:
+ // Compare the two nodes and return true if node1 is a better candidate than
+ // node2 (i.e. node1 should be scheduled before node2).
+ bool CompareNodes(ScheduleGraphNode *node1, ScheduleGraphNode *node2) const;
+ };
+
+  // A queue which pops a random node from the queue to perform stress tests on
+ // the scheduler.
+ class StressSchedulerQueue : public SchedulingQueueBase {
+ public:
+ explicit StressSchedulerQueue(InstructionScheduler* scheduler)
+ : SchedulingQueueBase(scheduler) { }
+
+ ScheduleGraphNode* PopBestCandidate(int cycle);
+
+ private:
+ Isolate *isolate() {
+ return scheduler_->isolate();
+ }
+ };
- // Perform scheduling for the current block.
+  // Perform scheduling for the current block, specifying the queue type to
+ // use to determine the next best candidate.
+ template <typename QueueType>
void ScheduleBlock();
// Return the scheduling properties of the given instruction.
@@ -134,6 +189,7 @@ class InstructionScheduler final : public ZoneObject {
Zone* zone() { return zone_; }
InstructionSequence* sequence() { return sequence_; }
+ Isolate* isolate() { return sequence()->isolate(); }
Zone* zone_;
InstructionSequence* sequence_;
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 86868e59ee..0f27e50dc9 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -21,7 +21,7 @@ namespace compiler {
InstructionSelector::InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
- SourcePositionTable* source_positions,
+ SourcePositionTable* source_positions, Frame* frame,
SourcePositionMode source_position_mode, Features features)
: zone_(zone),
linkage_(linkage),
@@ -34,9 +34,11 @@ InstructionSelector::InstructionSelector(
instructions_(zone),
defined_(node_count, false, zone),
used_(node_count, false, zone),
+ effect_level_(node_count, 0, zone),
virtual_registers_(node_count,
InstructionOperand::kInvalidVirtualRegister, zone),
- scheduler_(nullptr) {
+ scheduler_(nullptr),
+ frame_(frame) {
instructions_.reserve(node_count);
}
@@ -217,10 +219,11 @@ Instruction* InstructionSelector::Emit(Instruction* instr) {
bool InstructionSelector::CanCover(Node* user, Node* node) const {
return node->OwnedBy(user) &&
- schedule()->block(node) == schedule()->block(user);
+ schedule()->block(node) == schedule()->block(user) &&
+ (node->op()->HasProperty(Operator::kPure) ||
+ GetEffectLevel(node) == GetEffectLevel(user));
}
-
int InstructionSelector::GetVirtualRegister(const Node* node) {
DCHECK_NOT_NULL(node);
size_t const id = node->id();
@@ -279,6 +282,19 @@ void InstructionSelector::MarkAsUsed(Node* node) {
used_[id] = true;
}
+int InstructionSelector::GetEffectLevel(Node* node) const {
+ DCHECK_NOT_NULL(node);
+ size_t const id = node->id();
+ DCHECK_LT(id, effect_level_.size());
+ return effect_level_[id];
+}
+
+void InstructionSelector::SetEffectLevel(Node* node, int effect_level) {
+ DCHECK_NOT_NULL(node);
+ size_t const id = node->id();
+ DCHECK_LT(id, effect_level_.size());
+ effect_level_[id] = effect_level;
+}
void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
const InstructionOperand& op) {
@@ -567,10 +583,6 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
buffer->descriptor->GetInputType(0).representation()));
break;
- case CallDescriptor::kLazyBailout:
- // The target is ignored, but we still need to pass a value here.
- buffer->instruction_args.push_back(g.UseImmediate(callee));
- break;
}
DCHECK_EQ(1u, buffer->instruction_args.size());
@@ -581,13 +593,29 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
size_t frame_state_entries = 0;
USE(frame_state_entries); // frame_state_entries is only used for debug.
if (buffer->frame_state_descriptor != nullptr) {
+ Node* frame_state =
+ call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
+
+    // If it was a syntactic tail call we need to drop the current frame and,
+    // if present, the arguments adaptor frame on top of it.
+ if (buffer->descriptor->SupportsTailCalls()) {
+ frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+ buffer->frame_state_descriptor =
+ buffer->frame_state_descriptor->outer_state();
+
+ if (buffer->frame_state_descriptor != nullptr &&
+ buffer->frame_state_descriptor->type() ==
+ FrameStateType::kArgumentsAdaptor) {
+ frame_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+ buffer->frame_state_descriptor =
+ buffer->frame_state_descriptor->outer_state();
+ }
+ }
+
InstructionSequence::StateId state_id =
sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
- Node* frame_state =
- call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
-
StateObjectDeduplicator deduplicator(instruction_zone());
frame_state_entries =
@@ -656,6 +684,16 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
current_block_ = block;
int current_block_end = static_cast<int>(instructions_.size());
+ int effect_level = 0;
+ for (Node* const node : *block) {
+ if (node->opcode() == IrOpcode::kStore ||
+ node->opcode() == IrOpcode::kCheckedStore ||
+ node->opcode() == IrOpcode::kCall) {
+ ++effect_level;
+ }
+ SetEffectLevel(node, effect_level);
+ }
+
// Generate code for the block control "top down", but schedule the code
// "bottom up".
VisitControl(block);
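The effect-level pass above is what makes the new memory-operand fusion on ia32 safe: the counter advances at every store, checked store, and call, so CanCover only lets a load be folded into its user when no write can intervene between them. A standalone restatement of the numbering:

    #include <vector>

    enum class Op { kLoad, kStore, kCheckedStore, kCall, kOther };

    // Nodes in the same region between side effects share a level; only
    // same-level (or pure) pairs may be combined by the selector.
    std::vector<int> ComputeEffectLevels(const std::vector<Op>& block) {
      std::vector<int> levels;
      int level = 0;
      for (Op op : block) {
        if (op == Op::kStore || op == Op::kCheckedStore || op == Op::kCall) {
          ++level;
        }
        levels.push_back(level);
      }
      return levels;
    }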
@@ -767,7 +805,7 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
DCHECK_EQ(IrOpcode::kThrow, input->opcode());
return VisitThrow(input->InputAt(0));
case BasicBlock::kNone: {
- // TODO(titzer): exit block doesn't have control.
+ // Exit block doesn't have control.
DCHECK_NULL(input);
break;
}
@@ -866,6 +904,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitWord32Clz(node);
case IrOpcode::kWord32Ctz:
return MarkAsWord32(node), VisitWord32Ctz(node);
+ case IrOpcode::kWord32ReverseBits:
+ return MarkAsWord32(node), VisitWord32ReverseBits(node);
case IrOpcode::kWord32Popcnt:
return MarkAsWord32(node), VisitWord32Popcnt(node);
case IrOpcode::kWord64Popcnt:
@@ -888,6 +928,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord64(node), VisitWord64Clz(node);
case IrOpcode::kWord64Ctz:
return MarkAsWord64(node), VisitWord64Ctz(node);
+ case IrOpcode::kWord64ReverseBits:
+ return MarkAsWord64(node), VisitWord64ReverseBits(node);
case IrOpcode::kWord64Equal:
return VisitWord64Equal(node);
case IrOpcode::kInt32Add:
@@ -956,6 +998,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToUint32:
return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
+ case IrOpcode::kTruncateFloat32ToInt32:
+ return MarkAsWord32(node), VisitTruncateFloat32ToInt32(node);
+ case IrOpcode::kTruncateFloat32ToUint32:
+ return MarkAsWord32(node), VisitTruncateFloat32ToUint32(node);
case IrOpcode::kTryTruncateFloat32ToInt64:
return MarkAsWord64(node), VisitTryTruncateFloat32ToInt64(node);
case IrOpcode::kTryTruncateFloat64ToInt64:
@@ -976,10 +1022,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
case IrOpcode::kRoundInt64ToFloat32:
return MarkAsFloat32(node), VisitRoundInt64ToFloat32(node);
+ case IrOpcode::kRoundInt32ToFloat32:
+ return MarkAsFloat32(node), VisitRoundInt32ToFloat32(node);
case IrOpcode::kRoundInt64ToFloat64:
return MarkAsFloat64(node), VisitRoundInt64ToFloat64(node);
case IrOpcode::kBitcastFloat32ToInt32:
return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
+ case IrOpcode::kRoundUint32ToFloat32:
+ return MarkAsFloat32(node), VisitRoundUint32ToFloat32(node);
case IrOpcode::kRoundUint64ToFloat32:
return MarkAsFloat64(node), VisitRoundUint64ToFloat32(node);
case IrOpcode::kRoundUint64ToFloat64:
@@ -1062,10 +1112,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
+ case IrOpcode::kStackSlot:
+ return VisitStackSlot(node);
case IrOpcode::kLoadStackPointer:
return VisitLoadStackPointer(node);
case IrOpcode::kLoadFramePointer:
return VisitLoadFramePointer(node);
+ case IrOpcode::kLoadParentFramePointer:
+ return VisitLoadParentFramePointer(node);
case IrOpcode::kCheckedLoad: {
MachineRepresentation rep =
CheckedLoadRepresentationOf(node->op()).representation();
@@ -1090,9 +1144,14 @@ void InstructionSelector::VisitLoadStackPointer(Node* node) {
void InstructionSelector::VisitLoadFramePointer(Node* node) {
OperandGenerator g(this);
+ frame_->MarkNeedsFrame();
Emit(kArchFramePointer, g.DefineAsRegister(node));
}
+void InstructionSelector::VisitLoadParentFramePointer(Node* node) {
+ OperandGenerator g(this);
+ Emit(kArchParentFramePointer, g.DefineAsRegister(node));
+}
void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
InstructionOperand& index_operand) {
@@ -1129,6 +1188,14 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
+void InstructionSelector::VisitStackSlot(Node* node) {
+ int size = 1 << ElementSizeLog2Of(StackSlotRepresentationOf(node->op()));
+ int slot = frame_->AllocateSpillSlot(size);
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
// 32 bit targets do not implement the following instructions.
#if V8_TARGET_ARCH_32_BIT
@@ -1160,6 +1227,11 @@ void InstructionSelector::VisitWord64Clz(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64Ctz(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitWord64ReverseBits(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitWord64Popcnt(Node* node) { UNIMPLEMENTED(); }
@@ -1412,6 +1484,13 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
buffer.instruction_args.push_back(g.Label(handler));
}
+  // (arm64 only) The caller uses JSSP, but the callee might destroy it.
+ if (descriptor->UseNativeStack() &&
+ !linkage()->GetIncomingDescriptor()->UseNativeStack()) {
+ flags |= CallDescriptor::kRestoreJSSP;
+ }
+
+
// Select the appropriate opcode based on the call type.
InstructionCode opcode = kArchNop;
switch (descriptor->kind()) {
@@ -1426,9 +1505,6 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
case CallDescriptor::kCallJSFunction:
opcode = kArchCallJSFunction | MiscField::encode(flags);
break;
- case CallDescriptor::kLazyBailout:
- opcode = kArchLazyBailout | MiscField::encode(flags);
- break;
}
// Emit the call instruction.
@@ -1585,7 +1661,7 @@ void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind, Node* value) {
void InstructionSelector::VisitThrow(Node* value) {
OperandGenerator g(this);
- Emit(kArchThrowTerminator, g.NoOutput()); // TODO(titzer)
+ Emit(kArchThrowTerminator, g.NoOutput());
}
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 52aea70eb6..a01cab4dab 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -52,7 +52,7 @@ class InstructionSelector final {
InstructionSelector(
Zone* zone, size_t node_count, Linkage* linkage,
InstructionSequence* sequence, Schedule* schedule,
- SourcePositionTable* source_positions,
+ SourcePositionTable* source_positions, Frame* frame,
SourcePositionMode source_position_mode = kCallSourcePositions,
Features features = SupportedFeatures());
@@ -149,6 +149,9 @@ class InstructionSelector final {
// Checks if {node} is currently live.
bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }
+ // Gets the effect level of {node}.
+ int GetEffectLevel(Node* node) const;
+
int GetVirtualRegister(const Node* node);
const std::map<NodeId, int> GetVirtualRegistersForTesting() const;
@@ -168,6 +171,9 @@ class InstructionSelector final {
// will need to generate code for it.
void MarkAsUsed(Node* node);
+ // Sets the effect level of {node}.
+ void SetEffectLevel(Node* node, int effect_level);
+
// Inform the register allocation of the representation of the value produced
// by {node}.
void MarkAsRepresentation(MachineRepresentation rep, Node* node);
@@ -269,8 +275,10 @@ class InstructionSelector final {
ZoneVector<Instruction*> instructions_;
BoolVector defined_;
BoolVector used_;
+ IntVector effect_level_;
IntVector virtual_registers_;
InstructionScheduler* scheduler_;
+ Frame* frame_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 383e27dac6..d4ec6bc943 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -164,6 +164,9 @@ std::ostream& operator<<(std::ostream& os,
case MachineRepresentation::kFloat64:
os << "|f64";
break;
+ case MachineRepresentation::kSimd128:
+ os << "|s128";
+ break;
case MachineRepresentation::kTagged:
os << "|t";
break;
@@ -615,6 +618,20 @@ InstructionBlocks* InstructionSequence::InstructionBlocksFor(
return blocks;
}
+void InstructionSequence::Validate() {
+  // Validate that blocks are in edge-split form: no block with multiple
+  // successors has an edge to a successor block with more than one predecessor.
+ for (const InstructionBlock* block : instruction_blocks()) {
+ if (block->SuccessorCount() > 1) {
+ for (const RpoNumber& successor_id : block->successors()) {
+ const InstructionBlock* successor = InstructionBlockAt(successor_id);
+ // Expect precisely one predecessor: "block".
+ CHECK(successor->PredecessorCount() == 1 &&
+ successor->predecessors()[0] == block->rpo_number());
+ }
+ }
+ }
+}
void InstructionSequence::ComputeAssemblyOrder(InstructionBlocks* blocks) {
int ao = 0;
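Edge-split form bans critical edges: an edge from a block with several successors into a block with several predecessors would leave no safe place to insert gap moves. The new Validate() encodes exactly that; a compact standalone version of the check (Block is a stand-in type):

    #include <cassert>
    #include <vector>

    struct Block {
      std::vector<const Block*> successors;
      std::vector<const Block*> predecessors;
    };

    // If a block branches, each target must have that block as its only
    // predecessor, i.e. every critical edge was split beforehand.
    void ValidateEdgeSplit(const std::vector<const Block*>& blocks) {
      for (const Block* b : blocks) {
        if (b->successors.size() <= 1) continue;
        for (const Block* s : b->successors) {
          assert(s->predecessors.size() == 1 && s->predecessors[0] == b);
        }
      }
    }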
@@ -648,6 +665,10 @@ InstructionSequence::InstructionSequence(Isolate* isolate,
representations_(zone()),
deoptimization_entries_(zone()) {
block_starts_.reserve(instruction_blocks_->size());
+
+#if DEBUG
+ Validate();
+#endif
}
@@ -726,6 +747,7 @@ static MachineRepresentation FilterRepresentation(MachineRepresentation rep) {
case MachineRepresentation::kWord64:
case MachineRepresentation::kFloat32:
case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kSimd128:
case MachineRepresentation::kTagged:
return rep;
case MachineRepresentation::kNone:
@@ -819,6 +841,62 @@ void InstructionSequence::Print() const {
Print(config);
}
+void InstructionSequence::PrintBlock(const RegisterConfiguration* config,
+ int block_id) const {
+ OFStream os(stdout);
+ RpoNumber rpo = RpoNumber::FromInt(block_id);
+ const InstructionBlock* block = InstructionBlockAt(rpo);
+ CHECK(block->rpo_number() == rpo);
+
+ os << "B" << block->rpo_number();
+ os << ": AO#" << block->ao_number();
+ if (block->IsDeferred()) os << " (deferred)";
+ if (!block->needs_frame()) os << " (no frame)";
+ if (block->must_construct_frame()) os << " (construct frame)";
+ if (block->must_deconstruct_frame()) os << " (deconstruct frame)";
+ if (block->IsLoopHeader()) {
+ os << " loop blocks: [" << block->rpo_number() << ", " << block->loop_end()
+ << ")";
+ }
+ os << " instructions: [" << block->code_start() << ", " << block->code_end()
+ << ")\n predecessors:";
+
+ for (auto pred : block->predecessors()) {
+ os << " B" << pred.ToInt();
+ }
+ os << "\n";
+
+ for (auto phi : block->phis()) {
+ PrintableInstructionOperand printable_op = {config, phi->output()};
+ os << " phi: " << printable_op << " =";
+ for (auto input : phi->operands()) {
+ os << " v" << input;
+ }
+ os << "\n";
+ }
+
+ ScopedVector<char> buf(32);
+ PrintableInstruction printable_instr;
+ printable_instr.register_configuration_ = config;
+ for (int j = block->first_instruction_index();
+ j <= block->last_instruction_index(); j++) {
+ // TODO(svenpanne) Add some basic formatting to our streams.
+ SNPrintF(buf, "%5d", j);
+ printable_instr.instr_ = InstructionAt(j);
+ os << " " << buf.start() << ": " << printable_instr << "\n";
+ }
+
+ for (auto succ : block->successors()) {
+ os << " B" << succ.ToInt();
+ }
+ os << "\n";
+}
+
+void InstructionSequence::PrintBlock(int block_id) const {
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN);
+ PrintBlock(config, block_id);
+}
FrameStateDescriptor::FrameStateDescriptor(
Zone* zone, FrameStateType type, BailoutId bailout_id,
@@ -901,53 +979,7 @@ std::ostream& operator<<(std::ostream& os,
os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
}
for (int i = 0; i < code.InstructionBlockCount(); i++) {
- RpoNumber rpo = RpoNumber::FromInt(i);
- const InstructionBlock* block = code.InstructionBlockAt(rpo);
- CHECK(block->rpo_number() == rpo);
-
- os << "B" << block->rpo_number();
- os << ": AO#" << block->ao_number();
- if (block->IsDeferred()) os << " (deferred)";
- if (!block->needs_frame()) os << " (no frame)";
- if (block->must_construct_frame()) os << " (construct frame)";
- if (block->must_deconstruct_frame()) os << " (deconstruct frame)";
- if (block->IsLoopHeader()) {
- os << " loop blocks: [" << block->rpo_number() << ", "
- << block->loop_end() << ")";
- }
- os << " instructions: [" << block->code_start() << ", "
- << block->code_end() << ")\n predecessors:";
-
- for (auto pred : block->predecessors()) {
- os << " B" << pred.ToInt();
- }
- os << "\n";
-
- for (auto phi : block->phis()) {
- PrintableInstructionOperand printable_op = {
- printable.register_configuration_, phi->output()};
- os << " phi: " << printable_op << " =";
- for (auto input : phi->operands()) {
- os << " v" << input;
- }
- os << "\n";
- }
-
- ScopedVector<char> buf(32);
- PrintableInstruction printable_instr;
- printable_instr.register_configuration_ = printable.register_configuration_;
- for (int j = block->first_instruction_index();
- j <= block->last_instruction_index(); j++) {
- // TODO(svenpanne) Add some basic formatting to our streams.
- SNPrintF(buf, "%5d", j);
- printable_instr.instr_ = code.InstructionAt(j);
- os << " " << buf.start() << ": " << printable_instr << "\n";
- }
-
- for (auto succ : block->successors()) {
- os << " B" << succ.ToInt();
- }
- os << "\n";
+ printable.sequence_->PrintBlock(printable.register_configuration_, i);
}
return os;
}
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 8a6a0ae92a..9c978cee7c 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -67,8 +67,10 @@ class InstructionOperand {
inline bool IsAnyRegister() const;
inline bool IsRegister() const;
inline bool IsDoubleRegister() const;
+ inline bool IsSimd128Register() const;
inline bool IsStackSlot() const;
inline bool IsDoubleStackSlot() const;
+ inline bool IsSimd128StackSlot() const;
template <typename SubKindOperand>
static SubKindOperand* New(Zone* zone, const SubKindOperand& op) {
@@ -411,7 +413,7 @@ class LocationOperand : public InstructionOperand {
}
int index() const {
- DCHECK(IsStackSlot() || IsDoubleStackSlot());
+ DCHECK(IsStackSlot() || IsDoubleStackSlot() || IsSimd128StackSlot());
return static_cast<int64_t>(value_) >> IndexField::kShift;
}
@@ -427,6 +429,12 @@ class LocationOperand : public InstructionOperand {
IndexField::kShift);
}
+ Simd128Register GetSimd128Register() const {
+ DCHECK(IsSimd128Register());
+ return Simd128Register::from_code(static_cast<int64_t>(value_) >>
+ IndexField::kShift);
+ }
+
LocationKind location_kind() const {
return LocationKindField::decode(value_);
}
@@ -441,6 +449,7 @@ class LocationOperand : public InstructionOperand {
case MachineRepresentation::kWord64:
case MachineRepresentation::kFloat32:
case MachineRepresentation::kFloat64:
+ case MachineRepresentation::kSimd128:
case MachineRepresentation::kTagged:
return true;
case MachineRepresentation::kBit:
@@ -522,6 +531,12 @@ bool InstructionOperand::IsDoubleRegister() const {
IsFloatingPoint(LocationOperand::cast(this)->representation());
}
+bool InstructionOperand::IsSimd128Register() const {
+ return IsAnyRegister() &&
+ LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kSimd128;
+}
+
bool InstructionOperand::IsStackSlot() const {
return (IsAllocated() || IsExplicit()) &&
LocationOperand::cast(this)->location_kind() ==
@@ -536,6 +551,14 @@ bool InstructionOperand::IsDoubleStackSlot() const {
IsFloatingPoint(LocationOperand::cast(this)->representation());
}
+bool InstructionOperand::IsSimd128StackSlot() const {
+ return (IsAllocated() || IsExplicit()) &&
+ LocationOperand::cast(this)->location_kind() ==
+ LocationOperand::STACK_SLOT &&
+ LocationOperand::cast(this)->representation() ==
+ MachineRepresentation::kSimd128;
+}
+
uint64_t InstructionOperand::GetCanonicalizedValue() const {
if (IsAllocated() || IsExplicit()) {
// TODO(dcarney): put machine type last and mask.
@@ -633,8 +656,14 @@ class ParallelMove final : public ZoneVector<MoveOperands*>, public ZoneObject {
MoveOperands* AddMove(const InstructionOperand& from,
const InstructionOperand& to) {
- auto zone = get_allocator().zone();
- auto move = new (zone) MoveOperands(from, to);
+ Zone* zone = get_allocator().zone();
+ return AddMove(from, to, zone);
+ }
+
+ MoveOperands* AddMove(const InstructionOperand& from,
+ const InstructionOperand& to,
+ Zone* operand_allocation_zone) {
+ MoveOperands* move = new (operand_allocation_zone) MoveOperands(from, to);
push_back(move);
return move;
}
@@ -732,7 +761,6 @@ class Instruction final {
return FlagsConditionField::decode(opcode());
}
- // TODO(titzer): make call into a flags.
static Instruction* New(Zone* zone, InstructionCode opcode) {
return New(zone, opcode, 0, nullptr, 0, nullptr, 0, nullptr);
}
@@ -1323,6 +1351,11 @@ class InstructionSequence final : public ZoneObject {
void Print(const RegisterConfiguration* config) const;
void Print() const;
+ void PrintBlock(const RegisterConfiguration* config, int block_id) const;
+ void PrintBlock(int block_id) const;
+
+ void Validate();
+
private:
friend std::ostream& operator<<(std::ostream& os,
const PrintableInstructionSequence& code);
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
new file mode 100644
index 0000000000..ff31abe518
--- /dev/null
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -0,0 +1,299 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/int64-lowering.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
+
+#include "src/compiler/node.h"
+#include "src/wasm/wasm-module.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Int64Lowering::Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
+ CommonOperatorBuilder* common, Zone* zone,
+ Signature<MachineRepresentation>* signature)
+ : zone_(zone),
+ graph_(graph),
+ machine_(machine),
+ common_(common),
+ state_(graph, 4),
+ stack_(zone),
+ replacements_(zone->NewArray<Replacement>(graph->NodeCount())),
+ signature_(signature) {
+ memset(replacements_, 0, sizeof(Replacement) * graph->NodeCount());
+}
+
+void Int64Lowering::LowerGraph() {
+ if (4 != kPointerSize) {
+ return;
+ }
+ stack_.push(graph()->end());
+ state_.Set(graph()->end(), State::kOnStack);
+
+ while (!stack_.empty()) {
+ Node* top = stack_.top();
+ if (state_.Get(top) == State::kInputsPushed) {
+ stack_.pop();
+ state_.Set(top, State::kVisited);
+ // All inputs of top have already been reduced, now reduce top.
+ LowerNode(top);
+ } else {
+ // Push all children onto the stack.
+ for (Node* input : top->inputs()) {
+ if (state_.Get(input) == State::kUnvisited) {
+ stack_.push(input);
+ state_.Set(input, State::kOnStack);
+ }
+ }
+ state_.Set(top, State::kInputsPushed);
+ }
+ }
+}
+
+static int GetParameterIndexAfterLowering(
+ Signature<MachineRepresentation>* signature, int old_index) {
+ int result = old_index;
+ for (int i = 0; i < old_index; i++) {
+ if (signature->GetParam(i) == MachineRepresentation::kWord64) {
+ result++;
+ }
+ }
+ return result;
+}
+
+static int GetParameterCountAfterLowering(
+ Signature<MachineRepresentation>* signature) {
+ return GetParameterIndexAfterLowering(
+ signature, static_cast<int>(signature->parameter_count()));
+}
+
+static int GetReturnCountAfterLowering(
+ Signature<MachineRepresentation>* signature) {
+ int result = static_cast<int>(signature->return_count());
+ for (int i = 0; i < static_cast<int>(signature->return_count()); i++) {
+ if (signature->GetReturn(i) == MachineRepresentation::kWord64) {
+ result++;
+ }
+ }
+ return result;
+}
+
+void Int64Lowering::LowerNode(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt64Constant: {
+ int64_t value = OpParameter<int64_t>(node);
+ Node* low_node = graph()->NewNode(
+ common()->Int32Constant(static_cast<int32_t>(value & 0xFFFFFFFF)));
+ Node* high_node = graph()->NewNode(
+ common()->Int32Constant(static_cast<int32_t>(value >> 32)));
+ ReplaceNode(node, low_node, high_node);
+ break;
+ }
+ case IrOpcode::kLoad: {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+
+ if (load_rep.representation() == MachineRepresentation::kWord64) {
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* index_high =
+ graph()->NewNode(machine()->Int32Add(), index,
+ graph()->NewNode(common()->Int32Constant(4)));
+
+ const Operator* load_op = machine()->Load(MachineType::Int32());
+ Node* high_node;
+ if (node->InputCount() > 2) {
+ Node* effect_high = node->InputAt(2);
+ Node* control_high = node->InputAt(3);
+ high_node = graph()->NewNode(load_op, base, index_high, effect_high,
+ control_high);
+          // Change the effect chain from old_node --> old_effect to
+ // old_node --> high_node --> old_effect.
+ node->ReplaceInput(2, high_node);
+ } else {
+ high_node = graph()->NewNode(load_op, base, index_high);
+ }
+ NodeProperties::ChangeOp(node, load_op);
+ ReplaceNode(node, node, high_node);
+ }
+ break;
+ }
+ case IrOpcode::kStore: {
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ if (store_rep.representation() == MachineRepresentation::kWord64) {
+ // We change the original store node to store the low word, and create
+ // a new store node to store the high word. The effect and control edges
+        // are copied from the original store to the new store node; the effect
+ // edge of the original store is redirected to the new store.
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* index_high =
+ graph()->NewNode(machine()->Int32Add(), index,
+ graph()->NewNode(common()->Int32Constant(4)));
+
+ Node* value = node->InputAt(2);
+ DCHECK(HasReplacementLow(value));
+ DCHECK(HasReplacementHigh(value));
+
+ const Operator* store_op = machine()->Store(StoreRepresentation(
+ MachineRepresentation::kWord32, write_barrier_kind));
+
+ Node* high_node;
+ if (node->InputCount() > 3) {
+ Node* effect_high = node->InputAt(3);
+ Node* control_high = node->InputAt(4);
+ high_node = graph()->NewNode(store_op, base, index_high,
+ GetReplacementHigh(value), effect_high,
+ control_high);
+ node->ReplaceInput(3, high_node);
+
+ } else {
+ high_node = graph()->NewNode(store_op, base, index_high,
+ GetReplacementHigh(value));
+ }
+
+ node->ReplaceInput(2, GetReplacementLow(value));
+ NodeProperties::ChangeOp(node, store_op);
+ ReplaceNode(node, node, high_node);
+ }
+ break;
+ }
+ case IrOpcode::kWord64And: {
+ DCHECK(node->InputCount() == 2);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ Node* low_node =
+ graph()->NewNode(machine()->Word32And(), GetReplacementLow(left),
+ GetReplacementLow(right));
+ Node* high_node =
+ graph()->NewNode(machine()->Word32And(), GetReplacementHigh(left),
+ GetReplacementHigh(right));
+ ReplaceNode(node, low_node, high_node);
+ break;
+ }
+ case IrOpcode::kTruncateInt64ToInt32: {
+ DCHECK(node->InputCount() == 1);
+ Node* input = node->InputAt(0);
+ ReplaceNode(node, GetReplacementLow(input), nullptr);
+ node->NullAllInputs();
+ break;
+ }
+ case IrOpcode::kStart: {
+ int parameter_count = GetParameterCountAfterLowering(signature());
+ // Only exchange the node if the parameter count actually changed.
+ if (parameter_count != signature()->parameter_count()) {
+ int delta =
+ parameter_count - static_cast<int>(signature()->parameter_count());
+ int new_output_count = node->op()->ValueOutputCount() + delta;
+ NodeProperties::ChangeOp(node, common()->Start(new_output_count));
+ }
+ break;
+ }
+ case IrOpcode::kParameter: {
+ DCHECK(node->InputCount() == 1);
+ // Only exchange the node if the parameter count actually changed. We do
+      // not even have to do the default lowering because the start node,
+ // the only input of a parameter node, only changes if the parameter count
+ // changes.
+ if (GetParameterCountAfterLowering(signature()) !=
+ signature()->parameter_count()) {
+ int old_index = ParameterIndexOf(node->op());
+ int new_index = GetParameterIndexAfterLowering(signature(), old_index);
+ NodeProperties::ChangeOp(node, common()->Parameter(new_index));
+
+ Node* high_node = nullptr;
+ if (signature()->GetParam(old_index) ==
+ MachineRepresentation::kWord64) {
+ high_node = graph()->NewNode(common()->Parameter(new_index + 1),
+ graph()->start());
+ }
+ ReplaceNode(node, node, high_node);
+ }
+ break;
+ }
+ case IrOpcode::kReturn: {
+ DefaultLowering(node);
+ int new_return_count = GetReturnCountAfterLowering(signature());
+ if (signature()->return_count() != new_return_count) {
+ NodeProperties::ChangeOp(node, common()->Return(new_return_count));
+ }
+ break;
+ }
+ case IrOpcode::kCall: {
+ CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+ if (DefaultLowering(node) ||
+ (descriptor->ReturnCount() == 1 &&
+ descriptor->GetReturnType(0) == MachineType::Int64())) {
+ // We have to adjust the call descriptor.
+ const Operator* op = common()->Call(
+ wasm::ModuleEnv::GetI32WasmCallDescriptor(zone(), descriptor));
+ NodeProperties::ChangeOp(node, op);
+ }
+ if (descriptor->ReturnCount() == 1 &&
+ descriptor->GetReturnType(0) == MachineType::Int64()) {
+ // We access the additional return values through projections.
+ Node* low_node = graph()->NewNode(common()->Projection(0), node);
+ Node* high_node = graph()->NewNode(common()->Projection(1), node);
+ ReplaceNode(node, low_node, high_node);
+ }
+ break;
+ }
+ default: { DefaultLowering(node); }
+ }
+}
+
+bool Int64Lowering::DefaultLowering(Node* node) {
+ bool something_changed = false;
+ for (int i = NodeProperties::PastValueIndex(node) - 1; i >= 0; i--) {
+ Node* input = node->InputAt(i);
+ if (HasReplacementLow(input)) {
+ something_changed = true;
+ node->ReplaceInput(i, GetReplacementLow(input));
+ }
+ if (HasReplacementHigh(input)) {
+ something_changed = true;
+ node->InsertInput(zone(), i + 1, GetReplacementHigh(input));
+ }
+ }
+ return something_changed;
+}
+
+void Int64Lowering::ReplaceNode(Node* old, Node* new_low, Node* new_high) {
+  // If new_low == nullptr, then new_high == nullptr as well.
+ DCHECK(new_low != nullptr || new_high == nullptr);
+ replacements_[old->id()].low = new_low;
+ replacements_[old->id()].high = new_high;
+}
+
+bool Int64Lowering::HasReplacementLow(Node* node) {
+ return replacements_[node->id()].low != nullptr;
+}
+
+Node* Int64Lowering::GetReplacementLow(Node* node) {
+ Node* result = replacements_[node->id()].low;
+ DCHECK(result);
+ return result;
+}
+
+bool Int64Lowering::HasReplacementHigh(Node* node) {
+ return replacements_[node->id()].high != nullptr;
+}
+
+Node* Int64Lowering::GetReplacementHigh(Node* node) {
+ Node* result = replacements_[node->id()].high;
+ DCHECK(result);
+ return result;
+}
+} // namespace compiler
+} // namespace internal
+} // namespace v8
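The word-by-word arithmetic this pass relies on is easy to state outside the compiler. A constant splits into a masked low word and an arithmetically shifted high word; a 64-bit AND has no cross-word interaction, so it lowers to two independent 32-bit ANDs; and a parameter's new index grows by one for each kWord64 parameter in front of it (for a signature (i32, i64, i32), old index 2 maps to new index 3). A hedged sketch of all three:

    #include <cstdint>
    #include <vector>

    // kInt64Constant: low = value & 0xFFFFFFFF, high = value >> 32.
    void SplitInt64(int64_t value, int32_t* low, int32_t* high) {
      *low = static_cast<int32_t>(value & 0xFFFFFFFF);
      *high = static_cast<int32_t>(value >> 32);
    }

    // kWord64And: purely word-wise, unlike add or shift.
    void Word64And(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi,
                   uint32_t* out_lo, uint32_t* out_hi) {
      *out_lo = a_lo & b_lo;
      *out_hi = a_hi & b_hi;
    }

    // GetParameterIndexAfterLowering: each earlier i64 becomes two i32s.
    int IndexAfterLowering(const std::vector<bool>& is_word64, int old_index) {
      int result = old_index;
      for (int i = 0; i < old_index; i++) {
        if (is_word64[i]) result++;
      }
      return result;
    }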
diff --git a/deps/v8/src/compiler/int64-lowering.h b/deps/v8/src/compiler/int64-lowering.h
new file mode 100644
index 0000000000..79a95dc195
--- /dev/null
+++ b/deps/v8/src/compiler/int64-lowering.h
@@ -0,0 +1,63 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INT64_REDUCER_H_
+#define V8_COMPILER_INT64_REDUCER_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-marker.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Int64Lowering {
+ public:
+ Int64Lowering(Graph* graph, MachineOperatorBuilder* machine,
+ CommonOperatorBuilder* common, Zone* zone,
+ Signature<MachineRepresentation>* signature);
+
+ void LowerGraph();
+
+ private:
+ enum class State : uint8_t { kUnvisited, kOnStack, kInputsPushed, kVisited };
+
+ struct Replacement {
+ Node* low;
+ Node* high;
+ };
+
+ Zone* zone() const { return zone_; }
+ Graph* graph() const { return graph_; }
+ MachineOperatorBuilder* machine() const { return machine_; }
+ CommonOperatorBuilder* common() const { return common_; }
+ Signature<MachineRepresentation>* signature() const { return signature_; }
+
+ void LowerNode(Node* node);
+ bool DefaultLowering(Node* node);
+
+ void ReplaceNode(Node* old, Node* new_low, Node* new_high);
+ bool HasReplacementLow(Node* node);
+ Node* GetReplacementLow(Node* node);
+ bool HasReplacementHigh(Node* node);
+ Node* GetReplacementHigh(Node* node);
+
+ Zone* zone_;
+ Graph* const graph_;
+ MachineOperatorBuilder* machine_;
+ CommonOperatorBuilder* common_;
+ NodeMarker<State> state_;
+ ZoneStack<Node*> stack_;
+ Replacement* replacements_;
+ Signature<MachineRepresentation>* signature_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_COMPILER_INT64_LOWERING_H_
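The State enum above implies an explicit-stack, inputs-first walk: a node is pushed (kOnStack), its inputs are scheduled on the first visit (kInputsPushed), and it is lowered exactly once when it is popped (kVisited). A minimal sketch of that traversal order, assuming a toy node type rather than the real Node class:

    #include <cstdint>
    #include <cstdio>
    #include <stack>
    #include <vector>

    enum class State : uint8_t { kUnvisited, kOnStack, kInputsPushed, kVisited };

    struct Node {
      int id;
      std::vector<Node*> inputs;
    };

    void LowerGraph(Node* end, std::vector<State>& state) {
      std::stack<Node*> stack;
      stack.push(end);
      state[end->id] = State::kOnStack;
      while (!stack.empty()) {
        Node* top = stack.top();
        if (state[top->id] == State::kOnStack) {
          // First visit: schedule unvisited inputs so they are lowered first.
          state[top->id] = State::kInputsPushed;
          for (Node* input : top->inputs) {
            if (state[input->id] == State::kUnvisited) {
              state[input->id] = State::kOnStack;
              stack.push(input);
            }
          }
        } else {
          // Inputs are done: lower this node once, then pop it.
          stack.pop();
          if (state[top->id] != State::kVisited) {
            state[top->id] = State::kVisited;
            std::printf("LowerNode(%d)\n", top->id);  // stand-in for LowerNode()
          }
        }
      }
    }

    int main() {
      Node a{0, {}}, b{1, {&a}}, c{2, {&a}}, end{3, {&b, &c}};
      std::vector<State> state(4, State::kUnvisited);
      LowerGraph(&end, state);  // prints 0, 2, 1, 3: inputs before their users
    }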
diff --git a/deps/v8/src/compiler/interpreter-assembler.cc b/deps/v8/src/compiler/interpreter-assembler.cc
deleted file mode 100644
index 7080d02120..0000000000
--- a/deps/v8/src/compiler/interpreter-assembler.cc
+++ /dev/null
@@ -1,751 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/interpreter-assembler.h"
-
-#include <ostream>
-
-#include "src/code-factory.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/instruction-selector.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/pipeline.h"
-#include "src/compiler/raw-machine-assembler.h"
-#include "src/compiler/schedule.h"
-#include "src/frames.h"
-#include "src/interface-descriptors.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/machine-type.h"
-#include "src/macro-assembler.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
- interpreter::Bytecode bytecode)
- : bytecode_(bytecode),
- raw_assembler_(new RawMachineAssembler(
- isolate, new (zone) Graph(zone),
- Linkage::GetInterpreterDispatchDescriptor(zone),
- MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags())),
- accumulator_(
- raw_assembler_->Parameter(Linkage::kInterpreterAccumulatorParameter)),
- bytecode_offset_(raw_assembler_->Parameter(
- Linkage::kInterpreterBytecodeOffsetParameter)),
- context_(
- raw_assembler_->Parameter(Linkage::kInterpreterContextParameter)),
- code_generated_(false) {}
-
-
-InterpreterAssembler::~InterpreterAssembler() {}
-
-
-Handle<Code> InterpreterAssembler::GenerateCode() {
- DCHECK(!code_generated_);
-
- // Disallow empty handlers that never return.
- DCHECK_NE(0, graph()->end()->InputCount());
-
- const char* bytecode_name = interpreter::Bytecodes::ToString(bytecode_);
- Schedule* schedule = raw_assembler_->Export();
- Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
- isolate(), raw_assembler_->call_descriptor(), graph(), schedule,
- Code::STUB, bytecode_name);
-
-#ifdef ENABLE_DISASSEMBLER
- if (FLAG_trace_ignition_codegen) {
- OFStream os(stdout);
- code->Disassemble(bytecode_name, os);
- os << std::flush;
- }
-#endif
-
- code_generated_ = true;
- return code;
-}
-
-
-Node* InterpreterAssembler::GetAccumulator() { return accumulator_; }
-
-
-void InterpreterAssembler::SetAccumulator(Node* value) { accumulator_ = value; }
-
-
-Node* InterpreterAssembler::GetContext() { return context_; }
-
-
-void InterpreterAssembler::SetContext(Node* value) { context_ = value; }
-
-
-Node* InterpreterAssembler::BytecodeOffset() { return bytecode_offset_; }
-
-
-Node* InterpreterAssembler::RegisterFileRawPointer() {
- return raw_assembler_->Parameter(Linkage::kInterpreterRegisterFileParameter);
-}
-
-
-Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
- return raw_assembler_->Parameter(Linkage::kInterpreterBytecodeArrayParameter);
-}
-
-
-Node* InterpreterAssembler::DispatchTableRawPointer() {
- return raw_assembler_->Parameter(Linkage::kInterpreterDispatchTableParameter);
-}
-
-
-Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
- return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
-}
-
-
-Node* InterpreterAssembler::LoadRegister(int offset) {
- return raw_assembler_->Load(MachineType::AnyTagged(),
- RegisterFileRawPointer(), Int32Constant(offset));
-}
-
-
-Node* InterpreterAssembler::LoadRegister(interpreter::Register reg) {
- return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
-}
-
-
-Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
- return WordShl(index, kPointerSizeLog2);
-}
-
-
-Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
- return raw_assembler_->Load(MachineType::AnyTagged(),
- RegisterFileRawPointer(),
- RegisterFrameOffset(reg_index));
-}
-
-
-Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
- return raw_assembler_->Store(MachineRepresentation::kTagged,
- RegisterFileRawPointer(), Int32Constant(offset),
- value, kNoWriteBarrier);
-}
-
-
-Node* InterpreterAssembler::StoreRegister(Node* value,
- interpreter::Register reg) {
- return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
-}
-
-
-Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
- return raw_assembler_->Store(
- MachineRepresentation::kTagged, RegisterFileRawPointer(),
- RegisterFrameOffset(reg_index), value, kNoWriteBarrier);
-}
-
-
-Node* InterpreterAssembler::NextRegister(Node* reg_index) {
-  // Register indices are negative, so the next register is at index - 1.
- return IntPtrAdd(reg_index, Int32Constant(-1));
-}
-
-
-Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
- DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
- DCHECK_EQ(interpreter::OperandSize::kByte,
- interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
- return raw_assembler_->Load(
- MachineType::Uint8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(),
- Int32Constant(interpreter::Bytecodes::GetOperandOffset(
- bytecode_, operand_index))));
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
- DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
- DCHECK_EQ(interpreter::OperandSize::kByte,
- interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
- Node* load = raw_assembler_->Load(
- MachineType::Int8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(),
- Int32Constant(interpreter::Bytecodes::GetOperandOffset(
- bytecode_, operand_index))));
-  // Ensure that we sign-extend to full pointer size.
- if (kPointerSize == 8) {
- load = raw_assembler_->ChangeInt32ToInt64(load);
- }
- return load;
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
- DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
- DCHECK_EQ(interpreter::OperandSize::kShort,
- interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
- if (TargetSupportsUnalignedAccess()) {
- return raw_assembler_->Load(
- MachineType::Uint16(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(),
- Int32Constant(interpreter::Bytecodes::GetOperandOffset(
- bytecode_, operand_index))));
- } else {
- int offset =
- interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
- Node* first_byte = raw_assembler_->Load(
- MachineType::Uint8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
- Node* second_byte = raw_assembler_->Load(
- MachineType::Uint8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
-#if V8_TARGET_LITTLE_ENDIAN
- return raw_assembler_->WordOr(WordShl(second_byte, kBitsPerByte),
- first_byte);
-#elif V8_TARGET_BIG_ENDIAN
- return raw_assembler_->WordOr(WordShl(first_byte, kBitsPerByte),
- second_byte);
-#else
-#error "Unknown Architecture"
-#endif
- }
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
- int operand_index) {
- DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
- DCHECK_EQ(interpreter::OperandSize::kShort,
- interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
- int operand_offset =
- interpreter::Bytecodes::GetOperandOffset(bytecode_, operand_index);
- Node* load;
- if (TargetSupportsUnalignedAccess()) {
- load = raw_assembler_->Load(
- MachineType::Int16(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
- } else {
-#if V8_TARGET_LITTLE_ENDIAN
- Node* hi_byte_offset = Int32Constant(operand_offset + 1);
- Node* lo_byte_offset = Int32Constant(operand_offset);
-#elif V8_TARGET_BIG_ENDIAN
- Node* hi_byte_offset = Int32Constant(operand_offset);
- Node* lo_byte_offset = Int32Constant(operand_offset + 1);
-#else
-#error "Unknown Architecture"
-#endif
- Node* hi_byte =
- raw_assembler_->Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), hi_byte_offset));
- Node* lo_byte =
- raw_assembler_->Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), lo_byte_offset));
- hi_byte = raw_assembler_->Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
- load = raw_assembler_->Word32Or(hi_byte, lo_byte);
- }
-
-  // Ensure that we sign-extend to full pointer size.
- if (kPointerSize == 8) {
- load = raw_assembler_->ChangeInt32ToInt64(load);
- }
- return load;
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
- switch (interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index)) {
- case interpreter::OperandSize::kByte:
- DCHECK_EQ(
- interpreter::OperandType::kCount8,
- interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperand(operand_index);
- case interpreter::OperandSize::kShort:
- DCHECK_EQ(
- interpreter::OperandType::kCount16,
- interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperandShort(operand_index);
- default:
- UNREACHABLE();
- return nullptr;
- }
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
- DCHECK_EQ(interpreter::OperandType::kImm8,
- interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperandSignExtended(operand_index);
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
- switch (interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index)) {
- case interpreter::OperandSize::kByte:
- DCHECK_EQ(
- interpreter::OperandType::kIdx8,
- interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperand(operand_index);
- case interpreter::OperandSize::kShort:
- DCHECK_EQ(
- interpreter::OperandType::kIdx16,
- interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
- return BytecodeOperandShort(operand_index);
- default:
- UNREACHABLE();
- return nullptr;
- }
-}
-
-
-Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
- switch (interpreter::Bytecodes::GetOperandType(bytecode_, operand_index)) {
- case interpreter::OperandType::kReg8:
- case interpreter::OperandType::kRegPair8:
- case interpreter::OperandType::kMaybeReg8:
- DCHECK_EQ(
- interpreter::OperandSize::kByte,
- interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
- return BytecodeOperandSignExtended(operand_index);
- case interpreter::OperandType::kReg16:
- DCHECK_EQ(
- interpreter::OperandSize::kShort,
- interpreter::Bytecodes::GetOperandSize(bytecode_, operand_index));
- return BytecodeOperandShortSignExtended(operand_index);
- default:
- UNREACHABLE();
- return nullptr;
- }
-}
-
-
-Node* InterpreterAssembler::Int32Constant(int value) {
- return raw_assembler_->Int32Constant(value);
-}
-
-
-Node* InterpreterAssembler::IntPtrConstant(intptr_t value) {
- return raw_assembler_->IntPtrConstant(value);
-}
-
-
-Node* InterpreterAssembler::NumberConstant(double value) {
- return raw_assembler_->NumberConstant(value);
-}
-
-
-Node* InterpreterAssembler::HeapConstant(Handle<HeapObject> object) {
- return raw_assembler_->HeapConstant(object);
-}
-
-
-Node* InterpreterAssembler::BooleanConstant(bool value) {
- return raw_assembler_->BooleanConstant(value);
-}
-
-
-Node* InterpreterAssembler::SmiShiftBitsConstant() {
- return Int32Constant(kSmiShiftSize + kSmiTagSize);
-}
-
-
-Node* InterpreterAssembler::SmiTag(Node* value) {
- return raw_assembler_->WordShl(value, SmiShiftBitsConstant());
-}
-
-
-Node* InterpreterAssembler::SmiUntag(Node* value) {
- return raw_assembler_->WordSar(value, SmiShiftBitsConstant());
-}
-
-
-Node* InterpreterAssembler::IntPtrAdd(Node* a, Node* b) {
- return raw_assembler_->IntPtrAdd(a, b);
-}
-
-
-Node* InterpreterAssembler::IntPtrSub(Node* a, Node* b) {
- return raw_assembler_->IntPtrSub(a, b);
-}
-
-
-Node* InterpreterAssembler::WordShl(Node* value, int shift) {
- return raw_assembler_->WordShl(value, Int32Constant(shift));
-}
-
-
-Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
- Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
- BytecodeArray::kConstantPoolOffset);
- Node* entry_offset =
- IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
- WordShl(index, kPointerSizeLog2));
- return raw_assembler_->Load(MachineType::AnyTagged(), constant_pool,
- entry_offset);
-}
-
-
-Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
- int index) {
- Node* entry_offset =
- IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
- WordShl(Int32Constant(index), kPointerSizeLog2));
- return raw_assembler_->Load(MachineType::AnyTagged(), fixed_array,
- entry_offset);
-}
-
-
-Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
- return raw_assembler_->Load(MachineType::AnyTagged(), object,
- IntPtrConstant(offset - kHeapObjectTag));
-}
-
-
-Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
- return raw_assembler_->Load(MachineType::AnyTagged(), context,
- IntPtrConstant(Context::SlotOffset(slot_index)));
-}
-
-
-Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
- Node* offset =
- IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
- Int32Constant(Context::kHeaderSize - kHeapObjectTag));
- return raw_assembler_->Load(MachineType::AnyTagged(), context, offset);
-}
-
-
-Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
- Node* value) {
- Node* offset =
- IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
- Int32Constant(Context::kHeaderSize - kHeapObjectTag));
- return raw_assembler_->Store(MachineRepresentation::kTagged, context, offset,
- value, kFullWriteBarrier);
-}
-
-
-Node* InterpreterAssembler::LoadTypeFeedbackVector() {
- Node* function = raw_assembler_->Load(
- MachineType::AnyTagged(), RegisterFileRawPointer(),
- IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
- Node* shared_info =
- LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
- Node* vector =
- LoadObjectField(shared_info, SharedFunctionInfo::kFeedbackVectorOffset);
- return vector;
-}
-
-
-Node* InterpreterAssembler::Projection(int index, Node* node) {
- return raw_assembler_->Projection(index, node);
-}
-
-
-Node* InterpreterAssembler::CallConstruct(Node* new_target, Node* constructor,
- Node* first_arg, Node* arg_count) {
- Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(isolate());
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
-
- Node* code_target = HeapConstant(callable.code());
-
- Node** args = zone()->NewArray<Node*>(5);
- args[0] = arg_count;
- args[1] = new_target;
- args[2] = constructor;
- args[3] = first_arg;
- args[4] = GetContext();
-
- return CallN(descriptor, code_target, args);
-}
-
-
-void InterpreterAssembler::CallPrologue() {
- StoreRegister(SmiTag(bytecode_offset_),
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
-}
-
-
-void InterpreterAssembler::CallEpilogue() {
- // Restore the bytecode offset from the stack frame.
- bytecode_offset_ = SmiUntag(LoadRegister(
- InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
-}
-
-
-Node* InterpreterAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
- Node** args) {
- CallPrologue();
-
- Node* stack_pointer_before_call = nullptr;
- if (FLAG_debug_code) {
- stack_pointer_before_call = raw_assembler_->LoadStackPointer();
- }
- Node* return_val = raw_assembler_->CallN(descriptor, code_target, args);
- if (FLAG_debug_code) {
- Node* stack_pointer_after_call = raw_assembler_->LoadStackPointer();
- AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
- kUnexpectedStackPointer);
- }
-
- CallEpilogue();
- return return_val;
-}
-
-
-Node* InterpreterAssembler::CallJS(Node* function, Node* first_arg,
- Node* arg_count) {
- Callable callable = CodeFactory::InterpreterPushArgsAndCall(isolate());
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags);
-
- Node* code_target = HeapConstant(callable.code());
-
- Node** args = zone()->NewArray<Node*>(4);
- args[0] = arg_count;
- args[1] = first_arg;
- args[2] = function;
- args[3] = GetContext();
-
- return CallN(descriptor, code_target, args);
-}
-
-
-Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
- Node* target, Node** args) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, 0, CallDescriptor::kNoFlags);
- return CallN(call_descriptor, target, args);
-}
-
-
-Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
- Node* target, Node* arg1, Node* arg2,
- Node* arg3) {
- Node** args = zone()->NewArray<Node*>(4);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = GetContext();
- return CallIC(descriptor, target, args);
-}
-
-
-Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
- Node* target, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4) {
- Node** args = zone()->NewArray<Node*>(5);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = GetContext();
- return CallIC(descriptor, target, args);
-}
-
-
-Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
- Node* target, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* arg5) {
- Node** args = zone()->NewArray<Node*>(6);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = arg5;
- args[5] = GetContext();
- return CallIC(descriptor, target, args);
-}
-
-
-Node* InterpreterAssembler::CallRuntime(Node* function_id, Node* first_arg,
- Node* arg_count, int result_size) {
- Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), 0, CallDescriptor::kNoFlags,
- Operator::kNoProperties, MachineType::AnyTagged(), result_size);
- Node* code_target = HeapConstant(callable.code());
-
- // Get the function entry from the function id.
- Node* function_table = raw_assembler_->ExternalConstant(
- ExternalReference::runtime_function_table_address(isolate()));
- Node* function_offset = raw_assembler_->Int32Mul(
- function_id, Int32Constant(sizeof(Runtime::Function)));
- Node* function = IntPtrAdd(function_table, function_offset);
- Node* function_entry =
- raw_assembler_->Load(MachineType::Pointer(), function,
- Int32Constant(offsetof(Runtime::Function, entry)));
-
- Node** args = zone()->NewArray<Node*>(4);
- args[0] = arg_count;
- args[1] = first_arg;
- args[2] = function_entry;
- args[3] = GetContext();
-
- return CallN(descriptor, code_target, args);
-}
-
-
-Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* arg1) {
- CallPrologue();
- Node* return_val =
- raw_assembler_->CallRuntime1(function_id, arg1, GetContext());
- CallEpilogue();
- return return_val;
-}
-
-
-Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* arg1, Node* arg2) {
- CallPrologue();
- Node* return_val =
- raw_assembler_->CallRuntime2(function_id, arg1, arg2, GetContext());
- CallEpilogue();
- return return_val;
-}
-
-
-Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4) {
- CallPrologue();
- Node* return_val = raw_assembler_->CallRuntime4(function_id, arg1, arg2, arg3,
- arg4, GetContext());
- CallEpilogue();
- return return_val;
-}
-
-
-void InterpreterAssembler::Return() {
- Node* exit_trampoline_code_object =
- HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
-  // If the order of the parameters changes, you need to change the call
-  // signature below.
- STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
- STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
- STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
- STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
- STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
- STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
- Node* args[] = { GetAccumulator(),
- RegisterFileRawPointer(),
- BytecodeOffset(),
- BytecodeArrayTaggedPointer(),
- DispatchTableRawPointer(),
- GetContext() };
- raw_assembler_->TailCallN(call_descriptor(), exit_trampoline_code_object,
- args);
-}
-
-
-Node* InterpreterAssembler::Advance(int delta) {
- return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
-}
-
-
-Node* InterpreterAssembler::Advance(Node* delta) {
- return raw_assembler_->IntPtrAdd(BytecodeOffset(), delta);
-}
-
-
-void InterpreterAssembler::Jump(Node* delta) { DispatchTo(Advance(delta)); }
-
-
-void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
- RawMachineLabel match, no_match;
- Node* condition = raw_assembler_->WordEqual(lhs, rhs);
- raw_assembler_->Branch(condition, &match, &no_match);
- raw_assembler_->Bind(&match);
- DispatchTo(Advance(delta));
- raw_assembler_->Bind(&no_match);
- Dispatch();
-}
-
-
-void InterpreterAssembler::Dispatch() {
- DispatchTo(Advance(interpreter::Bytecodes::Size(bytecode_)));
-}
-
-
-void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
- Node* target_bytecode = raw_assembler_->Load(
- MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
-
- // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
- // from code object on every dispatch.
- Node* target_code_object = raw_assembler_->Load(
- MachineType::Pointer(), DispatchTableRawPointer(),
- raw_assembler_->Word32Shl(target_bytecode,
- Int32Constant(kPointerSizeLog2)));
-
-  // If the order of the parameters changes, you need to change the call
-  // signature below.
- STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
- STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
- STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
- STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
- STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
- STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
- Node* args[] = { GetAccumulator(),
- RegisterFileRawPointer(),
- new_bytecode_offset,
- BytecodeArrayTaggedPointer(),
- DispatchTableRawPointer(),
- GetContext() };
- raw_assembler_->TailCallN(call_descriptor(), target_code_object, args);
-}
-
-
-void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
- Node* abort_id = SmiTag(Int32Constant(bailout_reason));
- Node* ret_value = CallRuntime(Runtime::kAbort, abort_id);
-  // Unreachable, but keeps TurboFan happy.
- raw_assembler_->Return(ret_value);
-}
-
-
-void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
- BailoutReason bailout_reason) {
- RawMachineLabel match, no_match;
- Node* condition = raw_assembler_->WordEqual(lhs, rhs);
- raw_assembler_->Branch(condition, &match, &no_match);
- raw_assembler_->Bind(&no_match);
- Abort(bailout_reason);
- raw_assembler_->Bind(&match);
-}
-
-
-// static
-bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
- return false;
-#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
- return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
-#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
- return true;
-#else
-#error "Unknown Architecture"
-#endif
-}
-
-
-// RawMachineAssembler delegate helpers:
-Isolate* InterpreterAssembler::isolate() { return raw_assembler_->isolate(); }
-
-
-Graph* InterpreterAssembler::graph() { return raw_assembler_->graph(); }
-
-
-CallDescriptor* InterpreterAssembler::call_descriptor() const {
- return raw_assembler_->call_descriptor();
-}
-
-
-Zone* InterpreterAssembler::zone() { return raw_assembler_->zone(); }
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
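A detail worth noting in the deleted BytecodeOperandShort above: on targets without unaligned loads, the 16-bit operand was assembled from two single-byte loads, shifting the second or the first byte by kBitsPerByte depending on endianness. A standalone check of that assembly (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    uint16_t ReadU16(const uint8_t* bytecode, int offset, bool little_endian) {
      uint32_t first = bytecode[offset];
      uint32_t second = bytecode[offset + 1];
      // kBitsPerByte == 8 in the V8 sources.
      return little_endian ? static_cast<uint16_t>((second << 8) | first)
                           : static_cast<uint16_t>((first << 8) | second);
    }

    int main() {
      const uint8_t code[] = {0x34, 0x12};
      assert(ReadU16(code, 0, /*little_endian=*/true) == 0x1234);
      assert(ReadU16(code, 0, /*little_endian=*/false) == 0x3412);
    }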
diff --git a/deps/v8/src/compiler/interpreter-assembler.h b/deps/v8/src/compiler/interpreter-assembler.h
deleted file mode 100644
index fb79d3eaa2..0000000000
--- a/deps/v8/src/compiler/interpreter-assembler.h
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_INTERPRETER_ASSEMBLER_H_
-#define V8_COMPILER_INTERPRETER_ASSEMBLER_H_
-
-// Clients of this interface shouldn't depend on lots of compiler internals.
-// Do not include anything from src/compiler here!
-#include "src/allocation.h"
-#include "src/base/smart-pointers.h"
-#include "src/builtins.h"
-#include "src/frames.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/runtime/runtime.h"
-
-namespace v8 {
-namespace internal {
-
-class CallInterfaceDescriptor;
-class Isolate;
-class Zone;
-
-namespace compiler {
-
-class CallDescriptor;
-class Graph;
-class Node;
-class Operator;
-class RawMachineAssembler;
-class Schedule;
-
-class InterpreterAssembler {
- public:
- InterpreterAssembler(Isolate* isolate, Zone* zone,
- interpreter::Bytecode bytecode);
- virtual ~InterpreterAssembler();
-
- Handle<Code> GenerateCode();
-
- // Returns the count immediate for bytecode operand |operand_index| in the
- // current bytecode.
- Node* BytecodeOperandCount(int operand_index);
- // Returns the index immediate for bytecode operand |operand_index| in the
- // current bytecode.
- Node* BytecodeOperandIdx(int operand_index);
- // Returns the Imm8 immediate for bytecode operand |operand_index| in the
- // current bytecode.
- Node* BytecodeOperandImm(int operand_index);
- // Returns the register index for bytecode operand |operand_index| in the
- // current bytecode.
- Node* BytecodeOperandReg(int operand_index);
-
- // Accumulator.
- Node* GetAccumulator();
- void SetAccumulator(Node* value);
-
- // Context.
- Node* GetContext();
- void SetContext(Node* value);
-
- // Loads from and stores to the interpreter register file.
- Node* LoadRegister(int offset);
- Node* LoadRegister(interpreter::Register reg);
- Node* LoadRegister(Node* reg_index);
- Node* StoreRegister(Node* value, int offset);
- Node* StoreRegister(Node* value, interpreter::Register reg);
- Node* StoreRegister(Node* value, Node* reg_index);
-
- // Returns the next consecutive register.
- Node* NextRegister(Node* reg_index);
-
- // Returns the location in memory of the register |reg_index| in the
- // interpreter register file.
- Node* RegisterLocation(Node* reg_index);
-
- // Constants.
- Node* Int32Constant(int value);
- Node* IntPtrConstant(intptr_t value);
- Node* NumberConstant(double value);
- Node* HeapConstant(Handle<HeapObject> object);
- Node* BooleanConstant(bool value);
-
- // Tag and untag Smi values.
- Node* SmiTag(Node* value);
- Node* SmiUntag(Node* value);
-
- // Basic arithmetic operations.
- Node* IntPtrAdd(Node* a, Node* b);
- Node* IntPtrSub(Node* a, Node* b);
- Node* WordShl(Node* value, int shift);
-
- // Load constant at |index| in the constant pool.
- Node* LoadConstantPoolEntry(Node* index);
-
- // Load an element from a fixed array on the heap.
- Node* LoadFixedArrayElement(Node* fixed_array, int index);
-
- // Load a field from an object on the heap.
- Node* LoadObjectField(Node* object, int offset);
-
- // Load |slot_index| from |context|.
- Node* LoadContextSlot(Node* context, int slot_index);
- Node* LoadContextSlot(Node* context, Node* slot_index);
- // Stores |value| into |slot_index| of |context|.
- Node* StoreContextSlot(Node* context, Node* slot_index, Node* value);
-
- // Load the TypeFeedbackVector for the current function.
- Node* LoadTypeFeedbackVector();
-
-  // Projects the output value at index |index|.
- Node* Projection(int index, Node* node);
-
- // Call constructor |constructor| with |arg_count| arguments (not
- // including receiver) and the first argument located at
- // |first_arg|. The |new_target| is the same as the
- // |constructor| for the new keyword, but differs for the super
- // keyword.
- Node* CallConstruct(Node* new_target, Node* constructor, Node* first_arg,
- Node* arg_count);
-
- // Call JSFunction or Callable |function| with |arg_count|
- // arguments (not including receiver) and the first argument
- // located at |first_arg|.
- Node* CallJS(Node* function, Node* first_arg, Node* arg_count);
-
- // Call an IC code stub.
- Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
- Node* arg2, Node* arg3);
- Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4);
- Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4, Node* arg5);
-
- // Call runtime function.
- Node* CallRuntime(Node* function_id, Node* first_arg, Node* arg_count,
-                    int result_size = 1);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4);
-
- // Jump relative to the current bytecode by |jump_offset|.
- void Jump(Node* jump_offset);
-
- // Jump relative to the current bytecode by |jump_offset| if the
- // word values |lhs| and |rhs| are equal.
- void JumpIfWordEqual(Node* lhs, Node* rhs, Node* jump_offset);
-
- // Returns from the function.
- void Return();
-
- // Dispatch to the bytecode.
- void Dispatch();
-
- // Abort with the given bailout reason.
- void Abort(BailoutReason bailout_reason);
-
- protected:
- static bool TargetSupportsUnalignedAccess();
-
- // Protected helpers (for testing) which delegate to RawMachineAssembler.
- CallDescriptor* call_descriptor() const;
- Graph* graph();
-
- private:
- // Returns a raw pointer to start of the register file on the stack.
- Node* RegisterFileRawPointer();
- // Returns a tagged pointer to the current function's BytecodeArray object.
- Node* BytecodeArrayTaggedPointer();
- // Returns the offset from the BytecodeArrayPointer of the current bytecode.
- Node* BytecodeOffset();
- // Returns a raw pointer to first entry in the interpreter dispatch table.
- Node* DispatchTableRawPointer();
-
-  // Saves and restores the interpreter bytecode offset to/from the
-  // interpreter stack frame when performing a call.
- void CallPrologue();
- void CallEpilogue();
-
- // Returns the offset of register |index| relative to RegisterFilePointer().
- Node* RegisterFrameOffset(Node* index);
-
- Node* SmiShiftBitsConstant();
- Node* BytecodeOperand(int operand_index);
- Node* BytecodeOperandSignExtended(int operand_index);
- Node* BytecodeOperandShort(int operand_index);
- Node* BytecodeOperandShortSignExtended(int operand_index);
-
- Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
- Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node** args);
-
- // Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
- // update BytecodeOffset() itself.
- Node* Advance(int delta);
- Node* Advance(Node* delta);
-
- // Starts next instruction dispatch at |new_bytecode_offset|.
- void DispatchTo(Node* new_bytecode_offset);
-
- // Abort operations for debug code.
- void AbortIfWordNotEqual(Node* lhs, Node* rhs, BailoutReason bailout_reason);
-
- // Private helpers which delegate to RawMachineAssembler.
- Isolate* isolate();
- Zone* zone();
-
- interpreter::Bytecode bytecode_;
- base::SmartPointer<RawMachineAssembler> raw_assembler_;
-
- Node* accumulator_;
- Node* bytecode_offset_;
- Node* context_;
-
- bool code_generated_;
-
- DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_INTERPRETER_ASSEMBLER_H_
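The deleted SmiTag/SmiUntag above are plain shifts: on a 64-bit target, kSmiShiftSize + kSmiTagSize is 31 + 1 = 32, so the payload of a small integer sits in the upper word of the tagged value. A hedged sketch of that arithmetic (not V8 code; assumes the usual arithmetic right shift for signed values):

    #include <cassert>
    #include <cstdint>

    constexpr int kSmiShiftBits = 31 + 1;  // kSmiShiftSize + kSmiTagSize on x64

    int64_t SmiTag(int64_t value) {
      // Shift through the unsigned type to avoid UB on negative payloads.
      return static_cast<int64_t>(static_cast<uint64_t>(value) << kSmiShiftBits);
    }

    int64_t SmiUntag(int64_t word) {
      return word >> kSmiShiftBits;  // arithmetic shift restores the sign
    }

    int main() {
      assert(SmiUntag(SmiTag(42)) == 42);
      assert(SmiUntag(SmiTag(-7)) == -7);
    }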
diff --git a/deps/v8/src/compiler/ir-operations.txt b/deps/v8/src/compiler/ir-operations.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/deps/v8/src/compiler/ir-operations.txt
+++ /dev/null
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index a7a7da57cd..3023031c2f 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -8,6 +8,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
+#include "src/type-cache.h"
#include "src/types.h"
namespace v8 {
@@ -85,10 +86,10 @@ class JSCallReduction {
Node* node_;
};
-
JSBuiltinReducer::JSBuiltinReducer(Editor* editor, JSGraph* jsgraph)
- : AdvancedReducer(editor), jsgraph_(jsgraph) {}
-
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ type_cache_(TypeCache::Get()) {}
// ECMA-262, section 15.8.2.11.
Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
@@ -141,6 +142,31 @@ Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
return NoChange();
}
+// ES6 section 20.2.2.28 Math.round ( x )
+Reduction JSBuiltinReducer::ReduceMathRound(Node* node) {
+ JSCallReduction r(node);
+ if (r.InputsMatchOne(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+ // Math.round(a:integer \/ -0 \/ NaN) -> a
+ return Replace(r.left());
+ }
+ if (r.InputsMatchOne(Type::Number()) &&
+ machine()->Float64RoundUp().IsSupported()) {
+ // Math.round(a:number) -> Select(Float64LessThan(#0.5, Float64Sub(i, a)),
+ // Float64Sub(i, #1.0), i)
+ // where i = Float64RoundUp(a)
+ Node* value = r.left();
+ Node* integer = graph()->NewNode(machine()->Float64RoundUp().op(), value);
+ Node* real = graph()->NewNode(machine()->Float64Sub(), integer, value);
+ return Replace(graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64),
+ graph()->NewNode(machine()->Float64LessThan(),
+ jsgraph()->Float64Constant(0.5), real),
+ graph()->NewNode(machine()->Float64Sub(), integer,
+ jsgraph()->Float64Constant(1.0)),
+ integer));
+ }
+ return NoChange();
+}
Reduction JSBuiltinReducer::Reduce(Node* node) {
Reduction reduction = NoChange();
@@ -158,6 +184,9 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kMathFround:
reduction = ReduceMathFround(node);
break;
+ case kMathRound:
+ reduction = ReduceMathRound(node);
+ break;
default:
break;
}
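The Select-based rewrite in ReduceMathRound above computes i = Float64RoundUp(a) and yields i - 1 when 0.5 < i - a, else i. A quick standalone check that this matches JavaScript Math.round semantics, where halfway cases round toward +infinity (worked example only, not V8 code; NaN and -0 are already handled by the integer-or-minus-zero-or-NaN fast path above):

    #include <cassert>
    #include <cmath>

    double MathRound(double a) {
      double i = std::ceil(a);          // Float64RoundUp(a)
      double real = i - a;              // Float64Sub(i, a)
      return 0.5 < real ? i - 1.0 : i;  // Select(Float64LessThan(0.5, real), ...)
    }

    int main() {
      assert(MathRound(2.3) == 2.0);
      assert(MathRound(2.5) == 3.0);    // halfway rounds toward +infinity
      assert(MathRound(-2.5) == -2.0);
      assert(MathRound(-2.7) == -3.0);
    }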
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index cfacdc1e8c..b64b33565d 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -9,6 +9,10 @@
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class TypeCache;
+
namespace compiler {
// Forward declarations.
@@ -30,6 +34,7 @@ class JSBuiltinReducer final : public AdvancedReducer {
Reduction ReduceMathMax(Node* node);
Reduction ReduceMathImul(Node* node);
Reduction ReduceMathFround(Node* node);
+ Reduction ReduceMathRound(Node* node);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
@@ -38,7 +43,8 @@ class JSBuiltinReducer final : public AdvancedReducer {
MachineOperatorBuilder* machine() const;
SimplifiedOperatorBuilder* simplified() const;
- JSGraph* jsgraph_;
+ JSGraph* const jsgraph_;
+ TypeCache const& type_cache_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index a15d6fd6fd..34217e7d9a 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -129,8 +129,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
// Get to the actual frame state from which to extract the arguments;
// we can only optimize this in case the {node} was already inlined into
// some other function (and same for the {arg_array}).
- CreateArgumentsParameters const& p =
- CreateArgumentsParametersOf(arg_array->op());
+ CreateArgumentsType type = CreateArgumentsTypeOf(arg_array->op());
Node* frame_state = NodeProperties::GetFrameStateInput(arg_array, 0);
Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
@@ -140,17 +139,22 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
frame_state = outer_state;
}
FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
- if (p.type() == CreateArgumentsParameters::kMappedArguments) {
+ int start_index = 0;
+ if (type == CreateArgumentsType::kMappedArguments) {
// Mapped arguments (sloppy mode) cannot be handled if they are aliased.
Handle<SharedFunctionInfo> shared;
if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
if (shared->internal_formal_parameter_count() != 0) return NoChange();
+ } else if (type == CreateArgumentsType::kRestParameter) {
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ start_index = shared->internal_formal_parameter_count();
}
// Remove the argArray input from the {node}.
node->RemoveInput(static_cast<int>(--arity));
// Add the actual parameters to the {node}, skipping the receiver.
Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
- for (int i = p.start_index() + 1; i < state_info.parameter_count(); ++i) {
+ for (int i = start_index + 1; i < state_info.parameter_count(); ++i) {
node->InsertInput(graph()->zone(), static_cast<int>(arity),
parameters->InputAt(i));
++arity;
@@ -163,8 +167,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
}
// Change {node} to the new {JSCallFunction} operator.
NodeProperties::ChangeOp(
- node, javascript()->CallFunction(arity, p.language_mode(),
- CallCountFeedback(p.feedback()),
+ node, javascript()->CallFunction(arity, CallCountFeedback(p.feedback()),
convert_mode, p.tail_call_mode()));
// Change context of {node} to the Function.prototype.apply context,
// to ensure any exception is thrown in the correct context.
@@ -204,8 +207,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
--arity;
}
NodeProperties::ChangeOp(
- node, javascript()->CallFunction(arity, p.language_mode(),
- CallCountFeedback(p.feedback()),
+ node, javascript()->CallFunction(arity, CallCountFeedback(p.feedback()),
convert_mode, p.tail_call_mode()));
// Try to further reduce the JSCallFunction {node}.
Reduction const reduction = ReduceJSCallFunction(node);
@@ -287,10 +289,9 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
jsgraph()->Constant(handle(bound_arguments->get(i), isolate())));
arity++;
}
- NodeProperties::ChangeOp(
- node, javascript()->CallFunction(arity, p.language_mode(),
- CallCountFeedback(p.feedback()),
- convert_mode, p.tail_call_mode()));
+ NodeProperties::ChangeOp(node, javascript()->CallFunction(
+ arity, CallCountFeedback(p.feedback()),
+ convert_mode, p.tail_call_mode()));
// Try to further reduce the JSCallFunction {node}.
Reduction const reduction = ReduceJSCallFunction(node);
return reduction.Changed() ? reduction : Changed(node);
@@ -336,6 +337,7 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
frame_state, effect, if_false);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
control = graph()->NewNode(common()->IfTrue(), branch);
// Turn the {node} into a {JSCreateArray} call.
@@ -361,6 +363,7 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
frame_state, effect, if_false);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
control = graph()->NewNode(common()->IfTrue(), branch);
// Specialize the JSCallFunction node to the {target_function}.
@@ -404,8 +407,7 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
NodeProperties::RemoveFrameStateInput(node, 0);
NodeProperties::ReplaceValueInputs(node, target);
NodeProperties::ChangeOp(
- node,
- javascript()->CallRuntime(Runtime::kThrowCalledNonCallable, 1));
+ node, javascript()->CallRuntime(Runtime::kThrowCalledNonCallable));
return Changed(node);
}
@@ -479,6 +481,7 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
frame_state, effect, if_false);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
control = graph()->NewNode(common()->IfTrue(), branch);
// Turn the {node} into a {JSCreateArray} call.
@@ -510,6 +513,7 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
frame_state, effect, if_false);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
control = graph()->NewNode(common()->IfTrue(), branch);
// Specialize the JSCallConstruct node to the {target_function}.
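The new start_index logic in ReduceFunctionPrototypeApply above decides which frame-state parameters get forwarded: slot 0 holds the receiver, and for a rest parameter the named formals are skipped as well. A small illustration of that indexing (assumed shapes, not V8 code):

    #include <cassert>
    #include <string>
    #include <vector>

    std::vector<std::string> ForwardedArgs(
        const std::vector<std::string>& frame_params,  // receiver + actual args
        int formal_parameter_count, bool is_rest_parameter) {
      int start_index = is_rest_parameter ? formal_parameter_count : 0;
      // Skip the receiver (slot 0) plus, for rest parameters, the named formals.
      return {frame_params.begin() + start_index + 1, frame_params.end()};
    }

    int main() {
      // function f(x, ...rest) called as f(1, 2, 3): the frame state holds
      // [this, 1, 2, 3], and only the rest elements are forwarded.
      std::vector<std::string> frame = {"this", "1", "2", "3"};
      auto rest = ForwardedArgs(frame, /*formal_parameter_count=*/1, true);
      assert((rest == std::vector<std::string>{"2", "3"}));
    }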
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 9ffae152ac..f40f05d852 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -20,7 +20,7 @@ class JSOperatorBuilder;
// Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
// which might allow inlining or other optimizations to be performed afterwards.
-class JSCallReducer final : public Reducer {
+class JSCallReducer final : public AdvancedReducer {
public:
// Flags that control the mode of operation.
enum Flag {
@@ -29,9 +29,12 @@ class JSCallReducer final : public Reducer {
};
typedef base::Flags<Flag> Flags;
- JSCallReducer(JSGraph* jsgraph, Flags flags,
+ JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
MaybeHandle<Context> native_context)
- : jsgraph_(jsgraph), flags_(flags), native_context_(native_context) {}
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ flags_(flags),
+ native_context_(native_context) {}
Reduction Reduce(Node* node) final;
diff --git a/deps/v8/src/compiler/js-context-relaxation.cc b/deps/v8/src/compiler/js-context-relaxation.cc
deleted file mode 100644
index 0ca3c0c9d3..0000000000
--- a/deps/v8/src/compiler/js-context-relaxation.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/frame-states.h"
-#include "src/compiler/js-context-relaxation.h"
-#include "src/compiler/js-operator.h"
-#include "src/compiler/node.h"
-#include "src/compiler/node-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-Reduction JSContextRelaxation::Reduce(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kJSCallFunction:
- case IrOpcode::kJSToNumber: {
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
- Node* outer_frame = frame_state;
- Node* original_context = NodeProperties::GetContextInput(node);
- Node* candidate_new_context = original_context;
- do {
- FrameStateInfo frame_state_info(
- OpParameter<FrameStateInfo>(outer_frame->op()));
- const FrameStateFunctionInfo* function_info =
- frame_state_info.function_info();
- if (function_info == nullptr ||
- (function_info->context_calling_mode() ==
- CALL_CHANGES_NATIVE_CONTEXT)) {
- break;
- }
- candidate_new_context = outer_frame->InputAt(kFrameStateContextInput);
- outer_frame = outer_frame->InputAt(kFrameStateOuterStateInput);
- } while (outer_frame->opcode() == IrOpcode::kFrameState);
-
- while (true) {
- switch (candidate_new_context->opcode()) {
- case IrOpcode::kParameter:
- case IrOpcode::kJSCreateModuleContext:
- case IrOpcode::kJSCreateScriptContext:
- if (candidate_new_context != original_context) {
- NodeProperties::ReplaceContextInput(node, candidate_new_context);
- return Changed(node);
- } else {
- return NoChange();
- }
- case IrOpcode::kJSCreateCatchContext:
- case IrOpcode::kJSCreateWithContext:
- case IrOpcode::kJSCreateBlockContext:
- candidate_new_context =
- NodeProperties::GetContextInput(candidate_new_context);
- break;
- default:
- return NoChange();
- }
- }
- }
- default:
- break;
- }
- return NoChange();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/js-context-relaxation.h b/deps/v8/src/compiler/js-context-relaxation.h
deleted file mode 100644
index 4320e92391..0000000000
--- a/deps/v8/src/compiler/js-context-relaxation.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_JS_CONTEXT_RELAXATION_H_
-#define V8_COMPILER_JS_CONTEXT_RELAXATION_H_
-
-#include "src/compiler/graph-reducer.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Ensures that operations that only need to access the native context use the
-// outer-most context rather than the specific context given by the AST graph
-// builder. This makes it possible to use these operations with context
-// specialization (e.g. for generating stubs) without forcing inner contexts to
-// be embedded in generated code thus causing leaks and potentially using the
-// wrong native context (i.e. stubs are shared between native contexts).
-class JSContextRelaxation final : public Reducer {
- public:
- JSContextRelaxation() {}
- ~JSContextRelaxation() final {}
-
- Reduction Reduce(Node* node) final;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_JS_CONTEXT_RELAXATION_H_
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
new file mode 100644
index 0000000000..df5c8d07df
--- /dev/null
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -0,0 +1,1096 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-create-lowering.h"
+
+#include "src/allocation-site-scopes.h"
+#include "src/code-factory.h"
+#include "src/compilation-dependencies.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/state-values-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+// A helper class to construct inline allocations on the simplified operator
+// level. This keeps track of the effect chain for initial stores on a newly
+// allocated object and also provides helpers for commonly allocated objects.
+class AllocationBuilder final {
+ public:
+ AllocationBuilder(JSGraph* jsgraph, Node* effect, Node* control)
+ : jsgraph_(jsgraph),
+ allocation_(nullptr),
+ effect_(effect),
+ control_(control) {}
+
+ // Primitive allocation of static size.
+ void Allocate(int size, PretenureFlag pretenure = NOT_TENURED) {
+ effect_ = graph()->NewNode(common()->BeginRegion(), effect_);
+ allocation_ =
+ graph()->NewNode(simplified()->Allocate(pretenure),
+ jsgraph()->Constant(size), effect_, control_);
+ effect_ = allocation_;
+ }
+
+ // Primitive store into a field.
+ void Store(const FieldAccess& access, Node* value) {
+ effect_ = graph()->NewNode(simplified()->StoreField(access), allocation_,
+ value, effect_, control_);
+ }
+
+ // Primitive store into an element.
+ void Store(ElementAccess const& access, Node* index, Node* value) {
+ effect_ = graph()->NewNode(simplified()->StoreElement(access), allocation_,
+ index, value, effect_, control_);
+ }
+
+ // Compound allocation of a FixedArray.
+ void AllocateArray(int length, Handle<Map> map,
+ PretenureFlag pretenure = NOT_TENURED) {
+ DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
+ map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
+ int size = (map->instance_type() == FIXED_ARRAY_TYPE)
+ ? FixedArray::SizeFor(length)
+ : FixedDoubleArray::SizeFor(length);
+ Allocate(size, pretenure);
+ Store(AccessBuilder::ForMap(), map);
+ Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
+ }
+
+ // Compound store of a constant into a field.
+ void Store(const FieldAccess& access, Handle<Object> value) {
+ Store(access, jsgraph()->Constant(value));
+ }
+
+ void FinishAndChange(Node* node) {
+ NodeProperties::SetType(allocation_, NodeProperties::GetType(node));
+ node->ReplaceInput(0, allocation_);
+ node->ReplaceInput(1, effect_);
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, common()->FinishRegion());
+ }
+
+ Node* Finish() {
+ return graph()->NewNode(common()->FinishRegion(), allocation_, effect_);
+ }
+
+ protected:
+ JSGraph* jsgraph() { return jsgraph_; }
+ Graph* graph() { return jsgraph_->graph(); }
+ CommonOperatorBuilder* common() { return jsgraph_->common(); }
+ SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
+
+ private:
+ JSGraph* const jsgraph_;
+ Node* allocation_;
+ Node* effect_;
+ Node* control_;
+};
+
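As its comment says, AllocationBuilder threads an effect chain through the initial stores: every Store consumes the current effect and becomes the new one, so the stores into a fresh object stay ordered after the Allocate. A minimal stand-in for that threading (illustrative, not V8 code):

    #include <cassert>
    #include <memory>
    #include <string>
    #include <vector>

    struct EffectNode {
      std::string op;
      const EffectNode* previous;  // nullptr at the head of the chain
    };

    class ChainBuilder {
     public:
      const EffectNode* Emit(const std::string& op) {
        nodes_.push_back(
            std::unique_ptr<EffectNode>(new EffectNode{op, effect_}));
        effect_ = nodes_.back().get();
        return effect_;
      }

     private:
      std::vector<std::unique_ptr<EffectNode>> nodes_;
      const EffectNode* effect_ = nullptr;  // mirrors AllocationBuilder::effect_
    };

    int main() {
      ChainBuilder b;
      b.Emit("BeginRegion");
      b.Emit("Allocate");
      const EffectNode* store = b.Emit("StoreField(Map)");
      assert(store->previous->op == "Allocate");  // store depends on Allocate
    }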
+// Retrieves the frame state holding actual argument values.
+Node* GetArgumentsFrameState(Node* frame_state) {
+ Node* const outer_state = NodeProperties::GetFrameStateInput(frame_state, 0);
+ FrameStateInfo outer_state_info = OpParameter<FrameStateInfo>(outer_state);
+ return outer_state_info.type() == FrameStateType::kArgumentsAdaptor
+ ? outer_state
+ : frame_state;
+}
+
+// Checks whether allocation using the given target and new.target can be
+// inlined.
+bool IsAllocationInlineable(Handle<JSFunction> target,
+ Handle<JSFunction> new_target) {
+ return new_target->has_initial_map() &&
+ new_target->initial_map()->constructor_or_backpointer() == *target;
+}
+
+// When initializing arrays, we'll unfold the loop if the number of
+// elements is known and below this limit.
+const int kElementLoopUnrollLimit = 16;
+
+// Limits up to which context allocations are inlined.
+const int kFunctionContextAllocationLimit = 16;
+const int kBlockContextAllocationLimit = 16;
+
+// Determines whether the given array or object literal boilerplate satisfies
+// all limits to be considered for fast deep-copying and computes the total
+// size of all objects that are part of the graph.
+bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
+ int* max_properties) {
+ DCHECK_GE(max_depth, 0);
+ DCHECK_GE(*max_properties, 0);
+
+ // Make sure the boilerplate map is not deprecated.
+ if (!JSObject::TryMigrateInstance(boilerplate)) return false;
+
+ // Check for too deep nesting.
+ if (max_depth == 0) return false;
+
+ // Check the elements.
+ Isolate* const isolate = boilerplate->GetIsolate();
+ Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
+ if (elements->length() > 0 &&
+ elements->map() != isolate->heap()->fixed_cow_array_map()) {
+ if (boilerplate->HasFastSmiOrObjectElements()) {
+ Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
+ int length = elements->length();
+ for (int i = 0; i < length; i++) {
+ if ((*max_properties)-- == 0) return false;
+ Handle<Object> value(fast_elements->get(i), isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ if (!IsFastLiteral(value_object, max_depth - 1, max_properties)) {
+ return false;
+ }
+ }
+ }
+ } else if (!boilerplate->HasFastDoubleElements()) {
+ return false;
+ }
+ }
+
+ // TODO(turbofan): Do we want to support out-of-object properties?
+ Handle<FixedArray> properties(boilerplate->properties(), isolate);
+ if (properties->length() > 0) return false;
+
+ // Check the in-object properties.
+ Handle<DescriptorArray> descriptors(
+ boilerplate->map()->instance_descriptors(), isolate);
+ int limit = boilerplate->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < limit; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.type() != DATA) continue;
+ if ((*max_properties)-- == 0) return false;
+ FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
+ if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
+ Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ if (!IsFastLiteral(value_object, max_depth - 1, max_properties)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+// Maximum depth and total number of elements and properties for literal
+// graphs to be considered for fast deep-copying.
+const int kMaxFastLiteralDepth = 3;
+const int kMaxFastLiteralProperties = 8;
+
+} // namespace
+
+Reduction JSCreateLowering::Reduce(Node* node) {
+ switch (node->opcode()) {
+ case IrOpcode::kJSCreate:
+ return ReduceJSCreate(node);
+ case IrOpcode::kJSCreateArguments:
+ return ReduceJSCreateArguments(node);
+ case IrOpcode::kJSCreateArray:
+ return ReduceJSCreateArray(node);
+ case IrOpcode::kJSCreateIterResultObject:
+ return ReduceJSCreateIterResultObject(node);
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject:
+ return ReduceJSCreateLiteral(node);
+ case IrOpcode::kJSCreateFunctionContext:
+ return ReduceJSCreateFunctionContext(node);
+ case IrOpcode::kJSCreateWithContext:
+ return ReduceJSCreateWithContext(node);
+ case IrOpcode::kJSCreateCatchContext:
+ return ReduceJSCreateCatchContext(node);
+ case IrOpcode::kJSCreateBlockContext:
+ return ReduceJSCreateBlockContext(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+Reduction JSCreateLowering::ReduceJSCreate(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreate, node->opcode());
+ Node* const target = NodeProperties::GetValueInput(node, 0);
+ Type* const target_type = NodeProperties::GetType(target);
+ Node* const new_target = NodeProperties::GetValueInput(node, 1);
+ Type* const new_target_type = NodeProperties::GetType(new_target);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ // Extract constructor and original constructor function.
+ if (target_type->IsConstant() &&
+ new_target_type->IsConstant() &&
+ new_target_type->AsConstant()->Value()->IsJSFunction()) {
+ Handle<JSFunction> constructor =
+ Handle<JSFunction>::cast(target_type->AsConstant()->Value());
+ Handle<JSFunction> original_constructor =
+ Handle<JSFunction>::cast(new_target_type->AsConstant()->Value());
+ DCHECK(constructor->IsConstructor());
+ DCHECK(original_constructor->IsConstructor());
+
+ // Check if we can inline the allocation.
+ if (IsAllocationInlineable(constructor, original_constructor)) {
+ // Force completion of inobject slack tracking before
+ // generating code to finalize the instance size.
+ original_constructor->CompleteInobjectSlackTrackingIfActive();
+
+ // Compute instance size from initial map of {original_constructor}.
+ Handle<Map> initial_map(original_constructor->initial_map(), isolate());
+ int const instance_size = initial_map->instance_size();
+
+ // Add a dependency on the {initial_map} to make sure that this code is
+ // deoptimized whenever the {initial_map} of the {original_constructor}
+ // changes.
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+
+ // Emit code to allocate the JSObject instance for the
+ // {original_constructor}.
+ AllocationBuilder a(jsgraph(), effect, graph()->start());
+ a.Allocate(instance_size);
+ a.Store(AccessBuilder::ForMap(), initial_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ jsgraph()->UndefinedConstant());
+ }
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
+Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
+ CreateArgumentsType type = CreateArgumentsTypeOf(node->op());
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+
+ // Use the FastNew{Sloppy,Strict}Arguments and FastNewRestParameter stubs
+ // for materializing both mapped and unmapped arguments objects, but only
+ // for non-inlined (i.e. outermost) frames.
+ if (outer_state->opcode() != IrOpcode::kFrameState) {
+ switch (type) {
+ case CreateArgumentsType::kMappedArguments: {
+ // TODO(mstarzinger): Duplicate parameters are not handled yet.
+ Handle<SharedFunctionInfo> shared_info;
+ if (!state_info.shared_info().ToHandle(&shared_info) ||
+ shared_info->has_duplicate_parameters()) {
+ return NoChange();
+ }
+ // TODO(bmeurer): Actually we don't need a frame state here.
+ Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ }
+ case CreateArgumentsType::kUnmappedArguments: {
+ // TODO(bmeurer): Actually we don't need a frame state here.
+ Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ }
+ case CreateArgumentsType::kRestParameter: {
+ // TODO(bmeurer): Actually we don't need a frame state here.
+ Callable callable = CodeFactory::FastNewRestParameter(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ }
+ }
+ UNREACHABLE();
+ } else if (outer_state->opcode() == IrOpcode::kFrameState) {
+ // Use inline allocation for all mapped arguments objects within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ if (type == CreateArgumentsType::kMappedArguments) {
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ Node* const callee = NodeProperties::GetValueInput(node, 0);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // TODO(mstarzinger): Duplicate parameters are not handled yet.
+ if (shared->has_duplicate_parameters()) return NoChange();
+ // Choose the correct frame state and frame state info depending on
+ // whether there conceptually is an arguments adaptor frame in the call
+ // chain.
+ Node* const args_state = GetArgumentsFrameState(frame_state);
+ FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ // Prepare element backing store to be used by arguments object.
+ bool has_aliased_arguments = false;
+ Node* const elements = AllocateAliasedArguments(
+ effect, control, args_state, context, shared, &has_aliased_arguments);
+ effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+ // Load the arguments object map from the current native context.
+ Node* const load_native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* const load_arguments_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForContextSlot(
+ has_aliased_arguments ? Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX
+ : Context::SLOPPY_ARGUMENTS_MAP_INDEX)),
+ load_native_context, effect, control);
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ int length = args_state_info.parameter_count() - 1; // Minus receiver.
+ STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kPointerSize);
+ a.Allocate(JSSloppyArgumentsObject::kSize);
+ a.Store(AccessBuilder::ForMap(), load_arguments_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
+ a.Store(AccessBuilder::ForArgumentsCallee(), callee);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ } else if (type == CreateArgumentsType::kUnmappedArguments) {
+ // Use inline allocation for all unmapped arguments objects within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Choose the correct frame state and frame state info depending on
+ // whether there conceptually is an arguments adaptor frame in the call
+ // chain.
+ Node* const args_state = GetArgumentsFrameState(frame_state);
+ FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ // Prepare element backing store to be used by arguments object.
+ Node* const elements = AllocateArguments(effect, control, args_state);
+ effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+ // Load the arguments object map from the current native context.
+ Node* const load_native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* const load_arguments_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForContextSlot(
+ Context::STRICT_ARGUMENTS_MAP_INDEX)),
+ load_native_context, effect, control);
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ int length = args_state_info.parameter_count() - 1; // Minus receiver.
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ a.Allocate(JSStrictArgumentsObject::kSize);
+ a.Store(AccessBuilder::ForMap(), load_arguments_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ } else if (type == CreateArgumentsType::kRestParameter) {
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ int start_index = shared->internal_formal_parameter_count();
+ // Use inline allocation for all rest parameter arrays within inlined
+ // (i.e. non-outermost) frames, independent of the object size.
+ Node* const control = NodeProperties::GetControlInput(node);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Choose the correct frame state and frame state info depending on
+ // whether there conceptually is an arguments adaptor frame in the call
+ // chain.
+ Node* const args_state = GetArgumentsFrameState(frame_state);
+ FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
+ // Prepare element backing store to be used by the rest array.
+ Node* const elements =
+ AllocateRestArguments(effect, control, args_state, start_index);
+ effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
+ // Load the JSArray object map from the current native context.
+ Node* const load_native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* const load_jsarray_map = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForContextSlot(
+ Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX)),
+ load_native_context, effect, control);
+ // Actually allocate and initialize the jsarray.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+ int argument_count = args_state_info.parameter_count() - 1; // Minus receiver.
+ int length = std::max(0, argument_count - start_index);
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ a.Allocate(JSArray::kSize);
+ a.Store(AccessBuilder::ForMap(), load_jsarray_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS),
+ jsgraph()->Constant(length));
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+ }
+
+ return NoChange();
+}
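
The rest-parameter branch sizes the array as the argument count past the formal parameters, clamped at zero. A hypothetical helper, not part of the patch, spelling out that arithmetic:

    #include <algorithm>

    // parameter_count_with_receiver comes from the frame state; start_index
    // is the formal parameter count.
    int RestLengthModel(int parameter_count_with_receiver, int start_index) {
      int argument_count = parameter_count_with_receiver - 1;  // Minus receiver.
      return std::max(0, argument_count - start_index);
    }

    // E.g. two formals called with five arguments: RestLengthModel(6, 2) == 3,
    // so the rest array receives the trailing three values.
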
+
+Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
+ int capacity,
+ Handle<AllocationSite> site) {
+ DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Extract transition and tenuring feedback from the {site} and add
+ // appropriate code dependencies on the {site} if deoptimization is
+ // enabled.
+ PretenureFlag pretenure = site->GetPretenureMode();
+ ElementsKind elements_kind = site->GetElementsKind();
+ DCHECK(IsFastElementsKind(elements_kind));
+ dependencies()->AssumeTenuringDecision(site);
+ dependencies()->AssumeTransitionStable(site);
+
+ // Retrieve the initial map for the array from the appropriate native context.
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* js_array_map = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ArrayMapIndex(elements_kind), true),
+ native_context, native_context, effect);
+
+ // Set up elements and properties.
+ Node* elements;
+ if (capacity == 0) {
+ elements = jsgraph()->EmptyFixedArrayConstant();
+ } else {
+ elements = effect =
+ AllocateElements(effect, control, elements_kind, capacity, pretenure);
+ }
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+ // Perform the allocation of the actual JSArray object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(JSArray::kSize, pretenure);
+ a.Store(AccessBuilder::ForMap(), js_array_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
+Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+ CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* new_target = NodeProperties::GetValueInput(node, 1);
+
+ // TODO(bmeurer): Optimize the subclassing case.
+ if (target != new_target) return NoChange();
+
+ // Check if we have a feedback {site} on the {node}.
+ Handle<AllocationSite> site = p.site();
+ if (p.site().is_null()) return NoChange();
+
+ // Attempt to inline calls to the Array constructor for the relevant cases
+ // where either no arguments are provided, or exactly one unsigned number
+ // argument is given.
+ if (site->CanInlineCall()) {
+ if (p.arity() == 0) {
+ Node* length = jsgraph()->ZeroConstant();
+ int capacity = JSArray::kPreallocatedArrayElements;
+ return ReduceNewArray(node, length, capacity, site);
+ } else if (p.arity() == 1) {
+ Node* length = NodeProperties::GetValueInput(node, 2);
+ Type* length_type = NodeProperties::GetType(length);
+ if (length_type->Is(Type::SignedSmall()) &&
+ length_type->Min() >= 0 &&
+ length_type->Max() <= kElementLoopUnrollLimit) {
+ int capacity = static_cast<int>(length_type->Max());
+ return ReduceNewArray(node, length, capacity, site);
+ }
+ }
+ }
+
+ return NoChange();
+}
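
Inlining is attempted only for the zero-argument case and for a single argument whose static type proves a small non-negative length; everything else falls through to NoChange(). A standalone model of that decision, with illustrative names:

    // Returns the inline capacity to use, or -1 for "don't inline".
    int InlineArrayCapacityModel(int arity, double length_min,
                                 double length_max, int preallocated,
                                 int unroll_limit) {
      if (arity == 0) return preallocated;  // kPreallocatedArrayElements.
      if (arity == 1 && length_min >= 0 && length_max <= unroll_limit) {
        return static_cast<int>(length_max);  // kElementLoopUnrollLimit bound.
      }
      return -1;  // Generic stub / runtime path instead.
    }
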
+
+Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* done = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+
+ // Load the JSIteratorResult map for the {context}.
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ Node* iterator_result_map = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
+ native_context, native_context, effect);
+
+ // Emit code to allocate the JSIteratorResult instance.
+ AllocationBuilder a(jsgraph(), effect, graph()->start());
+ a.Allocate(JSIteratorResult::kSize);
+ a.Store(AccessBuilder::ForMap(), iterator_result_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSObjectElements(),
+ jsgraph()->EmptyFixedArrayConstant());
+ a.Store(AccessBuilder::ForJSIteratorResultValue(), value);
+ a.Store(AccessBuilder::ForJSIteratorResultDone(), done);
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
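
The STATIC_ASSERT pins the allocation at five tagged words, one per store above. An illustrative layout struct; field offsets in the real object come from AccessBuilder, not from this model:

    struct IterResultModel {
      void* map;         // iterator_result_map from the native context
      void* properties;  // empty fixed array
      void* elements;    // empty fixed array
      void* value;       // the {value} input
      void* done;        // the {done} input
    };
    static_assert(sizeof(IterResultModel) == 5 * sizeof(void*),
                  "five tagged words, as in the STATIC_ASSERT above");
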
+
+Reduction JSCreateLowering::ReduceJSCreateLiteral(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kJSCreateLiteralArray ||
+ node->opcode() == IrOpcode::kJSCreateLiteralObject);
+ CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Handle<LiteralsArray> literals_array;
+ if (GetSpecializationLiterals(node).ToHandle(&literals_array)) {
+ Handle<Object> literal(literals_array->literal(p.index()), isolate());
+ if (literal->IsAllocationSite()) {
+ Handle<AllocationSite> site = Handle<AllocationSite>::cast(literal);
+ Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()),
+ isolate());
+ int max_properties = kMaxFastLiteralProperties;
+ if (IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) {
+ AllocationSiteUsageContext site_context(isolate(), site, false);
+ site_context.EnterNewScope();
+ Node* value = effect =
+ AllocateFastLiteral(effect, control, boilerplate, &site_context);
+ site_context.ExitScope(site, boilerplate);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ }
+ }
+
+ return NoChange();
+}
+
+Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
+ int slot_count = OpParameter<int>(node->op());
+ Node* const closure = NodeProperties::GetValueInput(node, 0);
+
+ // Use inline allocation for function contexts up to a size limit.
+ if (slot_count < kFunctionContextAllocationLimit) {
+ // JSCreateFunctionContext[slot_count < limit](fun)
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* extension = jsgraph()->TheHoleConstant();
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
+ int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
+ a.AllocateArray(context_length, factory()->function_context_map());
+ a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+ a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
+ a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
+ }
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
+Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
+ Node* object = NodeProperties::GetValueInput(node, 0);
+ Node* closure = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
+ a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
+ a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+ a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), object);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
+Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
+ Handle<String> name = OpParameter<Handle<String>>(node);
+ Node* exception = NodeProperties::GetValueInput(node, 0);
+ Node* closure = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
+ a.AllocateArray(Context::MIN_CONTEXT_SLOTS + 1,
+ factory()->catch_context_map());
+ a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+ a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), name);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
+ a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
+ exception);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
+Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
+ Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
+ int const context_length = scope_info->ContextLength();
+ Node* const closure = NodeProperties::GetValueInput(node, 0);
+
+ // Use inline allocation for block contexts up to a size limit.
+ if (context_length < kBlockContextAllocationLimit) {
+ // JSCreateBlockContext[scope[length < limit]](fun)
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* extension = jsgraph()->Constant(scope_info);
+ Node* native_context = effect = graph()->NewNode(
+ javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
+ context, context, effect);
+ AllocationBuilder a(jsgraph(), effect, control);
+ STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
+ a.AllocateArray(context_length, factory()->block_context_map());
+ a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
+ a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
+ a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
+ native_context);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
+ a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
+ }
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+
+ return NoChange();
+}
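
All four context reducers store the same MIN_CONTEXT_SLOTS header before the variable part. An illustrative slot map, not V8's Context class:

    enum ContextSlotModel {
      kClosureSlot = 0,        // CLOSURE_INDEX: the {closure} input
      kPreviousSlot = 1,       // PREVIOUS_INDEX: the incoming {context}
      kExtensionSlot = 2,      // EXTENSION_INDEX: hole, object, name, or
                               // scope info, depending on the context kind
      kNativeContextSlot = 3,  // NATIVE_CONTEXT_INDEX
      kFirstUserSlot = 4       // == MIN_CONTEXT_SLOTS; filled with undefined
    };
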
+
+// Helper that allocates a FixedArray holding argument values recorded in the
+// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
+Node* JSCreateLowering::AllocateArguments(Node* effect, Node* control,
+ Node* frame_state) {
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ int argument_count = state_info.parameter_count() - 1; // Minus receiver.
+ if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+ // Prepare an iterator over argument values recorded in the frame state.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ StateValuesAccess parameters_access(parameters);
+ auto parameters_it = ++parameters_access.begin();
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(argument_count, factory()->fixed_array_map());
+ for (int i = 0; i < argument_count; ++i, ++parameters_it) {
+ a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ }
+ return a.Finish();
+}
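
The pre-increment on parameters_access.begin() is what implements the "minus receiver" adjustment: the frame state records the receiver first, and the helper copies everything after it. A standalone model with ints standing in for nodes:

    #include <vector>

    // frame_values holds [receiver, arg0, ..., argN-1]; skip slot 0.
    std::vector<int> CopyArgumentsModel(const std::vector<int>& frame_values) {
      return std::vector<int>(frame_values.begin() + 1, frame_values.end());
    }
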
+
+ // Helper that allocates a FixedArray holding the argument values recorded in
+ // the given {frame_state}, starting at {start_index}. Serves as backing store
+ // for rest parameters of JSCreateArguments nodes.
+Node* JSCreateLowering::AllocateRestArguments(Node* effect, Node* control,
+ Node* frame_state,
+ int start_index) {
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ int argument_count = state_info.parameter_count() - 1; // Minus receiver.
+ int num_elements = std::max(0, argument_count - start_index);
+ if (num_elements == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+ // Prepare an iterator over argument values recorded in the frame state.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ StateValuesAccess parameters_access(parameters);
+ auto parameters_it = ++parameters_access.begin();
+
+ // Skip unused arguments.
+ for (int i = 0; i < start_index; i++) {
+ ++parameters_it;
+ }
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(num_elements, factory()->fixed_array_map());
+ for (int i = 0; i < num_elements; ++i, ++parameters_it) {
+ a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ }
+ return a.Finish();
+}
+
+// Helper that allocates a FixedArray serving as a parameter map for values
+// recorded in the given {frame_state}. Some elements map to slots within the
+// given {context}. Serves as backing store for JSCreateArguments nodes.
+Node* JSCreateLowering::AllocateAliasedArguments(
+ Node* effect, Node* control, Node* frame_state, Node* context,
+ Handle<SharedFunctionInfo> shared, bool* has_aliased_arguments) {
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+ int argument_count = state_info.parameter_count() - 1; // Minus receiver.
+ if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
+
+ // If there is no aliasing, the arguments object elements are not special in
+ // any way; we can just return an unmapped backing store instead.
+ int parameter_count = shared->internal_formal_parameter_count();
+ if (parameter_count == 0) {
+ return AllocateArguments(effect, control, frame_state);
+ }
+
+ // Calculate number of argument values being aliased/mapped.
+ int mapped_count = Min(argument_count, parameter_count);
+ *has_aliased_arguments = true;
+
+ // Prepare an iterator over argument values recorded in the frame state.
+ Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+ StateValuesAccess parameters_access(parameters);
+ auto parameters_it = ++parameters_access.begin();
+
+ // The unmapped argument values recorded in the frame state are stored yet
+ // another indirection away and then linked into the parameter map below,
+ // whereas mapped argument values are replaced with a hole instead.
+ AllocationBuilder aa(jsgraph(), effect, control);
+ aa.AllocateArray(argument_count, factory()->fixed_array_map());
+ for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
+ aa.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->TheHoleConstant());
+ }
+ for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
+ aa.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
+ }
+ Node* arguments = aa.Finish();
+
+ // Actually allocate the backing store (the parameter map).
+ AllocationBuilder a(jsgraph(), arguments, control);
+ a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
+ a.Store(AccessBuilder::ForFixedArraySlot(0), context);
+ a.Store(AccessBuilder::ForFixedArraySlot(1), arguments);
+ for (int i = 0; i < mapped_count; ++i) {
+ int idx = Context::MIN_CONTEXT_SLOTS + parameter_count - 1 - i;
+ a.Store(AccessBuilder::ForFixedArraySlot(i + 2), jsgraph()->Constant(idx));
+ }
+ return a.Finish();
+}
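
The parameter map allocated last has the shape [context, arguments, idx_0, ..., idx_{mapped_count-1}], where each idx names a context slot counted down from the end of the formals. A hypothetical helper reproducing the index computation:

    // Returns the context slot index aliased by mapped parameter {i},
    // matching the idx computation in the loop above.
    int AliasedContextSlotModel(int min_context_slots, int parameter_count,
                                int i) {
      return min_context_slots + parameter_count - 1 - i;
    }

    // E.g. with MIN_CONTEXT_SLOTS == 4 and two formals, parameter 0 aliases
    // context slot 5 and parameter 1 aliases context slot 4.
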
+
+Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
+ ElementsKind elements_kind,
+ int capacity,
+ PretenureFlag pretenure) {
+ DCHECK_LE(1, capacity);
+ DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
+
+ Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
+ ? factory()->fixed_double_array_map()
+ : factory()->fixed_array_map();
+ ElementAccess access = IsFastDoubleElementsKind(elements_kind)
+ ? AccessBuilder::ForFixedDoubleArrayElement()
+ : AccessBuilder::ForFixedArrayElement();
+ Node* value =
+ IsFastDoubleElementsKind(elements_kind)
+ ? jsgraph()->Float64Constant(bit_cast<double>(kHoleNanInt64))
+ : jsgraph()->TheHoleConstant();
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(capacity, elements_map, pretenure);
+ for (int i = 0; i < capacity; ++i) {
+ Node* index = jsgraph()->Constant(i);
+ a.Store(access, index, value);
+ }
+ return a.Finish();
+}
+
+Node* JSCreateLowering::AllocateFastLiteral(
+ Node* effect, Node* control, Handle<JSObject> boilerplate,
+ AllocationSiteUsageContext* site_context) {
+ Handle<AllocationSite> current_site(*site_context->current(), isolate());
+ dependencies()->AssumeTransitionStable(current_site);
+
+ PretenureFlag pretenure = NOT_TENURED;
+ if (FLAG_allocation_site_pretenuring) {
+ Handle<AllocationSite> top_site(*site_context->top(), isolate());
+ pretenure = top_site->GetPretenureMode();
+ if (current_site.is_identical_to(top_site)) {
+ // We install a dependency for pretenuring only on the outermost literal.
+ dependencies()->AssumeTenuringDecision(top_site);
+ }
+ }
+
+ // Set up the properties backing store.
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+
+ // Set up the elements backing store.
+ Node* elements = AllocateFastLiteralElements(effect, control, boilerplate,
+ pretenure, site_context);
+ if (elements->op()->EffectOutputCount() > 0) effect = elements;
+
+ // Compute the in-object properties to store first (might have effects).
+ Handle<Map> boilerplate_map(boilerplate->map(), isolate());
+ ZoneVector<std::pair<FieldAccess, Node*>> inobject_fields(zone());
+ inobject_fields.reserve(boilerplate_map->GetInObjectProperties());
+ int const boilerplate_nof = boilerplate_map->NumberOfOwnDescriptors();
+ for (int i = 0; i < boilerplate_nof; ++i) {
+ PropertyDetails const property_details =
+ boilerplate_map->instance_descriptors()->GetDetails(i);
+ if (property_details.type() != DATA) continue;
+ Handle<Name> property_name(
+ boilerplate_map->instance_descriptors()->GetKey(i), isolate());
+ FieldIndex index = FieldIndex::ForDescriptor(*boilerplate_map, i);
+ FieldAccess access = {kTaggedBase, index.offset(), property_name,
+ Type::Tagged(), MachineType::AnyTagged()};
+ Node* value;
+ if (boilerplate->IsUnboxedDoubleField(index)) {
+ access.machine_type = MachineType::Float64();
+ access.type = Type::Number();
+ value = jsgraph()->Constant(boilerplate->RawFastDoublePropertyAt(index));
+ } else {
+ Handle<Object> boilerplate_value(boilerplate->RawFastPropertyAt(index),
+ isolate());
+ if (boilerplate_value->IsJSObject()) {
+ Handle<JSObject> boilerplate_object =
+ Handle<JSObject>::cast(boilerplate_value);
+ Handle<AllocationSite> current_site = site_context->EnterNewScope();
+ value = effect = AllocateFastLiteral(effect, control,
+ boilerplate_object, site_context);
+ site_context->ExitScope(current_site, boilerplate_object);
+ } else if (property_details.representation().IsDouble()) {
+ // Allocate a mutable HeapNumber box and store the value into it.
+ value = effect = AllocateMutableHeapNumber(
+ Handle<HeapNumber>::cast(boilerplate_value)->value(),
+ effect, control);
+ } else if (property_details.representation().IsSmi()) {
+ // Ensure that the value is stored as a smi.
+ value = boilerplate_value->IsUninitialized()
+ ? jsgraph()->ZeroConstant()
+ : jsgraph()->Constant(boilerplate_value);
+ } else {
+ value = jsgraph()->Constant(boilerplate_value);
+ }
+ }
+ inobject_fields.push_back(std::make_pair(access, value));
+ }
+
+ // Fill slack at the end of the boilerplate object with filler maps.
+ int const boilerplate_length = boilerplate_map->GetInObjectProperties();
+ for (int index = static_cast<int>(inobject_fields.size());
+ index < boilerplate_length; ++index) {
+ FieldAccess access =
+ AccessBuilder::ForJSObjectInObjectProperty(boilerplate_map, index);
+ Node* value = jsgraph()->HeapConstant(factory()->one_pointer_filler_map());
+ inobject_fields.push_back(std::make_pair(access, value));
+ }
+
+ // Actually allocate and initialize the object.
+ AllocationBuilder builder(jsgraph(), effect, control);
+ builder.Allocate(boilerplate_map->instance_size(), pretenure);
+ builder.Store(AccessBuilder::ForMap(), boilerplate_map);
+ builder.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ builder.Store(AccessBuilder::ForJSObjectElements(), elements);
+ if (boilerplate_map->IsJSArrayMap()) {
+ Handle<JSArray> boilerplate_array = Handle<JSArray>::cast(boilerplate);
+ builder.Store(
+ AccessBuilder::ForJSArrayLength(boilerplate_array->GetElementsKind()),
+ handle(boilerplate_array->length(), isolate()));
+ }
+ for (auto const inobject_field : inobject_fields) {
+ builder.Store(inobject_field.first, inobject_field.second);
+ }
+ return builder.Finish();
+}
+
+Node* JSCreateLowering::AllocateFastLiteralElements(
+ Node* effect, Node* control, Handle<JSObject> boilerplate,
+ PretenureFlag pretenure, AllocationSiteUsageContext* site_context) {
+ Handle<FixedArrayBase> boilerplate_elements(boilerplate->elements(),
+ isolate());
+
+ // Empty or copy-on-write elements just store a constant.
+ if (boilerplate_elements->length() == 0 ||
+ boilerplate_elements->map() == isolate()->heap()->fixed_cow_array_map()) {
+ if (pretenure == TENURED &&
+ isolate()->heap()->InNewSpace(*boilerplate_elements)) {
+ // If we would like to pretenure a fixed cow array, we must ensure that
+ // the array is already in old space; otherwise we'll create too many
+ // old-to-new-space pointers (overflowing the store buffer).
+ boilerplate_elements = Handle<FixedArrayBase>(
+ isolate()->factory()->CopyAndTenureFixedCOWArray(
+ Handle<FixedArray>::cast(boilerplate_elements)));
+ boilerplate->set_elements(*boilerplate_elements);
+ }
+ return jsgraph()->HeapConstant(boilerplate_elements);
+ }
+
+ // Compute the elements to store first (might have effects).
+ int const elements_length = boilerplate_elements->length();
+ Handle<Map> elements_map(boilerplate_elements->map(), isolate());
+ ZoneVector<Node*> elements_values(elements_length, zone());
+ if (elements_map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE) {
+ Handle<FixedDoubleArray> elements =
+ Handle<FixedDoubleArray>::cast(boilerplate_elements);
+ for (int i = 0; i < elements_length; ++i) {
+ if (elements->is_the_hole(i)) {
+ // TODO(turbofan): We cannot currently safely pass through the (signaling)
+ // hole NaN in C++ code, as the C++ compiler on Intel might use FPU
+ // instructions/registers for doubles and therefore make the NaN quiet.
+ // We should consider passing doubles in the compiler as raw int64
+ // values to prevent this.
+ elements_values[i] = effect =
+ graph()->NewNode(simplified()->LoadElement(
+ AccessBuilder::ForFixedDoubleArrayElement()),
+ jsgraph()->HeapConstant(elements),
+ jsgraph()->Constant(i), effect, control);
+ } else {
+ elements_values[i] = jsgraph()->Constant(elements->get_scalar(i));
+ }
+ }
+ } else {
+ Handle<FixedArray> elements =
+ Handle<FixedArray>::cast(boilerplate_elements);
+ for (int i = 0; i < elements_length; ++i) {
+ if (elements->is_the_hole(i)) {
+ elements_values[i] = jsgraph()->TheHoleConstant();
+ } else {
+ Handle<Object> element_value(elements->get(i), isolate());
+ if (element_value->IsJSObject()) {
+ Handle<JSObject> boilerplate_object =
+ Handle<JSObject>::cast(element_value);
+ Handle<AllocationSite> current_site = site_context->EnterNewScope();
+ elements_values[i] = effect = AllocateFastLiteral(
+ effect, control, boilerplate_object, site_context);
+ site_context->ExitScope(current_site, boilerplate_object);
+ } else {
+ elements_values[i] = jsgraph()->Constant(element_value);
+ }
+ }
+ }
+ }
+
+ // Allocate the backing store array and store the elements.
+ AllocationBuilder builder(jsgraph(), effect, control);
+ builder.AllocateArray(elements_length, elements_map, pretenure);
+ ElementAccess const access =
+ (elements_map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE)
+ ? AccessBuilder::ForFixedDoubleArrayElement()
+ : AccessBuilder::ForFixedArrayElement();
+ for (int i = 0; i < elements_length; ++i) {
+ builder.Store(access, jsgraph()->Constant(i), elements_values[i]);
+ }
+ return builder.Finish();
+}
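
The hole is encoded as a particular NaN bit pattern, and the TODO above explains why it must travel as raw bits rather than as a C++ double. A sketch of the memcpy-based bit_cast; the constant is an illustrative signaling-NaN payload, not V8's kHoleNanInt64:

    #include <cstdint>
    #include <cstring>

    // Well-defined type punning: copying the bytes preserves the exact NaN
    // payload, whereas an FPU round-trip may quiet a signaling NaN.
    double DoubleFromBits(uint64_t bits) {
      double result;
      std::memcpy(&result, &bits, sizeof result);
      return result;
    }

    const double kIllustrativeHoleNan = DoubleFromBits(0x7FF7FFFF00000000ULL);
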
+
+Node* JSCreateLowering::AllocateMutableHeapNumber(double value, Node* effect,
+ Node* control) {
+ // TODO(turbofan): Support inline allocation of MutableHeapNumber
+ // (requires proper alignment on Allocate, and Begin/FinishRegion).
+ Callable callable = CodeFactory::AllocateMutableHeapNumber(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoThrow);
+ Node* result = effect = graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+ jsgraph()->NoContextConstant(), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), result,
+ jsgraph()->Constant(value), effect, control);
+ return result;
+}
+
+MaybeHandle<LiteralsArray> JSCreateLowering::GetSpecializationLiterals(
+ Node* node) {
+ Node* const closure = NodeProperties::GetValueInput(node, 0);
+ switch (closure->opcode()) {
+ case IrOpcode::kHeapConstant: {
+ Handle<HeapObject> object = OpParameter<Handle<HeapObject>>(closure);
+ return handle(Handle<JSFunction>::cast(object)->literals());
+ }
+ case IrOpcode::kParameter: {
+ int const index = ParameterIndexOf(closure->op());
+ // The closure is always the last parameter to a JavaScript function, and
+ // {Parameter} indices start at -1, so value outputs of {Start} look like
+ // this: closure, receiver, param0, ..., paramN, context.
+ if (index == -1) {
+ return literals_array_;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return MaybeHandle<LiteralsArray>();
+}
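
The comment in the kParameter case pins down the ordering of {Start}'s value outputs. An illustrative decoder of those indices, a standalone helper rather than V8 API:

    const char* DescribeStartOutputModel(int index,
                                         int formal_parameter_count) {
      if (index == -1) return "closure";
      if (index == 0) return "receiver";
      if (index <= formal_parameter_count) return "parameter";
      return "context";
    }
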
+
+Factory* JSCreateLowering::factory() const { return isolate()->factory(); }
+
+Graph* JSCreateLowering::graph() const { return jsgraph()->graph(); }
+
+Isolate* JSCreateLowering::isolate() const { return jsgraph()->isolate(); }
+
+JSOperatorBuilder* JSCreateLowering::javascript() const {
+ return jsgraph()->javascript();
+}
+
+CommonOperatorBuilder* JSCreateLowering::common() const {
+ return jsgraph()->common();
+}
+
+SimplifiedOperatorBuilder* JSCreateLowering::simplified() const {
+ return jsgraph()->simplified();
+}
+
+MachineOperatorBuilder* JSCreateLowering::machine() const {
+ return jsgraph()->machine();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
new file mode 100644
index 0000000000..d9d184b8e2
--- /dev/null
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -0,0 +1,99 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_CREATE_LOWERING_H_
+#define V8_COMPILER_JS_CREATE_LOWERING_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class AllocationSiteUsageContext;
+class CompilationDependencies;
+class Factory;
+
+
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class JSOperatorBuilder;
+class MachineOperatorBuilder;
+class SimplifiedOperatorBuilder;
+
+
+// Lowers JSCreate-level operators to fast (inline) allocations.
+class JSCreateLowering final : public AdvancedReducer {
+ public:
+ JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
+ JSGraph* jsgraph, MaybeHandle<LiteralsArray> literals_array,
+ Zone* zone)
+ : AdvancedReducer(editor),
+ dependencies_(dependencies),
+ jsgraph_(jsgraph),
+ literals_array_(literals_array),
+ zone_(zone) {}
+ ~JSCreateLowering() final {}
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Reduction ReduceJSCreate(Node* node);
+ Reduction ReduceJSCreateArguments(Node* node);
+ Reduction ReduceJSCreateArray(Node* node);
+ Reduction ReduceJSCreateIterResultObject(Node* node);
+ Reduction ReduceJSCreateLiteral(Node* node);
+ Reduction ReduceJSCreateFunctionContext(Node* node);
+ Reduction ReduceJSCreateWithContext(Node* node);
+ Reduction ReduceJSCreateCatchContext(Node* node);
+ Reduction ReduceJSCreateBlockContext(Node* node);
+ Reduction ReduceNewArray(Node* node, Node* length, int capacity,
+ Handle<AllocationSite> site);
+
+ Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
+ Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
+ int start_index);
+ Node* AllocateAliasedArguments(Node* effect, Node* control, Node* frame_state,
+ Node* context, Handle<SharedFunctionInfo>,
+ bool* has_aliased_arguments);
+ Node* AllocateElements(Node* effect, Node* control,
+ ElementsKind elements_kind, int capacity,
+ PretenureFlag pretenure);
+ Node* AllocateFastLiteral(Node* effect, Node* control,
+ Handle<JSObject> boilerplate,
+ AllocationSiteUsageContext* site_context);
+ Node* AllocateFastLiteralElements(Node* effect, Node* control,
+ Handle<JSObject> boilerplate,
+ PretenureFlag pretenure,
+ AllocationSiteUsageContext* site_context);
+ Node* AllocateMutableHeapNumber(double value, Node* effect, Node* control);
+
+ // Infers the LiteralsArray to use for a given {node}.
+ MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);
+
+ Factory* factory() const;
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Isolate* isolate() const;
+ JSOperatorBuilder* javascript() const;
+ CommonOperatorBuilder* common() const;
+ SimplifiedOperatorBuilder* simplified() const;
+ MachineOperatorBuilder* machine() const;
+ CompilationDependencies* dependencies() const { return dependencies_; }
+ Zone* zone() const { return zone_; }
+
+ CompilationDependencies* const dependencies_;
+ JSGraph* const jsgraph_;
+ MaybeHandle<LiteralsArray> const literals_array_;
+ Zone* const zone_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_JS_CREATE_LOWERING_H_
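
For orientation, a minimal wiring sketch: JSCreateLowering is an AdvancedReducer, so it takes the GraphReducer driving it as its Editor. The surrounding function and variable names are hypothetical; only the constructor signature comes from the header above.

    void RunCreateLoweringSketch(Zone* zone, Graph* graph, JSGraph* jsgraph,
                                 CompilationDependencies* dependencies,
                                 MaybeHandle<LiteralsArray> literals_array) {
      GraphReducer graph_reducer(zone, graph);
      JSCreateLowering create_lowering(&graph_reducer, dependencies, jsgraph,
                                       literals_array, zone);
      graph_reducer.AddReducer(&create_lowering);
      graph_reducer.ReduceGraph();
    }
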
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 15ce908a1c..b6cd40d21d 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -62,16 +62,11 @@ Reduction JSGenericLowering::Reduce(Node* node) {
return Changed(node);
}
-
-#define REPLACE_BINARY_OP_IC_CALL(Op, token) \
- void JSGenericLowering::Lower##Op(Node* node) { \
- BinaryOperationParameters const& p = \
- BinaryOperationParametersOf(node->op()); \
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node); \
- ReplaceWithStubCall(node, \
- CodeFactory::BinaryOpIC(isolate(), token, \
- strength(p.language_mode())), \
- CallDescriptor::kPatchableCallSiteWithNop | flags); \
+#define REPLACE_BINARY_OP_IC_CALL(Op, token) \
+ void JSGenericLowering::Lower##Op(Node* node) { \
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node); \
+ ReplaceWithStubCall(node, CodeFactory::BinaryOpIC(isolate(), token), \
+ CallDescriptor::kPatchableCallSiteWithNop | flags); \
}
REPLACE_BINARY_OP_IC_CALL(JSBitwiseOr, Token::BIT_OR)
REPLACE_BINARY_OP_IC_CALL(JSBitwiseXor, Token::BIT_XOR)
@@ -89,34 +84,25 @@ REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
// These ops are not language mode dependent; we arbitrarily pass Strength::WEAK
// here.
-#define REPLACE_COMPARE_IC_CALL(op, token) \
- void JSGenericLowering::Lower##op(Node* node) { \
- ReplaceWithCompareIC(node, token, Strength::WEAK); \
+#define REPLACE_COMPARE_IC_CALL(op, token) \
+ void JSGenericLowering::Lower##op(Node* node) { \
+ ReplaceWithCompareIC(node, token); \
}
REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ)
REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE)
REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT)
REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT)
+REPLACE_COMPARE_IC_CALL(JSLessThan, Token::LT)
+REPLACE_COMPARE_IC_CALL(JSGreaterThan, Token::GT)
+REPLACE_COMPARE_IC_CALL(JSLessThanOrEqual, Token::LTE)
+REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE)
#undef REPLACE_COMPARE_IC_CALL
-#define REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(op, token) \
- void JSGenericLowering::Lower##op(Node* node) { \
- ReplaceWithCompareIC(node, token, \
- strength(OpParameter<LanguageMode>(node))); \
- }
-REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSLessThan, Token::LT)
-REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSGreaterThan, Token::GT)
-REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSLessThanOrEqual, Token::LTE)
-REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSGreaterThanOrEqual, Token::GTE)
-#undef REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE
-
-
#define REPLACE_RUNTIME_CALL(op, fun) \
void JSGenericLowering::Lower##op(Node* node) { \
ReplaceWithRuntimeCall(node, fun); \
}
-REPLACE_RUNTIME_CALL(JSCreateFunctionContext, Runtime::kNewFunctionContext)
REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
REPLACE_RUNTIME_CALL(JSConvertReceiver, Runtime::kConvertReceiver)
@@ -131,10 +117,8 @@ static CallDescriptor::Flags FlagsForNode(Node* node) {
return result;
}
-
-void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
- Strength str) {
- Callable callable = CodeFactory::CompareIC(isolate(), token, str);
+void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
+ Callable callable = CodeFactory::CompareIC(isolate(), token);
// Create a new call node asking a CompareIC for help.
NodeVector inputs(zone());
@@ -267,7 +251,9 @@ void JSGenericLowering::LowerJSToString(Node* node) {
void JSGenericLowering::LowerJSToName(Node* node) {
- ReplaceWithRuntimeCall(node, Runtime::kToName);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ Callable callable = CodeFactory::ToName(isolate());
+ ReplaceWithStubCall(node, callable, flags);
}
@@ -279,99 +265,187 @@ void JSGenericLowering::LowerJSToObject(Node* node) {
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
+ Node* closure = NodeProperties::GetValueInput(node, 2);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const PropertyAccess& p = PropertyAccessOf(node->op());
- Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), p.language_mode(), UNINITIALIZED);
+ Callable callable =
+ CodeFactory::KeyedLoadICInOptimizedCode(isolate(), UNINITIALIZED);
+ // Load the type feedback vector from the closure.
+ Node* shared_info = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), closure,
+ jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+ kHeapObjectTag),
+ effect, control);
+ Node* vector = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), shared_info,
+ jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ kHeapObjectTag),
+ effect, control);
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ node->ReplaceInput(3, vector);
+ node->ReplaceInput(6, effect);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSLoadNamed(Node* node) {
+ Node* closure = NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
Callable callable = CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF, p.language_mode(), UNINITIALIZED);
+ isolate(), NOT_INSIDE_TYPEOF, UNINITIALIZED);
+ // Load the type feedback vector from the closure.
+ Node* shared_info = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), closure,
+ jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+ kHeapObjectTag),
+ effect, control);
+ Node* vector = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), shared_info,
+ jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ kHeapObjectTag),
+ effect, control);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ node->ReplaceInput(3, vector);
+ node->ReplaceInput(6, effect);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
+ Node* closure = NodeProperties::GetValueInput(node, 0);
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
Callable callable = CodeFactory::LoadICInOptimizedCode(
- isolate(), p.typeof_mode(), SLOPPY, UNINITIALIZED);
+ isolate(), p.typeof_mode(), UNINITIALIZED);
+ // Load the type feedback vector from the closure.
+ Node* shared_info = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), closure,
+ jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+ kHeapObjectTag),
+ effect, control);
+ Node* vector = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), shared_info,
+ jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ kHeapObjectTag),
+ effect, control);
// Load global object from the context.
- Node* native_context =
+ Node* native_context = effect =
graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
jsgraph()->IntPtrConstant(
Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
- effect, graph()->start());
- Node* global = graph()->NewNode(
+ effect, control);
+ Node* global = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), native_context,
jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
- effect, graph()->start());
+ effect, control);
node->InsertInput(zone(), 0, global);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
+ node->ReplaceInput(3, vector);
+ node->ReplaceInput(6, effect);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSStoreProperty(Node* node) {
+ Node* closure = NodeProperties::GetValueInput(node, 3);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
PropertyAccess const& p = PropertyAccessOf(node->op());
LanguageMode language_mode = p.language_mode();
Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), language_mode, UNINITIALIZED);
- DCHECK(p.feedback().index() != -1);
+ // Load the type feedback vector from the closure.
+ Node* shared_info = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), closure,
+ jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+ kHeapObjectTag),
+ effect, control);
+ Node* vector = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), shared_info,
+ jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ kHeapObjectTag),
+ effect, control);
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
- ReplaceWithStubCall(node, callable,
- CallDescriptor::kPatchableCallSite | flags);
+ node->ReplaceInput(4, vector);
+ node->ReplaceInput(7, effect);
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSStoreNamed(Node* node) {
+ Node* closure = NodeProperties::GetValueInput(node, 2);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
Callable callable = CodeFactory::StoreICInOptimizedCode(
isolate(), p.language_mode(), UNINITIALIZED);
+ // Load the type feedback vector from the closure.
+ Node* shared_info = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), closure,
+ jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+ kHeapObjectTag),
+ effect, control);
+ Node* vector = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), shared_info,
+ jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ kHeapObjectTag),
+ effect, control);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- DCHECK(p.feedback().index() != -1);
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
- ReplaceWithStubCall(node, callable,
- CallDescriptor::kPatchableCallSite | flags);
+ node->ReplaceInput(4, vector);
+ node->ReplaceInput(7, effect);
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
+ Node* closure = NodeProperties::GetValueInput(node, 1);
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
Callable callable = CodeFactory::StoreICInOptimizedCode(
isolate(), p.language_mode(), UNINITIALIZED);
+ // Load the type feedback vector from the closure.
+ Node* shared_info = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), closure,
+ jsgraph()->IntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+ kHeapObjectTag),
+ effect, control);
+ Node* vector = effect = graph()->NewNode(
+ machine()->Load(MachineType::AnyTagged()), shared_info,
+ jsgraph()->IntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ kHeapObjectTag),
+ effect, control);
// Load global object from the context.
- Node* native_context =
+ Node* native_context = effect =
graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
jsgraph()->IntPtrConstant(
Context::SlotOffset(Context::NATIVE_CONTEXT_INDEX)),
- effect, graph()->start());
- Node* global = graph()->NewNode(
+ effect, control);
+ Node* global = effect = graph()->NewNode(
machine()->Load(MachineType::AnyTagged()), native_context,
jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
- effect, graph()->start());
+ effect, control);
node->InsertInput(zone(), 0, global);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
- DCHECK(p.feedback().index() != -1);
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.feedback().index()));
- ReplaceWithStubCall(node, callable,
- CallDescriptor::kPatchableCallSite | flags);
+ node->ReplaceInput(4, vector);
+ node->ReplaceInput(7, effect);
+ ReplaceWithStubCall(node, callable, flags);
}
@@ -433,38 +507,24 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
}
-void JSGenericLowering::LowerJSLoadDynamic(Node* node) {
- const DynamicAccess& access = DynamicAccessOf(node->op());
- Runtime::FunctionId function_id =
- (access.typeof_mode() == NOT_INSIDE_TYPEOF)
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotNoReferenceError;
- Node* projection = graph()->NewNode(common()->Projection(0), node);
- NodeProperties::ReplaceUses(node, projection, node, node, node);
- node->RemoveInput(NodeProperties::FirstValueIndex(node));
- node->InsertInput(zone(), 1, jsgraph()->Constant(access.name()));
- ReplaceWithRuntimeCall(node, function_id);
- projection->ReplaceInput(0, node);
-}
-
-
void JSGenericLowering::LowerJSCreate(Node* node) {
- ReplaceWithRuntimeCall(node, Runtime::kNewObject);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ Callable callable = CodeFactory::FastNewObject(isolate());
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSCreateArguments(Node* node) {
- const CreateArgumentsParameters& p = CreateArgumentsParametersOf(node->op());
- switch (p.type()) {
- case CreateArgumentsParameters::kMappedArguments:
+ CreateArgumentsType const type = CreateArgumentsTypeOf(node->op());
+ switch (type) {
+ case CreateArgumentsType::kMappedArguments:
ReplaceWithRuntimeCall(node, Runtime::kNewSloppyArguments_Generic);
break;
- case CreateArgumentsParameters::kUnmappedArguments:
- ReplaceWithRuntimeCall(node, Runtime::kNewStrictArguments_Generic);
+ case CreateArgumentsType::kUnmappedArguments:
+ ReplaceWithRuntimeCall(node, Runtime::kNewStrictArguments);
break;
- case CreateArgumentsParameters::kRestArray:
- node->InsertInput(zone(), 1, jsgraph()->Constant(p.start_index()));
- ReplaceWithRuntimeCall(node, Runtime::kNewRestArguments_Generic);
+ case CreateArgumentsType::kRestParameter:
+ ReplaceWithRuntimeCall(node, Runtime::kNewRestParameter);
break;
}
}
@@ -473,7 +533,8 @@ void JSGenericLowering::LowerJSCreateArguments(Node* node) {
void JSGenericLowering::LowerJSCreateArray(Node* node) {
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
- Node* new_target = node->InputAt(1);
+ Handle<AllocationSite> const site = p.site();
+
// TODO(turbofan): We embed the AllocationSite from the Operator at this
// point, which we should not do once we want to both consume the feedback
// and at the same time share the optimized code across native contexts,
@@ -481,21 +542,93 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
// stored in the type feedback vector after all). Once we go for cross
// context code generation, we should somehow find a way to get to the
// allocation site for the actual native context at runtime.
- Node* type_info = p.site().is_null() ? jsgraph()->UndefinedConstant()
- : jsgraph()->HeapConstant(p.site());
- node->RemoveInput(1);
- node->InsertInput(zone(), 1 + arity, new_target);
- node->InsertInput(zone(), 2 + arity, type_info);
- ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
+ if (!site.is_null()) {
+ // Reduce {node} to the appropriate ArrayConstructorStub backend.
+ // Note that these stubs "behave" like JSFunctions, which means they
+ // expect a receiver on the stack, which they remove. We just push
+ // undefined for the receiver.
+ ElementsKind elements_kind = site->GetElementsKind();
+ AllocationSiteOverrideMode override_mode =
+ (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
+ ? DISABLE_ALLOCATION_SITES
+ : DONT_OVERRIDE;
+ if (arity == 0) {
+ ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
+ override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
+ CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ } else if (arity == 1) {
+ // TODO(bmeurer): Optimize for the 0 length non-holey case?
+ ArraySingleArgumentConstructorStub stub(
+ isolate(), GetHoleyElementsKind(elements_kind), override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
+ CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ } else {
+ ArrayNArgumentsConstructorStub stub(isolate(), elements_kind,
+ override_mode);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
+ arity + 1, CallDescriptor::kNeedsFrameState);
+ node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+ }
+ } else {
+ Node* new_target = node->InputAt(1);
+ Node* type_info = site.is_null() ? jsgraph()->UndefinedConstant()
+ : jsgraph()->HeapConstant(site);
+ node->RemoveInput(1);
+ node->InsertInput(zone(), 1 + arity, new_target);
+ node->InsertInput(zone(), 2 + arity, type_info);
+ ReplaceWithRuntimeCall(node, Runtime::kNewArray, arity + 3);
+ }
}
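
The lowering above selects one of three Array constructor stubs purely by call
arity: a dedicated stub for zero arguments, another for a single argument, and
a generic stub for N arguments, always pushing undefined for the receiver. A
minimal standalone sketch of that dispatch, with ArrayStubKind as a
hypothetical stand-in for the real CodeStub classes:

#include <cassert>
#include <cstdio>

// Hypothetical stand-in for the three stub flavours chosen above.
enum class ArrayStubKind { kNoArgument, kSingleArgument, kNArguments };

// Mirrors the arity dispatch in LowerJSCreateArray: arity 0 and 1 get
// specialized stubs, everything else falls back to the generic stub.
ArrayStubKind ChooseArrayStub(int arity) {
  assert(arity >= 0);
  if (arity == 0) return ArrayStubKind::kNoArgument;
  if (arity == 1) return ArrayStubKind::kSingleArgument;
  return ArrayStubKind::kNArguments;
}

int main() {
  std::printf("%d %d %d\n", static_cast<int>(ChooseArrayStub(0)),
              static_cast<int>(ChooseArrayStub(1)),
              static_cast<int>(ChooseArrayStub(5)));
  return 0;
}
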
void JSGenericLowering::LowerJSCreateClosure(Node* node) {
- CreateClosureParameters p = CreateClosureParametersOf(node->op());
- node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.shared_info()));
- ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
- ? Runtime::kNewClosure_Tenured
- : Runtime::kNewClosure);
+ CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ Handle<SharedFunctionInfo> const shared_info = p.shared_info();
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
+
+ // Use the FastNewClosureStub, which allocates in new space, only for
+ // nested functions that don't need their literals cloned.
+ if (p.pretenure() == NOT_TENURED && shared_info->num_literals() == 0) {
+ Callable callable = CodeFactory::FastNewClosure(
+ isolate(), shared_info->language_mode(), shared_info->kind());
+ ReplaceWithStubCall(node, callable, flags);
+ } else {
+ ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
+ ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
+ }
+}
+
+
+void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
+ int const slot_count = OpParameter<int>(node->op());
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+
+ // Use the FastNewContextStub only for function contexts up to the maximum size.
+ if (slot_count <= FastNewContextStub::kMaximumSlots) {
+ Callable callable = CodeFactory::FastNewContext(isolate(), slot_count);
+ ReplaceWithStubCall(node, callable, flags);
+ } else {
+ ReplaceWithRuntimeCall(node, Runtime::kNewFunctionContext);
+ }
}
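
Both closure and context creation now follow the same stub-versus-runtime
pattern: small cases take a fast stub, everything else calls into the runtime.
A hedged sketch of the context-size threshold (the constant's value below is
an assumption for illustration; the real limit is
FastNewContextStub::kMaximumSlots):

#include <iostream>

// Assumed illustrative value, standing in for
// FastNewContextStub::kMaximumSlots.
constexpr int kMaximumSlots = 64;

// True if a function context with `slot_count` slots may be allocated by the
// fast stub; otherwise the lowering emits Runtime::kNewFunctionContext.
bool UsesFastNewContextStub(int slot_count) {
  return slot_count <= kMaximumSlots;
}

int main() {
  std::cout << UsesFastNewContextStub(8) << " "
            << UsesFastNewContextStub(1024) << "\n";  // prints: 1 0
  return 0;
}
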
@@ -506,19 +639,42 @@ void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ int const length = Handle<FixedArray>::cast(p.constant())->length();
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
- node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
- ReplaceWithRuntimeCall(node, Runtime::kCreateArrayLiteral);
+
+ // Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
+ // initial length limit for arrays with "fast" elements kind.
+ if ((p.flags() & ArrayLiteral::kShallowElements) != 0 &&
+ (p.flags() & ArrayLiteral::kIsStrong) == 0 &&
+ length < JSArray::kInitialMaxFastElementArray) {
+ Callable callable = CodeFactory::FastCloneShallowArray(isolate());
+ ReplaceWithStubCall(node, callable, flags);
+ } else {
+ node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
+ ReplaceWithRuntimeCall(node, Runtime::kCreateArrayLiteral);
+ }
}
void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ int const length = Handle<FixedArray>::cast(p.constant())->length();
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
- ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
+
+ // Use the FastCloneShallowObjectStub only for shallow boilerplates without
+ // elements up to the number of properties that the stubs can handle.
+ if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
+ length <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ Callable callable = CodeFactory::FastCloneShallowObject(isolate(), length);
+ ReplaceWithStubCall(node, callable, flags);
+ } else {
+ ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
+ }
}
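
The two literal lowerings above gate their fast-clone stubs on the same shape
of predicate: the boilerplate must be shallow and small enough, and (for
arrays) not strong. A standalone sketch of both eligibility checks; the flag
bits and the two limits are hypothetical placeholders for
ArrayLiteral::kShallowElements, ArrayLiteral::kIsStrong,
ObjectLiteral::kShallowProperties and the real stub constants:

#include <cstdint>
#include <iostream>

// Hypothetical flag bits and limits; see the real definitions referenced in
// the code above.
constexpr uint32_t kShallowElements = 1u << 0;    // ArrayLiteral::kShallowElements
constexpr uint32_t kIsStrong = 1u << 1;           // ArrayLiteral::kIsStrong
constexpr uint32_t kShallowProperties = 1u << 0;  // ObjectLiteral::kShallowProperties
constexpr int kMaxFastArrayLength = 100000;       // assumed value
constexpr int kMaxClonedProperties = 6;           // assumed value

// Note the asymmetry carried over from the lowering: strict '<' for the
// array length, '<=' for the property count.
bool CanUseFastCloneShallowArray(uint32_t flags, int length) {
  return (flags & kShallowElements) != 0 && (flags & kIsStrong) == 0 &&
         length < kMaxFastArrayLength;
}

bool CanUseFastCloneShallowObject(uint32_t flags, int length) {
  return (flags & kShallowProperties) != 0 && length <= kMaxClonedProperties;
}

int main() {
  std::cout << CanUseFastCloneShallowArray(kShallowElements, 3) << " "
            << CanUseFastCloneShallowObject(0, 3) << "\n";  // prints: 1 0
  return 0;
}
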
@@ -614,173 +770,7 @@ void JSGenericLowering::LowerJSForInNext(Node* node) {
void JSGenericLowering::LowerJSForInPrepare(Node* node) {
- Node* object = NodeProperties::GetValueInput(node, 0);
- Node* context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
-
- // Get the set of properties to enumerate.
- Runtime::Function const* function =
- Runtime::FunctionForId(Runtime::kGetPropertyNamesFast);
- CallDescriptor const* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function->function_id, 1, Operator::kNoProperties,
- CallDescriptor::kNeedsFrameState);
- Node* cache_type = effect = graph()->NewNode(
- common()->Call(descriptor),
- jsgraph()->CEntryStubConstant(function->result_size), object,
- jsgraph()->ExternalConstant(function->function_id),
- jsgraph()->Int32Constant(1), context, frame_state, effect, control);
- control = graph()->NewNode(common()->IfSuccess(), cache_type);
-
- Node* object_map = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), object,
- jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
- effect, control);
- Node* cache_type_map = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), cache_type,
- jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
- effect, control);
- Node* meta_map = jsgraph()->HeapConstant(isolate()->factory()->meta_map());
-
- // If we got a map from the GetPropertyNamesFast runtime call, we can do a
- // fast modification check. Otherwise, we got a fixed array, and we have to
- // perform a slow check on every iteration.
- Node* check0 =
- graph()->NewNode(machine()->WordEqual(), cache_type_map, meta_map);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* cache_array_true0;
- Node* cache_length_true0;
- Node* cache_type_true0;
- Node* etrue0;
- {
- // Enum cache case.
- Node* cache_type_enum_length = etrue0 = graph()->NewNode(
- machine()->Load(MachineType::Uint32()), cache_type,
- jsgraph()->IntPtrConstant(Map::kBitField3Offset - kHeapObjectTag),
- effect, if_true0);
- cache_type_enum_length =
- graph()->NewNode(machine()->Word32And(), cache_type_enum_length,
- jsgraph()->Uint32Constant(Map::EnumLengthBits::kMask));
-
- Node* check1 =
- graph()->NewNode(machine()->Word32Equal(), cache_type_enum_length,
- jsgraph()->Int32Constant(0));
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* cache_array_true1;
- Node* etrue1;
- {
- // No properties to enumerate.
- cache_array_true1 =
- jsgraph()->HeapConstant(isolate()->factory()->empty_fixed_array());
- etrue1 = etrue0;
- }
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* cache_array_false1;
- Node* efalse1;
- {
- // Load the enumeration cache from the instance descriptors of {object}.
- Node* object_map_descriptors = efalse1 = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), object_map,
- jsgraph()->IntPtrConstant(Map::kDescriptorsOffset - kHeapObjectTag),
- etrue0, if_false1);
- Node* object_map_enum_cache = efalse1 = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), object_map_descriptors,
- jsgraph()->IntPtrConstant(DescriptorArray::kEnumCacheOffset -
- kHeapObjectTag),
- efalse1, if_false1);
- cache_array_false1 = efalse1 = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), object_map_enum_cache,
- jsgraph()->IntPtrConstant(
- DescriptorArray::kEnumCacheBridgeCacheOffset - kHeapObjectTag),
- efalse1, if_false1);
- }
-
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- etrue0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
- cache_array_true0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- cache_array_true1, cache_array_false1, if_true0);
-
- cache_length_true0 = graph()->NewNode(
- machine()->WordShl(),
- machine()->Is64()
- ? graph()->NewNode(machine()->ChangeUint32ToUint64(),
- cache_type_enum_length)
- : cache_type_enum_length,
- jsgraph()->Int32Constant(kSmiShiftSize + kSmiTagSize));
- cache_type_true0 = cache_type;
- }
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* cache_array_false0;
- Node* cache_length_false0;
- Node* cache_type_false0;
- Node* efalse0;
- {
- // FixedArray case.
- cache_type_false0 = jsgraph()->OneConstant(); // Smi means slow check
- cache_array_false0 = cache_type;
- cache_length_false0 = efalse0 = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), cache_array_false0,
- jsgraph()->IntPtrConstant(FixedArray::kLengthOffset - kHeapObjectTag),
- effect, if_false0);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* cache_array =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- cache_array_true0, cache_array_false0, control);
- Node* cache_length =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- cache_length_true0, cache_length_false0, control);
- cache_type =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- cache_type_true0, cache_type_false0, control);
-
- for (auto edge : node->use_edges()) {
- if (NodeProperties::IsEffectEdge(edge)) {
- edge.UpdateTo(effect);
- } else if (NodeProperties::IsControlEdge(edge)) {
- Node* const use = edge.from();
- if (use->opcode() == IrOpcode::kIfSuccess) {
- use->ReplaceUses(control);
- use->Kill();
- } else if (use->opcode() == IrOpcode::kIfException) {
- edge.UpdateTo(cache_type_true0);
- } else {
- UNREACHABLE();
- }
- } else {
- Node* const use = edge.from();
- DCHECK(NodeProperties::IsValueEdge(edge));
- DCHECK_EQ(IrOpcode::kProjection, use->opcode());
- switch (ProjectionIndexOf(use->op())) {
- case 0:
- use->ReplaceUses(cache_type);
- break;
- case 1:
- use->ReplaceUses(cache_array);
- break;
- case 2:
- use->ReplaceUses(cache_length);
- break;
- default:
- UNREACHABLE();
- break;
- }
- use->Kill();
- }
- }
+ ReplaceWithRuntimeCall(node, Runtime::kForInPrepare);
}
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
index ffce9126df..f4acdf6305 100644
--- a/deps/v8/src/compiler/js-generic-lowering.h
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -36,7 +36,7 @@ class JSGenericLowering final : public Reducer {
#undef DECLARE_LOWER
// Helpers to replace existing nodes with a generic call.
- void ReplaceWithCompareIC(Node* node, Token::Value token, Strength strength);
+ void ReplaceWithCompareIC(Node* node, Token::Value token);
void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
diff --git a/deps/v8/src/compiler/js-global-object-specialization.cc b/deps/v8/src/compiler/js-global-object-specialization.cc
index e6f01b3efb..132dec6ffb 100644
--- a/deps/v8/src/compiler/js-global-object-specialization.cc
+++ b/deps/v8/src/compiler/js-global-object-specialization.cc
@@ -27,11 +27,10 @@ struct JSGlobalObjectSpecialization::ScriptContextTableLookupResult {
JSGlobalObjectSpecialization::JSGlobalObjectSpecialization(
- Editor* editor, JSGraph* jsgraph, Flags flags,
+ Editor* editor, JSGraph* jsgraph,
MaybeHandle<Context> native_context, CompilationDependencies* dependencies)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
- flags_(flags),
native_context_(native_context),
dependencies_(dependencies),
type_cache_(TypeCache::Get()) {}
@@ -49,7 +48,6 @@ Reduction JSGlobalObjectSpecialization::Reduce(Node* node) {
return NoChange();
}
-
Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
@@ -88,47 +86,36 @@ Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
return Replace(value);
}
- // Load from non-configurable, data property on the global can be lowered to
- // a field load, even without deoptimization, because the property cannot be
- // deleted or reconfigured to an accessor/interceptor property. Yet, if
- // deoptimization support is available, we can constant-fold certain global
- // properties or at least lower them to field loads annotated with more
- // precise type feedback.
- Type* property_cell_value_type = Type::Tagged();
- if (flags() & kDeoptimizationEnabled) {
- // Record a code dependency on the cell if we can benefit from the
- // additional feedback, or the global property is configurable (i.e.
- // can be deleted or reconfigured to an accessor property).
- if (property_details.cell_type() != PropertyCellType::kMutable ||
- property_details.IsConfigurable()) {
- dependencies()->AssumePropertyCell(property_cell);
- }
+ // Record a code dependency on the cell if we can benefit from the
+ // additional feedback, or the global property is configurable (i.e.
+ // can be deleted or reconfigured to an accessor property).
+ if (property_details.cell_type() != PropertyCellType::kMutable ||
+ property_details.IsConfigurable()) {
+ dependencies()->AssumePropertyCell(property_cell);
+ }
- // Load from constant/undefined global property can be constant-folded.
- if ((property_details.cell_type() == PropertyCellType::kConstant ||
- property_details.cell_type() == PropertyCellType::kUndefined)) {
- Node* value = jsgraph()->Constant(property_cell_value);
- ReplaceWithValue(node, value);
- return Replace(value);
- }
+ // Load from constant/undefined global property can be constant-folded.
+ if (property_details.cell_type() == PropertyCellType::kConstant ||
+ property_details.cell_type() == PropertyCellType::kUndefined) {
+ Node* value = jsgraph()->Constant(property_cell_value);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
- // Load from constant type cell can benefit from type feedback.
- if (property_details.cell_type() == PropertyCellType::kConstantType) {
- // Compute proper type based on the current value in the cell.
- if (property_cell_value->IsSmi()) {
- property_cell_value_type = type_cache_.kSmi;
- } else if (property_cell_value->IsNumber()) {
- property_cell_value_type = type_cache_.kHeapNumber;
- } else {
- Handle<Map> property_cell_value_map(
- Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
- property_cell_value_type =
- Type::Class(property_cell_value_map, graph()->zone());
- }
+ // Load from constant type cell can benefit from type feedback.
+ Type* property_cell_value_type = Type::Tagged();
+ if (property_details.cell_type() == PropertyCellType::kConstantType) {
+ // Compute proper type based on the current value in the cell.
+ if (property_cell_value->IsSmi()) {
+ property_cell_value_type = type_cache_.kSmi;
+ } else if (property_cell_value->IsNumber()) {
+ property_cell_value_type = type_cache_.kHeapNumber;
+ } else {
+ Handle<Map> property_cell_value_map(
+ Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
+ property_cell_value_type =
+ Type::Class(property_cell_value_map, graph()->zone());
}
- } else if (property_details.IsConfigurable()) {
- // Access to configurable global properties requires deoptimization support.
- return NoChange();
}
Node* value = effect = graph()->NewNode(
simplified()->LoadField(
@@ -178,9 +165,8 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
return NoChange();
}
case PropertyCellType::kConstant: {
- // Store to constant property cell requires deoptimization support,
- // because we might even need to eager deoptimize for mismatch.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ // Record a code dependency on the cell, and just deoptimize if the new
+ // value doesn't match the previous value stored inside the cell.
dependencies()->AssumePropertyCell(property_cell);
Node* check =
graph()->NewNode(simplified()->ReferenceEqual(Type::Tagged()), value,
@@ -193,13 +179,13 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
frame_state, effect, if_false);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
control = graph()->NewNode(common()->IfTrue(), branch);
break;
}
case PropertyCellType::kConstantType: {
- // Store to constant-type property cell requires deoptimization support,
- // because we might even need to eager deoptimize for mismatch.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ // Record a code dependency on the cell, and just deoptimize if the new
+ // value's type doesn't match the type of the previous value in the cell.
dependencies()->AssumePropertyCell(property_cell);
Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
Type* property_cell_value_type = Type::TaggedSigned();
@@ -213,6 +199,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
frame_state, effect, if_true);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
control = graph()->NewNode(common()->IfFalse(), branch);
// Load the {value} map check against the {property_cell} map.
@@ -234,6 +221,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
frame_state, effect, if_false);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
control = graph()->NewNode(common()->IfTrue(), branch);
effect = graph()->NewNode(
simplified()->StoreField(
@@ -243,13 +231,11 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
}
case PropertyCellType::kMutable: {
// Store to non-configurable data property on the global can be lowered
- // to a field store, even without deoptimization, because the property
- // cannot be deleted or reconfigured to an accessor/interceptor property.
+ // to a field store, even without recording a code dependency on the cell,
+ // because the property cannot be deleted or reconfigured to an accessor
+ // or interceptor property.
if (property_details.IsConfigurable()) {
- // With deoptimization support, we can lower stores even to configurable
- // data properties on the global object, by adding a code dependency on
- // the cell.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+ // Protect lowering by recording a code dependency on the cell.
dependencies()->AssumePropertyCell(property_cell);
}
effect = graph()->NewNode(
diff --git a/deps/v8/src/compiler/js-global-object-specialization.h b/deps/v8/src/compiler/js-global-object-specialization.h
index 83d890c938..3ffc67a377 100644
--- a/deps/v8/src/compiler/js-global-object-specialization.h
+++ b/deps/v8/src/compiler/js-global-object-specialization.h
@@ -5,7 +5,6 @@
#ifndef V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
#define V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
-#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
namespace v8 {
@@ -30,14 +29,7 @@ class SimplifiedOperatorBuilder;
// nodes.
class JSGlobalObjectSpecialization final : public AdvancedReducer {
public:
- // Flags that control the mode of operation.
- enum Flag {
- kNoFlags = 0u,
- kDeoptimizationEnabled = 1u << 0,
- };
- typedef base::Flags<Flag> Flags;
-
- JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph, Flags flags,
+ JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph,
MaybeHandle<Context> native_context,
CompilationDependencies* dependencies);
@@ -61,12 +53,10 @@ class JSGlobalObjectSpecialization final : public AdvancedReducer {
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
- Flags flags() const { return flags_; }
MaybeHandle<Context> native_context() const { return native_context_; }
CompilationDependencies* dependencies() const { return dependencies_; }
JSGraph* const jsgraph_;
- Flags const flags_;
MaybeHandle<Context> native_context_;
CompilationDependencies* const dependencies_;
TypeCache const& type_cache_;
@@ -74,8 +64,6 @@ class JSGlobalObjectSpecialization final : public AdvancedReducer {
DISALLOW_COPY_AND_ASSIGN(JSGlobalObjectSpecialization);
};
-DEFINE_OPERATORS_FOR_FLAGS(JSGlobalObjectSpecialization::Flags)
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 99a1547b9a..2244f9bbfe 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -205,6 +205,7 @@ Reduction JSInliner::InlineCall(Node* call, Node* new_target, Node* context,
case IrOpcode::kThrow:
NodeProperties::MergeControlToEnd(jsgraph_->graph(), jsgraph_->common(),
input);
+ Revisit(jsgraph_->graph()->end());
break;
default:
UNREACHABLE();
@@ -243,8 +244,7 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
Handle<SharedFunctionInfo> shared) {
const FrameStateFunctionInfo* state_info =
jsgraph_->common()->CreateFrameStateFunctionInfo(
- frame_state_type, parameter_count + 1, 0, shared,
- CALL_MAINTAINS_NATIVE_CONTEXT);
+ frame_state_type, parameter_count + 1, 0, shared);
const Operator* op = jsgraph_->common()->FrameState(
BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
@@ -267,10 +267,18 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
namespace {
// TODO(mstarzinger,verwaest): Move this predicate onto SharedFunctionInfo?
-bool NeedsImplicitReceiver(Handle<JSFunction> function, Isolate* isolate) {
- Code* construct_stub = function->shared()->construct_stub();
- return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub() &&
- construct_stub != *isolate->builtins()->ConstructedNonConstructable();
+bool NeedsImplicitReceiver(Handle<SharedFunctionInfo> shared_info) {
+ DisallowHeapAllocation no_gc;
+ Isolate* const isolate = shared_info->GetIsolate();
+ Code* const construct_stub = shared_info->construct_stub();
+ return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub();
+}
+
+bool IsNonConstructible(Handle<SharedFunctionInfo> shared_info) {
+ DisallowHeapAllocation no_gc;
+ Isolate* const isolate = shared_info->GetIsolate();
+ Code* const construct_stub = shared_info->construct_stub();
+ return construct_stub == *isolate->builtins()->ConstructedNonConstructable();
}
} // namespace
@@ -294,20 +302,21 @@ Reduction JSInliner::Reduce(Node* node) {
Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
JSCallAccessor call(node);
+ Handle<SharedFunctionInfo> shared_info(function->shared());
// Function must be inlineable.
- if (!function->shared()->IsInlineable()) {
+ if (!shared_info->IsInlineable()) {
TRACE("Not inlining %s into %s because callee is not inlineable\n",
- function->shared()->DebugName()->ToCString().get(),
+ shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
// Constructor must be constructable.
if (node->opcode() == IrOpcode::kJSCallConstruct &&
- !function->IsConstructor()) {
+ IsNonConstructible(shared_info)) {
TRACE("Not inlining %s into %s because constructor is not constructable.\n",
- function->shared()->DebugName()->ToCString().get(),
+ shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
@@ -315,17 +324,17 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// Class constructors are callable, but [[Call]] will raise an exception.
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
if (node->opcode() == IrOpcode::kJSCallFunction &&
- IsClassConstructor(function->shared()->kind())) {
+ IsClassConstructor(shared_info->kind())) {
TRACE("Not inlining %s into %s because callee is a class constructor.\n",
- function->shared()->DebugName()->ToCString().get(),
+ shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
// Function contains break points.
- if (function->shared()->HasDebugInfo()) {
+ if (shared_info->HasDebugInfo()) {
TRACE("Not inlining %s into %s because callee may contain break points\n",
- function->shared()->DebugName()->ToCString().get(),
+ shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
@@ -341,7 +350,7 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
if (function->context()->native_context() !=
info_->context()->native_context()) {
TRACE("Not inlining %s into %s because of different native contexts\n",
- function->shared()->DebugName()->ToCString().get(),
+ shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
@@ -352,12 +361,12 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
for (Node* frame_state = call.frame_state_after();
frame_state->opcode() == IrOpcode::kFrameState;
frame_state = frame_state->InputAt(kFrameStateOuterStateInput)) {
- FrameStateInfo const& info = OpParameter<FrameStateInfo>(frame_state);
- Handle<SharedFunctionInfo> shared_info;
- if (info.shared_info().ToHandle(&shared_info) &&
- *shared_info == function->shared()) {
+ FrameStateInfo const& frame_info = OpParameter<FrameStateInfo>(frame_state);
+ Handle<SharedFunctionInfo> frame_shared_info;
+ if (frame_info.shared_info().ToHandle(&frame_shared_info) &&
+ *frame_shared_info == *shared_info) {
TRACE("Not inlining %s into %s because call is recursive\n",
- function->shared()->DebugName()->ToCString().get(),
+ shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
@@ -366,7 +375,7 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// TODO(turbofan): Inlining into a try-block is not yet supported.
if (NodeProperties::IsExceptionalCall(node)) {
TRACE("Not inlining %s into %s because of surrounding try-block\n",
- function->shared()->DebugName()->ToCString().get(),
+ shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
@@ -374,13 +383,11 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
Zone zone;
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info);
- if (info_->is_deoptimization_enabled()) {
- info.MarkAsDeoptimizationEnabled();
- }
+ if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
if (!Compiler::ParseAndAnalyze(info.parse_info())) {
TRACE("Not inlining %s into %s because parsing failed\n",
- function->shared()->DebugName()->ToCString().get(),
+ shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
if (info_->isolate()->has_pending_exception()) {
info_->isolate()->clear_pending_exception();
@@ -394,28 +401,28 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
if (is_strong(info.language_mode()) &&
call.formal_arguments() < parameter_count) {
TRACE("Not inlining %s into %s because too few arguments for strong mode\n",
- function->shared()->DebugName()->ToCString().get(),
+ shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
if (!Compiler::EnsureDeoptimizationSupport(&info)) {
TRACE("Not inlining %s into %s because deoptimization support failed\n",
- function->shared()->DebugName()->ToCString().get(),
+ shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
// Remember that we inlined this function. This needs to be called right
// after we ensure deoptimization support so that the code flusher
// does not remove the code with the deoptimization support.
- info_->AddInlinedFunction(info.shared_info());
+ info_->AddInlinedFunction(shared_info);
// ----------------------------------------------------------------
// After this point, we've made a decision to inline this function.
// We shall not bailout from inlining if we got here.
TRACE("Inlining %s into %s\n",
- function->shared()->DebugName()->ToCString().get(),
+ shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
// TODO(mstarzinger): We could use the temporary zone for the graph because
@@ -442,7 +449,7 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// Note that the context has to be the callers context (input to call node).
Node* receiver = jsgraph_->UndefinedConstant(); // Implicit receiver.
if (node->opcode() == IrOpcode::kJSCallConstruct &&
- NeedsImplicitReceiver(function, info_->isolate())) {
+ NeedsImplicitReceiver(shared_info)) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* context = NodeProperties::GetContextInput(node);
Node* create = jsgraph_->graph()->NewNode(
@@ -491,7 +498,7 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// in that frame state though, as the conversion of the receiver can be repeated
// any number of times, it's not observable.
if (node->opcode() == IrOpcode::kJSCallFunction &&
- is_sloppy(info.language_mode()) && !function->shared()->native()) {
+ is_sloppy(info.language_mode()) && !shared_info->native()) {
const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
Node* convert = jsgraph_->graph()->NewNode(
@@ -509,7 +516,7 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
if (call.formal_arguments() != parameter_count) {
frame_state = CreateArtificialFrameState(
node, frame_state, call.formal_arguments(),
- FrameStateType::kArgumentsAdaptor, info.shared_info());
+ FrameStateType::kArgumentsAdaptor, shared_info);
}
return InlineCall(node, new_target, context, frame_state, start, end);
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index ca5cb932b4..abeb11001d 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -49,20 +49,14 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIncrementStatsCounter(node);
case Runtime::kInlineIsArray:
return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
- case Runtime::kInlineIsDate:
- return ReduceIsInstanceType(node, JS_DATE_TYPE);
case Runtime::kInlineIsTypedArray:
return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
- case Runtime::kInlineIsFunction:
- return ReduceIsFunction(node);
case Runtime::kInlineIsRegExp:
return ReduceIsInstanceType(node, JS_REGEXP_TYPE);
case Runtime::kInlineIsJSReceiver:
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
return ReduceIsSmi(node);
- case Runtime::kInlineJSValueGetValue:
- return ReduceJSValueGetValue(node);
case Runtime::kInlineMathClz32:
return ReduceMathClz32(node);
case Runtime::kInlineMathFloor:
@@ -71,8 +65,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceMathSqrt(node);
case Runtime::kInlineValueOf:
return ReduceValueOf(node);
- case Runtime::kInlineIsMinusZero:
- return ReduceIsMinusZero(node);
case Runtime::kInlineFixedArrayGet:
return ReduceFixedArrayGet(node);
case Runtime::kInlineFixedArraySet:
@@ -148,6 +140,7 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
frame_state, effect, control);
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
node->TrimInputCount(0);
NodeProperties::ChangeOp(node, common()->Dead());
@@ -229,89 +222,8 @@ Reduction JSIntrinsicLowering::ReduceIsInstanceType(
}
-Reduction JSIntrinsicLowering::ReduceIsFunction(Node* node) {
- Node* value = NodeProperties::GetValueInput(node, 0);
- Type* value_type = NodeProperties::GetType(value);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- if (value_type->Is(Type::Function())) {
- value = jsgraph()->TrueConstant();
- } else {
- // if (%_IsSmi(value)) {
- // return false;
- // } else {
- // return FIRST_FUNCTION_TYPE <= %_GetInstanceType(%_GetMap(value))
- // }
- STATIC_ASSERT(LAST_TYPE == LAST_FUNCTION_TYPE);
-
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
- Node* branch = graph()->NewNode(common()->Branch(), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->FalseConstant();
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, effect, if_false),
- effect, if_false);
- Node* vfalse =
- graph()->NewNode(machine()->Uint32LessThanOrEqual(),
- jsgraph()->Int32Constant(FIRST_FUNCTION_TYPE), efalse);
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
- }
- ReplaceWithValue(node, node, effect, control);
- return Replace(value);
-}
-
-
Reduction JSIntrinsicLowering::ReduceIsJSReceiver(Node* node) {
- Node* value = NodeProperties::GetValueInput(node, 0);
- Type* value_type = NodeProperties::GetType(value);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- if (value_type->Is(Type::Receiver())) {
- value = jsgraph()->TrueConstant();
- } else if (!value_type->Maybe(Type::Receiver())) {
- value = jsgraph()->FalseConstant();
- } else {
- // if (%_IsSmi(value)) {
- // return false;
- // } else {
- // return FIRST_JS_RECEIVER_TYPE <= %_GetInstanceType(%_GetMap(value))
- // }
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-
- Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
- Node* branch = graph()->NewNode(common()->Branch(), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->FalseConstant();
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, effect, if_false),
- effect, if_false);
- Node* vfalse = graph()->NewNode(
- machine()->Uint32LessThanOrEqual(),
- jsgraph()->Int32Constant(FIRST_JS_RECEIVER_TYPE), efalse);
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
- }
- ReplaceWithValue(node, node, effect, control);
- return Replace(value);
+ return Change(node, simplified()->ObjectIsReceiver());
}
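
The deleted graph above hand-built the classic receiver test that
ObjectIsReceiver now encapsulates: a Smi is never a receiver, and otherwise
the map's instance type is compared against FIRST_JS_RECEIVER_TYPE, which is
valid as a single lower-bound check because of the STATIC_ASSERT that receiver
types occupy the top of the instance-type range. A plain C++ model of that
predicate, with a placeholder instance-type value:

#include <iostream>

// Placeholder; the real constant is the FIRST_JS_RECEIVER_TYPE enumerator.
constexpr int kFirstJSReceiverType = 1024;

struct TaggedValue {
  bool is_smi;
  int instance_type;  // Only meaningful when !is_smi.
};

bool ObjectIsReceiver(const TaggedValue& value) {
  if (value.is_smi) return false;  // Smis are never JS receivers.
  // One comparison suffices because all receiver types sort last.
  return value.instance_type >= kFirstJSReceiverType;
}

int main() {
  std::cout << ObjectIsReceiver({true, 0}) << " "
            << ObjectIsReceiver({false, kFirstJSReceiverType + 1}) << "\n";
  return 0;
}
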
@@ -320,15 +232,6 @@ Reduction JSIntrinsicLowering::ReduceIsSmi(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceJSValueGetValue(Node* node) {
- Node* value = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- return Change(node, simplified()->LoadField(AccessBuilder::ForValue()), value,
- effect, control);
-}
-
-
Reduction JSIntrinsicLowering::ReduceMathClz32(Node* node) {
return Change(node, machine()->Word32Clz());
}
@@ -420,30 +323,6 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
}
-Reduction JSIntrinsicLowering::ReduceIsMinusZero(Node* node) {
- Node* value = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
-
- Node* double_lo =
- graph()->NewNode(machine()->Float64ExtractLowWord32(), value);
- Node* check1 = graph()->NewNode(machine()->Word32Equal(), double_lo,
- jsgraph()->ZeroConstant());
-
- Node* double_hi =
- graph()->NewNode(machine()->Float64ExtractHighWord32(), value);
- Node* check2 = graph()->NewNode(
- machine()->Word32Equal(), double_hi,
- jsgraph()->Int32Constant(static_cast<int32_t>(0x80000000)));
-
- ReplaceWithValue(node, node, effect);
-
- Node* and_result = graph()->NewNode(machine()->Word32And(), check1, check2);
-
- return Change(node, machine()->Word32Equal(), and_result,
- jsgraph()->Int32Constant(1));
-}
-
-
Reduction JSIntrinsicLowering::ReduceFixedArrayGet(Node* node) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -507,12 +386,43 @@ Reduction JSIntrinsicLowering::ReduceSubString(Node* node) {
Reduction JSIntrinsicLowering::ReduceToInteger(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // ToInteger is a no-op on integer values and -0.
Type* value_type = NodeProperties::GetType(value);
if (value_type->Is(type_cache().kIntegerOrMinusZero)) {
ReplaceWithValue(node, value);
return Replace(value);
}
- return NoChange();
+
+ Node* check = graph()->NewNode(simplified()->ObjectIsSmi(), value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+ Node* vtrue = value;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = effect;
+ Node* vfalse;
+ {
+ vfalse = efalse =
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kToInteger), value,
+ context, frame_state, efalse, if_false);
+ if_false = graph()->NewNode(common()->IfSuccess(), vfalse);
+ }
+
+ control = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, control);
+ // TODO(bmeurer, mstarzinger): Rewire IfException inputs to {vfalse}.
+ ReplaceWithValue(node, value, effect, control);
+ return Changed(value);
}
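
The rewritten ReduceToInteger keeps values already typed as integer (or -0)
untouched and otherwise branches on a Smi check, calling Runtime::kToInteger
only on the slow path. The numeric semantics that make the fast path sound can
be sketched directly:

#include <cmath>
#include <iostream>

// Numeric model of ES6 ToInteger: NaN maps to 0, everything else truncates
// toward zero. Integers and -0 are fixed points, which is exactly why the
// lowering above may skip the runtime call for them.
double ToIntegerModel(double value) {
  if (std::isnan(value)) return 0.0;
  return std::trunc(value);
}

int main() {
  std::cout << ToIntegerModel(3.7) << " " << ToIntegerModel(-3.7) << " "
            << ToIntegerModel(-0.0) << "\n";  // prints: 3 -3 -0
  return 0;
}
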
@@ -589,20 +499,20 @@ Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
size_t const arity = CallRuntimeParametersOf(node->op()).arity();
- NodeProperties::ChangeOp(
- node, javascript()->CallFunction(arity, STRICT, VectorSlotPair(),
- ConvertReceiverMode::kAny,
- TailCallMode::kDisallow));
+ NodeProperties::ChangeOp(node,
+ javascript()->CallFunction(arity, VectorSlotPair(),
+ ConvertReceiverMode::kAny,
+ TailCallMode::kDisallow));
return Changed(node);
}
Reduction JSIntrinsicLowering::ReduceTailCall(Node* node) {
size_t const arity = CallRuntimeParametersOf(node->op()).arity();
- NodeProperties::ChangeOp(
- node, javascript()->CallFunction(arity, STRICT, VectorSlotPair(),
- ConvertReceiverMode::kAny,
- TailCallMode::kAllow));
+ NodeProperties::ChangeOp(node,
+ javascript()->CallFunction(arity, VectorSlotPair(),
+ ConvertReceiverMode::kAny,
+ TailCallMode::kAllow));
return Changed(node);
}
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 1977a5847d..d8e1102afa 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -44,12 +44,9 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Reduction ReduceDoubleHi(Node* node);
Reduction ReduceDoubleLo(Node* node);
Reduction ReduceIncrementStatsCounter(Node* node);
- Reduction ReduceIsMinusZero(Node* node);
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
- Reduction ReduceIsFunction(Node* node);
Reduction ReduceIsJSReceiver(Node* node);
Reduction ReduceIsSmi(Node* node);
- Reduction ReduceJSValueGetValue(Node* node);
Reduction ReduceMathClz32(Node* node);
Reduction ReduceMathFloor(Node* node);
Reduction ReduceMathSqrt(Node* node);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 06cf770f33..2c11794dba 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -38,6 +38,8 @@ JSNativeContextSpecialization::JSNativeContextSpecialization(
Reduction JSNativeContextSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
+ case IrOpcode::kJSLoadContext:
+ return ReduceJSLoadContext(node);
case IrOpcode::kJSLoadNamed:
return ReduceJSLoadNamed(node);
case IrOpcode::kJSStoreNamed:
@@ -52,6 +54,21 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return NoChange();
}
+Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
+ ContextAccess const& access = ContextAccessOf(node->op());
+ Handle<Context> native_context;
+ // Specialize JSLoadContext(NATIVE_CONTEXT_INDEX) to the known native
+ // context (if any), so we can constant-fold those fields, which is
+ // safe, since the NATIVE_CONTEXT_INDEX slot is always immutable.
+ if (access.index() == Context::NATIVE_CONTEXT_INDEX &&
+ GetNativeContext(node).ToHandle(&native_context)) {
+ Node* value = jsgraph()->HeapConstant(native_context);
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ return NoChange();
+}
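
This reduction is ordinary constant folding: the NATIVE_CONTEXT_INDEX slot is
immutable, so a load of it from a statically known context folds to the value
itself. A minimal sketch of folding a load from an immutable slot (the types
are illustrative, not V8's):

#include <iostream>
#include <optional>

struct Context {
  const int* native_context;  // Immutable once the context is created.
};

// If the concrete context is known at compile time, the slot load folds to a
// constant; otherwise the load must stay in the graph (NoChange()).
std::optional<const int*> FoldNativeContextLoad(const Context* known) {
  if (known == nullptr) return std::nullopt;
  return known->native_context;
}

int main() {
  int native = 42;
  Context ctx{&native};
  std::cout << (FoldNativeContextLoad(&ctx) ? "folded" : "kept") << " "
            << (FoldNativeContextLoad(nullptr) ? "folded" : "kept") << "\n";
  return 0;
}
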
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* node, Node* value, MapHandleList const& receiver_maps,
@@ -418,6 +435,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
frame_state, exit_effect, exit_control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
// Generate the final merge point for all (polymorphic) branches.
int const control_count = static_cast<int>(controls.size());
@@ -443,21 +461,49 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
}
+Reduction JSNativeContextSpecialization::ReduceNamedAccess(
+ Node* node, Node* value, FeedbackNexus const& nexus, Handle<Name> name,
+ AccessMode access_mode, LanguageMode language_mode) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
+ node->opcode() == IrOpcode::kJSStoreNamed);
+
+ // Check if the {nexus} reports type feedback for the IC.
+ if (nexus.IsUninitialized()) {
+ if ((flags() & kDeoptimizationEnabled) &&
+ (flags() & kBailoutOnUninitialized)) {
+ // TODO(turbofan): Implement all eager bailout points correctly in
+ // the graph builder.
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ if (!OpParameter<FrameStateInfo>(frame_state).bailout_id().IsNone()) {
+ return ReduceSoftDeoptimize(node);
+ }
+ }
+ return NoChange();
+ }
+
+ // Extract receiver maps from the IC using the {nexus}.
+ MapHandleList receiver_maps;
+ if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
+ DCHECK_LT(0, receiver_maps.length());
+
+ // Try to lower the named access based on the {receiver_maps}.
+ return ReduceNamedAccess(node, value, receiver_maps, name, access_mode,
+ language_mode);
+}
+
+
Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
NamedAccess const& p = NamedAccessOf(node->op());
Node* const value = jsgraph()->Dead();
// Extract receiver maps from the LOAD_IC using the LoadICNexus.
- MapHandleList receiver_maps;
if (!p.feedback().IsValid()) return NoChange();
LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
- if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
- DCHECK_LT(0, receiver_maps.length());
// Try to lower the named access based on the {receiver_maps}.
- return ReduceNamedAccess(node, value, receiver_maps, p.name(),
- AccessMode::kLoad, p.language_mode());
+ return ReduceNamedAccess(node, value, nexus, p.name(), AccessMode::kLoad,
+ p.language_mode());
}
@@ -467,15 +513,12 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
Node* const value = NodeProperties::GetValueInput(node, 1);
// Extract receiver maps from the STORE_IC using the StoreICNexus.
- MapHandleList receiver_maps;
if (!p.feedback().IsValid()) return NoChange();
StoreICNexus nexus(p.feedback().vector(), p.feedback().slot());
- if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
- DCHECK_LT(0, receiver_maps.length());
// Try to lower the named access based on the {receiver_maps}.
- return ReduceNamedAccess(node, value, receiver_maps, p.name(),
- AccessMode::kStore, p.language_mode());
+ return ReduceNamedAccess(node, value, nexus, p.name(), AccessMode::kStore,
+ p.language_mode());
}
@@ -705,7 +748,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Type* element_type = Type::Any();
MachineType element_machine_type = MachineType::AnyTagged();
if (IsFastDoubleElementsKind(elements_kind)) {
- element_type = type_cache_.kFloat64;
+ element_type = Type::Number();
element_machine_type = MachineType::Float64();
} else if (IsFastSmiElementsKind(elements_kind)) {
element_type = type_cache_.kSmi;
@@ -850,6 +893,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
frame_state, exit_effect, exit_control);
// TODO(bmeurer): This should be on the AdvancedReducer somehow.
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
// Generate the final merge point for all (polymorphic) branches.
int const control_count = static_cast<int>(controls.size());
@@ -882,6 +926,20 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
node->opcode() == IrOpcode::kJSStoreProperty);
+ // Check if the {nexus} reports type feedback for the IC.
+ if (nexus.IsUninitialized()) {
+ if ((flags() & kDeoptimizationEnabled) &&
+ (flags() & kBailoutOnUninitialized)) {
+ // TODO(turbofan): Implement all eager bailout points correctly in
+ // the graph builder.
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ if (!OpParameter<FrameStateInfo>(frame_state).bailout_id().IsNone()) {
+ return ReduceSoftDeoptimize(node);
+ }
+ }
+ return NoChange();
+ }
+
// Extract receiver maps from the {nexus}.
MapHandleList receiver_maps;
if (nexus.ExtractMaps(&receiver_maps) == 0) return NoChange();
@@ -921,6 +979,22 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
}
+Reduction JSNativeContextSpecialization::ReduceSoftDeoptimize(Node* node) {
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* deoptimize =
+ graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kSoft), frame_state,
+ effect, control);
+ // TODO(bmeurer): This should be on the AdvancedReducer somehow.
+ NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
+ Revisit(graph()->end());
+ node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Dead());
+ return Changed(node);
+}
+
+
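
ReduceSoftDeoptimize backs the new bailout policy shared by named and keyed
accesses: when the feedback nexus is uninitialized, both flags are set, and
the frame state carries a usable bailout id, the whole access is replaced by a
soft deoptimization point. A sketch of just that decision, reusing the flag
bits declared in the header diff below:

#include <cstdint>
#include <iostream>

// Same bit assignments as JSNativeContextSpecialization::Flag.
constexpr uint32_t kBailoutOnUninitialized = 1u << 0;
constexpr uint32_t kDeoptimizationEnabled = 1u << 1;

// True when an access with no IC feedback should become a soft deopt instead
// of staying in the graph as a generic access.
bool ShouldSoftDeoptimize(bool nexus_uninitialized, uint32_t flags,
                          bool bailout_id_is_none) {
  return nexus_uninitialized && (flags & kDeoptimizationEnabled) != 0 &&
         (flags & kBailoutOnUninitialized) != 0 && !bailout_id_is_none;
}

int main() {
  uint32_t flags = kDeoptimizationEnabled | kBailoutOnUninitialized;
  std::cout << ShouldSoftDeoptimize(true, flags, false) << " "
            << ShouldSoftDeoptimize(true, kDeoptimizationEnabled, false)
            << "\n";  // prints: 1 0
  return 0;
}
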
Reduction JSNativeContextSpecialization::ReduceJSLoadProperty(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
PropertyAccess const& p = PropertyAccessOf(node->op());
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 45ff87f619..4251d72fc4 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -38,7 +38,8 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
// Flags that control the mode of operation.
enum Flag {
kNoFlags = 0u,
- kDeoptimizationEnabled = 1u << 0,
+ kBailoutOnUninitialized = 1u << 0,
+ kDeoptimizationEnabled = 1u << 1,
};
typedef base::Flags<Flag> Flags;
@@ -50,6 +51,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Reduction Reduce(Node* node) final;
private:
+ Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSStoreNamed(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
@@ -66,11 +68,17 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
LanguageMode language_mode,
KeyedAccessStoreMode store_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
+ FeedbackNexus const& nexus, Handle<Name> name,
+ AccessMode access_mode,
+ LanguageMode language_mode);
+ Reduction ReduceNamedAccess(Node* node, Node* value,
MapHandleList const& receiver_maps,
Handle<Name> name, AccessMode access_mode,
LanguageMode language_mode,
Node* index = nullptr);
+ Reduction ReduceSoftDeoptimize(Node* node);
+
// Adds stability dependencies on all prototypes of every class in
// {receiver_type} up to (and including) the {holder}.
void AssumePrototypesStable(Type* receiver_type,
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 1455f0a9a9..5fcd51928d 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -52,63 +52,6 @@ ToBooleanHints ToBooleanHintsOf(Operator const* op) {
}
-size_t hash_value(TailCallMode mode) {
- return base::hash_value(static_cast<unsigned>(mode));
-}
-
-
-std::ostream& operator<<(std::ostream& os, TailCallMode mode) {
- switch (mode) {
- case TailCallMode::kAllow:
- return os << "ALLOW_TAIL_CALLS";
- case TailCallMode::kDisallow:
- return os << "DISALLOW_TAIL_CALLS";
- }
- UNREACHABLE();
- return os;
-}
-
-
-bool operator==(BinaryOperationParameters const& lhs,
- BinaryOperationParameters const& rhs) {
- return lhs.language_mode() == rhs.language_mode() &&
- lhs.hints() == rhs.hints();
-}
-
-
-bool operator!=(BinaryOperationParameters const& lhs,
- BinaryOperationParameters const& rhs) {
- return !(lhs == rhs);
-}
-
-
-size_t hash_value(BinaryOperationParameters const& p) {
- return base::hash_combine(p.language_mode(), p.hints());
-}
-
-
-std::ostream& operator<<(std::ostream& os, BinaryOperationParameters const& p) {
- return os << p.language_mode() << ", " << p.hints();
-}
-
-
-BinaryOperationParameters const& BinaryOperationParametersOf(
- Operator const* op) {
- DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
- op->opcode() == IrOpcode::kJSBitwiseXor ||
- op->opcode() == IrOpcode::kJSBitwiseAnd ||
- op->opcode() == IrOpcode::kJSShiftLeft ||
- op->opcode() == IrOpcode::kJSShiftRight ||
- op->opcode() == IrOpcode::kJSShiftRightLogical ||
- op->opcode() == IrOpcode::kJSAdd ||
- op->opcode() == IrOpcode::kJSSubtract ||
- op->opcode() == IrOpcode::kJSMultiply ||
- op->opcode() == IrOpcode::kJSDivide ||
- op->opcode() == IrOpcode::kJSModulus);
- return OpParameter<BinaryOperationParameters>(op);
-}
-
-
bool operator==(CallConstructParameters const& lhs,
CallConstructParameters const& rhs) {
return lhs.arity() == rhs.arity() && lhs.feedback() == rhs.feedback();
@@ -138,8 +81,7 @@ CallConstructParameters const& CallConstructParametersOf(Operator const* op) {
std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
- os << p.arity() << ", " << p.language_mode() << ", " << p.convert_mode()
- << ", " << p.tail_call_mode();
+ os << p.arity() << ", " << p.convert_mode() << ", " << p.tail_call_mode();
return os;
}
@@ -216,38 +158,6 @@ ContextAccess const& ContextAccessOf(Operator const* op) {
}
-DynamicAccess::DynamicAccess(const Handle<String>& name, TypeofMode typeof_mode)
- : name_(name), typeof_mode_(typeof_mode) {}
-
-
-bool operator==(DynamicAccess const& lhs, DynamicAccess const& rhs) {
- UNIMPLEMENTED();
- return true;
-}
-
-
-bool operator!=(DynamicAccess const& lhs, DynamicAccess const& rhs) {
- return !(lhs == rhs);
-}
-
-
-size_t hash_value(DynamicAccess const& access) {
- UNIMPLEMENTED();
- return 0;
-}
-
-
-std::ostream& operator<<(std::ostream& os, DynamicAccess const& access) {
- return os << Brief(*access.name()) << ", " << access.typeof_mode();
-}
-
-
-DynamicAccess const& DynamicAccessOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kJSLoadDynamic, op->opcode());
- return OpParameter<DynamicAccess>(op);
-}
-
-
bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) {
return lhs.name().location() == rhs.name().location() &&
lhs.language_mode() == rhs.language_mode() &&
@@ -367,32 +277,9 @@ const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op) {
}
-bool operator==(CreateArgumentsParameters const& lhs,
- CreateArgumentsParameters const& rhs) {
- return lhs.type() == rhs.type() && lhs.start_index() == rhs.start_index();
-}
-
-
-bool operator!=(CreateArgumentsParameters const& lhs,
- CreateArgumentsParameters const& rhs) {
- return !(lhs == rhs);
-}
-
-
-size_t hash_value(CreateArgumentsParameters const& p) {
- return base::hash_combine(p.type(), p.start_index());
-}
-
-
-std::ostream& operator<<(std::ostream& os, CreateArgumentsParameters const& p) {
- return os << p.type() << ", " << p.start_index();
-}
-
-
-const CreateArgumentsParameters& CreateArgumentsParametersOf(
- const Operator* op) {
+CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kJSCreateArguments, op->opcode());
- return OpParameter<CreateArgumentsParameters>(op);
+ return OpParameter<CreateArgumentsType>(op);
}
@@ -486,12 +373,15 @@ const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
return OpParameter<CreateLiteralParameters>(op);
}
-
#define CACHED_OP_LIST(V) \
V(Equal, Operator::kNoProperties, 2, 1) \
V(NotEqual, Operator::kNoProperties, 2, 1) \
V(StrictEqual, Operator::kNoThrow, 2, 1) \
V(StrictNotEqual, Operator::kNoThrow, 2, 1) \
+ V(LessThan, Operator::kNoProperties, 2, 1) \
+ V(GreaterThan, Operator::kNoProperties, 2, 1) \
+ V(LessThanOrEqual, Operator::kNoProperties, 2, 1) \
+ V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1) \
V(ToNumber, Operator::kNoProperties, 1, 1) \
V(ToString, Operator::kNoProperties, 1, 1) \
V(ToName, Operator::kNoProperties, 1, 1) \
@@ -512,14 +402,6 @@ const CreateLiteralParameters& CreateLiteralParametersOf(const Operator* op) {
V(CreateWithContext, Operator::kNoProperties, 2, 1) \
V(CreateModuleContext, Operator::kNoProperties, 2, 1)
-
-#define CACHED_OP_LIST_WITH_LANGUAGE_MODE(V) \
- V(LessThan, Operator::kNoProperties, 2, 1) \
- V(GreaterThan, Operator::kNoProperties, 2, 1) \
- V(LessThanOrEqual, Operator::kNoProperties, 2, 1) \
- V(GreaterThanOrEqual, Operator::kNoProperties, 2, 1)
-
-
struct JSOperatorGlobalCache final {
#define CACHED(Name, properties, value_input_count, value_output_count) \
struct Name##Operator final : public Operator { \
@@ -533,25 +415,6 @@ struct JSOperatorGlobalCache final {
Name##Operator k##Name##Operator;
CACHED_OP_LIST(CACHED)
#undef CACHED
-
-
-#define CACHED_WITH_LANGUAGE_MODE(Name, properties, value_input_count, \
- value_output_count) \
- template <LanguageMode kLanguageMode> \
- struct Name##Operator final : public Operator1<LanguageMode> { \
- Name##Operator() \
- : Operator1<LanguageMode>( \
- IrOpcode::kJS##Name, properties, "JS" #Name, value_input_count, \
- Operator::ZeroIfPure(properties), \
- Operator::ZeroIfEliminatable(properties), value_output_count, \
- Operator::ZeroIfPure(properties), \
- Operator::ZeroIfNoThrow(properties), kLanguageMode) {} \
- }; \
- Name##Operator<SLOPPY> k##Name##SloppyOperator; \
- Name##Operator<STRICT> k##Name##StrictOperator; \
- Name##Operator<STRONG> k##Name##StrongOperator;
- CACHED_OP_LIST_WITH_LANGUAGE_MODE(CACHED_WITH_LANGUAGE_MODE)
-#undef CACHED_WITH_LANGUAGE_MODE
};
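
Note (illustrative, not part of the patch): with language mode gone, the
comparison operators join CACHED_OP_LIST above, so the builder serves them
from the flat operator cache instead of selecting among sloppy/strict/strong
variants:

    // Before: one cached variant per language mode.
    //   const Operator* op = javascript()->LessThan(language_mode);
    // After: a single cached operator, no language-mode dispatch.
    const Operator* op = javascript()->LessThan();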
@@ -570,156 +433,104 @@ JSOperatorBuilder::JSOperatorBuilder(Zone* zone)
CACHED_OP_LIST(CACHED)
#undef CACHED
-
-#define CACHED_WITH_LANGUAGE_MODE(Name, properties, value_input_count, \
- value_output_count) \
- const Operator* JSOperatorBuilder::Name(LanguageMode language_mode) { \
- switch (language_mode) { \
- case SLOPPY: \
- return &cache_.k##Name##SloppyOperator; \
- case STRICT: \
- return &cache_.k##Name##StrictOperator; \
- case STRONG: \
- return &cache_.k##Name##StrongOperator; \
- default: \
- break; /* %*!%^$#@ */ \
- } \
- UNREACHABLE(); \
- return nullptr; \
- }
-CACHED_OP_LIST_WITH_LANGUAGE_MODE(CACHED_WITH_LANGUAGE_MODE)
-#undef CACHED_WITH_LANGUAGE_MODE
-
-
-const Operator* JSOperatorBuilder::BitwiseOr(LanguageMode language_mode,
- BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::BitwiseOr(BinaryOperationHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
- BinaryOperationParameters parameters(language_mode, hints);
- return new (zone()) Operator1<BinaryOperationParameters>( //--
- IrOpcode::kJSBitwiseOr, Operator::kNoProperties, // opcode
- "JSBitwiseOr", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- parameters); // parameter
+ return new (zone()) Operator1<BinaryOperationHints>( //--
+ IrOpcode::kJSBitwiseOr, Operator::kNoProperties, // opcode
+ "JSBitwiseOr", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
}
-
-const Operator* JSOperatorBuilder::BitwiseXor(LanguageMode language_mode,
- BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::BitwiseXor(BinaryOperationHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
- BinaryOperationParameters parameters(language_mode, hints);
- return new (zone()) Operator1<BinaryOperationParameters>( //--
- IrOpcode::kJSBitwiseXor, Operator::kNoProperties, // opcode
- "JSBitwiseXor", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- parameters); // parameter
+ return new (zone()) Operator1<BinaryOperationHints>( //--
+ IrOpcode::kJSBitwiseXor, Operator::kNoProperties, // opcode
+ "JSBitwiseXor", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
}
-
-const Operator* JSOperatorBuilder::BitwiseAnd(LanguageMode language_mode,
- BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::BitwiseAnd(BinaryOperationHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
- BinaryOperationParameters parameters(language_mode, hints);
- return new (zone()) Operator1<BinaryOperationParameters>( //--
- IrOpcode::kJSBitwiseAnd, Operator::kNoProperties, // opcode
- "JSBitwiseAnd", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- parameters); // parameter
+ return new (zone()) Operator1<BinaryOperationHints>( //--
+ IrOpcode::kJSBitwiseAnd, Operator::kNoProperties, // opcode
+ "JSBitwiseAnd", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
}
-
-const Operator* JSOperatorBuilder::ShiftLeft(LanguageMode language_mode,
- BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::ShiftLeft(BinaryOperationHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
- BinaryOperationParameters parameters(language_mode, hints);
- return new (zone()) Operator1<BinaryOperationParameters>( //--
- IrOpcode::kJSShiftLeft, Operator::kNoProperties, // opcode
- "JSShiftLeft", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- parameters); // parameter
+ return new (zone()) Operator1<BinaryOperationHints>( //--
+ IrOpcode::kJSShiftLeft, Operator::kNoProperties, // opcode
+ "JSShiftLeft", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
}
-
-const Operator* JSOperatorBuilder::ShiftRight(LanguageMode language_mode,
- BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::ShiftRight(BinaryOperationHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
- BinaryOperationParameters parameters(language_mode, hints);
- return new (zone()) Operator1<BinaryOperationParameters>( //--
- IrOpcode::kJSShiftRight, Operator::kNoProperties, // opcode
- "JSShiftRight", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- parameters); // parameter
+ return new (zone()) Operator1<BinaryOperationHints>( //--
+ IrOpcode::kJSShiftRight, Operator::kNoProperties, // opcode
+ "JSShiftRight", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
}
-
const Operator* JSOperatorBuilder::ShiftRightLogical(
- LanguageMode language_mode, BinaryOperationHints hints) {
+ BinaryOperationHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
- BinaryOperationParameters parameters(language_mode, hints);
- return new (zone()) Operator1<BinaryOperationParameters>( //--
+ return new (zone()) Operator1<BinaryOperationHints>( //--
IrOpcode::kJSShiftRightLogical, Operator::kNoProperties, // opcode
"JSShiftRightLogical", // name
2, 1, 1, 1, 1, 2, // inputs/outputs
- parameters); // parameter
+ hints); // parameter
}
-
-const Operator* JSOperatorBuilder::Add(LanguageMode language_mode,
- BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Add(BinaryOperationHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
- BinaryOperationParameters parameters(language_mode, hints);
- return new (zone()) Operator1<BinaryOperationParameters>( //--
- IrOpcode::kJSAdd, Operator::kNoProperties, // opcode
- "JSAdd", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- parameters); // parameter
+ return new (zone()) Operator1<BinaryOperationHints>( //--
+ IrOpcode::kJSAdd, Operator::kNoProperties, // opcode
+ "JSAdd", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
}
-
-const Operator* JSOperatorBuilder::Subtract(LanguageMode language_mode,
- BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Subtract(BinaryOperationHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
- BinaryOperationParameters parameters(language_mode, hints);
- return new (zone()) Operator1<BinaryOperationParameters>( //--
- IrOpcode::kJSSubtract, Operator::kNoProperties, // opcode
- "JSSubtract", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- parameters); // parameter
+ return new (zone()) Operator1<BinaryOperationHints>( //--
+ IrOpcode::kJSSubtract, Operator::kNoProperties, // opcode
+ "JSSubtract", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
}
-
-const Operator* JSOperatorBuilder::Multiply(LanguageMode language_mode,
- BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Multiply(BinaryOperationHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
- BinaryOperationParameters parameters(language_mode, hints);
- return new (zone()) Operator1<BinaryOperationParameters>( //--
- IrOpcode::kJSMultiply, Operator::kNoProperties, // opcode
- "JSMultiply", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- parameters); // parameter
+ return new (zone()) Operator1<BinaryOperationHints>( //--
+ IrOpcode::kJSMultiply, Operator::kNoProperties, // opcode
+ "JSMultiply", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
}
-
-const Operator* JSOperatorBuilder::Divide(LanguageMode language_mode,
- BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Divide(BinaryOperationHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
- BinaryOperationParameters parameters(language_mode, hints);
- return new (zone()) Operator1<BinaryOperationParameters>( //--
- IrOpcode::kJSDivide, Operator::kNoProperties, // opcode
- "JSDivide", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- parameters); // parameter
+ return new (zone()) Operator1<BinaryOperationHints>( //--
+ IrOpcode::kJSDivide, Operator::kNoProperties, // opcode
+ "JSDivide", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
}
-
-const Operator* JSOperatorBuilder::Modulus(LanguageMode language_mode,
- BinaryOperationHints hints) {
+const Operator* JSOperatorBuilder::Modulus(BinaryOperationHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
- BinaryOperationParameters parameters(language_mode, hints);
- return new (zone()) Operator1<BinaryOperationParameters>( //--
- IrOpcode::kJSModulus, Operator::kNoProperties, // opcode
- "JSModulus", // name
- 2, 1, 1, 1, 1, 2, // inputs/outputs
- parameters); // parameter
+ return new (zone()) Operator1<BinaryOperationHints>( //--
+ IrOpcode::kJSModulus, Operator::kNoProperties, // opcode
+ "JSModulus", // name
+ 2, 1, 1, 1, 1, 2, // inputs/outputs
+ hints); // parameter
}
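
Note (illustrative, not part of the patch): every binary-operator builder
above now keys the operator on BinaryOperationHints alone. Assuming the
catch-all hints constructor from type-hints.h, a call site migrates like so:

    // Before: javascript()->Add(language_mode, BinaryOperationHints::Any());
    // After: hints are the only parameter; language mode no longer
    // distinguishes JSAdd/JSSubtract/etc. operators.
    const Operator* add_op = javascript()->Add(BinaryOperationHints::Any());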
@@ -732,12 +543,11 @@ const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
hints); // parameter
}
-
const Operator* JSOperatorBuilder::CallFunction(
- size_t arity, LanguageMode language_mode, VectorSlotPair const& feedback,
+ size_t arity, VectorSlotPair const& feedback,
ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
- CallFunctionParameters parameters(arity, language_mode, feedback,
- tail_call_mode, convert_mode);
+ CallFunctionParameters parameters(arity, feedback, tail_call_mode,
+ convert_mode);
return new (zone()) Operator1<CallFunctionParameters>( // --
IrOpcode::kJSCallFunction, Operator::kNoProperties, // opcode
"JSCallFunction", // name
@@ -746,10 +556,22 @@ const Operator* JSOperatorBuilder::CallFunction(
}
+const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id) {
+ const Runtime::Function* f = Runtime::FunctionForId(id);
+ return CallRuntime(f, f->nargs);
+}
+
+
const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id,
size_t arity) {
- CallRuntimeParameters parameters(id, arity);
- const Runtime::Function* f = Runtime::FunctionForId(parameters.id());
+ const Runtime::Function* f = Runtime::FunctionForId(id);
+ return CallRuntime(f, arity);
+}
+
+
+const Operator* JSOperatorBuilder::CallRuntime(const Runtime::Function* f,
+ size_t arity) {
+ CallRuntimeParameters parameters(f->function_id, arity);
DCHECK(f->nargs == -1 || f->nargs == static_cast<int>(parameters.arity()));
return new (zone()) Operator1<CallRuntimeParameters>( // --
IrOpcode::kJSCallRuntime, Operator::kNoProperties, // opcode
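
Note (illustrative, not part of the patch): the new id-only CallRuntime
overload reads the arity from the runtime function table, so fixed-arity
call sites can drop the explicit count; the kHasInPrototypeChain call site
later in this patch does exactly that:

    // Before: javascript()->CallRuntime(Runtime::kHasInPrototypeChain, 2)
    // After:  javascript()->CallRuntime(Runtime::kHasInPrototypeChain)
    // which expands to CallRuntime(f, f->nargs) via Runtime::FunctionForId(id).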
@@ -779,11 +601,9 @@ const Operator* JSOperatorBuilder::ConvertReceiver(
convert_mode); // parameter
}
-
-const Operator* JSOperatorBuilder::LoadNamed(LanguageMode language_mode,
- Handle<Name> name,
+const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
const VectorSlotPair& feedback) {
- NamedAccess access(language_mode, name, feedback);
+ NamedAccess access(SLOPPY, name, feedback);
return new (zone()) Operator1<NamedAccess>( // --
IrOpcode::kJSLoadNamed, Operator::kNoProperties, // opcode
"JSLoadNamed", // name
@@ -791,10 +611,9 @@ const Operator* JSOperatorBuilder::LoadNamed(LanguageMode language_mode,
access); // parameter
}
-
const Operator* JSOperatorBuilder::LoadProperty(
- LanguageMode language_mode, VectorSlotPair const& feedback) {
- PropertyAccess access(language_mode, feedback);
+ VectorSlotPair const& feedback) {
+ PropertyAccess access(SLOPPY, feedback);
return new (zone()) Operator1<PropertyAccess>( // --
IrOpcode::kJSLoadProperty, Operator::kNoProperties, // opcode
"JSLoadProperty", // name
@@ -882,26 +701,12 @@ const Operator* JSOperatorBuilder::StoreContext(size_t depth, size_t index) {
}
-const Operator* JSOperatorBuilder::LoadDynamic(const Handle<String>& name,
- TypeofMode typeof_mode) {
- DynamicAccess access(name, typeof_mode);
- return new (zone()) Operator1<DynamicAccess>( // --
- IrOpcode::kJSLoadDynamic, Operator::kNoProperties, // opcode
- "JSLoadDynamic", // name
- 2, 1, 1, 1, 1, 2, // counts
- access); // parameter
-}
-
-
-const Operator* JSOperatorBuilder::CreateArguments(
- CreateArgumentsParameters::Type type, int start_index) {
- DCHECK_IMPLIES(start_index, type == CreateArgumentsParameters::kRestArray);
- CreateArgumentsParameters parameters(type, start_index);
- return new (zone()) Operator1<CreateArgumentsParameters>( // --
- IrOpcode::kJSCreateArguments, Operator::kNoThrow, // opcode
- "JSCreateArguments", // name
- 1, 1, 1, 1, 1, 0, // counts
- parameters); // parameter
+const Operator* JSOperatorBuilder::CreateArguments(CreateArgumentsType type) {
+ return new (zone()) Operator1<CreateArgumentsType>( // --
+ IrOpcode::kJSCreateArguments, Operator::kNoThrow, // opcode
+ "JSCreateArguments", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ type); // parameter
}
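
Note (illustrative, not part of the patch): JSCreateArguments is now
parameterized by the bare CreateArgumentsType enum, and the start_index of
the removed parameter class is gone, so a consumer migrates roughly as below
(enumerator names per the CreateArgumentsType enum this patch switches to;
the rest-parameter start index is presumably recovered elsewhere, e.g. from
the shared function info):

    // Before:
    //   CreateArgumentsParameters const& p =
    //       CreateArgumentsParametersOf(node->op());
    //   if (p.type() == CreateArgumentsParameters::kRestArray)
    //     UseStartIndex(p.start_index());
    // After:
    CreateArgumentsType type = CreateArgumentsTypeOf(node->op());
    if (type == CreateArgumentsType::kRestParameter) { /* ... */ }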
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index ca7c7ea657..070e71efac 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -51,42 +51,6 @@ ConvertReceiverMode ConvertReceiverModeOf(Operator const* op);
ToBooleanHints ToBooleanHintsOf(Operator const* op);
-// Defines whether tail call optimization is allowed.
-enum class TailCallMode : unsigned { kAllow, kDisallow };
-
-size_t hash_value(TailCallMode);
-
-std::ostream& operator<<(std::ostream&, TailCallMode);
-
-
-// Defines the language mode and hints for JavaScript binary operations.
-// This is used as a parameter by JSAdd, JSSubtract, etc. operators.
-class BinaryOperationParameters final {
- public:
- BinaryOperationParameters(LanguageMode language_mode,
- BinaryOperationHints hints)
- : language_mode_(language_mode), hints_(hints) {}
-
- LanguageMode language_mode() const { return language_mode_; }
- BinaryOperationHints hints() const { return hints_; }
-
- private:
- LanguageMode const language_mode_;
- BinaryOperationHints const hints_;
-};
-
-bool operator==(BinaryOperationParameters const&,
- BinaryOperationParameters const&);
-bool operator!=(BinaryOperationParameters const&,
- BinaryOperationParameters const&);
-
-size_t hash_value(BinaryOperationParameters const&);
-
-std::ostream& operator<<(std::ostream&, BinaryOperationParameters const&);
-
-BinaryOperationParameters const& BinaryOperationParametersOf(Operator const*);
-
-
// Defines the arity and the feedback for a JavaScript constructor call. This is
// used as a parameter by JSCallConstruct operators.
class CallConstructParameters final {
@@ -116,20 +80,15 @@ CallConstructParameters const& CallConstructParametersOf(Operator const*);
// used as a parameter by JSCallFunction operators.
class CallFunctionParameters final {
public:
- CallFunctionParameters(size_t arity, LanguageMode language_mode,
- VectorSlotPair const& feedback,
+ CallFunctionParameters(size_t arity, VectorSlotPair const& feedback,
TailCallMode tail_call_mode,
ConvertReceiverMode convert_mode)
: bit_field_(ArityField::encode(arity) |
ConvertReceiverModeField::encode(convert_mode) |
- LanguageModeField::encode(language_mode) |
TailCallModeField::encode(tail_call_mode)),
feedback_(feedback) {}
size_t arity() const { return ArityField::decode(bit_field_); }
- LanguageMode language_mode() const {
- return LanguageModeField::decode(bit_field_);
- }
ConvertReceiverMode convert_mode() const {
return ConvertReceiverModeField::decode(bit_field_);
}
@@ -151,9 +110,8 @@ class CallFunctionParameters final {
return base::hash_combine(p.bit_field_, p.feedback_);
}
- typedef BitField<size_t, 0, 27> ArityField;
- typedef BitField<ConvertReceiverMode, 27, 2> ConvertReceiverModeField;
- typedef BitField<LanguageMode, 29, 2> LanguageModeField;
+ typedef BitField<size_t, 0, 29> ArityField;
+ typedef BitField<ConvertReceiverMode, 29, 2> ConvertReceiverModeField;
typedef BitField<TailCallMode, 31, 1> TailCallModeField;
const uint32_t bit_field_;
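
Note (a minimal self-contained sketch, not part of the patch): dropping the
2-bit LanguageModeField frees its bits, letting ArityField grow from 27 to
29 bits while ConvertReceiverModeField moves up to bit 29. The BitField
template below is a simplified stand-in for v8's src/utils.h version, and
the enum values are placeholders:

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in for v8's BitField helper.
    template <typename T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << shift) & kMask;
      }
      static T decode(uint32_t field) {
        return static_cast<T>((field & kMask) >> shift);
      }
    };

    enum class ConvertReceiverMode : uint32_t { kAny = 0 };  // placeholder
    enum class TailCallMode : uint32_t { kDisallow = 0 };    // placeholder

    // New layout: 29 arity bits, 2 receiver-mode bits, 1 tail-call bit.
    using ArityField = BitField<uint32_t, 0, 29>;
    using ConvertReceiverModeField = BitField<ConvertReceiverMode, 29, 2>;
    using TailCallModeField = BitField<TailCallMode, 31, 1>;

    int main() {
      // 200000000 needs 28 bits: over the old 27-bit cap, within the new 29.
      uint32_t bits = ArityField::encode(200000000) |
                      ConvertReceiverModeField::encode(ConvertReceiverMode::kAny) |
                      TailCallModeField::encode(TailCallMode::kDisallow);
      std::printf("arity=%u\n", ArityField::decode(bits));
      return 0;
    }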
@@ -221,30 +179,6 @@ std::ostream& operator<<(std::ostream&, ContextAccess const&);
ContextAccess const& ContextAccessOf(Operator const*);
-// Defines the name for a dynamic variable lookup. This is used as a parameter
-// by JSLoadDynamic and JSStoreDynamic operators.
-class DynamicAccess final {
- public:
- DynamicAccess(const Handle<String>& name, TypeofMode typeof_mode);
-
- const Handle<String>& name() const { return name_; }
- TypeofMode typeof_mode() const { return typeof_mode_; }
-
- private:
- const Handle<String> name_;
- const TypeofMode typeof_mode_;
-};
-
-size_t hash_value(DynamicAccess const&);
-
-bool operator==(DynamicAccess const&, DynamicAccess const&);
-bool operator!=(DynamicAccess const&, DynamicAccess const&);
-
-std::ostream& operator<<(std::ostream&, DynamicAccess const&);
-
-DynamicAccess const& DynamicAccessOf(Operator const*);
-
-
// Defines the property of an object for a named access. This is
// used as a parameter by the JSLoadNamed and JSStoreNamed operators.
class NamedAccess final {
@@ -356,33 +290,8 @@ std::ostream& operator<<(std::ostream&, PropertyAccess const&);
PropertyAccess const& PropertyAccessOf(const Operator* op);
-// Defines specifics about the arguments object or rest parameter creation. This is
-// used as a parameter by JSCreateArguments operators.
-class CreateArgumentsParameters final {
- public:
- enum Type { kMappedArguments, kUnmappedArguments, kRestArray };
- CreateArgumentsParameters(Type type, int start_index)
- : type_(type), start_index_(start_index) {}
-
- Type type() const { return type_; }
- int start_index() const { return start_index_; }
-
- private:
- const Type type_;
- const int start_index_;
-};
-
-bool operator==(CreateArgumentsParameters const&,
- CreateArgumentsParameters const&);
-bool operator!=(CreateArgumentsParameters const&,
- CreateArgumentsParameters const&);
-
-size_t hash_value(CreateArgumentsParameters const&);
-
-std::ostream& operator<<(std::ostream&, CreateArgumentsParameters const&);
-
-const CreateArgumentsParameters& CreateArgumentsParametersOf(
- const Operator* op);
+// CreateArgumentsType is used as a parameter to JSCreateArguments nodes.
+CreateArgumentsType const& CreateArgumentsTypeOf(const Operator* op);
// Defines shared information for the array that should be created. This is
@@ -475,31 +384,21 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* NotEqual();
const Operator* StrictEqual();
const Operator* StrictNotEqual();
- const Operator* LessThan(LanguageMode language_mode);
- const Operator* GreaterThan(LanguageMode language_mode);
- const Operator* LessThanOrEqual(LanguageMode language_mode);
- const Operator* GreaterThanOrEqual(LanguageMode language_mode);
- const Operator* BitwiseOr(LanguageMode language_mode,
- BinaryOperationHints hints);
- const Operator* BitwiseXor(LanguageMode language_mode,
- BinaryOperationHints hints);
- const Operator* BitwiseAnd(LanguageMode language_mode,
- BinaryOperationHints hints);
- const Operator* ShiftLeft(LanguageMode language_mode,
- BinaryOperationHints hints);
- const Operator* ShiftRight(LanguageMode language_mode,
- BinaryOperationHints hints);
- const Operator* ShiftRightLogical(LanguageMode language_mode,
- BinaryOperationHints hints);
- const Operator* Add(LanguageMode language_mode, BinaryOperationHints hints);
- const Operator* Subtract(LanguageMode language_mode,
- BinaryOperationHints hints);
- const Operator* Multiply(LanguageMode language_mode,
- BinaryOperationHints hints);
- const Operator* Divide(LanguageMode language_mode,
- BinaryOperationHints hints);
- const Operator* Modulus(LanguageMode language_mode,
- BinaryOperationHints hints);
+ const Operator* LessThan();
+ const Operator* GreaterThan();
+ const Operator* LessThanOrEqual();
+ const Operator* GreaterThanOrEqual();
+ const Operator* BitwiseOr(BinaryOperationHints hints);
+ const Operator* BitwiseXor(BinaryOperationHints hints);
+ const Operator* BitwiseAnd(BinaryOperationHints hints);
+ const Operator* ShiftLeft(BinaryOperationHints hints);
+ const Operator* ShiftRight(BinaryOperationHints hints);
+ const Operator* ShiftRightLogical(BinaryOperationHints hints);
+ const Operator* Add(BinaryOperationHints hints);
+ const Operator* Subtract(BinaryOperationHints hints);
+ const Operator* Multiply(BinaryOperationHints hints);
+ const Operator* Divide(BinaryOperationHints hints);
+ const Operator* Modulus(BinaryOperationHints hints);
const Operator* ToBoolean(ToBooleanHints hints);
const Operator* ToNumber();
@@ -509,8 +408,7 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* Yield();
const Operator* Create();
- const Operator* CreateArguments(CreateArgumentsParameters::Type type,
- int start_index);
+ const Operator* CreateArguments(CreateArgumentsType type);
const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
PretenureFlag pretenure);
@@ -523,19 +421,18 @@ class JSOperatorBuilder final : public ZoneObject {
int literal_flags, int literal_index);
const Operator* CallFunction(
- size_t arity, LanguageMode language_mode,
- VectorSlotPair const& feedback = VectorSlotPair(),
+ size_t arity, VectorSlotPair const& feedback = VectorSlotPair(),
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
+ const Operator* CallRuntime(Runtime::FunctionId id);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
+ const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
const Operator* CallConstruct(size_t arity, VectorSlotPair const& feedback);
const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
- const Operator* LoadProperty(LanguageMode language_mode,
- VectorSlotPair const& feedback);
- const Operator* LoadNamed(LanguageMode language_mode, Handle<Name> name,
- VectorSlotPair const& feedback);
+ const Operator* LoadProperty(VectorSlotPair const& feedback);
+ const Operator* LoadNamed(Handle<Name> name, VectorSlotPair const& feedback);
const Operator* StoreProperty(LanguageMode language_mode,
VectorSlotPair const& feedback);
@@ -556,9 +453,6 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* LoadContext(size_t depth, size_t index, bool immutable);
const Operator* StoreContext(size_t depth, size_t index);
- const Operator* LoadDynamic(const Handle<String>& name,
- TypeofMode typeof_mode);
-
const Operator* TypeOf();
const Operator* InstanceOf();
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 5e0712a7f1..11ae3a9709 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -11,7 +11,6 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
-#include "src/compiler/state-values-utils.h"
#include "src/type-cache.h"
#include "src/types.h"
@@ -19,86 +18,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-namespace {
-
-// A helper class to construct inline allocations on the simplified operator
-// level. This keeps track of the effect chain for initial stores on a newly
-// allocated object and also provides helpers for commonly allocated objects.
-class AllocationBuilder final {
- public:
- AllocationBuilder(JSGraph* jsgraph, Node* effect, Node* control)
- : jsgraph_(jsgraph),
- allocation_(nullptr),
- effect_(effect),
- control_(control) {}
-
- // Primitive allocation of static size.
- void Allocate(int size, PretenureFlag pretenure = NOT_TENURED) {
- effect_ = graph()->NewNode(common()->BeginRegion(), effect_);
- allocation_ =
- graph()->NewNode(simplified()->Allocate(pretenure),
- jsgraph()->Constant(size), effect_, control_);
- effect_ = allocation_;
- }
-
- // Primitive store into a field.
- void Store(const FieldAccess& access, Node* value) {
- effect_ = graph()->NewNode(simplified()->StoreField(access), allocation_,
- value, effect_, control_);
- }
-
- // Primitive store into an element.
- void Store(ElementAccess const& access, Node* index, Node* value) {
- effect_ = graph()->NewNode(simplified()->StoreElement(access), allocation_,
- index, value, effect_, control_);
- }
-
- // Compound allocation of a FixedArray.
- void AllocateArray(int length, Handle<Map> map,
- PretenureFlag pretenure = NOT_TENURED) {
- DCHECK(map->instance_type() == FIXED_ARRAY_TYPE ||
- map->instance_type() == FIXED_DOUBLE_ARRAY_TYPE);
- int size = (map->instance_type() == FIXED_ARRAY_TYPE)
- ? FixedArray::SizeFor(length)
- : FixedDoubleArray::SizeFor(length);
- Allocate(size, pretenure);
- Store(AccessBuilder::ForMap(), map);
- Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
- }
-
- // Compound store of a constant into a field.
- void Store(const FieldAccess& access, Handle<Object> value) {
- Store(access, jsgraph()->Constant(value));
- }
-
- void FinishAndChange(Node* node) {
- NodeProperties::SetType(allocation_, NodeProperties::GetType(node));
- node->ReplaceInput(0, allocation_);
- node->ReplaceInput(1, effect_);
- node->TrimInputCount(2);
- NodeProperties::ChangeOp(node, common()->FinishRegion());
- }
-
- Node* Finish() {
- return graph()->NewNode(common()->FinishRegion(), allocation_, effect_);
- }
-
- protected:
- JSGraph* jsgraph() { return jsgraph_; }
- Graph* graph() { return jsgraph_->graph(); }
- CommonOperatorBuilder* common() { return jsgraph_->common(); }
- SimplifiedOperatorBuilder* simplified() { return jsgraph_->simplified(); }
-
- private:
- JSGraph* const jsgraph_;
- Node* allocation_;
- Node* effect_;
- Node* control_;
-};
-
-} // namespace
-
-
// A helper class to simplify the process of reducing a single binop node with a
// JSOperator. This class manages the rewriting of context, control, and effect
// dependencies during lowering of a binop and contains numerous helper
@@ -218,17 +137,6 @@ class JSBinopReduction final {
return ChangeToPureOperator(op, false, type);
}
- // TODO(turbofan): Strong mode should be killed soonish!
- bool IsStrong() const {
- if (node_->opcode() == IrOpcode::kJSLessThan ||
- node_->opcode() == IrOpcode::kJSLessThanOrEqual ||
- node_->opcode() == IrOpcode::kJSGreaterThan ||
- node_->opcode() == IrOpcode::kJSGreaterThanOrEqual) {
- return is_strong(OpParameter<LanguageMode>(node_));
- }
- return is_strong(BinaryOperationParametersOf(node_->op()).language_mode());
- }
-
bool LeftInputIs(Type* t) { return left_type()->Is(t); }
bool RightInputIs(Type* t) { return right_type()->Is(t); }
@@ -457,7 +365,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
// JSAdd(x:number, y:number) => NumberAdd(x, y)
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
- if (r.NeitherInputCanBe(Type::StringOrReceiver()) && !r.IsStrong()) {
+ if (r.NeitherInputCanBe(Type::StringOrReceiver())) {
// JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
r.ConvertInputsToNumber(frame_state);
@@ -499,7 +407,7 @@ Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
if (flags() & kDisableBinaryOpReduction) return NoChange();
JSBinopReduction r(this, node);
- if (r.IsStrong() || numberOp == simplified()->NumberModulus()) {
+ if (numberOp == simplified()->NumberModulus()) {
if (r.BothInputsAre(Type::Number())) {
return r.ChangeToPureOperator(numberOp, Type::Number());
}
@@ -515,13 +423,6 @@ Reduction JSTypedLowering::ReduceInt32Binop(Node* node, const Operator* intOp) {
if (flags() & kDisableBinaryOpReduction) return NoChange();
JSBinopReduction r(this, node);
- if (r.IsStrong()) {
- if (r.BothInputsAre(Type::Number())) {
- r.ConvertInputsToUI32(kSigned, kSigned);
- return r.ChangeToPureOperator(intOp, Type::Integral32());
- }
- return NoChange();
- }
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
r.ConvertInputsToNumber(frame_state);
r.ConvertInputsToUI32(kSigned, kSigned);
@@ -535,13 +436,6 @@ Reduction JSTypedLowering::ReduceUI32Shift(Node* node,
if (flags() & kDisableBinaryOpReduction) return NoChange();
JSBinopReduction r(this, node);
- if (r.IsStrong()) {
- if (r.BothInputsAre(Type::Number())) {
- r.ConvertInputsToUI32(left_signedness, kUnsigned);
- return r.ChangeToPureOperator(shift_op);
- }
- return NoChange();
- }
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
r.ConvertInputsToNumber(frame_state);
r.ConvertInputsToUI32(left_signedness, kUnsigned);
@@ -588,9 +482,6 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
less_than_or_equal = machine()->Int32LessThanOrEqual();
} else {
// TODO(turbofan): mixed signed/unsigned int32 comparisons.
- if (r.IsStrong() && !r.BothInputsAre(Type::Number())) {
- return NoChange();
- }
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
r.ConvertInputsToNumber(frame_state);
less_than = simplified()->NumberLessThan();
@@ -780,8 +671,18 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
}
}
}
- // Check if we have a cached conversion.
+ // Try constant-folding of JSToNumber with constant inputs.
Type* input_type = NodeProperties::GetType(input);
+ if (input_type->IsConstant()) {
+ Handle<Object> input_value = input_type->AsConstant()->Value();
+ if (input_value->IsString()) {
+ return Replace(jsgraph()->Constant(
+ String::ToNumber(Handle<String>::cast(input_value))));
+ } else if (input_value->IsOddball()) {
+ return Replace(jsgraph()->Constant(
+ Oddball::ToNumber(Handle<Oddball>::cast(input_value))));
+ }
+ }
if (input_type->Is(Type::Number())) {
// JSToNumber(x:number) => x
return Changed(input);
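
Note (illustrative, not part of the patch): the new folding evaluates
JSToNumber at compile time once its input is a constant string or oddball,
matching the JS-level results:

    // JSToNumber("2.5")     => Constant(2.5)  via String::ToNumber
    // JSToNumber(undefined) => Constant(NaN)  via Oddball::ToNumber
    // JSToNumber(null)      => Constant(0)
    // JSToNumber(true)      => Constant(1);  JSToNumber(false) => Constant(0)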
@@ -1221,7 +1122,7 @@ Reduction JSTypedLowering::ReduceJSInstanceOf(Node* node) {
// If we need an access check or the object is a Proxy, make a runtime call
// to finish the lowering.
Node* bool_result_runtime_has_in_proto_chain_case = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kHasInPrototypeChain, 2), r.left(),
+ javascript()->CallRuntime(Runtime::kHasInPrototypeChain), r.left(),
prototype, context, frame_state, effect, control);
control = graph()->NewNode(common()->IfFalse(), branch_is_proxy);
@@ -1422,663 +1323,6 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
}
-namespace {
-
-// Maximum instance size for which allocations will be inlined.
-const int kMaxInlineInstanceSize = 64 * kPointerSize;
-
-
-// Checks whether allocation using the given constructor can be inlined.
-bool IsAllocationInlineable(Handle<JSFunction> constructor) {
- // TODO(bmeurer): Further relax restrictions on inlining, i.e.
- // instance type and maybe instance size (inobject properties
- // are limited anyways by the runtime).
- return constructor->has_initial_map() &&
- constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
- constructor->initial_map()->instance_size() < kMaxInlineInstanceSize;
-}
-
-} // namespace
-
-
-Reduction JSTypedLowering::ReduceJSCreate(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCreate, node->opcode());
- Node* const target = NodeProperties::GetValueInput(node, 0);
- Type* const target_type = NodeProperties::GetType(target);
- Node* const new_target = NodeProperties::GetValueInput(node, 1);
- Node* const effect = NodeProperties::GetEffectInput(node);
- // TODO(turbofan): Add support for NewTarget passed to JSCreate.
- if (target != new_target) return NoChange();
- // Extract constructor function.
- if (target_type->IsConstant() &&
- target_type->AsConstant()->Value()->IsJSFunction()) {
- Handle<JSFunction> constructor =
- Handle<JSFunction>::cast(target_type->AsConstant()->Value());
- DCHECK(constructor->IsConstructor());
- // Force completion of inobject slack tracking before
- // generating code to finalize the instance size.
- constructor->CompleteInobjectSlackTrackingIfActive();
-
- // TODO(bmeurer): We fall back to the runtime in case we cannot inline
- // the allocation here, which is sort of expensive. We should think about
- // a soft fallback to some NewObjectCodeStub.
- if (IsAllocationInlineable(constructor)) {
- // Compute instance size from initial map of {constructor}.
- Handle<Map> initial_map(constructor->initial_map(), isolate());
- int const instance_size = initial_map->instance_size();
-
- // Add a dependency on the {initial_map} to make sure that this code is
- // deoptimized whenever the {initial_map} of the {constructor} changes.
- dependencies()->AssumeInitialMapCantChange(initial_map);
-
- // Emit code to allocate the JSObject instance for the {constructor}.
- AllocationBuilder a(jsgraph(), effect, graph()->start());
- a.Allocate(instance_size);
- a.Store(AccessBuilder::ForMap(), initial_map);
- a.Store(AccessBuilder::ForJSObjectProperties(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSObjectElements(),
- jsgraph()->EmptyFixedArrayConstant());
- for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
- a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
- jsgraph()->UndefinedConstant());
- }
- a.FinishAndChange(node);
- return Changed(node);
- }
- }
- return NoChange();
-}
-
-
-namespace {
-
-// Retrieves the frame state holding actual argument values.
-Node* GetArgumentsFrameState(Node* frame_state) {
- Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
- FrameStateInfo outer_state_info = OpParameter<FrameStateInfo>(outer_state);
- return outer_state_info.type() == FrameStateType::kArgumentsAdaptor
- ? outer_state
- : frame_state;
-}
-
-} // namespace
-
-
-Reduction JSTypedLowering::ReduceJSCreateArguments(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
- CreateArgumentsParameters const& p = CreateArgumentsParametersOf(node->op());
- Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
- Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-
- // Use the ArgumentsAccessStub for materializing both mapped and unmapped
- // arguments object, but only for non-inlined (i.e. outermost) frames.
- if (outer_state->opcode() != IrOpcode::kFrameState) {
- Isolate* isolate = jsgraph()->isolate();
- int parameter_count = state_info.parameter_count() - 1;
- int parameter_offset = parameter_count * kPointerSize;
- int offset = StandardFrameConstants::kCallerSPOffset + parameter_offset;
- Node* parameter_pointer = graph()->NewNode(
- machine()->IntAdd(), graph()->NewNode(machine()->LoadFramePointer()),
- jsgraph()->IntPtrConstant(offset));
-
- if (p.type() != CreateArgumentsParameters::kRestArray) {
- Handle<SharedFunctionInfo> shared;
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- bool unmapped = p.type() == CreateArgumentsParameters::kUnmappedArguments;
- Callable callable = CodeFactory::ArgumentsAccess(
- isolate, unmapped, shared->has_duplicate_parameters());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate, graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState);
- const Operator* new_op = common()->Call(desc);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->InsertInput(graph()->zone(), 0, stub_code);
- node->InsertInput(graph()->zone(), 2,
- jsgraph()->Constant(parameter_count));
- node->InsertInput(graph()->zone(), 3, parameter_pointer);
- NodeProperties::ChangeOp(node, new_op);
- return Changed(node);
- } else {
- Callable callable = CodeFactory::RestArgumentsAccess(isolate);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate, graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState);
- const Operator* new_op = common()->Call(desc);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->InsertInput(graph()->zone(), 0, stub_code);
- node->ReplaceInput(1, jsgraph()->Constant(parameter_count));
- node->InsertInput(graph()->zone(), 2, parameter_pointer);
- node->InsertInput(graph()->zone(), 3,
- jsgraph()->Constant(p.start_index()));
- NodeProperties::ChangeOp(node, new_op);
- return Changed(node);
- }
- } else if (outer_state->opcode() == IrOpcode::kFrameState) {
- // Use inline allocation for all mapped arguments objects within inlined
- // (i.e. non-outermost) frames, independent of the object size.
- if (p.type() == CreateArgumentsParameters::kMappedArguments) {
- Handle<SharedFunctionInfo> shared;
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- Node* const callee = NodeProperties::GetValueInput(node, 0);
- Node* const control = NodeProperties::GetControlInput(node);
- Node* const context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- // TODO(mstarzinger): Duplicate parameters are not handled yet.
- if (shared->has_duplicate_parameters()) return NoChange();
- // Choose the correct frame state and frame state info depending on
- // whether there conceptually is an arguments adaptor frame in the call
- // chain.
- Node* const args_state = GetArgumentsFrameState(frame_state);
- FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
- // Prepare element backing store to be used by arguments object.
- bool has_aliased_arguments = false;
- Node* const elements = AllocateAliasedArguments(
- effect, control, args_state, context, shared, &has_aliased_arguments);
- effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
- // Load the arguments object map from the current native context.
- Node* const load_native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- Node* const load_arguments_map = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForContextSlot(
- has_aliased_arguments ? Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX
- : Context::SLOPPY_ARGUMENTS_MAP_INDEX)),
- load_native_context, effect, control);
- // Actually allocate and initialize the arguments object.
- AllocationBuilder a(jsgraph(), effect, control);
- Node* properties = jsgraph()->EmptyFixedArrayConstant();
- int length = args_state_info.parameter_count() - 1; // Minus receiver.
- STATIC_ASSERT(Heap::kSloppyArgumentsObjectSize == 5 * kPointerSize);
- a.Allocate(Heap::kSloppyArgumentsObjectSize);
- a.Store(AccessBuilder::ForMap(), load_arguments_map);
- a.Store(AccessBuilder::ForJSObjectProperties(), properties);
- a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
- a.Store(AccessBuilder::ForArgumentsCallee(), callee);
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
- } else if (p.type() == CreateArgumentsParameters::kUnmappedArguments) {
- // Use inline allocation for all unmapped arguments objects within inlined
- // (i.e. non-outermost) frames, independent of the object size.
- Node* const control = NodeProperties::GetControlInput(node);
- Node* const context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- // Choose the correct frame state and frame state info depending on
- // whether there conceptually is an arguments adaptor frame in the call
- // chain.
- Node* const args_state = GetArgumentsFrameState(frame_state);
- FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
- // Prepare element backing store to be used by arguments object.
- Node* const elements = AllocateArguments(effect, control, args_state);
- effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
- // Load the arguments object map from the current native context.
- Node* const load_native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- Node* const load_arguments_map = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForContextSlot(
- Context::STRICT_ARGUMENTS_MAP_INDEX)),
- load_native_context, effect, control);
- // Actually allocate and initialize the arguments object.
- AllocationBuilder a(jsgraph(), effect, control);
- Node* properties = jsgraph()->EmptyFixedArrayConstant();
- int length = args_state_info.parameter_count() - 1; // Minus receiver.
- STATIC_ASSERT(Heap::kStrictArgumentsObjectSize == 4 * kPointerSize);
- a.Allocate(Heap::kStrictArgumentsObjectSize);
- a.Store(AccessBuilder::ForMap(), load_arguments_map);
- a.Store(AccessBuilder::ForJSObjectProperties(), properties);
- a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForArgumentsLength(), jsgraph()->Constant(length));
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
- } else if (p.type() == CreateArgumentsParameters::kRestArray) {
- // Use inline allocation for all unmapped arguments objects within inlined
- // (i.e. non-outermost) frames, independent of the object size.
- Node* const control = NodeProperties::GetControlInput(node);
- Node* const context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- // Choose the correct frame state and frame state info depending on
- // whether there conceptually is an arguments adaptor frame in the call
- // chain.
- Node* const args_state = GetArgumentsFrameState(frame_state);
- FrameStateInfo args_state_info = OpParameter<FrameStateInfo>(args_state);
- // Prepare element backing store to be used by the rest array.
- Node* const elements =
- AllocateRestArguments(effect, control, args_state, p.start_index());
- effect = elements->op()->EffectOutputCount() > 0 ? elements : effect;
- // Load the JSArray object map from the current native context.
- Node* const load_native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- Node* const load_jsarray_map = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForContextSlot(
- Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX)),
- load_native_context, effect, control);
- // Actually allocate and initialize the jsarray.
- AllocationBuilder a(jsgraph(), effect, control);
- Node* properties = jsgraph()->EmptyFixedArrayConstant();
-
- // Minus receiver.
- int argument_count = args_state_info.parameter_count() - 1;
- int length = std::max(0, argument_count - p.start_index());
- STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
- a.Allocate(JSArray::kSize);
- a.Store(AccessBuilder::ForMap(), load_jsarray_map);
- a.Store(AccessBuilder::ForJSObjectProperties(), properties);
- a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS),
- jsgraph()->Constant(length));
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
- }
- }
-
- return NoChange();
-}
-
-
-Reduction JSTypedLowering::ReduceNewArray(Node* node, Node* length,
- int capacity,
- Handle<AllocationSite> site) {
- DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
- Node* context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // Extract transition and tenuring feedback from the {site} and add
- // appropriate code dependencies on the {site} if deoptimization is
- // enabled.
- PretenureFlag pretenure = site->GetPretenureMode();
- ElementsKind elements_kind = site->GetElementsKind();
- DCHECK(IsFastElementsKind(elements_kind));
- if (flags() & kDeoptimizationEnabled) {
- dependencies()->AssumeTenuringDecision(site);
- dependencies()->AssumeTransitionStable(site);
- }
-
- // Retrieve the initial map for the array from the appropriate native context.
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- Node* js_array_map = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::ArrayMapIndex(elements_kind), true),
- native_context, native_context, effect);
-
- // Setup elements and properties.
- Node* elements;
- if (capacity == 0) {
- elements = jsgraph()->EmptyFixedArrayConstant();
- } else {
- elements = effect =
- AllocateElements(effect, control, elements_kind, capacity, pretenure);
- }
- Node* properties = jsgraph()->EmptyFixedArrayConstant();
-
- // Perform the allocation of the actual JSArray object.
- AllocationBuilder a(jsgraph(), effect, control);
- a.Allocate(JSArray::kSize, pretenure);
- a.Store(AccessBuilder::ForMap(), js_array_map);
- a.Store(AccessBuilder::ForJSObjectProperties(), properties);
- a.Store(AccessBuilder::ForJSObjectElements(), elements);
- a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateArray(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
- CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
- Node* target = NodeProperties::GetValueInput(node, 0);
- Node* new_target = NodeProperties::GetValueInput(node, 1);
-
- // TODO(bmeurer): Optimize the subclassing case.
- if (target != new_target) return NoChange();
-
- // Check if we have a feedback {site} on the {node}.
- Handle<AllocationSite> site = p.site();
- if (p.site().is_null()) return NoChange();
-
- // Attempt to inline calls to the Array constructor for the relevant cases
- // where either no arguments are provided, or exactly one unsigned number
- // argument is given.
- if (site->CanInlineCall()) {
- if (p.arity() == 0) {
- Node* length = jsgraph()->ZeroConstant();
- int capacity = JSArray::kPreallocatedArrayElements;
- return ReduceNewArray(node, length, capacity, site);
- } else if (p.arity() == 1) {
- Node* length = NodeProperties::GetValueInput(node, 2);
- Type* length_type = NodeProperties::GetType(length);
- if (length_type->Is(type_cache_.kElementLoopUnrollType)) {
- int capacity = static_cast<int>(length_type->Max());
- return ReduceNewArray(node, length, capacity, site);
- }
- }
- }
-
- // Reduce {node} to the appropriate ArrayConstructorStub backend.
- // Note that these stubs "behave" like JSFunctions, which means they
- // expect a receiver on the stack, which they remove. We just push
- // undefined for the receiver.
- ElementsKind elements_kind = site->GetElementsKind();
- AllocationSiteOverrideMode override_mode =
- (AllocationSite::GetMode(elements_kind) == TRACK_ALLOCATION_SITE)
- ? DISABLE_ALLOCATION_SITES
- : DONT_OVERRIDE;
- if (p.arity() == 0) {
- ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
- override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
- CallDescriptor::kNeedsFrameState);
- node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
- return Changed(node);
- } else if (p.arity() == 1) {
- // TODO(bmeurer): Optimize for the 0 length non-holey case?
- ArraySingleArgumentConstructorStub stub(
- isolate(), GetHoleyElementsKind(elements_kind), override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
- CallDescriptor::kNeedsFrameState);
- node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(1));
- node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
- return Changed(node);
- } else {
- int const arity = static_cast<int>(p.arity());
- ArrayNArgumentsConstructorStub stub(isolate(), elements_kind,
- override_mode);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(),
- arity + 1, CallDescriptor::kNeedsFrameState);
- node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Int32Constant(arity));
- node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
- NodeProperties::ChangeOp(node, common()->Call(desc));
- return Changed(node);
- }
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateClosure(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
- CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
- Handle<SharedFunctionInfo> shared = p.shared_info();
-
- // Use the FastNewClosureStub that allocates in new space only for nested
- // functions that don't need literals cloning.
- if (p.pretenure() == NOT_TENURED && shared->num_literals() == 0) {
- Isolate* isolate = jsgraph()->isolate();
- Callable callable = CodeFactory::FastNewClosure(
- isolate, shared->language_mode(), shared->kind());
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate, graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags);
- const Operator* new_op = common()->Call(desc);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->InsertInput(graph()->zone(), 0, stub_code);
- node->InsertInput(graph()->zone(), 1, jsgraph()->HeapConstant(shared));
- NodeProperties::ChangeOp(node, new_op);
- return Changed(node);
- }
-
- return NoChange();
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateIterResultObject(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
- Node* value = NodeProperties::GetValueInput(node, 0);
- Node* done = NodeProperties::GetValueInput(node, 1);
- Node* context = NodeProperties::GetContextInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
-
- // Load the JSIteratorResult map for the {context}.
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- Node* iterator_result_map = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::ITERATOR_RESULT_MAP_INDEX, true),
- native_context, native_context, effect);
-
- // Emit code to allocate the JSIteratorResult instance.
- AllocationBuilder a(jsgraph(), effect, graph()->start());
- a.Allocate(JSIteratorResult::kSize);
- a.Store(AccessBuilder::ForMap(), iterator_result_map);
- a.Store(AccessBuilder::ForJSObjectProperties(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSObjectElements(),
- jsgraph()->EmptyFixedArrayConstant());
- a.Store(AccessBuilder::ForJSIteratorResultValue(), value);
- a.Store(AccessBuilder::ForJSIteratorResultDone(), done);
- STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
- a.FinishAndChange(node);
- return Changed(node);
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCreateLiteralArray, node->opcode());
- CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
- Handle<FixedArray> const constants = Handle<FixedArray>::cast(p.constant());
- int const length = constants->length();
- int const flags = p.flags();
-
- // Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
- // initial length limit for arrays with "fast" elements kind.
- // TODO(rossberg): Teach strong mode to FastCloneShallowArrayStub.
- if ((flags & ArrayLiteral::kShallowElements) != 0 &&
- (flags & ArrayLiteral::kIsStrong) == 0 &&
- length < JSArray::kInitialMaxFastElementArray) {
- Isolate* isolate = jsgraph()->isolate();
- Callable callable = CodeFactory::FastCloneShallowArray(isolate);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate, graph()->zone(), callable.descriptor(), 0,
- (OperatorProperties::GetFrameStateInputCount(node->op()) != 0)
- ? CallDescriptor::kNeedsFrameState
- : CallDescriptor::kNoFlags);
- const Operator* new_op = common()->Call(desc);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* literal_index = jsgraph()->SmiConstant(p.index());
- Node* constant_elements = jsgraph()->HeapConstant(constants);
- node->InsertInput(graph()->zone(), 0, stub_code);
- node->InsertInput(graph()->zone(), 2, literal_index);
- node->InsertInput(graph()->zone(), 3, constant_elements);
- NodeProperties::ChangeOp(node, new_op);
- return Changed(node);
- }
-
- return NoChange();
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateLiteralObject(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCreateLiteralObject, node->opcode());
- CreateLiteralParameters const& p = CreateLiteralParametersOf(node->op());
- Handle<FixedArray> const constants = Handle<FixedArray>::cast(p.constant());
- // Constants are pairs, see ObjectLiteral::properties_count().
- int const length = constants->length() / 2;
- int const flags = p.flags();
-
- // Use the FastCloneShallowObjectStub only for shallow boilerplates without
- // elements up to the number of properties that the stubs can handle.
- if ((flags & ObjectLiteral::kShallowProperties) != 0 &&
- length <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
- Isolate* isolate = jsgraph()->isolate();
- Callable callable = CodeFactory::FastCloneShallowObject(isolate, length);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate, graph()->zone(), callable.descriptor(), 0,
- (OperatorProperties::GetFrameStateInputCount(node->op()) != 0)
- ? CallDescriptor::kNeedsFrameState
- : CallDescriptor::kNoFlags);
- const Operator* new_op = common()->Call(desc);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* literal_index = jsgraph()->SmiConstant(p.index());
- Node* literal_flags = jsgraph()->SmiConstant(flags);
- Node* constant_elements = jsgraph()->HeapConstant(constants);
- node->InsertInput(graph()->zone(), 0, stub_code);
- node->InsertInput(graph()->zone(), 2, literal_index);
- node->InsertInput(graph()->zone(), 3, constant_elements);
- node->InsertInput(graph()->zone(), 4, literal_flags);
- NodeProperties::ChangeOp(node, new_op);
- return Changed(node);
- }
-
- return NoChange();
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateFunctionContext(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
- int slot_count = OpParameter<int>(node->op());
- Node* const closure = NodeProperties::GetValueInput(node, 0);
-
- // Use inline allocation for function contexts up to a size limit.
- if (slot_count < kFunctionContextAllocationLimit) {
- // JSCreateFunctionContext[slot_count < limit]](fun)
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* context = NodeProperties::GetContextInput(node);
- Node* extension = jsgraph()->TheHoleConstant();
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- AllocationBuilder a(jsgraph(), effect, control);
- STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
- int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
- a.AllocateArray(context_length, factory()->function_context_map());
- a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
- a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
- a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
- a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
- native_context);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
- a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
- }
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
- }
-
- // Use the FastNewContextStub only for function contexts up to the maximum size.
- if (slot_count <= FastNewContextStub::kMaximumSlots) {
- Isolate* isolate = jsgraph()->isolate();
- Callable callable = CodeFactory::FastNewContext(isolate, slot_count);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate, graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags);
- const Operator* new_op = common()->Call(desc);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->InsertInput(graph()->zone(), 0, stub_code);
- NodeProperties::ChangeOp(node, new_op);
- return Changed(node);
- }
-
- return NoChange();
-}
-
-
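As a rough illustration of the inline allocation path removed above (not part of the patch): a function context is a fixed four-slot header followed by one slot per local, with the locals initialized to undefined. The sketch below uses stand-in types; the slot names mirror V8's Context indices.

```cpp
#include <vector>

// Illustrative model only; nullptr stands in for both the hole and undefined.
enum ContextSlot { kClosure = 0, kPrevious, kExtension, kNativeContext,
                   kMinContextSlots };  // mirrors Context::MIN_CONTEXT_SLOTS == 4

std::vector<const void*> AllocateFunctionContext(int slot_count,
                                                 const void* closure,
                                                 const void* previous,
                                                 const void* native_context) {
  std::vector<const void*> ctx(kMinContextSlots + slot_count, nullptr);
  ctx[kClosure] = closure;               // link to the owning function
  ctx[kPrevious] = previous;             // enclosing context chain
  ctx[kExtension] = nullptr;             // the hole for plain function contexts
  ctx[kNativeContext] = native_context;  // loaded from the current context
  return ctx;
}
```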
-Reduction JSTypedLowering::ReduceJSCreateWithContext(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
- Node* object = NodeProperties::GetValueInput(node, 0);
- Node* closure = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* context = NodeProperties::GetContextInput(node);
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- AllocationBuilder a(jsgraph(), effect, control);
- STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
- a.AllocateArray(Context::MIN_CONTEXT_SLOTS, factory()->with_context_map());
- a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
- a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
- a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), object);
- a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
- native_context);
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateCatchContext(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
- Handle<String> name = OpParameter<Handle<String>>(node);
- Node* exception = NodeProperties::GetValueInput(node, 0);
- Node* closure = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* context = NodeProperties::GetContextInput(node);
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- AllocationBuilder a(jsgraph(), effect, control);
- STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
- a.AllocateArray(Context::MIN_CONTEXT_SLOTS + 1,
- factory()->catch_context_map());
- a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
- a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
- a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), name);
- a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
- native_context);
- a.Store(AccessBuilder::ForContextSlot(Context::THROWN_OBJECT_INDEX),
- exception);
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
-}
-
-
-Reduction JSTypedLowering::ReduceJSCreateBlockContext(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
- Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
- int context_length = scope_info->ContextLength();
- Node* const closure = NodeProperties::GetValueInput(node, 0);
-
- // Use inline allocation for block contexts up to a size limit.
- if (context_length < kBlockContextAllocationLimit) {
- // JSCreateBlockContext[scope[length < limit]](fun)
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
- Node* context = NodeProperties::GetContextInput(node);
- Node* extension = jsgraph()->Constant(scope_info);
- Node* native_context = effect = graph()->NewNode(
- javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
- AllocationBuilder a(jsgraph(), effect, control);
- STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
- a.AllocateArray(context_length, factory()->block_context_map());
- a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
- a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
- a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
- a.Store(AccessBuilder::ForContextSlot(Context::NATIVE_CONTEXT_INDEX),
- native_context);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
- a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->UndefinedConstant());
- }
- RelaxControls(node);
- a.FinishAndChange(node);
- return Changed(node);
- }
-
- return NoChange();
-}
-
-
Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
CallConstructParameters const& p = CallConstructParametersOf(node->op());
@@ -2252,9 +1496,8 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
// Maybe we did at least learn something about the {receiver}.
if (p.convert_mode() != convert_mode) {
NodeProperties::ChangeOp(
- node,
- javascript()->CallFunction(p.arity(), p.language_mode(), p.feedback(),
- convert_mode, p.tail_call_mode()));
+ node, javascript()->CallFunction(p.arity(), p.feedback(), convert_mode,
+ p.tail_call_mode()));
return Changed(node);
}
@@ -2270,159 +1513,6 @@ Reduction JSTypedLowering::ReduceJSForInDone(Node* node) {
}
-Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
- DCHECK_EQ(IrOpcode::kJSForInPrepare, node->opcode());
- Node* receiver = NodeProperties::GetValueInput(node, 0);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // Get the set of properties to enumerate.
- Node* cache_type = effect = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kGetPropertyNamesFast, 1), receiver,
- context, frame_state, effect, control);
- control = graph()->NewNode(common()->IfSuccess(), cache_type);
-
- Node* receiver_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- receiver, effect, control);
- Node* cache_type_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- cache_type, effect, control);
- Node* meta_map = jsgraph()->HeapConstant(factory()->meta_map());
-
- // If we got a map from the GetPropertyNamesFast runtime call, we can do a
- // fast modification check. Otherwise, we got a fixed array, and we have to
- // perform a slow check on every iteration.
- Node* check0 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
- cache_type_map, meta_map);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* cache_array_true0;
- Node* cache_length_true0;
- Node* cache_type_true0;
- Node* etrue0;
- {
- // Enum cache case.
- Node* cache_type_enum_length = etrue0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapBitField3()), cache_type,
- effect, if_true0);
- cache_length_true0 = graph()->NewNode(
- simplified()->NumberBitwiseAnd(), cache_type_enum_length,
- jsgraph()->Int32Constant(Map::EnumLengthBits::kMask));
-
- Node* check1 =
- graph()->NewNode(machine()->Word32Equal(), cache_length_true0,
- jsgraph()->Int32Constant(0));
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* cache_array_true1;
- Node* etrue1;
- {
- // No properties to enumerate.
- cache_array_true1 =
- jsgraph()->HeapConstant(factory()->empty_fixed_array());
- etrue1 = etrue0;
- }
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* cache_array_false1;
- Node* efalse1;
- {
- // Load the enumeration cache from the instance descriptors of {receiver}.
- Node* receiver_map_descriptors = efalse1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapDescriptors()),
- receiver_map, etrue0, if_false1);
- Node* object_map_enum_cache = efalse1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForDescriptorArrayEnumCache()),
- receiver_map_descriptors, efalse1, if_false1);
- cache_array_false1 = efalse1 = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache()),
- object_map_enum_cache, efalse1, if_false1);
- }
-
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- etrue0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
- cache_array_true0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- cache_array_true1, cache_array_false1, if_true0);
-
- cache_type_true0 = cache_type;
- }
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* cache_array_false0;
- Node* cache_length_false0;
- Node* cache_type_false0;
- Node* efalse0;
- {
- // FixedArray case.
- cache_type_false0 = jsgraph()->OneConstant(); // Smi means slow check
- cache_array_false0 = cache_type;
- cache_length_false0 = efalse0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
- cache_array_false0, effect, if_false0);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* cache_array =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- cache_array_true0, cache_array_false0, control);
- Node* cache_length =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- cache_length_true0, cache_length_false0, control);
- cache_type =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- cache_type_true0, cache_type_false0, control);
-
- for (auto edge : node->use_edges()) {
- Node* const use = edge.from();
- if (NodeProperties::IsEffectEdge(edge)) {
- edge.UpdateTo(effect);
- Revisit(use);
- } else {
- if (NodeProperties::IsControlEdge(edge)) {
- if (use->opcode() == IrOpcode::kIfSuccess) {
- Replace(use, control);
- } else if (use->opcode() == IrOpcode::kIfException) {
- edge.UpdateTo(cache_type_true0);
- continue;
- } else {
- UNREACHABLE();
- }
- } else {
- DCHECK(NodeProperties::IsValueEdge(edge));
- DCHECK_EQ(IrOpcode::kProjection, use->opcode());
- switch (ProjectionIndexOf(use->op())) {
- case 0:
- Replace(use, cache_type);
- break;
- case 1:
- Replace(use, cache_array);
- break;
- case 2:
- Replace(use, cache_length);
- break;
- default:
- UNREACHABLE();
- break;
- }
- }
- use->Kill();
- }
- }
- return NoChange(); // All uses were replaced already above.
-}
-
-
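For readers untangling the removed graph construction, here is a minimal C++ model of the semantics it encoded, under the assumption that the GetPropertyNamesFast runtime call yields either a Map whose enum cache can be walked, or a plain array of names that must be re-validated on every iteration. All types are hypothetical stand-ins:

```cpp
#include <string>
#include <vector>

struct Map { std::vector<std::string> enum_cache; };
struct NamesResult {
  const Map* map = nullptr;        // non-null: the enum cache is usable
  std::vector<std::string> names;  // otherwise: a plain list of keys
};

struct ForInState {
  bool fast;                      // models cache_type being a map
  std::vector<std::string> keys;  // models cache_array
  size_t length;                  // models cache_length
};

ForInState PrepareForIn(const NamesResult& r) {
  if (r.map != nullptr) {
    // Enum cache case: keys stay valid as long as the map is unchanged.
    return {true, r.map->enum_cache, r.map->enum_cache.size()};
  }
  // FixedArray case: every key must be filtered against the receiver later.
  return {false, r.names, r.names.size()};
}
```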
Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
DCHECK_EQ(IrOpcode::kJSForInNext, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
@@ -2464,38 +1554,12 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
Node* efalse0;
Node* vfalse0;
{
- // Check if the {cache_type} is zero, which indicates proxy.
- Node* check1 = graph()->NewNode(simplified()->ReferenceEqual(Type::Any()),
- cache_type, jsgraph()->ZeroConstant());
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1;
- Node* vtrue1;
- {
- // Don't do filtering for proxies.
- etrue1 = effect;
- vtrue1 = key;
- }
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1;
- Node* vfalse1;
- {
- // Filter the {key} to check if it's still a valid property of the
- // {receiver} (does the ToName conversion implicitly).
- vfalse1 = efalse1 = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kForInFilter, 2), receiver, key,
- context, frame_state, effect, if_false1);
- if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
- }
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, if_false0);
+ // Filter the {key} to check if it's still a valid property of the
+ // {receiver} (does the ToName conversion implicitly).
+ vfalse0 = efalse0 = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kForInFilter), receiver, key,
+ context, frame_state, effect, if_false0);
+ if_false0 = graph()->NewNode(common()->IfSuccess(), vfalse0);
}
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
@@ -2640,28 +1704,6 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSStoreContext(node);
case IrOpcode::kJSConvertReceiver:
return ReduceJSConvertReceiver(node);
- case IrOpcode::kJSCreate:
- return ReduceJSCreate(node);
- case IrOpcode::kJSCreateArguments:
- return ReduceJSCreateArguments(node);
- case IrOpcode::kJSCreateArray:
- return ReduceJSCreateArray(node);
- case IrOpcode::kJSCreateClosure:
- return ReduceJSCreateClosure(node);
- case IrOpcode::kJSCreateIterResultObject:
- return ReduceJSCreateIterResultObject(node);
- case IrOpcode::kJSCreateLiteralArray:
- return ReduceJSCreateLiteralArray(node);
- case IrOpcode::kJSCreateLiteralObject:
- return ReduceJSCreateLiteralObject(node);
- case IrOpcode::kJSCreateFunctionContext:
- return ReduceJSCreateFunctionContext(node);
- case IrOpcode::kJSCreateWithContext:
- return ReduceJSCreateWithContext(node);
- case IrOpcode::kJSCreateCatchContext:
- return ReduceJSCreateCatchContext(node);
- case IrOpcode::kJSCreateBlockContext:
- return ReduceJSCreateBlockContext(node);
case IrOpcode::kJSCallConstruct:
return ReduceJSCallConstruct(node);
case IrOpcode::kJSCallFunction:
@@ -2670,8 +1712,6 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSForInDone(node);
case IrOpcode::kJSForInNext:
return ReduceJSForInNext(node);
- case IrOpcode::kJSForInPrepare:
- return ReduceJSForInPrepare(node);
case IrOpcode::kJSForInStep:
return ReduceJSForInStep(node);
case IrOpcode::kSelect:
@@ -2690,139 +1730,6 @@ Node* JSTypedLowering::Word32Shl(Node* const lhs, int32_t const rhs) {
}
-// Helper that allocates a FixedArray holding argument values recorded in the
-// given {frame_state}. Serves as backing store for JSCreateArguments nodes.
-Node* JSTypedLowering::AllocateArguments(Node* effect, Node* control,
- Node* frame_state) {
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
- int argument_count = state_info.parameter_count() - 1; // Minus receiver.
- if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
-
- // Prepare an iterator over argument values recorded in the frame state.
- Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
- StateValuesAccess parameters_access(parameters);
- auto parameters_it = ++parameters_access.begin();
-
- // Actually allocate the backing store.
- AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateArray(argument_count, factory()->fixed_array_map());
- for (int i = 0; i < argument_count; ++i, ++parameters_it) {
- a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
- }
- return a.Finish();
-}
-
-
-// Helper that allocates a FixedArray holding the argument values beyond
-// {start_index} recorded in the given {frame_state}. Serves as backing
-// store for rest parameters of JSCreateArguments nodes.
-Node* JSTypedLowering::AllocateRestArguments(Node* effect, Node* control,
- Node* frame_state,
- int start_index) {
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
- int argument_count = state_info.parameter_count() - 1; // Minus receiver.
- int num_elements = std::max(0, argument_count - start_index);
- if (num_elements == 0) return jsgraph()->EmptyFixedArrayConstant();
-
- // Prepare an iterator over argument values recorded in the frame state.
- Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
- StateValuesAccess parameters_access(parameters);
- auto parameters_it = ++parameters_access.begin();
-
- // Skip unused arguments.
- for (int i = 0; i < start_index; i++) {
- ++parameters_it;
- }
-
- // Actually allocate the backing store.
- AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateArray(num_elements, factory()->fixed_array_map());
- for (int i = 0; i < num_elements; ++i, ++parameters_it) {
- a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
- }
- return a.Finish();
-}
-
-
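The index arithmetic in that helper is easy to get wrong around the receiver, which occupies the first frame-state parameter slot. A small self-contained sketch, with illustrative types:

```cpp
#include <algorithm>
#include <vector>

// frame_params models the frame-state parameters: receiver first, then args.
std::vector<int> RestArguments(const std::vector<int>& frame_params,
                               int start_index) {
  int argument_count = static_cast<int>(frame_params.size()) - 1;  // no receiver
  int num_elements = std::max(0, argument_count - start_index);
  std::vector<int> rest;
  rest.reserve(num_elements);
  for (int i = 0; i < num_elements; ++i) {
    rest.push_back(frame_params[1 + start_index + i]);  // +1 skips the receiver
  }
  return rest;
}
```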
-// Helper that allocates a FixedArray serving as a parameter map for values
-// recorded in the given {frame_state}. Some elements map to slots within the
-// given {context}. Serves as backing store for JSCreateArguments nodes.
-Node* JSTypedLowering::AllocateAliasedArguments(
- Node* effect, Node* control, Node* frame_state, Node* context,
- Handle<SharedFunctionInfo> shared, bool* has_aliased_arguments) {
- FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
- int argument_count = state_info.parameter_count() - 1; // Minus receiver.
- if (argument_count == 0) return jsgraph()->EmptyFixedArrayConstant();
-
- // If there is no aliasing, the arguments object elements are not special in
- // any way, we can just return an unmapped backing store instead.
- int parameter_count = shared->internal_formal_parameter_count();
- if (parameter_count == 0) {
- return AllocateArguments(effect, control, frame_state);
- }
-
- // Calculate number of argument values being aliased/mapped.
- int mapped_count = Min(argument_count, parameter_count);
- *has_aliased_arguments = true;
-
- // Prepare an iterator over argument values recorded in the frame state.
- Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
- StateValuesAccess parameters_access(parameters);
-  auto parameters_it = ++parameters_access.begin();
-
- // The unmapped argument values recorded in the frame state are stored yet
- // another indirection away and then linked into the parameter map below,
- // whereas mapped argument values are replaced with a hole instead.
- AllocationBuilder aa(jsgraph(), effect, control);
- aa.AllocateArray(argument_count, factory()->fixed_array_map());
-  for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
- aa.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->TheHoleConstant());
- }
-  for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
-    aa.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
- }
- Node* arguments = aa.Finish();
-
- // Actually allocate the backing store.
- AllocationBuilder a(jsgraph(), arguments, control);
- a.AllocateArray(mapped_count + 2, factory()->sloppy_arguments_elements_map());
- a.Store(AccessBuilder::ForFixedArraySlot(0), context);
- a.Store(AccessBuilder::ForFixedArraySlot(1), arguments);
- for (int i = 0; i < mapped_count; ++i) {
- int idx = Context::MIN_CONTEXT_SLOTS + parameter_count - 1 - i;
- a.Store(AccessBuilder::ForFixedArraySlot(i + 2), jsgraph()->Constant(idx));
- }
- return a.Finish();
-}
-
-
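A sketch of the two arrays this helper builds for sloppy aliased arguments, with hypothetical stand-in types: -1 plays the role of the hole, and the first two parameter-map slots are placeholders for the context and elements pointers. Note the reversed context-slot indexing for mapped parameters:

```cpp
#include <algorithm>
#include <vector>

constexpr int kMinContextSlots = 4;  // mirrors Context::MIN_CONTEXT_SLOTS

struct AliasedArguments {
  std::vector<int> elements;       // holes (-1) for mapped slots, else values
  std::vector<int> parameter_map;  // [context, elements, context indices...]
};

AliasedArguments BuildAliasedArguments(const std::vector<int>& args,
                                       int parameter_count) {
  int argument_count = static_cast<int>(args.size());
  int mapped = std::min(argument_count, parameter_count);
  AliasedArguments out;
  out.elements = args;
  for (int i = 0; i < mapped; ++i) out.elements[i] = -1;  // "the hole"
  out.parameter_map = {/*context*/ 0, /*elements array*/ 1};  // placeholders
  for (int i = 0; i < mapped; ++i) {
    // Mapped slot i aliases the context slot that holds parameter i.
    out.parameter_map.push_back(kMinContextSlots + parameter_count - 1 - i);
  }
  return out;
}
```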
-Node* JSTypedLowering::AllocateElements(Node* effect, Node* control,
- ElementsKind elements_kind,
- int capacity, PretenureFlag pretenure) {
- DCHECK_LE(1, capacity);
- DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
-
- Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
- ? factory()->fixed_double_array_map()
- : factory()->fixed_array_map();
- ElementAccess access = IsFastDoubleElementsKind(elements_kind)
- ? AccessBuilder::ForFixedDoubleArrayElement()
- : AccessBuilder::ForFixedArrayElement();
- Node* value =
- IsFastDoubleElementsKind(elements_kind)
- ? jsgraph()->Float64Constant(bit_cast<double>(kHoleNanInt64))
- : jsgraph()->TheHoleConstant();
-
- // Actually allocate the backing store.
- AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateArray(capacity, elements_map, pretenure);
- for (int i = 0; i < capacity; ++i) {
- Node* index = jsgraph()->Constant(i);
- a.Store(access, index, value);
- }
- return a.Finish();
-}
-
-
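The bit_cast of kHoleNanInt64 above is how double-element backing stores mark holes: a dedicated NaN payload that the engine can tell apart from user-produced NaNs. A sketch of producing such a value in portable C++; the exact bit pattern here is an assumption, not necessarily V8's constant:

```cpp
#include <cstdint>
#include <cstring>

double HoleNaN() {
  // Assumed payload; stands in for V8's kHoleNanInt64.
  const uint64_t kHoleNanBits = UINT64_C(0x7FF7FFFFFFF7FFFF);
  double hole;
  std::memcpy(&hole, &kHoleNanBits, sizeof hole);  // portable bit_cast
  return hole;
}
```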
Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 68ce74e624..4621a45e28 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -68,41 +68,18 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSToString(Node* node);
Reduction ReduceJSToObject(Node* node);
Reduction ReduceJSConvertReceiver(Node* node);
- Reduction ReduceJSCreate(Node* node);
- Reduction ReduceJSCreateArguments(Node* node);
- Reduction ReduceJSCreateArray(Node* node);
- Reduction ReduceJSCreateClosure(Node* node);
- Reduction ReduceJSCreateIterResultObject(Node* node);
- Reduction ReduceJSCreateLiteralArray(Node* node);
- Reduction ReduceJSCreateLiteralObject(Node* node);
- Reduction ReduceJSCreateFunctionContext(Node* node);
- Reduction ReduceJSCreateWithContext(Node* node);
- Reduction ReduceJSCreateCatchContext(Node* node);
- Reduction ReduceJSCreateBlockContext(Node* node);
Reduction ReduceJSCallConstruct(Node* node);
Reduction ReduceJSCallFunction(Node* node);
Reduction ReduceJSForInDone(Node* node);
Reduction ReduceJSForInNext(Node* node);
- Reduction ReduceJSForInPrepare(Node* node);
Reduction ReduceJSForInStep(Node* node);
Reduction ReduceSelect(Node* node);
Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
Reduction ReduceInt32Binop(Node* node, const Operator* intOp);
Reduction ReduceUI32Shift(Node* node, Signedness left_signedness,
const Operator* shift_op);
- Reduction ReduceNewArray(Node* node, Node* length, int capacity,
- Handle<AllocationSite> site);
Node* Word32Shl(Node* const lhs, int32_t const rhs);
- Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
- Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
- int start_index);
- Node* AllocateAliasedArguments(Node* effect, Node* control, Node* frame_state,
- Node* context, Handle<SharedFunctionInfo>,
- bool* has_aliased_arguments);
- Node* AllocateElements(Node* effect, Node* control,
- ElementsKind elements_kind, int capacity,
- PretenureFlag pretenure);
Factory* factory() const;
Graph* graph() const;
@@ -115,10 +92,6 @@ class JSTypedLowering final : public AdvancedReducer {
CompilationDependencies* dependencies() const;
Flags flags() const { return flags_; }
- // Limits up to which context allocations are inlined.
- static const int kFunctionContextAllocationLimit = 16;
- static const int kBlockContextAllocationLimit = 16;
-
CompilationDependencies* dependencies_;
Flags flags_;
JSGraph* jsgraph_;
diff --git a/deps/v8/src/compiler/jump-threading.cc b/deps/v8/src/compiler/jump-threading.cc
index 7b53b5cbc3..5abd34633b 100644
--- a/deps/v8/src/compiler/jump-threading.cc
+++ b/deps/v8/src/compiler/jump-threading.cc
@@ -53,10 +53,10 @@ struct JumpThreadingState {
RpoNumber onstack() { return RpoNumber::FromInt(-2); }
};
-
bool JumpThreading::ComputeForwarding(Zone* local_zone,
ZoneVector<RpoNumber>& result,
- InstructionSequence* code) {
+ InstructionSequence* code,
+ bool frame_at_start) {
ZoneStack<RpoNumber> stack(local_zone);
JumpThreadingState state = {false, result, stack};
state.Clear(code->InstructionBlockCount());
@@ -91,7 +91,14 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
} else if (instr->arch_opcode() == kArchJmp) {
// try to forward the jump instruction.
TRACE(" jmp\n");
- fw = code->InputRpo(instr, 0);
+ // if this block deconstructs the frame, we can't forward it.
+ // TODO(mtrofin): we can still forward if we end up building
+ // the frame at start. So we should move the decision of whether
+ // to build a frame or not in the register allocator, and trickle it
+ // here and to the code generator.
+ if (frame_at_start || !block->must_deconstruct_frame()) {
+ fw = code->InputRpo(instr, 0);
+ }
fallthru = false;
} else {
// can't skip other instructions.
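A minimal model of the forwarding rule introduced here, with illustrative types. The real pass pushes blocks on a stack to detect cycles; that bookkeeping is omitted:

```cpp
#include <vector>

struct Block {
  bool is_plain_jump;           // block consists of a single kArchJmp
  bool must_deconstruct_frame;  // block tears down the frame before jumping
  int jump_target;              // index of the destination block
};

// Thread through chains of empty jump blocks, but stop at a block that
// deconstructs the frame unless the frame is already built at function start.
int ForwardTo(const std::vector<Block>& blocks, int b, bool frame_at_start) {
  int cur = b;
  while (blocks[cur].is_plain_jump &&
         (frame_at_start || !blocks[cur].must_deconstruct_frame)) {
    cur = blocks[cur].jump_target;
  }
  return cur;
}
```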
diff --git a/deps/v8/src/compiler/jump-threading.h b/deps/v8/src/compiler/jump-threading.h
index fa74ee9a52..84520ba3ed 100644
--- a/deps/v8/src/compiler/jump-threading.h
+++ b/deps/v8/src/compiler/jump-threading.h
@@ -18,7 +18,7 @@ class JumpThreading {
// Compute the forwarding map of basic blocks to their ultimate destination.
// Returns {true} if there is at least one block that is forwarded.
static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>& result,
- InstructionSequence* code);
+ InstructionSequence* code, bool frame_at_start);
// Rewrite the instructions to forward jumps and branches.
// May also negate some branches.
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 2eef9291e9..d4a366563a 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -63,9 +63,6 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor::Kind& k) {
case CallDescriptor::kCallAddress:
os << "Addr";
break;
- case CallDescriptor::kLazyBailout:
- os << "LazyBail";
- break;
}
return os;
}
@@ -120,14 +117,7 @@ bool CallDescriptor::CanTailCall(const Node* node,
CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
- if (info->code_stub() != nullptr) {
- // Use the code stub interface descriptor.
- CodeStub* stub = info->code_stub();
- CallInterfaceDescriptor descriptor = stub->GetCallInterfaceDescriptor();
- return GetStubCallDescriptor(
- info->isolate(), zone, descriptor, stub->GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties);
- }
+ DCHECK(!info->IsStub());
if (info->has_literal()) {
// If we already have the function literal, use the number of parameters
// plus the receiver.
@@ -155,13 +145,14 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
switch (function) {
case Runtime::kAllocateInTargetSpace:
case Runtime::kCreateIterResultObject:
- case Runtime::kDefineClassMethod: // TODO(jarin): Is it safe?
+ case Runtime::kDefineDataPropertyInLiteral:
case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kFinalizeClassDefinition: // TODO(conradw): Is it safe?
case Runtime::kForInDone:
case Runtime::kForInStep:
case Runtime::kGetSuperConstructor:
+ case Runtime::kIsFunction:
case Runtime::kNewClosure:
case Runtime::kNewClosure_Tenured:
case Runtime::kNewFunctionContext:
@@ -174,8 +165,6 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
case Runtime::kTraceEnter:
case Runtime::kTraceExit:
return 0;
- case Runtime::kInlineArguments:
- case Runtime::kInlineArgumentsLength:
case Runtime::kInlineGetPrototype:
case Runtime::kInlineRegExpConstructResult:
case Runtime::kInlineRegExpExec:
@@ -242,6 +231,9 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
if (locations.return_count_ > 1) {
locations.AddReturn(regloc(kReturnRegister1));
}
+ if (locations.return_count_ > 2) {
+ locations.AddReturn(regloc(kReturnRegister2));
+ }
for (size_t i = 0; i < return_count; i++) {
types.AddReturn(MachineType::AnyTagged());
}
@@ -287,31 +279,6 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
}
-CallDescriptor* Linkage::GetLazyBailoutDescriptor(Zone* zone) {
- const size_t return_count = 0;
- const size_t parameter_count = 0;
-
- LocationSignature::Builder locations(zone, return_count, parameter_count);
- MachineSignature::Builder types(zone, return_count, parameter_count);
-
- // The target is ignored, but we need to give some values here.
- MachineType target_type = MachineType::AnyTagged();
- LinkageLocation target_loc = regloc(kJSFunctionRegister);
- return new (zone) CallDescriptor( // --
- CallDescriptor::kLazyBailout, // kind
- target_type, // target MachineType
- target_loc, // target location
- types.Build(), // machine_sig
- locations.Build(), // location_sig
- 0, // stack_parameter_count
- Operator::kNoThrow, // properties
- kNoCalleeSaved, // callee-saved
- kNoCalleeSaved, // callee-saved fp
- CallDescriptor::kNeedsFrameState, // flags
- "lazy-bailout");
-}
-
-
CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
int js_parameter_count,
CallDescriptor::Flags flags) {
@@ -350,10 +317,10 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
// The target for JS function calls is the JSFunction object.
MachineType target_type = MachineType::AnyTagged();
- // TODO(titzer): When entering into an OSR function from unoptimized code,
- // the JSFunction is not in a register, but it is on the stack in an
- // unaddressable spill slot. We hack this in the OSR prologue. Fix.
- LinkageLocation target_loc = regloc(kJSFunctionRegister);
+ // When entering into an OSR function from unoptimized code the JSFunction
+ // is not in a register, but it is on the stack in the marker spill slot.
+ LinkageLocation target_loc = is_osr ? LinkageLocation::ForSavedCallerMarker()
+ : regloc(kJSFunctionRegister);
return new (zone) CallDescriptor( // --
CallDescriptor::kCallJSFunction, // kind
target_type, // target MachineType
@@ -369,60 +336,6 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
"js-call");
}
-
-CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
- MachineSignature::Builder types(zone, 0, 6);
- LocationSignature::Builder locations(zone, 0, 6);
-
- // Add registers for fixed parameters passed via interpreter dispatch.
- STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
- types.AddParam(MachineType::AnyTagged());
- locations.AddParam(regloc(kInterpreterAccumulatorRegister));
-
- STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
- types.AddParam(MachineType::Pointer());
- locations.AddParam(regloc(kInterpreterRegisterFileRegister));
-
- STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
- types.AddParam(MachineType::IntPtr());
- locations.AddParam(regloc(kInterpreterBytecodeOffsetRegister));
-
- STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
- types.AddParam(MachineType::AnyTagged());
- locations.AddParam(regloc(kInterpreterBytecodeArrayRegister));
-
- STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
- types.AddParam(MachineType::Pointer());
-#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
- // TODO(rmcilroy): Make the context param the one spilled to the stack once
- // Turbofan supports modified stack arguments in tail calls.
- locations.AddParam(
- LinkageLocation::ForCallerFrameSlot(kInterpreterDispatchTableSpillSlot));
-#else
- locations.AddParam(regloc(kInterpreterDispatchTableRegister));
-#endif
-
- STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
- types.AddParam(MachineType::AnyTagged());
- locations.AddParam(regloc(kContextRegister));
-
- LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
- return new (zone) CallDescriptor( // --
- CallDescriptor::kCallCodeObject, // kind
- MachineType::None(), // target MachineType
- target_loc, // target location
- types.Build(), // machine_sig
- locations.Build(), // location_sig
- 0, // stack_parameter_count
- Operator::kNoProperties, // properties
- kNoCalleeSaved, // callee-saved registers
- kNoCalleeSaved, // callee-saved fp regs
- CallDescriptor::kSupportsTailCalls | // flags
- CallDescriptor::kCanUseRoots, // flags
- "interpreter-dispatch");
-}
-
-
// TODO(all): Add support for return representations/locations to
// CallInterfaceDescriptor.
// TODO(turbofan): cache call descriptors for code stub calls.
@@ -448,6 +361,9 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
if (locations.return_count_ > 1) {
locations.AddReturn(regloc(kReturnRegister1));
}
+ if (locations.return_count_ > 2) {
+ locations.AddReturn(regloc(kReturnRegister2));
+ }
for (size_t i = 0; i < return_count; i++) {
types.AddReturn(return_type);
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 252f044321..3012f56e01 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -76,6 +76,12 @@ class LinkageLocation {
kPointerSize);
}
+ static LinkageLocation ForSavedCallerMarker() {
+ return ForCalleeFrameSlot((StandardFrameConstants::kCallerPCOffset -
+ StandardFrameConstants::kMarkerOffset) /
+ kPointerSize);
+ }
+
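A back-of-the-envelope check of the slot arithmetic, with assumed values for the frame constants (the real ones are per-architecture and live in StandardFrameConstants):

```cpp
#include <cstdio>

int main() {
  const int kPointerSize = 8;     // assumption: 64-bit target
  const int kCallerPCOffset = 8;  // assumed frame-constant value
  const int kMarkerOffset = -16;  // assumed frame-constant value
  int slot = (kCallerPCOffset - kMarkerOffset) / kPointerSize;
  std::printf("saved caller marker slot: %d\n", slot);  // 3 with these values
  return 0;
}
```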
static LinkageLocation ConvertToTailCallerLocation(
LinkageLocation caller_location, int stack_param_delta) {
if (!caller_location.IsRegister()) {
@@ -140,8 +146,7 @@ class CallDescriptor final : public ZoneObject {
enum Kind {
kCallCodeObject, // target is a Code object
kCallJSFunction, // target is a JSFunction object
- kCallAddress, // target is a machine pointer
- kLazyBailout // the call is no-op, only used for lazy bailout
+ kCallAddress // target is a machine pointer
};
enum Flag {
@@ -153,9 +158,12 @@ class CallDescriptor final : public ZoneObject {
kHasLocalCatchHandler = 1u << 4,
kSupportsTailCalls = 1u << 5,
kCanUseRoots = 1u << 6,
- // Indicates that the native stack should be used for a code object. This
- // information is important for native calls on arm64.
+ // (arm64 only) native stack should be used for arguments.
kUseNativeStack = 1u << 7,
+ // (arm64 only) call instruction has to restore JSSP.
+ kRestoreJSSP = 1u << 8,
+ // Causes the code generator to initialize the root register.
+ kInitializeRootRegister = 1u << 9,
kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
};
typedef base::Flags<Flag> Flags;
@@ -222,6 +230,9 @@ class CallDescriptor final : public ZoneObject {
bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
bool SupportsTailCalls() const { return flags() & kSupportsTailCalls; }
bool UseNativeStack() const { return flags() & kUseNativeStack; }
+ bool InitializeRootRegister() const {
+ return flags() & kInitializeRootRegister;
+ }
LinkageLocation GetReturnLocation(size_t index) const {
return location_sig_->GetReturn(index);
@@ -313,8 +324,6 @@ class Linkage : public ZoneObject {
Zone* zone, Runtime::FunctionId function, int parameter_count,
Operator::Properties properties, CallDescriptor::Flags flags);
- static CallDescriptor* GetLazyBailoutDescriptor(Zone* zone);
-
static CallDescriptor* GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
@@ -326,13 +335,9 @@ class Linkage : public ZoneObject {
// for the host platform. This simplified calling convention only supports
// integers and pointers of one word size each, i.e. no floating point,
// structs, pointers to members, etc.
- static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
- const MachineSignature* sig);
-
- // Creates a call descriptor for interpreter handler code stubs. These are not
- // intended to be called directly but are instead dispatched to by the
- // interpreter.
- static CallDescriptor* GetInterpreterDispatchDescriptor(Zone* zone);
+ static CallDescriptor* GetSimplifiedCDescriptor(
+ Zone* zone, const MachineSignature* sig,
+ bool set_initialize_root_flag = false);
// Get the location of an (incoming) parameter to this function.
LinkageLocation GetParameterLocation(int index) const {
@@ -383,15 +388,6 @@ class Linkage : public ZoneObject {
// A special {OsrValue} index to indicate the context spill slot.
static const int kOsrContextSpillSlotIndex = -1;
- // Special parameter indices used to pass fixed register data through
- // interpreter dispatches.
- static const int kInterpreterAccumulatorParameter = 0;
- static const int kInterpreterRegisterFileParameter = 1;
- static const int kInterpreterBytecodeOffsetParameter = 2;
- static const int kInterpreterBytecodeArrayParameter = 3;
- static const int kInterpreterDispatchTableParameter = 4;
- static const int kInterpreterContextParameter = 5;
-
private:
CallDescriptor* const incoming_;
diff --git a/deps/v8/src/compiler/live-range-separator.cc b/deps/v8/src/compiler/live-range-separator.cc
index 980c9442bc..e3cd0a3137 100644
--- a/deps/v8/src/compiler/live-range-separator.cc
+++ b/deps/v8/src/compiler/live-range-separator.cc
@@ -119,8 +119,10 @@ void LiveRangeSeparator::Splinter() {
void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
+ const InstructionSequence *code = data()->code();
for (TopLevelLiveRange *top : data()->live_ranges()) {
- if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr) {
+ if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr ||
+ top->HasSpillOperand() || !top->splinter()->HasSpillRange()) {
continue;
}
@@ -131,7 +133,10 @@ void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
break;
}
}
- if (child == nullptr) top->MarkSpilledInDeferredBlock();
+ if (child == nullptr) {
+ top->TreatAsSpilledInDeferredBlock(data()->allocation_zone(),
+ code->InstructionBlockCount());
+ }
}
}
diff --git a/deps/v8/src/compiler/liveness-analyzer.h b/deps/v8/src/compiler/liveness-analyzer.h
index 1e2f85b45e..9b09724eef 100644
--- a/deps/v8/src/compiler/liveness-analyzer.h
+++ b/deps/v8/src/compiler/liveness-analyzer.h
@@ -85,6 +85,10 @@ class LivenessAnalyzerBlock {
void Bind(int var) { entries_.push_back(Entry(Entry::kBind, var)); }
void Checkpoint(Node* node) { entries_.push_back(Entry(node)); }
void AddPredecessor(LivenessAnalyzerBlock* b) { predecessors_.push_back(b); }
+ LivenessAnalyzerBlock* GetPredecessor() {
+ DCHECK(predecessors_.size() == 1);
+ return predecessors_[0];
+ }
private:
class Entry {
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 511a10dd02..3b6f21b151 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -91,6 +91,10 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
return OpParameter<CheckedStoreRepresentation>(op);
}
+MachineRepresentation StackSlotRepresentationOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kStackSlot, op->opcode());
+ return OpParameter<MachineRepresentation>(op);
+}
#define PURE_OP_LIST(V) \
V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
@@ -144,13 +148,17 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 0, 1) \
V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(TruncateFloat32ToUint32, Operator::kNoProperties, 1, 0, 1) \
V(TryTruncateFloat32ToInt64, Operator::kNoProperties, 1, 0, 2) \
V(TryTruncateFloat64ToInt64, Operator::kNoProperties, 1, 0, 2) \
V(TryTruncateFloat32ToUint64, Operator::kNoProperties, 1, 0, 2) \
V(TryTruncateFloat64ToUint64, Operator::kNoProperties, 1, 0, 2) \
V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(RoundInt64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(RoundInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
+ V(RoundUint32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(RoundUint64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(RoundUint64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 0, 1) \
@@ -186,11 +194,14 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1) \
- V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1)
+ V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
+ V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1)
#define PURE_OPTIONAL_OP_LIST(V) \
V(Word32Ctz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Ctz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word32ReverseBits, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64ReverseBits, Operator::kNoProperties, 1, 0, 1) \
V(Word32Popcnt, Operator::kNoProperties, 1, 0, 1) \
V(Word64Popcnt, Operator::kNoProperties, 1, 0, 1) \
V(Float32Max, Operator::kNoProperties, 2, 0, 1) \
@@ -207,10 +218,10 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Float32RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1)
-
#define MACHINE_TYPE_LIST(V) \
V(Float32) \
V(Float64) \
+ V(Simd128) \
V(Int8) \
V(Uint8) \
V(Int16) \
@@ -222,17 +233,16 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Pointer) \
V(AnyTagged)
-
#define MACHINE_REPRESENTATION_LIST(V) \
V(kFloat32) \
V(kFloat64) \
+ V(kSimd128) \
V(kWord8) \
V(kWord16) \
V(kWord32) \
V(kWord64) \
V(kTagged)
-
struct MachineOperatorGlobalCache {
#define PURE(Name, properties, value_input_count, control_input_count, \
output_count) \
@@ -279,6 +289,18 @@ struct MachineOperatorGlobalCache {
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
+#define STACKSLOT(Type) \
+ struct StackSlot##Type##Operator final \
+ : public Operator1<MachineRepresentation> { \
+ StackSlot##Type##Operator() \
+ : Operator1<MachineRepresentation>( \
+ IrOpcode::kStackSlot, Operator::kNoThrow, "StackSlot", 0, 0, 0, \
+ 1, 0, 0, MachineType::Type().representation()) {} \
+ }; \
+ StackSlot##Type##Operator kStackSlot##Type;
+ MACHINE_TYPE_LIST(STACKSLOT)
+#undef STACKSLOT
+
#define STORE(Type) \
struct Store##Type##Operator : public Operator1<StoreRepresentation> { \
explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind) \
@@ -379,6 +401,16 @@ const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
return nullptr;
}
+const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
+#define STACKSLOT(Type) \
+ if (rep == MachineType::Type().representation()) { \
+ return &cache_.kStackSlot##Type; \
+ }
+ MACHINE_TYPE_LIST(STACKSLOT)
+#undef STACKSLOT
+ UNREACHABLE();
+ return nullptr;
+}
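The STACKSLOT machinery above follows a common V8 pattern: one statically allocated operator per machine type, selected by a macro-expanded if-chain. A self-contained analogue with simplified stand-in types:

```cpp
enum class Rep { kFloat32, kFloat64, kWord32 };

struct StackSlotOp { Rep rep; };

#define REP_LIST(V) V(Float32) V(Float64) V(Word32)

// One cached singleton per representation, as the STACKSLOT macro does.
#define DEFINE_CACHED(Type) \
  static const StackSlotOp kStackSlot##Type{Rep::k##Type};
REP_LIST(DEFINE_CACHED)
#undef DEFINE_CACHED

const StackSlotOp* StackSlot(Rep rep) {
#define LOOKUP(Type) \
  if (rep == Rep::k##Type) return &kStackSlot##Type;
  REP_LIST(LOOKUP)
#undef LOOKUP
  return nullptr;  // UNREACHABLE() in the real code
}
```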
const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
switch (store_rep.representation()) {
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 00fefe3539..c5a80aa609 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -102,6 +102,7 @@ typedef MachineRepresentation CheckedStoreRepresentation;
CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
+MachineRepresentation StackSlotRepresentationOf(Operator const* op);
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
@@ -134,12 +135,15 @@ class MachineOperatorBuilder final : public ZoneObject {
kWord64Ctz = 1u << 17,
kWord32Popcnt = 1u << 18,
kWord64Popcnt = 1u << 19,
+ kWord32ReverseBits = 1u << 20,
+ kWord64ReverseBits = 1u << 21,
kAllOptionalOps = kFloat32Max | kFloat32Min | kFloat64Max | kFloat64Min |
kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
kFloat64RoundUp | kFloat32RoundTruncate |
kFloat64RoundTruncate | kFloat64RoundTiesAway |
kFloat32RoundTiesEven | kFloat64RoundTiesEven |
- kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt
+ kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
+ kWord32ReverseBits | kWord64ReverseBits
};
typedef base::Flags<Flag, unsigned> Flags;
@@ -160,6 +164,8 @@ class MachineOperatorBuilder final : public ZoneObject {
const OptionalOperator Word32Ctz();
const OptionalOperator Word32Popcnt();
const OptionalOperator Word64Popcnt();
+ const OptionalOperator Word32ReverseBits();
+ const OptionalOperator Word64ReverseBits();
bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
const Operator* Word64And();
@@ -213,6 +219,8 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* ChangeFloat32ToFloat64();
const Operator* ChangeFloat64ToInt32(); // narrowing
const Operator* ChangeFloat64ToUint32(); // narrowing
+ const Operator* TruncateFloat32ToInt32();
+ const Operator* TruncateFloat32ToUint32();
const Operator* TryTruncateFloat32ToInt64();
const Operator* TryTruncateFloat64ToInt64();
const Operator* TryTruncateFloat32ToUint64();
@@ -227,8 +235,10 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* TruncateFloat64ToFloat32();
const Operator* TruncateFloat64ToInt32(TruncationMode);
const Operator* TruncateInt64ToInt32();
+ const Operator* RoundInt32ToFloat32();
const Operator* RoundInt64ToFloat32();
const Operator* RoundInt64ToFloat64();
+ const Operator* RoundUint32ToFloat32();
const Operator* RoundUint64ToFloat32();
const Operator* RoundUint64ToFloat64();
@@ -303,9 +313,12 @@ class MachineOperatorBuilder final : public ZoneObject {
// store [base + index], value
const Operator* Store(StoreRepresentation rep);
+ const Operator* StackSlot(MachineRepresentation rep);
+
// Access to the machine stack.
const Operator* LoadStackPointer();
const Operator* LoadFramePointer();
+ const Operator* LoadParentFramePointer();
// checked-load heap, index, length
const Operator* CheckedLoad(CheckedLoadRepresentation);
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 75e4b9e7a8..cdd7e34866 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -227,19 +227,25 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
- if (mode_ > RecordWriteMode::kValueIsMap) {
- __ CheckPageFlag(value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingMask, eq,
- exit());
- }
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+ : OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- // TODO(turbofan): Once we get frame elision working, we need to save
- // and restore lr properly here if the frame was elided.
+ if (!frame()->needs_frame()) {
+ // We need to save and restore ra if the frame was elided.
+ __ Push(ra);
+ }
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- EMIT_REMEMBERED_SET, save_fp_mode);
+ remembered_set_action, save_fp_mode);
__ Addu(scratch1_, object_, index_);
__ CallStub(&stub);
+ if (!frame()->needs_frame()) {
+ __ Pop(ra);
+ }
}
private:
@@ -546,11 +552,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
- case kArchLazyBailout: {
- EnsureSpaceForLazyDeopt();
- RecordCallPosition(instr);
- break;
- }
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -604,6 +605,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchFramePointer:
__ mov(i.OutputRegister(), fp);
break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->frame()->needs_frame()) {
+ __ lw(i.OutputRegister(), MemOperand(fp, 0));
+ } else {
+ __ mov(i.OutputRegister(), fp);
+ }
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
@@ -625,6 +633,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ bind(ool->exit());
break;
}
+ case kArchStackSlot: {
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ __ Addu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
+ Operand(offset.offset()));
+ break;
+ }
case kMipsAdd:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -688,6 +703,70 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMipsClz:
__ Clz(i.OutputRegister(), i.InputRegister(0));
break;
+ case kMipsCtz: {
+ Register reg1 = kScratchReg;
+ Register reg2 = kScratchReg2;
+ Label skip_for_zero;
+ Label end;
+    // Branch if the operand is zero.
+ __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
+    // Isolate the least-significant set bit and count the bits above it.
+ __ Subu(reg2, zero_reg, i.InputRegister(0));
+ __ And(reg2, reg2, i.InputRegister(0));
+ __ clz(reg2, reg2);
+    // The trailing-zero count is 31 minus that, i.e. the bits below it.
+ __ li(reg1, 0x1F);
+ __ Subu(i.OutputRegister(), reg1, reg2);
+ __ Branch(&end);
+ __ bind(&skip_for_zero);
+ // If the operand is zero, return word length as the result.
+ __ li(i.OutputRegister(), 0x20);
+ __ bind(&end);
+ } break;
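In C terms the sequence implements ctz(x) = 31 - clz(x & -x), with a zero input mapped to the word width. A sketch using the GCC/Clang builtin as a stand-in for the clz instruction:

```cpp
#include <cstdint>

int Ctz32(uint32_t x) {
  if (x == 0) return 32;               // the skip_for_zero path
  uint32_t lowest = x & (0u - x);      // Subu + And: isolate the lowest set bit
  return 31 - __builtin_clz(lowest);   // clz, then li(0x1F) + Subu
}
```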
+ case kMipsPopcnt: {
+ Register reg1 = kScratchReg;
+ Register reg2 = kScratchReg2;
+ uint32_t m1 = 0x55555555;
+ uint32_t m2 = 0x33333333;
+ uint32_t m4 = 0x0f0f0f0f;
+ uint32_t m8 = 0x00ff00ff;
+ uint32_t m16 = 0x0000ffff;
+
+ // Put count of ones in every 2 bits into those 2 bits.
+ __ li(at, m1);
+ __ srl(reg1, i.InputRegister(0), 1);
+ __ And(reg2, i.InputRegister(0), at);
+ __ And(reg1, reg1, at);
+ __ addu(reg1, reg1, reg2);
+
+ // Put count of ones in every 4 bits into those 4 bits.
+ __ li(at, m2);
+ __ srl(reg2, reg1, 2);
+ __ And(reg2, reg2, at);
+ __ And(reg1, reg1, at);
+ __ addu(reg1, reg1, reg2);
+
+ // Put count of ones in every 8 bits into those 8 bits.
+ __ li(at, m4);
+ __ srl(reg2, reg1, 4);
+ __ And(reg2, reg2, at);
+ __ And(reg1, reg1, at);
+ __ addu(reg1, reg1, reg2);
+
+ // Put count of ones in every 16 bits into those 16 bits.
+ __ li(at, m8);
+ __ srl(reg2, reg1, 8);
+ __ And(reg2, reg2, at);
+ __ And(reg1, reg1, at);
+ __ addu(reg1, reg1, reg2);
+
+ // Calculate total number of ones.
+ __ li(at, m16);
+ __ srl(reg2, reg1, 16);
+ __ And(reg2, reg2, at);
+ __ And(reg1, reg1, at);
+ __ addu(i.OutputRegister(), reg1, reg2);
+ } break;
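The mask constants implement the classic parallel bit count, folding per-field sums into fields twice as wide at each step. A C model of the exact sequence emitted above:

```cpp
#include <cstdint>

int Popcnt32(uint32_t x) {
  x = (x & 0x55555555u) + ((x >> 1) & 0x55555555u);   // m1: 2-bit fields
  x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);   // m2: 4-bit fields
  x = (x & 0x0f0f0f0fu) + ((x >> 4) & 0x0f0f0f0fu);   // m4: 8-bit fields
  x = (x & 0x00ff00ffu) + ((x >> 8) & 0x00ff00ffu);   // m8: 16-bit fields
  x = (x & 0x0000ffffu) + ((x >> 16) & 0x0000ffffu);  // m16: full word
  return static_cast<int>(x);
}
```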
case kMipsShl:
if (instr->InputAt(1)->IsRegister()) {
__ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -950,6 +1029,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cvt_s_w(i.OutputDoubleRegister(), scratch);
break;
}
+ case kMipsCvtSUw: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
+ __ cvt_s_d(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ break;
+ }
case kMipsCvtDUw: {
FPURegister scratch = kScratchDoubleReg;
__ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
@@ -1010,6 +1095,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
break;
}
+ case kMipsTruncUwS: {
+ FPURegister scratch = kScratchDoubleReg;
+ // TODO(plind): Fix wrong param order of Trunc_uw_s() macro-asm function.
+ __ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+ break;
+ }
case kMipsFloat64ExtractLowWord32:
__ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
break;
@@ -1416,19 +1507,10 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
MipsOperandConverter i(this, instr);
Register input = i.InputRegister(0);
size_t const case_count = instr->InputCount() - 2;
- Label here;
__ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
- __ BlockTrampolinePoolFor(case_count + 6);
- __ bal(&here);
- __ sll(at, input, 2); // Branch delay slot.
- __ bind(&here);
- __ addu(at, at, ra);
- __ lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
- __ jr(at);
- __ nop(); // Branch delay slot nop.
- for (size_t index = 0; index < case_count; ++index) {
- __ dd(GetLabel(i.InputRpo(index + 2)));
- }
+ __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
+ return GetLabel(i.InputRpo(index + 2));
+ });
}
@@ -1465,8 +1547,6 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- // TODO(titzer): cannot address target function == local #-1
- __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index c9381775c8..64aecd0ee4 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -28,6 +28,8 @@ namespace compiler {
V(MipsNor) \
V(MipsXor) \
V(MipsClz) \
+ V(MipsCtz) \
+ V(MipsPopcnt) \
V(MipsShl) \
V(MipsShr) \
V(MipsSar) \
@@ -76,9 +78,11 @@ namespace compiler {
V(MipsFloorWS) \
V(MipsCeilWS) \
V(MipsTruncUwD) \
+ V(MipsTruncUwS) \
V(MipsCvtDW) \
V(MipsCvtDUw) \
V(MipsCvtSW) \
+ V(MipsCvtSUw) \
V(MipsLb) \
V(MipsLbu) \
V(MipsSb) \
@@ -103,7 +107,6 @@ namespace compiler {
V(MipsStoreToStackSlot) \
V(MipsStackClaim)
-
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 61cea76b22..df972f73bc 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -151,7 +151,8 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kMipsLw;
break;
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -231,7 +232,8 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kMipsSw;
break;
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -401,10 +403,19 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
}
-void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32Ctz(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsPopcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
void InstructionSelector::VisitInt32Add(Node* node) {
@@ -503,6 +514,16 @@ void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
}
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ VisitRR(this, kMipsCvtSW, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ VisitRR(this, kMipsCvtSUw, node);
+}
+
+
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
VisitRR(this, kMipsCvtDW, node);
}
@@ -513,6 +534,16 @@ void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
}
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ VisitRR(this, kMipsTruncWS, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ VisitRR(this, kMipsTruncUwS, node);
+}
+
+
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
MipsOperandGenerator g(this);
Node* value = node->InputAt(0);
@@ -821,9 +852,11 @@ void InstructionSelector::EmitPrepareArguments(
// Poke any stack arguments.
int slot = kCArgSlotCount;
for (PushParameter input : (*arguments)) {
- Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
- g.TempImmediate(slot << kPointerSizeLog2));
- ++slot;
+ if (input.node()) {
+ Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node()),
+ g.TempImmediate(slot << kPointerSizeLog2));
+ ++slot;
+ }
}
} else {
// Possibly align stack here for functions.
@@ -869,9 +902,10 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1318,7 +1352,9 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesEven;
}
- return flags | MachineOperatorBuilder::kInt32DivIsSafe |
+ return flags | MachineOperatorBuilder::kWord32Ctz |
+ MachineOperatorBuilder::kWord32Popcnt |
+ MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kFloat64Min |
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 1b81aa5698..373a1a6abc 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -227,19 +227,25 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
- if (mode_ > RecordWriteMode::kValueIsMap) {
- __ CheckPageFlag(value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingMask, eq,
- exit());
- }
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+ : OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- // TODO(turbofan): Once we get frame elision working, we need to save
- // and restore lr properly here if the frame was elided.
+ if (!frame()->needs_frame()) {
+ // We need to save and restore ra if the frame was elided.
+ __ Push(ra);
+ }
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- EMIT_REMEMBERED_SET, save_fp_mode);
+ remembered_set_action, save_fp_mode);
__ Daddu(scratch1_, object_, index_);
__ CallStub(&stub);
+ if (!frame()->needs_frame()) {
+ __ Pop(ra);
+ }
}
private:
@@ -556,11 +562,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
- case kArchLazyBailout: {
- EnsureSpaceForLazyDeopt();
- RecordCallPosition(instr);
- break;
- }
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -614,6 +615,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchFramePointer:
__ mov(i.OutputRegister(), fp);
break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->frame()->needs_frame()) {
+ __ ld(i.OutputRegister(), MemOperand(fp, 0));
+ } else {
+ __ mov(i.OutputRegister(), fp);
+ }
+ break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
@@ -635,6 +643,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ bind(ool->exit());
break;
}
+ case kArchStackSlot: {
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ __ Daddu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
+ Operand(offset.offset()));
+ break;
+ }
case kMips64Add:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
@@ -735,6 +750,142 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64Dclz:
__ dclz(i.OutputRegister(), i.InputRegister(0));
break;
+ case kMips64Ctz: {
+ Register reg1 = kScratchReg;
+ Register reg2 = kScratchReg2;
+ Label skip_for_zero;
+ Label end;
+ // Branch if the operand is zero.
+ __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
+ // Isolate the least-significant set bit and count the zero bits above it.
+ __ Subu(reg2, zero_reg, i.InputRegister(0));
+ __ And(reg2, reg2, i.InputRegister(0));
+ __ clz(reg2, reg2);
+ // The trailing-zero count is 31 minus that leading-zero count.
+ __ li(reg1, 0x1F);
+ __ Subu(i.OutputRegister(), reg1, reg2);
+ __ Branch(&end);
+ __ bind(&skip_for_zero);
+ // If the operand is zero, return word length as the result.
+ __ li(i.OutputRegister(), 0x20);
+ __ bind(&end);
+ } break;
+ case kMips64Dctz: {
+ Register reg1 = kScratchReg;
+ Register reg2 = kScratchReg2;
+ Label skip_for_zero;
+ Label end;
+ // Branch if the operand is zero.
+ __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
+ // Isolate the least-significant set bit and count the zero bits above it.
+ __ Dsubu(reg2, zero_reg, i.InputRegister(0));
+ __ And(reg2, reg2, i.InputRegister(0));
+ __ dclz(reg2, reg2);
+ // The trailing-zero count is 63 minus that leading-zero count.
+ __ li(reg1, 0x3F);
+ __ Subu(i.OutputRegister(), reg1, reg2);
+ __ Branch(&end);
+ __ bind(&skip_for_zero);
+ // If the operand is zero, return word length as the result.
+ __ li(i.OutputRegister(), 0x40);
+ __ bind(&end);
+ } break;
+ case kMips64Popcnt: {
+ Register reg1 = kScratchReg;
+ Register reg2 = kScratchReg2;
+ uint32_t m1 = 0x55555555;
+ uint32_t m2 = 0x33333333;
+ uint32_t m4 = 0x0f0f0f0f;
+ uint32_t m8 = 0x00ff00ff;
+ uint32_t m16 = 0x0000ffff;
+
+ // Put count of ones in every 2 bits into those 2 bits.
+ __ li(at, m1);
+ __ dsrl(reg1, i.InputRegister(0), 1);
+ __ And(reg2, i.InputRegister(0), at);
+ __ And(reg1, reg1, at);
+ __ Daddu(reg1, reg1, reg2);
+
+ // Put count of ones in every 4 bits into those 4 bits.
+ __ li(at, m2);
+ __ dsrl(reg2, reg1, 2);
+ __ And(reg2, reg2, at);
+ __ And(reg1, reg1, at);
+ __ Daddu(reg1, reg1, reg2);
+
+ // Put count of ones in every 8 bits into those 8 bits.
+ __ li(at, m4);
+ __ dsrl(reg2, reg1, 4);
+ __ And(reg2, reg2, at);
+ __ And(reg1, reg1, at);
+ __ Daddu(reg1, reg1, reg2);
+
+ // Put count of ones in every 16 bits into those 16 bits.
+ __ li(at, m8);
+ __ dsrl(reg2, reg1, 8);
+ __ And(reg2, reg2, at);
+ __ And(reg1, reg1, at);
+ __ Daddu(reg1, reg1, reg2);
+
+ // Calculate total number of ones.
+ __ li(at, m16);
+ __ dsrl(reg2, reg1, 16);
+ __ And(reg2, reg2, at);
+ __ And(reg1, reg1, at);
+ __ Daddu(i.OutputRegister(), reg1, reg2);
+ } break;
+ case kMips64Dpopcnt: {
+ Register reg1 = kScratchReg;
+ Register reg2 = kScratchReg2;
+ uint64_t m1 = 0x5555555555555555;
+ uint64_t m2 = 0x3333333333333333;
+ uint64_t m4 = 0x0f0f0f0f0f0f0f0f;
+ uint64_t m8 = 0x00ff00ff00ff00ff;
+ uint64_t m16 = 0x0000ffff0000ffff;
+ uint64_t m32 = 0x00000000ffffffff;
+
+ // Put count of ones in every 2 bits into those 2 bits.
+ __ li(at, m1);
+ __ dsrl(reg1, i.InputRegister(0), 1);
+ __ and_(reg2, i.InputRegister(0), at);
+ __ and_(reg1, reg1, at);
+ __ Daddu(reg1, reg1, reg2);
+
+ // Put count of ones in every 4 bits into those 4 bits.
+ __ li(at, m2);
+ __ dsrl(reg2, reg1, 2);
+ __ and_(reg2, reg2, at);
+ __ and_(reg1, reg1, at);
+ __ Daddu(reg1, reg1, reg2);
+
+ // Put count of ones in every 8 bits into those 8 bits.
+ __ li(at, m4);
+ __ dsrl(reg2, reg1, 4);
+ __ and_(reg2, reg2, at);
+ __ and_(reg1, reg1, at);
+ __ Daddu(reg1, reg1, reg2);
+
+ // Put count of ones in every 16 bits into those 16 bits.
+ __ li(at, m8);
+ __ dsrl(reg2, reg1, 8);
+ __ and_(reg2, reg2, at);
+ __ and_(reg1, reg1, at);
+ __ Daddu(reg1, reg1, reg2);
+
+ // Put count of ones in every 32 bits into those 32 bits.
+ __ li(at, m16);
+ __ dsrl(reg2, reg1, 16);
+ __ and_(reg2, reg2, at);
+ __ and_(reg1, reg1, at);
+ __ Daddu(reg1, reg1, reg2);
+
+ // Calculate total number of ones.
+ __ li(at, m32);
+ __ dsrl32(reg2, reg1, 0);
+ __ and_(reg2, reg2, at);
+ __ and_(reg1, reg1, at);
+ __ Daddu(i.OutputRegister(), reg1, reg2);
+ } break;
case kMips64Shl:
if (instr->InputAt(1)->IsRegister()) {
__ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -1065,6 +1216,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cvt_s_w(i.OutputDoubleRegister(), scratch);
break;
}
+ case kMips64CvtSUw: {
+ __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
case kMips64CvtSL: {
FPURegister scratch = kScratchDoubleReg;
__ dmtc1(i.InputRegister(0), scratch);
@@ -1200,6 +1355,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
break;
}
+ case kMips64TruncUwS: {
+ FPURegister scratch = kScratchDoubleReg;
+ // TODO(plind): Fix wrong param order of the Trunc_uw_s() macro-asm function.
+ __ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+ break;
+ }
case kMips64TruncUlS: {
FPURegister scratch = kScratchDoubleReg;
Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
@@ -1648,27 +1809,15 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
AssembleArchJump(i.InputRpo(1));
}
-
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
MipsOperandConverter i(this, instr);
Register input = i.InputRegister(0);
size_t const case_count = instr->InputCount() - 2;
- Label here;
__ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
- __ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7);
- // Ensure that dd-ed labels use 8 byte aligned addresses.
- __ Align(8);
- __ bal(&here);
- __ dsll(at, input, 3); // Branch delay slot.
- __ bind(&here);
- __ daddu(at, at, ra);
- __ ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
- __ jr(at);
- __ nop(); // Branch delay slot nop.
- for (size_t index = 0; index < case_count; ++index) {
- __ dd(GetLabel(i.InputRpo(index + 2)));
- }
+ __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
+ return GetLabel(i.InputRpo(index + 2));
+ });
}
@@ -1705,8 +1854,6 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- // TODO(titzer): cannot address target function == local #-1
- __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 778c6add0f..9e94c090cd 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -44,6 +44,10 @@ namespace compiler {
V(Mips64Dext) \
V(Mips64Dins) \
V(Mips64Dclz) \
+ V(Mips64Ctz) \
+ V(Mips64Dctz) \
+ V(Mips64Popcnt) \
+ V(Mips64Dpopcnt) \
V(Mips64Dshl) \
V(Mips64Dshr) \
V(Mips64Dsar) \
@@ -93,11 +97,13 @@ namespace compiler {
V(Mips64TruncLS) \
V(Mips64TruncLD) \
V(Mips64TruncUwD) \
+ V(Mips64TruncUwS) \
V(Mips64TruncUlS) \
V(Mips64TruncUlD) \
V(Mips64CvtDW) \
V(Mips64CvtSL) \
V(Mips64CvtSW) \
+ V(Mips64CvtSUw) \
V(Mips64CvtSUl) \
V(Mips64CvtDL) \
V(Mips64CvtDUw) \
@@ -130,7 +136,6 @@ namespace compiler {
V(Mips64StoreToStackSlot) \
V(Mips64StackClaim)
-
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 1b12bd9aec..44a5470aca 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -159,6 +159,7 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord64:
opcode = kMips64Ld;
break;
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -241,6 +242,7 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64:
opcode = kMips64Sd;
break;
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -562,16 +564,36 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
}
-void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32Ctz(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
-void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord64Ctz(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Dctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Popcnt, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Dpopcnt, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
void InstructionSelector::VisitWord64Ror(Node* node) {
@@ -802,6 +824,16 @@ void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
}
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ VisitRR(this, kMips64CvtSW, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ VisitRR(this, kMips64CvtSUw, node);
+}
+
+
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
VisitRR(this, kMips64CvtDW, node);
}
@@ -812,6 +844,16 @@ void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
}
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ VisitRR(this, kMips64TruncWS, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ VisitRR(this, kMips64TruncUwS, node);
+}
+
+
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
Mips64OperandGenerator g(this);
Node* value = node->InputAt(0);
@@ -1307,6 +1349,7 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
break;
case MachineRepresentation::kBit:
case MachineRepresentation::kTagged:
+ case MachineRepresentation::kSimd128:
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1356,6 +1399,7 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
break;
case MachineRepresentation::kBit:
case MachineRepresentation::kTagged:
+ case MachineRepresentation::kSimd128:
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -1846,7 +1890,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kWord32ShiftIsSafe |
+ return MachineOperatorBuilder::kWord32Ctz |
+ MachineOperatorBuilder::kWord64Ctz |
+ MachineOperatorBuilder::kWord32Popcnt |
+ MachineOperatorBuilder::kWord64Popcnt |
+ MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe |
MachineOperatorBuilder::kFloat64Min |
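
SupportedMachineOperatorFlags() returns a bitmask ORed together from MachineOperatorBuilder::Flag values; advertising kWord32Ctz, kWord64Ctz and the popcnt flags tells the front end that these operators need no generic lowering on mips64. A hedged sketch of how a client might query the mask (illustrative call site, not the exact V8 one):

    MachineOperatorBuilder::Flags flags =
        InstructionSelector::SupportedMachineOperatorFlags();
    if (flags & MachineOperatorBuilder::kWord32Popcnt) {
      // The selector pattern-matches Word32Popcnt nodes directly;
      // otherwise the builder would have to expand popcnt out of
      // shifts and masks.
    }
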
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
index bde3f7fe36..477f139a14 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/move-optimizer.cc
@@ -10,14 +10,17 @@ namespace compiler {
namespace {
-typedef std::pair<InstructionOperand, InstructionOperand> MoveKey;
+struct MoveKey {
+ InstructionOperand source;
+ InstructionOperand destination;
+};
struct MoveKeyCompare {
bool operator()(const MoveKey& a, const MoveKey& b) const {
- if (a.first.EqualsCanonicalized(b.first)) {
- return a.second.CompareCanonicalized(b.second);
+ if (a.source.EqualsCanonicalized(b.source)) {
+ return a.destination.CompareCanonicalized(b.destination);
}
- return a.first.CompareCanonicalized(b.first);
+ return a.source.CompareCanonicalized(b.source);
}
};
@@ -32,39 +35,6 @@ typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
-bool GapsCanMoveOver(Instruction* instr, Zone* zone) {
- if (instr->IsNop()) return true;
- if (instr->ClobbersTemps() || instr->ClobbersRegisters() ||
- instr->ClobbersDoubleRegisters()) {
- return false;
- }
- if (instr->arch_opcode() != ArchOpcode::kArchNop) return false;
-
- ZoneSet<InstructionOperand, OperandCompare> operands(zone);
- for (size_t i = 0; i < instr->InputCount(); ++i) {
- operands.insert(*instr->InputAt(i));
- }
- for (size_t i = 0; i < instr->OutputCount(); ++i) {
- operands.insert(*instr->OutputAt(i));
- }
- for (size_t i = 0; i < instr->TempCount(); ++i) {
- operands.insert(*instr->TempAt(i));
- }
- for (int i = Instruction::GapPosition::FIRST_GAP_POSITION;
- i <= Instruction::GapPosition::LAST_GAP_POSITION; ++i) {
- ParallelMove* moves = instr->parallel_moves()[i];
- if (moves == nullptr) continue;
- for (MoveOperands* move : *moves) {
- if (operands.count(move->source()) > 0 ||
- operands.count(move->destination()) > 0) {
- return false;
- }
- }
- }
- return true;
-}
-
-
int FindFirstNonEmptySlot(const Instruction* instr) {
int i = Instruction::FIRST_GAP_POSITION;
for (; i <= Instruction::LAST_GAP_POSITION; i++) {
@@ -85,11 +55,13 @@ int FindFirstNonEmptySlot(const Instruction* instr) {
MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
: local_zone_(local_zone),
code_(code),
- to_finalize_(local_zone),
local_vector_(local_zone) {}
void MoveOptimizer::Run() {
+ for (Instruction* instruction : code()->instructions()) {
+ CompressGaps(instruction);
+ }
for (InstructionBlock* block : code()->instruction_blocks()) {
CompressBlock(block);
}
@@ -111,13 +83,140 @@ void MoveOptimizer::Run() {
}
OptimizeMerge(block);
}
- for (Instruction* gap : to_finalize_) {
+ for (Instruction* gap : code()->instructions()) {
FinalizeMoves(gap);
}
}
+void MoveOptimizer::RemoveClobberedDestinations(Instruction* instruction) {
+ if (instruction->IsCall()) return;
+ ParallelMove* moves = instruction->parallel_moves()[0];
+ if (moves == nullptr) return;
+
+ DCHECK(instruction->parallel_moves()[1] == nullptr ||
+ instruction->parallel_moves()[1]->empty());
+
+ OperandSet outputs(local_zone());
+ OperandSet inputs(local_zone());
+
+ // Outputs and temps are treated together as potentially clobbering a
+ // destination operand.
+ for (size_t i = 0; i < instruction->OutputCount(); ++i) {
+ outputs.insert(*instruction->OutputAt(i));
+ }
+ for (size_t i = 0; i < instruction->TempCount(); ++i) {
+ outputs.insert(*instruction->TempAt(i));
+ }
+
+ // Input operands block elisions.
+ for (size_t i = 0; i < instruction->InputCount(); ++i) {
+ inputs.insert(*instruction->InputAt(i));
+ }
+
+ // Elide moves made redundant by the instruction.
+ for (MoveOperands* move : *moves) {
+ if (outputs.find(move->destination()) != outputs.end() &&
+ inputs.find(move->destination()) == inputs.end()) {
+ move->Eliminate();
+ }
+ }
+
+ // The ret instruction makes every assignment before it dead, except the
+ // ones feeding its input.
+ if (instruction->opcode() == ArchOpcode::kArchRet) {
+ for (MoveOperands* move : *moves) {
+ if (inputs.find(move->destination()) == inputs.end()) {
+ move->Eliminate();
+ }
+ }
+ }
+}
+
+void MoveOptimizer::MigrateMoves(Instruction* to, Instruction* from) {
+ if (from->IsCall()) return;
+
+ ParallelMove* from_moves = from->parallel_moves()[0];
+ if (from_moves == nullptr || from_moves->empty()) return;
+
+ ZoneSet<InstructionOperand, OperandCompare> dst_cant_be(local_zone());
+ ZoneSet<InstructionOperand, OperandCompare> src_cant_be(local_zone());
+
+ // If an operand is an input to the instruction, we cannot move assignments
+ // where it appears on the LHS.
+ for (size_t i = 0; i < from->InputCount(); ++i) {
+ dst_cant_be.insert(*from->InputAt(i));
+ }
+ // If an operand is an output of the instruction, we cannot move assignments
+ // where it appears on the RHS, because we would lose its value before the
+ // instruction.
+ // Same for temp operands.
+ // The output can't appear on the LHS because we performed
+ // RemoveClobberedDestinations for the "from" instruction.
+ for (size_t i = 0; i < from->OutputCount(); ++i) {
+ src_cant_be.insert(*from->OutputAt(i));
+ }
+ for (size_t i = 0; i < from->TempCount(); ++i) {
+ src_cant_be.insert(*from->TempAt(i));
+ }
+ for (MoveOperands* move : *from_moves) {
+ if (move->IsRedundant()) continue;
+ // Assume dest has a value "V". If we have a "dest = y" move, then we can't
+ // move "z = dest", because z would become y rather than "V".
+ // We assume CompressMoves has happened before this, which means we don't
+ // have more than one assignment to dest.
+ src_cant_be.insert(move->destination());
+ }
+
+ ZoneSet<MoveKey, MoveKeyCompare> move_candidates(local_zone());
+ // Start with the assumption that all moves without a conflicting source or
+ // destination operand are eligible for being moved down.
+ for (MoveOperands* move : *from_moves) {
+ if (move->IsRedundant()) continue;
+ if (dst_cant_be.find(move->destination()) == dst_cant_be.end()) {
+ MoveKey key = {move->source(), move->destination()};
+ move_candidates.insert(key);
+ }
+ }
+ if (move_candidates.empty()) return;
+
+ // Stabilize the candidate set.
+ bool changed = false;
+ do {
+ changed = false;
+ for (auto iter = move_candidates.begin(); iter != move_candidates.end();) {
+ auto current = iter;
+ ++iter;
+ InstructionOperand src = current->source;
+ if (src_cant_be.find(src) != src_cant_be.end()) {
+ src_cant_be.insert(current->destination);
+ move_candidates.erase(current);
+ changed = true;
+ }
+ }
+ } while (changed);
+
+ ParallelMove to_move(local_zone());
+ for (MoveOperands* move : *from_moves) {
+ if (move->IsRedundant()) continue;
+ MoveKey key = {move->source(), move->destination()};
+ if (move_candidates.find(key) != move_candidates.end()) {
+ to_move.AddMove(move->source(), move->destination(), code_zone());
+ move->Eliminate();
+ }
+ }
+ if (to_move.empty()) return;
+
+ ParallelMove* dest =
+ to->GetOrCreateParallelMove(Instruction::GapPosition::START, code_zone());
-void MoveOptimizer::CompressMoves(ParallelMove* left, ParallelMove* right) {
+ CompressMoves(&to_move, dest);
+ DCHECK(dest->empty());
+ for (MoveOperands* m : to_move) {
+ dest->push_back(m);
+ }
+}
+
+void MoveOptimizer::CompressMoves(ParallelMove* left, MoveOpVector* right) {
if (right == nullptr) return;
MoveOpVector& eliminated = local_vector();
@@ -147,54 +246,49 @@ void MoveOptimizer::CompressMoves(ParallelMove* left, ParallelMove* right) {
DCHECK(eliminated.empty());
}
+void MoveOptimizer::CompressGaps(Instruction* instruction) {
+ int i = FindFirstNonEmptySlot(instruction);
+ bool has_moves = i <= Instruction::LAST_GAP_POSITION;
+ USE(has_moves);
+
+ if (i == Instruction::LAST_GAP_POSITION) {
+ std::swap(instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION],
+ instruction->parallel_moves()[Instruction::LAST_GAP_POSITION]);
+ } else if (i == Instruction::FIRST_GAP_POSITION) {
+ CompressMoves(
+ instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION],
+ instruction->parallel_moves()[Instruction::LAST_GAP_POSITION]);
+ }
+ // We either have no moves, or, after swapping or compressing, we have
+ // all the moves in the first gap position, and none in the second/end gap
+ // position.
+ ParallelMove* first =
+ instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION];
+ ParallelMove* last =
+ instruction->parallel_moves()[Instruction::LAST_GAP_POSITION];
+ USE(first);
+ USE(last);
+
+ DCHECK(!has_moves ||
+ (first != nullptr && (last == nullptr || last->empty())));
+}
-// Smash all consecutive moves into the left most move slot and accumulate them
-// as much as possible across instructions.
void MoveOptimizer::CompressBlock(InstructionBlock* block) {
- Instruction* prev_instr = nullptr;
- for (int index = block->code_start(); index < block->code_end(); ++index) {
- Instruction* instr = code()->instructions()[index];
- int i = FindFirstNonEmptySlot(instr);
- bool has_moves = i <= Instruction::LAST_GAP_POSITION;
-
- if (i == Instruction::LAST_GAP_POSITION) {
- std::swap(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
- instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
- } else if (i == Instruction::FIRST_GAP_POSITION) {
- CompressMoves(instr->parallel_moves()[Instruction::FIRST_GAP_POSITION],
- instr->parallel_moves()[Instruction::LAST_GAP_POSITION]);
- }
- // We either have no moves, or, after swapping or compressing, we have
- // all the moves in the first gap position, and none in the second/end gap
- // position.
- ParallelMove* first =
- instr->parallel_moves()[Instruction::FIRST_GAP_POSITION];
- ParallelMove* last =
- instr->parallel_moves()[Instruction::LAST_GAP_POSITION];
- USE(last);
-
- DCHECK(!has_moves ||
- (first != nullptr && (last == nullptr || last->empty())));
-
- if (prev_instr != nullptr) {
- if (has_moves) {
- // Smash first into prev_instr, killing left.
- ParallelMove* pred_moves = prev_instr->parallel_moves()[0];
- CompressMoves(pred_moves, first);
- }
- // Slide prev_instr down so we always know where to look for it.
- std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]);
- }
+ int first_instr_index = block->first_instruction_index();
+ int last_instr_index = block->last_instruction_index();
- prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr;
- if (GapsCanMoveOver(instr, local_zone())) continue;
- if (prev_instr != nullptr) {
- to_finalize_.push_back(prev_instr);
- prev_instr = nullptr;
- }
- }
- if (prev_instr != nullptr) {
- to_finalize_.push_back(prev_instr);
+ // Start by removing gap assignments whose destinations are clobbered by the
+ // instruction that follows the gap, as long as its inputs do not need them.
+ Instruction* prev_instr = code()->instructions()[first_instr_index];
+ RemoveClobberedDestinations(prev_instr);
+
+ for (int index = first_instr_index + 1; index <= last_instr_index; ++index) {
+ Instruction* instr = code()->instructions()[index];
+ // Migrate eligible moves from the gap of prev_instr into the gap of instr.
+ MigrateMoves(instr, prev_instr);
+ // Remove gap assignments clobbered by instr's output.
+ RemoveClobberedDestinations(instr);
+ prev_instr = instr;
}
}
@@ -211,6 +305,12 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
// things that would prevent moving gap moves across them.
for (RpoNumber& pred_index : block->predecessors()) {
const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
+
+ // If the predecessor has more than one successor, we shouldn't attempt to
+ // move any of its gap moves down into this block (one of those successors),
+ // because their effect may still be needed by the other successors.
+ if (pred->SuccessorCount() > 1) return;
+
const Instruction* last_instr =
code()->instructions()[pred->last_instruction_index()];
if (last_instr->IsCall()) return;
@@ -246,21 +346,54 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
}
}
}
- if (move_map.empty() || correct_counts != move_map.size()) return;
+ if (move_map.empty() || correct_counts == 0) return;
+
// Find insertion point.
- Instruction* instr = nullptr;
- for (int i = block->first_instruction_index();
- i <= block->last_instruction_index(); ++i) {
- instr = code()->instructions()[i];
- if (!GapsCanMoveOver(instr, local_zone()) || !instr->AreMovesRedundant())
- break;
+ Instruction* instr = code()->instructions()[block->first_instruction_index()];
+
+ if (correct_counts != move_map.size()) {
+ // Moves that are unique to each predecessor won't be pushed to the common
+ // successor.
+ OperandSet conflicting_srcs(local_zone());
+ for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) {
+ auto current = iter;
+ ++iter;
+ if (current->second != block->PredecessorCount()) {
+ InstructionOperand dest = current->first.destination;
+ // Not all the moves in all the gaps are the same. Maybe some are. If
+ // there are such moves, we could move them, but the destination of the
+ // moves staying behind can't appear as a source of a common move,
+ // because the move staying behind will clobber this destination.
+ conflicting_srcs.insert(dest);
+ move_map.erase(current);
+ }
+ }
+
+ bool changed = false;
+ do {
+ // If a common move can't be pushed to the common successor, then its
+ // destination also can't appear as source to any move being pushed.
+ changed = false;
+ for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) {
+ auto current = iter;
+ ++iter;
+ DCHECK_EQ(block->PredecessorCount(), current->second);
+ if (conflicting_srcs.find(current->first.source) !=
+ conflicting_srcs.end()) {
+ conflicting_srcs.insert(current->first.destination);
+ move_map.erase(current);
+ changed = true;
+ }
+ }
+ } while (changed);
}
+
+ if (move_map.empty()) return;
+
DCHECK_NOT_NULL(instr);
bool gap_initialized = true;
- if (instr->parallel_moves()[0] == nullptr ||
- instr->parallel_moves()[0]->empty()) {
- to_finalize_.push_back(instr);
- } else {
+ if (instr->parallel_moves()[0] != nullptr &&
+ !instr->parallel_moves()[0]->empty()) {
// Will compress after insertion.
gap_initialized = false;
std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]);
@@ -275,12 +408,12 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
if (move->IsRedundant()) continue;
MoveKey key = {move->source(), move->destination()};
auto it = move_map.find(key);
- USE(it);
- DCHECK(it != move_map.end());
- if (first_iteration) {
- moves->AddMove(move->source(), move->destination());
+ if (it != move_map.end()) {
+ if (first_iteration) {
+ moves->AddMove(move->source(), move->destination());
+ }
+ move->Eliminate();
}
- move->Eliminate();
}
first_iteration = false;
}
@@ -288,6 +421,7 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
if (!gap_initialized) {
CompressMoves(instr->parallel_moves()[0], instr->parallel_moves()[1]);
}
+ CompressBlock(block);
}
@@ -316,8 +450,10 @@ void MoveOptimizer::FinalizeMoves(Instruction* instr) {
MoveOpVector& loads = local_vector();
DCHECK(loads.empty());
+ ParallelMove* parallel_moves = instr->parallel_moves()[0];
+ if (parallel_moves == nullptr) return;
// Find all the loads.
- for (MoveOperands* move : *instr->parallel_moves()[0]) {
+ for (MoveOperands* move : *parallel_moves) {
if (move->IsRedundant()) continue;
if (move->source().IsConstant() || IsSlot(move->source())) {
loads.push_back(move);
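
MigrateMoves stabilizes its candidate set with a fixed-point loop: once a move must stay behind, its destination poisons the source of every other candidate, which may in turn force further candidates to stay behind. A toy model of that pruning with strings as operands (a simplification; Migratable and the string operands are illustrative, not V8 types):

    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    using Move = std::pair<std::string, std::string>;  // {destination, source}

    std::vector<Move> Migratable(std::vector<Move> candidates,
                                 std::set<std::string> src_cant_be) {
      bool changed = true;
      while (changed) {
        changed = false;
        for (auto it = candidates.begin(); it != candidates.end();) {
          if (src_cant_be.count(it->second) != 0) {
            // This move stays behind, so its destination may no longer
            // be read by any move migrating past the instruction.
            src_cant_be.insert(it->first);
            it = candidates.erase(it);
            changed = true;
          } else {
            ++it;
          }
        }
      }
      return candidates;  // safe to push down into the next gap
    }
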
diff --git a/deps/v8/src/compiler/move-optimizer.h b/deps/v8/src/compiler/move-optimizer.h
index c9a3289d6b..8e932a0d73 100644
--- a/deps/v8/src/compiler/move-optimizer.h
+++ b/deps/v8/src/compiler/move-optimizer.h
@@ -26,15 +26,30 @@ class MoveOptimizer final {
Zone* code_zone() const { return code()->zone(); }
MoveOpVector& local_vector() { return local_vector_; }
- void CompressBlock(InstructionBlock* blocke);
- void CompressMoves(ParallelMove* left, ParallelMove* right);
+ // Consolidate moves into the first gap.
+ void CompressGaps(Instruction* instr);
+
+ // Attempt to push moves down toward the last instruction of the block.
+ void CompressBlock(InstructionBlock* block);
+
+ // Merge the moves from right into left, eliminating redundant assignments.
+ void CompressMoves(ParallelMove* left, MoveOpVector* right);
+
+ // Push down the moves in the gap of {from} that change neither the
+ // semantics of {from} nor the semantics of the moves that remain behind.
+ void MigrateMoves(Instruction* to, Instruction* from);
+
+ void RemoveClobberedDestinations(Instruction* instruction);
+
const Instruction* LastInstruction(const InstructionBlock* block) const;
+
+ // Consolidate common moves appearing across all predecessors of a block.
void OptimizeMerge(InstructionBlock* block);
void FinalizeMoves(Instruction* instr);
Zone* const local_zone_;
InstructionSequence* const code_;
- Instructions to_finalize_;
MoveOpVector local_vector_;
DISALLOW_COPY_AND_ASSIGN(MoveOptimizer);
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index cb6c3c43d8..ac9cc34dd9 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -4,11 +4,12 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
+#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/verifier.h"
-#include "src/types-inl.h"
+#include "src/handles-inl.h"
namespace v8 {
namespace internal {
@@ -123,6 +124,7 @@ bool NodeProperties::IsControlEdge(Edge edge) {
// static
bool NodeProperties::IsExceptionalCall(Node* node) {
+ if (node->op()->HasProperty(Operator::kNoThrow)) return false;
for (Edge const edge : node->use_edges()) {
if (!NodeProperties::IsControlEdge(edge)) continue;
if (edge.from()->opcode() == IrOpcode::kIfException) return true;
@@ -334,6 +336,16 @@ MaybeHandle<Context> NodeProperties::GetSpecializationNativeContext(
Node* node, MaybeHandle<Context> native_context) {
while (true) {
switch (node->opcode()) {
+ case IrOpcode::kJSLoadContext: {
+ ContextAccess const& access = ContextAccessOf(node->op());
+ if (access.index() != Context::NATIVE_CONTEXT_INDEX) {
+ return MaybeHandle<Context>();
+ }
+ // Skip over the intermediate contexts; we're only interested in the
+ // very last context in the context chain anyway.
+ node = NodeProperties::GetContextInput(node);
+ break;
+ }
case IrOpcode::kJSCreateBlockContext:
case IrOpcode::kJSCreateCatchContext:
case IrOpcode::kJSCreateFunctionContext:
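
The new kJSLoadContext case lets specialization look through context loads: only a load of the last slot in the chain (Context::NATIVE_CONTEXT_INDEX) can resolve to the native context, and the walk continues through the node's context input until it bottoms out. A runtime analogue of what the compile-time walk computes (a sketch; this Context is a toy struct, not v8::internal::Context):

    // Toy context chain; each context links to the one it was created in.
    struct Context {
      Context* previous;
      bool is_native;  // stands in for the slot at NATIVE_CONTEXT_INDEX
    };

    // Follow previous links past block/catch/function/with contexts.
    Context* FindNativeContext(Context* ctx) {
      while (ctx != nullptr && !ctx->is_native) {
        ctx = ctx->previous;
      }
      return ctx;  // nullptr means "unknown at compile time"
    }
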
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index a97fdfa54b..c78e15e25b 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -128,7 +128,6 @@
#define JS_CONTEXT_OP_LIST(V) \
V(JSLoadContext) \
V(JSStoreContext) \
- V(JSLoadDynamic) \
V(JSCreateFunctionContext) \
V(JSCreateCatchContext) \
V(JSCreateWithContext) \
@@ -202,6 +201,7 @@
V(StoreBuffer) \
V(StoreElement) \
V(ObjectIsNumber) \
+ V(ObjectIsReceiver) \
V(ObjectIsSmi)
// Opcodes for Machine-level operators.
@@ -227,6 +227,7 @@
MACHINE_COMPARE_BINOP_LIST(V) \
V(Load) \
V(Store) \
+ V(StackSlot) \
V(Word32And) \
V(Word32Or) \
V(Word32Xor) \
@@ -236,6 +237,7 @@
V(Word32Ror) \
V(Word32Clz) \
V(Word32Ctz) \
+ V(Word32ReverseBits) \
V(Word32Popcnt) \
V(Word64Popcnt) \
V(Word64And) \
@@ -247,6 +249,7 @@
V(Word64Ror) \
V(Word64Clz) \
V(Word64Ctz) \
+ V(Word64ReverseBits) \
V(Int32Add) \
V(Int32AddWithOverflow) \
V(Int32Sub) \
@@ -270,6 +273,8 @@
V(ChangeFloat32ToFloat64) \
V(ChangeFloat64ToInt32) \
V(ChangeFloat64ToUint32) \
+ V(TruncateFloat32ToInt32) \
+ V(TruncateFloat32ToUint32) \
V(TryTruncateFloat32ToInt64) \
V(TryTruncateFloat64ToInt64) \
V(TryTruncateFloat32ToUint64) \
@@ -281,8 +286,10 @@
V(TruncateFloat64ToFloat32) \
V(TruncateFloat64ToInt32) \
V(TruncateInt64ToInt32) \
+ V(RoundInt32ToFloat32) \
V(RoundInt64ToFloat32) \
V(RoundInt64ToFloat64) \
+ V(RoundUint32ToFloat32) \
V(RoundUint64ToFloat32) \
V(RoundUint64ToFloat64) \
V(BitcastFloat32ToInt32) \
@@ -321,6 +328,7 @@
V(Float64InsertHighWord32) \
V(LoadStackPointer) \
V(LoadFramePointer) \
+ V(LoadParentFramePointer) \
V(CheckedLoad) \
V(CheckedStore)
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index bd704a3650..1ee31d5ba5 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -55,7 +55,6 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
case IrOpcode::kJSCreateLiteralRegExp:
// Context operations
- case IrOpcode::kJSLoadDynamic:
case IrOpcode::kJSCreateScriptContext:
// Conversions
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 2204424706..21c34fc77e 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -30,8 +30,8 @@
#include "src/compiler/instruction-selector.h"
#include "src/compiler/js-builtin-reducer.h"
#include "src/compiler/js-call-reducer.h"
-#include "src/compiler/js-context-relaxation.h"
#include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-create-lowering.h"
#include "src/compiler/js-frame-specialization.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-global-object-specialization.h"
@@ -276,11 +276,8 @@ class PipelineData {
info()->isolate(), instruction_zone(), instruction_blocks);
}
- void InitializeRegisterAllocationData(const RegisterConfiguration* config,
- CallDescriptor* descriptor,
- const char* debug_name) {
+ void InitializeFrameData(CallDescriptor* descriptor) {
DCHECK(frame_ == nullptr);
- DCHECK(register_allocation_data_ == nullptr);
int fixed_frame_size = 0;
if (descriptor != nullptr) {
fixed_frame_size = (descriptor->IsCFunctionCall())
@@ -289,6 +286,12 @@ class PipelineData {
: StandardFrameConstants::kFixedSlotCount;
}
frame_ = new (instruction_zone()) Frame(fixed_frame_size, descriptor);
+ }
+
+ void InitializeRegisterAllocationData(const RegisterConfiguration* config,
+ CallDescriptor* descriptor,
+ const char* debug_name) {
+ DCHECK(register_allocation_data_ == nullptr);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
sequence(), debug_name);
@@ -512,7 +515,7 @@ struct GraphBuilderPhase {
if (data->info()->shared_info()->HasBytecodeArray()) {
BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
data->jsgraph());
- succeeded = graph_builder.CreateGraph(stack_check);
+ succeeded = graph_builder.CreateGraph();
} else {
AstGraphBuilderWithPositions graph_builder(
temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
@@ -536,7 +539,7 @@ struct InliningPhase {
data->common());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
- JSCallReducer call_reducer(data->jsgraph(),
+ JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
data->info()->is_deoptimization_enabled()
? JSCallReducer::kDeoptimizationEnabled
: JSCallReducer::kNoFlags,
@@ -549,17 +552,19 @@ struct InliningPhase {
JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
data->jsgraph());
JSGlobalObjectSpecialization global_object_specialization(
- &graph_reducer, data->jsgraph(),
- data->info()->is_deoptimization_enabled()
- ? JSGlobalObjectSpecialization::kDeoptimizationEnabled
- : JSGlobalObjectSpecialization::kNoFlags,
- data->native_context(), data->info()->dependencies());
+ &graph_reducer, data->jsgraph(), data->native_context(),
+ data->info()->dependencies());
+ JSNativeContextSpecialization::Flags flags =
+ JSNativeContextSpecialization::kNoFlags;
+ if (data->info()->is_bailout_on_uninitialized()) {
+ flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
+ }
+ if (data->info()->is_deoptimization_enabled()) {
+ flags |= JSNativeContextSpecialization::kDeoptimizationEnabled;
+ }
JSNativeContextSpecialization native_context_specialization(
- &graph_reducer, data->jsgraph(),
- data->info()->is_deoptimization_enabled()
- ? JSNativeContextSpecialization::kDeoptimizationEnabled
- : JSNativeContextSpecialization::kNoFlags,
- data->native_context(), data->info()->dependencies(), temp_zone);
+ &graph_reducer, data->jsgraph(), flags, data->native_context(),
+ data->info()->dependencies(), temp_zone);
JSInliningHeuristic inlining(&graph_reducer,
data->info()->is_inlining_enabled()
? JSInliningHeuristic::kGeneralInlining
@@ -570,7 +575,9 @@ struct InliningPhase {
if (data->info()->is_frame_specializing()) {
AddReducer(data, &graph_reducer, &frame_specialization);
}
- AddReducer(data, &graph_reducer, &global_object_specialization);
+ if (data->info()->is_deoptimization_enabled()) {
+ AddReducer(data, &graph_reducer, &global_object_specialization);
+ }
AddReducer(data, &graph_reducer, &native_context_specialization);
AddReducer(data, &graph_reducer, &context_specialization);
AddReducer(data, &graph_reducer, &call_reducer);
@@ -610,6 +617,13 @@ struct TypedLoweringPhase {
data->common());
LoadElimination load_elimination(&graph_reducer);
JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
+ MaybeHandle<LiteralsArray> literals_array =
+ data->info()->is_native_context_specializing()
+ ? handle(data->info()->closure()->literals(), data->isolate())
+ : MaybeHandle<LiteralsArray>();
+ JSCreateLowering create_lowering(
+ &graph_reducer, data->info()->dependencies(), data->jsgraph(),
+ literals_array, temp_zone);
JSTypedLowering::Flags typed_lowering_flags = JSTypedLowering::kNoFlags;
if (data->info()->is_deoptimization_enabled()) {
typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
@@ -629,6 +643,9 @@ struct TypedLoweringPhase {
data->common(), data->machine());
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &builtin_reducer);
+ if (data->info()->is_deoptimization_enabled()) {
+ AddReducer(data, &graph_reducer, &create_lowering);
+ }
AddReducer(data, &graph_reducer, &typed_lowering);
AddReducer(data, &graph_reducer, &intrinsic_lowering);
AddReducer(data, &graph_reducer, &load_elimination);
@@ -664,8 +681,11 @@ struct EscapeAnalysisPhase {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
&escape_analysis, temp_zone);
+ escape_reducer.SetExistsVirtualAllocate(
+ escape_analysis.ExistsVirtualAllocate());
AddReducer(data, &graph_reducer, &escape_reducer);
graph_reducer.ReduceGraph();
+ escape_reducer.VerifyReplacement();
}
};
@@ -779,7 +799,6 @@ struct GenericLoweringPhase {
void Run(PipelineData* data, Zone* temp_zone) {
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
- JSContextRelaxation context_relaxing;
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
@@ -789,7 +808,6 @@ struct GenericLoweringPhase {
SelectLowering select_lowering(data->jsgraph()->graph(),
data->jsgraph()->common());
TailCallOptimization tco(data->common(), data->graph());
- AddReducer(data, &graph_reducer, &context_relaxing);
AddReducer(data, &graph_reducer, &dead_code_elimination);
AddReducer(data, &graph_reducer, &common_reducer);
AddReducer(data, &graph_reducer, &generic_lowering);
@@ -820,7 +838,7 @@ struct InstructionSelectionPhase {
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
InstructionSelector selector(
temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
- data->schedule(), data->source_positions(),
+ data->schedule(), data->source_positions(), data->frame(),
data->info()->is_source_positions_enabled()
? InstructionSelector::kAllSourcePositions
: InstructionSelector::kCallSourcePositions);
@@ -986,9 +1004,10 @@ struct FrameElisionPhase {
struct JumpThreadingPhase {
static const char* phase_name() { return "jump threading"; }
- void Run(PipelineData* data, Zone* temp_zone) {
+ void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) {
ZoneVector<RpoNumber> result(temp_zone);
- if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence())) {
+ if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence(),
+ frame_at_start)) {
JumpThreading::ApplyForwarding(result, data->sequence());
}
}
@@ -1060,13 +1079,6 @@ void Pipeline::RunPrintAndVerify(const char* phase, bool untyped) {
Handle<Code> Pipeline::GenerateCode() {
- // TODO(mstarzinger): This is just a temporary hack to make TurboFan work,
- // the correct solution is to restore the context register after invoking
- // builtins from full-codegen.
- if (Context::IsJSBuiltin(isolate()->native_context(), info()->closure())) {
- return Handle<Code>::null();
- }
-
ZonePool zone_pool;
base::SmartPointer<PipelineStatistics> pipeline_statistics;
@@ -1080,13 +1092,14 @@ Handle<Code> Pipeline::GenerateCode() {
if (json_file != nullptr) {
OFStream json_of(json_file);
Handle<Script> script = info()->script();
- FunctionLiteral* function = info()->literal();
base::SmartArrayPointer<char> function_name = info()->GetDebugName();
int pos = info()->shared_info()->start_position();
json_of << "{\"function\":\"" << function_name.get()
<< "\", \"sourcePosition\":" << pos << ", \"source\":\"";
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ if (info()->has_literal() && !script->IsUndefined() &&
+ !script->source()->IsUndefined()) {
DisallowHeapAllocation no_allocation;
+ FunctionLiteral* function = info()->literal();
int start = function->start_position();
int len = function->end_position() - start;
String::SubStringRange source(String::cast(script->source()), start,
@@ -1222,10 +1235,9 @@ Handle<Code> Pipeline::GenerateCode() {
Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
CallDescriptor* call_descriptor,
Graph* graph, Schedule* schedule,
- Code::Kind kind,
+ Code::Flags flags,
const char* debug_name) {
- CompilationInfo info(debug_name, isolate, graph->zone());
- info.set_output_code_kind(kind);
+ CompilationInfo info(debug_name, isolate, graph->zone(), flags);
// Construct a pipeline for scheduling and code generation.
ZonePool zone_pool;
@@ -1296,6 +1308,7 @@ bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
PipelineData data(&zone_pool, &info, sequence);
Pipeline pipeline(&info);
pipeline.data_ = &data;
+ pipeline.data_->InitializeFrameData(nullptr);
pipeline.AllocateRegisters(config, nullptr, run_verifier);
return !data.compilation_failed();
}
@@ -1318,6 +1331,7 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
data->InitializeInstructionSequence();
+ data->InitializeFrameData(call_descriptor);
// Select and schedule instructions covering the scheduled graph.
Linkage linkage(call_descriptor);
Run<InstructionSelectionPhase>(&linkage);
@@ -1339,6 +1353,7 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
BeginPhaseKind("register allocation");
bool run_verifier = FLAG_turbo_verify_allocation;
+
// Allocate registers.
AllocateRegisters(
RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
@@ -1349,10 +1364,16 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
}
BeginPhaseKind("code generation");
-
+ // TODO(mtrofin): move this off to the register allocator.
+ bool generate_frame_at_start =
+ !FLAG_turbo_frame_elision || !data_->info()->IsStub() ||
+ !data_->frame()->needs_frame() ||
+ data_->sequence()->instruction_blocks().front()->needs_frame() ||
+ linkage.GetIncomingDescriptor()->CalleeSavedFPRegisters() != 0 ||
+ linkage.GetIncomingDescriptor()->CalleeSavedRegisters() != 0;
// Optimize jumps.
if (FLAG_turbo_jt) {
- Run<JumpThreadingPhase>();
+ Run<JumpThreadingPhase>(generate_frame_at_start);
}
// Generate final machine code.
@@ -1456,7 +1477,8 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
Run<MergeSplintersPhase>();
}
- if (FLAG_turbo_frame_elision) {
+ // We plan to enable frame elision only for stubs and bytecode handlers.
+ if (FLAG_turbo_frame_elision && info()->IsStub()) {
Run<LocateSpillSlotsPhase>();
Run<FrameElisionPhase>();
}
@@ -1492,6 +1514,8 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
data->DeleteRegisterAllocationZone();
}
+Isolate* Pipeline::isolate() const { return info()->isolate(); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
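
generate_frame_at_start collapses several independent reasons to keep constructing the frame eagerly; restating it as a predicate makes the intent easier to audit (a sketch mirroring the condition above; the parameter names are ours):

    // Any one of these reasons forces the frame to be built at entry:
    // elision disabled, not a stub, no frame needed anyway, the entry
    // block already needing a frame, or callee-saved registers in use.
    bool GenerateFrameAtStart(bool turbo_frame_elision, bool is_stub,
                              bool needs_frame,
                              bool entry_block_needs_frame,
                              bool saves_callee_fp_regs,
                              bool saves_callee_gp_regs) {
      return !turbo_frame_elision || !is_stub || !needs_frame ||
             entry_block_needs_frame || saves_callee_fp_regs ||
             saves_callee_gp_regs;
    }
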
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index af94018f07..edb8191862 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -7,11 +7,12 @@
// Clients of this interface shouldn't depend on lots of compiler internals.
// Do not include anything from src/compiler here!
-#include "src/compiler.h"
+#include "src/objects.h"
namespace v8 {
namespace internal {
+class CompilationInfo;
class RegisterConfiguration;
namespace compiler {
@@ -35,7 +36,7 @@ class Pipeline {
static Handle<Code> GenerateCodeForCodeStub(Isolate* isolate,
CallDescriptor* call_descriptor,
Graph* graph, Schedule* schedule,
- Code::Kind kind,
+ Code::Flags flags,
const char* debug_name);
// Run the pipeline on a machine graph and generate code. If {schedule} is
@@ -57,23 +58,27 @@ class Pipeline {
Schedule* schedule = nullptr);
private:
- CompilationInfo* info_;
- PipelineData* data_;
-
// Helpers for executing pipeline phases.
template <typename Phase>
void Run();
template <typename Phase, typename Arg0>
void Run(Arg0 arg_0);
-
- CompilationInfo* info() const { return info_; }
- Isolate* isolate() { return info_->isolate(); }
+ template <typename Phase, typename Arg0, typename Arg1>
+ void Run(Arg0 arg_0, Arg1 arg_1);
void BeginPhaseKind(const char* phase_kind);
void RunPrintAndVerify(const char* phase, bool untyped = false);
Handle<Code> ScheduleAndGenerateCode(CallDescriptor* call_descriptor);
void AllocateRegisters(const RegisterConfiguration* config,
CallDescriptor* descriptor, bool run_verifier);
+
+ CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const;
+
+ CompilationInfo* const info_;
+ PipelineData* data_;
+
+ DISALLOW_COPY_AND_ASSIGN(Pipeline);
};
} // namespace compiler
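
The new two-argument Run overload exists so JumpThreadingPhase can receive frame_at_start (see pipeline.cc above). Its definition presumably mirrors the zero- and one-argument forms; a sketch under that assumption (PipelineRunScope is assumed from the surrounding implementation, not shown in this diff):

    template <typename Phase, typename Arg0, typename Arg1>
    void Pipeline::Run(Arg0 arg_0, Arg1 arg_1) {
      PipelineRunScope scope(data_, Phase::phase_name());
      Phase phase;
      phase.Run(data_, scope.zone(), arg_0, arg_1);
    }
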
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 154cd644bd..7fc6dd9d07 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -167,6 +167,19 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
: OutOfLineCode(gen),
object_(object),
offset_(offset),
+ offset_immediate_(0),
+ value_(value),
+ scratch0_(scratch0),
+ scratch1_(scratch1),
+ mode_(mode) {}
+
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
+ Register value, Register scratch0, Register scratch1,
+ RecordWriteMode mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ offset_(no_reg),
+ offset_immediate_(offset),
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
@@ -176,24 +189,39 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
- if (mode_ > RecordWriteMode::kValueIsMap) {
- __ CheckPageFlag(value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingMask, eq,
- exit());
- }
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+ : OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
- // TODO(turbofan): Once we get frame elision working, we need to save
- // and restore lr properly here if the frame was elided.
+ if (!frame()->needs_frame()) {
+ // We need to save and restore lr if the frame was elided.
+ __ mflr(scratch1_);
+ __ Push(scratch1_);
+ }
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- EMIT_REMEMBERED_SET, save_fp_mode);
- __ add(scratch1_, object_, offset_);
+ remembered_set_action, save_fp_mode);
+ if (offset_.is(no_reg)) {
+ __ addi(scratch1_, object_, Operand(offset_immediate_));
+ } else {
+ DCHECK_EQ(0, offset_immediate_);
+ __ add(scratch1_, object_, offset_);
+ }
__ CallStub(&stub);
+ if (!frame()->needs_frame()) {
+ // We need to save and restore lr if the frame was elided.
+ __ Pop(scratch1_);
+ __ mtlr(scratch1_);
+ }
}
private:
Register const object_;
Register const offset_;
+ int32_t const offset_immediate_; // Valid if offset_.is(no_reg).
Register const value_;
Register const scratch0_;
Register const scratch1_;
@@ -651,13 +679,7 @@ void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
}
if (frame()->needs_frame()) {
- if (FLAG_enable_embedded_constant_pool) {
- __ LoadP(kConstantPoolRegister,
- MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
- }
- __ LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ mtlr(r0);
+ __ RestoreFrameStateForTailCall();
}
frame_access_state()->SetFrameAccessToSP();
}
@@ -740,13 +762,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
- case kArchLazyBailout: {
- v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
- EnsureSpaceForLazyDeopt();
- RecordCallPosition(instr);
- break;
- }
case kArchPrepareCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, kScratchReg);
@@ -807,6 +822,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ mr(i.OutputRegister(), fp);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->frame()->needs_frame()) {
+ __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
+ } else {
+ __ mr(i.OutputRegister(), fp);
+ }
+ break;
case kArchTruncateDoubleToI:
// TODO(mbrandy): move slow call to stub out of line.
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
@@ -816,19 +838,38 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
- Register offset = i.InputRegister(1);
Register value = i.InputRegister(2);
Register scratch0 = i.TempRegister(0);
Register scratch1 = i.TempRegister(1);
- auto ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
- scratch0, scratch1, mode);
- __ StorePX(value, MemOperand(object, offset));
+ OutOfLineRecordWrite* ool;
+
+ AddressingMode addressing_mode =
+ AddressingModeField::decode(instr->opcode());
+ if (addressing_mode == kMode_MRI) {
+ int32_t offset = i.InputInt32(1);
+ ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+ scratch0, scratch1, mode);
+ __ StoreP(value, MemOperand(object, offset));
+ } else {
+ DCHECK_EQ(kMode_MRR, addressing_mode);
+ Register offset(i.InputRegister(1));
+ ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
+ scratch0, scratch1, mode);
+ __ StorePX(value, MemOperand(object, offset));
+ }
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
__ bind(ool->exit());
break;
}
+ case kArchStackSlot: {
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ __ addi(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
+ Operand(offset.offset()));
+ break;
+ }
case kPPC_And:
if (HasRegisterInput(instr, 1)) {
__ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
@@ -1194,10 +1235,19 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
#endif
+ case kPPC_Int32ToFloat32:
+ __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
case kPPC_Int32ToDouble:
__ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kPPC_Uint32ToFloat32:
+ __ ConvertUnsignedIntToFloat(i.InputRegister(0),
+ i.OutputDoubleRegister());
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
case kPPC_Uint32ToDouble:
__ ConvertUnsignedIntToDouble(i.InputRegister(0),
i.OutputDoubleRegister());
@@ -1581,8 +1631,6 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- // TODO(titzer): cannot address target function == local #-1
- __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
index a3bf80e503..877ebb5c12 100644
--- a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -82,7 +82,9 @@ namespace compiler {
V(PPC_Int64ToDouble) \
V(PPC_Uint64ToFloat32) \
V(PPC_Uint64ToDouble) \
+ V(PPC_Int32ToFloat32) \
V(PPC_Int32ToDouble) \
+ V(PPC_Uint32ToFloat32) \
V(PPC_Uint32ToDouble) \
V(PPC_Float32ToDouble) \
V(PPC_DoubleToInt32) \
@@ -114,7 +116,6 @@ namespace compiler {
V(PPC_StoreFloat32) \
V(PPC_StoreDouble)
-
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
diff --git a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
index fc90cdd628..fd1df6a495 100644
--- a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -81,7 +81,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_Int64ToDouble:
case kPPC_Uint64ToFloat32:
case kPPC_Uint64ToDouble:
+ case kPPC_Int32ToFloat32:
case kPPC_Int32ToDouble:
+ case kPPC_Uint32ToFloat32:
case kPPC_Uint32ToDouble:
case kPPC_Float32ToDouble:
case kPPC_DoubleToInt32:
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index f6ebbdf5d6..244e6f44c5 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -200,6 +200,7 @@ void InstructionSelector::VisitLoad(Node* node) {
#else
case MachineRepresentation::kWord64: // Fall through.
#endif
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -227,13 +228,25 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
- // TODO(ppc): I guess this could be done in a better way.
if (write_barrier_kind != kNoWriteBarrier) {
DCHECK_EQ(MachineRepresentation::kTagged, rep);
+ AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(offset);
+ // OutOfLineRecordWrite uses the offset in an 'add' instruction as well as
+ // for the store itself, so we must check compatibility with both.
+ if (g.CanBeImmediate(offset, kInt16Imm)
+#if V8_TARGET_ARCH_PPC64
+ && g.CanBeImmediate(offset, kInt16Imm_4ByteAligned)
+#endif
+ ) {
+ inputs[input_count++] = g.UseImmediate(offset);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(offset);
+ addressing_mode = kMode_MRR;
+ }
inputs[input_count++] = (write_barrier_kind == kMapWriteBarrier)
? g.UseRegister(value)
: g.UseUniqueRegister(value);
@@ -255,6 +268,7 @@ void InstructionSelector::VisitStore(Node* node) {
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t const temp_count = arraysize(temps);
InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
@@ -289,6 +303,7 @@ void InstructionSelector::VisitStore(Node* node) {
#else
case MachineRepresentation::kWord64: // Fall through.
#endif
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -340,6 +355,7 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
#if !V8_TARGET_ARCH_PPC64
case MachineRepresentation::kWord64: // Fall through.
#endif
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -385,6 +401,7 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
#if !V8_TARGET_ARCH_PPC64
case MachineRepresentation::kWord64: // Fall through.
#endif
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -825,6 +842,14 @@ void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
#endif
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+#endif
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
}
@@ -940,6 +965,16 @@ void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
}
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ VisitRR(this, kPPC_Int32ToFloat32, node);
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ VisitRR(this, kPPC_Uint32ToFloat32, node);
+}
+
+
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
VisitRR(this, kPPC_Int32ToDouble, node);
}
@@ -1010,6 +1045,16 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
}
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ VisitRR(this, kPPC_DoubleToInt32, node);
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ VisitRR(this, kPPC_DoubleToUint32, node);
+}
+
+
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
// TODO(mbrandy): inspect input to see if nop is appropriate.
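
The write-barrier store path above now picks its addressing mode from the
offset's encodability. A sketch of the two range tests involved, assuming
PPC's signed 16-bit D-form displacement and the 4-byte-alignment constraint
of the 64-bit DS-form loads/stores (illustrative, not V8 code):

    #include <cstdint>

    // kInt16Imm: fits a signed 16-bit immediate field.
    bool FitsInt16(int64_t v) { return v >= -32768 && v <= 32767; }

    // kInt16Imm_4ByteAligned: additionally requires the low two bits clear,
    // since DS-form instructions encode the displacement in units of 4 bytes.
    bool FitsInt16Aligned4(int64_t v) {
      return FitsInt16(v) && (v & 3) == 0;
    }

If both tests pass, the offset is encoded directly in the instruction
(kMode_MRI, register + immediate); otherwise it stays in a register
(kMode_MRR, register + register), since OutOfLineRecordWrite reuses the
offset in an 'add' as well as in the store itself.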
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 4df2bde448..0d4b8cb200 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -63,7 +63,7 @@ void RawMachineAssembler::Goto(RawMachineLabel* label) {
void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
RawMachineLabel* false_val) {
DCHECK(current_block_ != schedule()->end());
- Node* branch = AddNode(common()->Branch(), condition);
+ Node* branch = MakeNode(common()->Branch(), 1, &condition);
schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
current_block_ = nullptr;
}
@@ -152,6 +152,19 @@ Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* desc,
return AddNode(common()->Call(desc), input_count, buffer);
}
+Node* RawMachineAssembler::CallRuntime0(Runtime::FunctionId function,
+ Node* context) {
+ CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, 0, Operator::kNoProperties, CallDescriptor::kNoFlags);
+ int return_count = static_cast<int>(descriptor->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(0);
+
+ return AddNode(common()->Call(descriptor), centry, ref, arity, context);
+}
Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
Node* arg1, Node* context) {
@@ -183,6 +196,21 @@ Node* RawMachineAssembler::CallRuntime2(Runtime::FunctionId function,
context);
}
+Node* RawMachineAssembler::CallRuntime3(Runtime::FunctionId function,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* context) {
+ CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, 3, Operator::kNoProperties, CallDescriptor::kNoFlags);
+ int return_count = static_cast<int>(descriptor->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(3);
+
+ return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, ref,
+ arity, context);
+}
Node* RawMachineAssembler::CallRuntime4(Runtime::FunctionId function,
Node* arg1, Node* arg2, Node* arg3,
@@ -266,6 +294,51 @@ Node* RawMachineAssembler::TailCallRuntime2(Runtime::FunctionId function,
return tail_call;
}
+Node* RawMachineAssembler::TailCallRuntime3(Runtime::FunctionId function,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* context) {
+ const int kArity = 3;
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, kArity, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(kArity);
+
+ Node* nodes[] = {centry, arg1, arg2, arg3, ref, arity, context};
+ Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+ NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ current_block_ = nullptr;
+ return tail_call;
+}
+
+Node* RawMachineAssembler::TailCallRuntime4(Runtime::FunctionId function,
+ Node* arg1, Node* arg2, Node* arg3,
+ Node* arg4, Node* context) {
+ const int kArity = 4;
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, kArity, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(kArity);
+
+ Node* nodes[] = {centry, arg1, arg2, arg3, arg4, ref, arity, context};
+ Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
+
+ NodeProperties::MergeControlToEnd(graph(), common(), tail_call);
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ current_block_ = nullptr;
+ return tail_call;
+}
Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
Node* function) {
@@ -354,9 +427,24 @@ BasicBlock* RawMachineAssembler::CurrentBlock() {
return current_block_;
}
+Node* RawMachineAssembler::Phi(MachineRepresentation rep, int input_count,
+ Node* const* inputs) {
+ Node** buffer = new (zone()->New(sizeof(Node*) * (input_count + 1)))
+ Node*[input_count + 1];
+ std::copy(inputs, inputs + input_count, buffer);
+ buffer[input_count] = graph()->start();
+ return AddNode(common()->Phi(rep, input_count), input_count + 1, buffer);
+}
+
+void RawMachineAssembler::AppendPhiInput(Node* phi, Node* new_input) {
+ const Operator* op = phi->op();
+ const Operator* new_op = common()->ResizeMergeOrPhi(op, phi->InputCount());
+ phi->InsertInput(zone(), phi->InputCount() - 1, new_input);
+ NodeProperties::ChangeOp(phi, new_op);
+}
Node* RawMachineAssembler::AddNode(const Operator* op, int input_count,
- Node** inputs) {
+ Node* const* inputs) {
DCHECK_NOT_NULL(schedule_);
DCHECK_NOT_NULL(current_block_);
Node* node = MakeNode(op, input_count, inputs);
@@ -364,9 +452,8 @@ Node* RawMachineAssembler::AddNode(const Operator* op, int input_count,
return node;
}
-
Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
- Node** inputs) {
+ Node* const* inputs) {
// The raw machine assembler nodes do not have effect and control inputs,
// so we disable checking input counts here.
return graph()->NewNodeUnchecked(op, input_count, inputs);
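
The CallRuntimeN/TailCallRuntimeN helpers added above all build the same
input layout around the user-visible arguments: the CEntry stub code object
(the trampoline into the C++ runtime) first, then arg1..argN, then an
external reference to the runtime function, an Int32Constant holding the
arity, and finally the context. A sketch of that ordering (illustrative
only, not the V8 API):

    #include <string>
    #include <vector>

    // Illustrative only: the input order for an arity-N runtime call node.
    std::vector<std::string> RuntimeCallInputs(int arity) {
      std::vector<std::string> inputs = {"centry"};
      for (int i = 1; i <= arity; ++i)
        inputs.push_back("arg" + std::to_string(i));
      inputs.push_back("ref");      // ExternalConstant of the runtime function
      inputs.push_back("arity");    // Int32Constant(arity)
      inputs.push_back("context");
      return inputs;
    }

The tail-call variants differ only in using a kSupportsTailCalls descriptor
and in terminating the current block, since control does not return.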
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 5c232ed1d1..a0cb7a0bfb 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -79,6 +79,9 @@ class RawMachineAssembler {
Node* Int32Constant(int32_t value) {
return AddNode(common()->Int32Constant(value));
}
+ Node* StackSlot(MachineRepresentation rep) {
+ return AddNode(machine()->StackSlot(rep));
+ }
Node* Int64Constant(int64_t value) {
return AddNode(common()->Int64Constant(value));
}
@@ -147,7 +150,7 @@ class RawMachineAssembler {
return AddNode(machine()->WordEqual(), a, b);
}
Node* WordNotEqual(Node* a, Node* b) {
- return WordBinaryNot(WordEqual(a, b));
+ return Word32BinaryNot(WordEqual(a, b));
}
Node* WordNot(Node* a) {
if (machine()->Is32()) {
@@ -156,13 +159,6 @@ class RawMachineAssembler {
return Word64Not(a);
}
}
- Node* WordBinaryNot(Node* a) {
- if (machine()->Is32()) {
- return Word32BinaryNot(a);
- } else {
- return Word64BinaryNot(a);
- }
- }
Node* Word32And(Node* a, Node* b) {
return AddNode(machine()->Word32And(), a, b);
@@ -221,10 +217,9 @@ class RawMachineAssembler {
return AddNode(machine()->Word64Equal(), a, b);
}
Node* Word64NotEqual(Node* a, Node* b) {
- return Word64BinaryNot(Word64Equal(a, b));
+ return Word32BinaryNot(Word64Equal(a, b));
}
Node* Word64Not(Node* a) { return Word64Xor(a, Int64Constant(-1)); }
- Node* Word64BinaryNot(Node* a) { return Word64Equal(a, Int64Constant(0)); }
Node* Int32Add(Node* a, Node* b) {
return AddNode(machine()->Int32Add(), a, b);
@@ -275,6 +270,10 @@ class RawMachineAssembler {
Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
return Int32LessThanOrEqual(b, a);
}
+ Node* Uint32GreaterThan(Node* a, Node* b) { return Uint32LessThan(b, a); }
+ Node* Uint32GreaterThanOrEqual(Node* a, Node* b) {
+ return Uint32LessThanOrEqual(b, a);
+ }
Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }
Node* Int64Add(Node* a, Node* b) {
@@ -315,6 +314,10 @@ class RawMachineAssembler {
Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
return Int64LessThanOrEqual(b, a);
}
+ Node* Uint64GreaterThan(Node* a, Node* b) { return Uint64LessThan(b, a); }
+ Node* Uint64GreaterThanOrEqual(Node* a, Node* b) {
+ return Uint64LessThanOrEqual(b, a);
+ }
Node* Uint64Div(Node* a, Node* b) {
return AddNode(machine()->Uint64Div(), a, b);
}
@@ -339,6 +342,19 @@ class RawMachineAssembler {
#undef INTPTR_BINOP
+#define UINTPTR_BINOP(prefix, name) \
+ Node* UintPtr##name(Node* a, Node* b) { \
+ return kPointerSize == 8 ? prefix##64##name(a, b) \
+ : prefix##32##name(a, b); \
+ }
+
+ UINTPTR_BINOP(Uint, LessThan);
+ UINTPTR_BINOP(Uint, LessThanOrEqual);
+ UINTPTR_BINOP(Uint, GreaterThanOrEqual);
+ UINTPTR_BINOP(Uint, GreaterThan);
+
+#undef UINTPTR_BINOP
+
Node* Float32Add(Node* a, Node* b) {
return AddNode(machine()->Float32Add(), a, b);
}
@@ -363,7 +379,7 @@ class RawMachineAssembler {
return AddNode(machine()->Float32Equal(), a, b);
}
Node* Float32NotEqual(Node* a, Node* b) {
- return WordBinaryNot(Float32Equal(a, b));
+ return Word32BinaryNot(Float32Equal(a, b));
}
Node* Float32LessThan(Node* a, Node* b) {
return AddNode(machine()->Float32LessThan(), a, b);
@@ -403,7 +419,7 @@ class RawMachineAssembler {
return AddNode(machine()->Float64Equal(), a, b);
}
Node* Float64NotEqual(Node* a, Node* b) {
- return WordBinaryNot(Float64Equal(a, b));
+ return Word32BinaryNot(Float64Equal(a, b));
}
Node* Float64LessThan(Node* a, Node* b) {
return AddNode(machine()->Float64LessThan(), a, b);
@@ -432,10 +448,11 @@ class RawMachineAssembler {
Node* ChangeFloat64ToUint32(Node* a) {
return AddNode(machine()->ChangeFloat64ToUint32(), a);
}
- Node* TruncateFloat32ToInt64(Node* a) {
- // TODO(ahaas): Remove this function as soon as it is not used anymore in
- // WebAssembly.
- return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
+ Node* TruncateFloat32ToInt32(Node* a) {
+ return AddNode(machine()->TruncateFloat32ToInt32(), a);
+ }
+ Node* TruncateFloat32ToUint32(Node* a) {
+ return AddNode(machine()->TruncateFloat32ToUint32(), a);
}
Node* TryTruncateFloat32ToInt64(Node* a) {
return AddNode(machine()->TryTruncateFloat32ToInt64(), a);
@@ -448,11 +465,6 @@ class RawMachineAssembler {
Node* TryTruncateFloat64ToInt64(Node* a) {
return AddNode(machine()->TryTruncateFloat64ToInt64(), a);
}
- Node* TruncateFloat32ToUint64(Node* a) {
- // TODO(ahaas): Remove this function as soon as it is not used anymore in
- // WebAssembly.
- return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
- }
Node* TryTruncateFloat32ToUint64(Node* a) {
return AddNode(machine()->TryTruncateFloat32ToUint64(), a);
}
@@ -479,12 +491,18 @@ class RawMachineAssembler {
Node* TruncateInt64ToInt32(Node* a) {
return AddNode(machine()->TruncateInt64ToInt32(), a);
}
+ Node* RoundInt32ToFloat32(Node* a) {
+ return AddNode(machine()->RoundInt32ToFloat32(), a);
+ }
Node* RoundInt64ToFloat32(Node* a) {
return AddNode(machine()->RoundInt64ToFloat32(), a);
}
Node* RoundInt64ToFloat64(Node* a) {
return AddNode(machine()->RoundInt64ToFloat64(), a);
}
+ Node* RoundUint32ToFloat32(Node* a) {
+ return AddNode(machine()->RoundUint32ToFloat32(), a);
+ }
Node* RoundUint64ToFloat32(Node* a) {
return AddNode(machine()->RoundUint64ToFloat32(), a);
}
@@ -548,6 +566,9 @@ class RawMachineAssembler {
// Stack operations.
Node* LoadStackPointer() { return AddNode(machine()->LoadStackPointer()); }
Node* LoadFramePointer() { return AddNode(machine()->LoadFramePointer()); }
+ Node* LoadParentFramePointer() {
+ return AddNode(machine()->LoadParentFramePointer());
+ }
// Parameters.
Node* Parameter(size_t index);
@@ -568,11 +589,16 @@ class RawMachineAssembler {
// Call a given call descriptor and the given arguments and frame-state.
Node* CallNWithFrameState(CallDescriptor* desc, Node* function, Node** args,
Node* frame_state);
+ // Call to a runtime function with zero arguments.
+ Node* CallRuntime0(Runtime::FunctionId function, Node* context);
  // Call to a runtime function with one argument.

Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context);
// Call to a runtime function with two arguments.
Node* CallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
Node* context);
+ // Call to a runtime function with three arguments.
+ Node* CallRuntime3(Runtime::FunctionId function, Node* arg1, Node* arg2,
+ Node* arg3, Node* context);
// Call to a runtime function with four arguments.
Node* CallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
Node* arg3, Node* arg4, Node* context);
@@ -602,7 +628,12 @@ class RawMachineAssembler {
// Tail call to a runtime function with two arguments.
Node* TailCallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
Node* context);
-
+ // Tail call to a runtime function with three arguments.
+ Node* TailCallRuntime3(Runtime::FunctionId function, Node* arg1, Node* arg2,
+ Node* arg3, Node* context);
+ // Tail call to a runtime function with four arguments.
+ Node* TailCallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* context);
// ===========================================================================
// The following utility methods deal with control flow, hence might switch
@@ -622,24 +653,26 @@ class RawMachineAssembler {
// Variables.
Node* Phi(MachineRepresentation rep, Node* n1, Node* n2) {
- return AddNode(common()->Phi(rep, 2), n1, n2);
+ return AddNode(common()->Phi(rep, 2), n1, n2, graph()->start());
}
Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3) {
- return AddNode(common()->Phi(rep, 3), n1, n2, n3);
+ return AddNode(common()->Phi(rep, 3), n1, n2, n3, graph()->start());
}
Node* Phi(MachineRepresentation rep, Node* n1, Node* n2, Node* n3, Node* n4) {
- return AddNode(common()->Phi(rep, 4), n1, n2, n3, n4);
+ return AddNode(common()->Phi(rep, 4), n1, n2, n3, n4, graph()->start());
}
+ Node* Phi(MachineRepresentation rep, int input_count, Node* const* inputs);
+ void AppendPhiInput(Node* phi, Node* new_input);
// ===========================================================================
// The following generic node creation methods can be used for operators that
// are not covered by the above utility methods. There should rarely be a need
// to do that outside of testing though.
- Node* AddNode(const Operator* op, int input_count, Node** inputs);
+ Node* AddNode(const Operator* op, int input_count, Node* const* inputs);
Node* AddNode(const Operator* op) {
- return AddNode(op, 0, static_cast<Node**>(nullptr));
+ return AddNode(op, 0, static_cast<Node* const*>(nullptr));
}
template <class... TArgs>
@@ -649,7 +682,7 @@ class RawMachineAssembler {
}
private:
- Node* MakeNode(const Operator* op, int input_count, Node** inputs);
+ Node* MakeNode(const Operator* op, int input_count, Node* const* inputs);
BasicBlock* Use(RawMachineLabel* label);
BasicBlock* EnsureBlock(RawMachineLabel* label);
BasicBlock* CurrentBlock();
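
The new Phi overload and AppendPhiInput above exist for the case where the
number of predecessors is not known when the phi is created: Phi() appends
graph()->start() as a placeholder control input after the value inputs, and
AppendPhiInput() inserts each later value just before that control input,
resizing the operator via ResizeMergeOrPhi so input counts stay consistent.
A hypothetical usage sketch (illustrative, not verbatim V8 code):

    // RawMachineAssembler m(...);
    // Node* inputs[] = {first_value};
    // Node* phi = m.Phi(MachineRepresentation::kWord32, 1, inputs);
    // ... a new incoming edge is discovered later ...
    // m.AppendPhiInput(phi, value_from_new_predecessor);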
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index 463795ecf2..0b12e149e8 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -578,7 +578,26 @@ class RegisterAllocatorVerifier::BlockMaps {
CHECK_EQ(succ_vreg, pred_val.second->define_vreg);
}
if (pred_val.second->succ_vreg != kInvalidVreg) {
- CHECK_EQ(succ_vreg, pred_val.second->succ_vreg);
+ if (succ_vreg != pred_val.second->succ_vreg) {
+ // When a block introduces 2 identical phis A and B, and both are
+ // operands to other phis C and D, and we optimized the moves
+ // defining A or B such that they now appear in the block defining
+ // A and B, the back propagation will get confused when visiting
+ // upwards from C and D. The operand in the block defining A and B
+            // will be attributed to C (or D, depending on which of these is
+ // visited first).
+ CHECK(IsPhi(pred_val.second->succ_vreg));
+ CHECK(IsPhi(succ_vreg));
+ const PhiData* current_phi = GetPhi(succ_vreg);
+ const PhiData* assigned_phi = GetPhi(pred_val.second->succ_vreg);
+ CHECK_EQ(current_phi->operands.size(),
+ assigned_phi->operands.size());
+ CHECK_EQ(current_phi->definition_rpo,
+ assigned_phi->definition_rpo);
+ for (size_t i = 0; i < current_phi->operands.size(); ++i) {
+ CHECK_EQ(current_phi->operands[i], assigned_phi->operands[i]);
+ }
+ }
} else {
pred_val.second->succ_vreg = succ_vreg;
block_ids.insert(pred_rpo.ToSize());
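
The relaxation above replaces a strict vreg equality check with a structural
equivalence test: once move optimization has made two identical phis
indistinguishable, the verifier accepts either attribution as long as both
phis are defined in the same block with the same operand list. A sketch of
that equivalence test under the same assumptions (illustrative, not V8 code):

    #include <vector>

    struct PhiData {
      int definition_rpo;         // block (in RPO order) defining the phi
      std::vector<int> operands;  // virtual registers feeding the phi
    };

    // Two phis are interchangeable for back-propagation purposes iff they
    // are defined in the same block and read exactly the same operands.
    bool EquivalentPhis(const PhiData& a, const PhiData& b) {
      return a.definition_rpo == b.definition_rpo && a.operands == b.operands;
    }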
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 232ad9fec1..02ba1f17c2 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -104,6 +104,8 @@ int GetByteWidth(MachineRepresentation rep) {
case MachineRepresentation::kWord64:
case MachineRepresentation::kFloat64:
return 8;
+ case MachineRepresentation::kSimd128:
+ return 16;
case MachineRepresentation::kNone:
break;
}
@@ -113,6 +115,165 @@ int GetByteWidth(MachineRepresentation rep) {
} // namespace
+class LiveRangeBound {
+ public:
+ explicit LiveRangeBound(LiveRange* range, bool skip)
+ : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
+ DCHECK(!range->IsEmpty());
+ }
+
+ bool CanCover(LifetimePosition position) {
+ return start_ <= position && position < end_;
+ }
+
+ LiveRange* const range_;
+ const LifetimePosition start_;
+ const LifetimePosition end_;
+ const bool skip_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
+};
+
+
+struct FindResult {
+ LiveRange* cur_cover_;
+ LiveRange* pred_cover_;
+};
+
+
+class LiveRangeBoundArray {
+ public:
+ LiveRangeBoundArray() : length_(0), start_(nullptr) {}
+
+ bool ShouldInitialize() { return start_ == nullptr; }
+
+ void Initialize(Zone* zone, TopLevelLiveRange* range) {
+ length_ = range->GetChildCount();
+
+ start_ = zone->NewArray<LiveRangeBound>(length_);
+ LiveRangeBound* curr = start_;
+ // Normally, spilled ranges do not need connecting moves, because the spill
+ // location has been assigned at definition. For ranges spilled in deferred
+ // blocks, that is not the case, so we need to connect the spilled children.
+ for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
+ new (curr) LiveRangeBound(i, i->spilled());
+ }
+ }
+
+ LiveRangeBound* Find(const LifetimePosition position) const {
+ size_t left_index = 0;
+ size_t right_index = length_;
+ while (true) {
+ size_t current_index = left_index + (right_index - left_index) / 2;
+ DCHECK(right_index > current_index);
+ LiveRangeBound* bound = &start_[current_index];
+ if (bound->start_ <= position) {
+ if (position < bound->end_) return bound;
+ DCHECK(left_index < current_index);
+ left_index = current_index;
+ } else {
+ right_index = current_index;
+ }
+ }
+ }
+
+ LiveRangeBound* FindPred(const InstructionBlock* pred) {
+ LifetimePosition pred_end =
+ LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
+ return Find(pred_end);
+ }
+
+ LiveRangeBound* FindSucc(const InstructionBlock* succ) {
+ LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
+ succ->first_instruction_index());
+ return Find(succ_start);
+ }
+
+ bool FindConnectableSubranges(const InstructionBlock* block,
+ const InstructionBlock* pred,
+ FindResult* result) const {
+ LifetimePosition pred_end =
+ LifetimePosition::InstructionFromInstructionIndex(
+ pred->last_instruction_index());
+ LiveRangeBound* bound = Find(pred_end);
+ result->pred_cover_ = bound->range_;
+ LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
+ block->first_instruction_index());
+
+ if (bound->CanCover(cur_start)) {
+ // Both blocks are covered by the same range, so there is nothing to
+ // connect.
+ return false;
+ }
+ bound = Find(cur_start);
+ if (bound->skip_) {
+ return false;
+ }
+ result->cur_cover_ = bound->range_;
+ DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
+ return (result->cur_cover_ != result->pred_cover_);
+ }
+
+ private:
+ size_t length_;
+ LiveRangeBound* start_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
+};
+
+
+class LiveRangeFinder {
+ public:
+ explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
+ : data_(data),
+ bounds_length_(static_cast<int>(data_->live_ranges().size())),
+ bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
+ zone_(zone) {
+ for (int i = 0; i < bounds_length_; ++i) {
+ new (&bounds_[i]) LiveRangeBoundArray();
+ }
+ }
+
+ LiveRangeBoundArray* ArrayFor(int operand_index) {
+ DCHECK(operand_index < bounds_length_);
+ TopLevelLiveRange* range = data_->live_ranges()[operand_index];
+ DCHECK(range != nullptr && !range->IsEmpty());
+ LiveRangeBoundArray* array = &bounds_[operand_index];
+ if (array->ShouldInitialize()) {
+ array->Initialize(zone_, range);
+ }
+ return array;
+ }
+
+ private:
+ const RegisterAllocationData* const data_;
+ const int bounds_length_;
+ LiveRangeBoundArray* const bounds_;
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
+};
+
+
+typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
+
+
+struct DelayedInsertionMapCompare {
+ bool operator()(const DelayedInsertionMapKey& a,
+ const DelayedInsertionMapKey& b) const {
+ if (a.first == b.first) {
+ return a.second.Compare(b.second);
+ }
+ return a.first < b.first;
+ }
+};
+
+
+typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
+ DelayedInsertionMapCompare> DelayedInsertionMap;
+
UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
void* hint, UsePositionHintType hint_type)
@@ -734,51 +895,13 @@ void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
gap_index, operand, spill_move_insertion_locations_);
}
-
-bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
- InstructionSequence* code, const InstructionOperand& spill_operand) {
- if (!IsSpilledOnlyInDeferredBlocks()) return false;
-
- TRACE("Live Range %d will be spilled only in deferred blocks.\n", vreg());
- // If we have ranges that aren't spilled but require the operand on the stack,
- // make sure we insert the spill.
- for (const LiveRange* child = this; child != nullptr; child = child->next()) {
- if (!child->spilled() &&
- child->NextSlotPosition(child->Start()) != nullptr) {
- Instruction* instr =
- code->InstructionAt(child->Start().ToInstructionIndex());
- // Insert spill at the end to let live range connections happen at START.
- ParallelMove* move =
- instr->GetOrCreateParallelMove(Instruction::END, code->zone());
- InstructionOperand assigned = child->GetAssignedOperand();
- if (TopLevel()->has_slot_use()) {
- bool found = false;
- for (MoveOperands* move_op : *move) {
- if (move_op->IsEliminated()) continue;
- if (move_op->source().Equals(assigned) &&
- move_op->destination().Equals(spill_operand)) {
- found = true;
- break;
- }
- }
- if (found) continue;
- }
-
- move->AddMove(assigned, spill_operand);
- }
- }
-
- return true;
-}
-
-
void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
const InstructionOperand& op,
bool might_be_duplicated) {
- DCHECK_IMPLIES(op.IsConstant(), spill_move_insertion_locations() == nullptr);
+ DCHECK_IMPLIES(op.IsConstant(), GetSpillMoveInsertionLocations() == nullptr);
Zone* zone = sequence->zone();
- for (SpillMoveInsertionList* to_spill = spill_move_insertion_locations();
+ for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations();
to_spill != nullptr; to_spill = to_spill->next) {
Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
ParallelMove* move =
@@ -2321,12 +2444,15 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
const InstructionBlock* block = end_block;
// Find header of outermost loop.
- // TODO(titzer): fix redundancy below.
- while (GetContainingLoop(code(), block) != nullptr &&
- GetContainingLoop(code(), block)->rpo_number().ToInt() >
- start_block->rpo_number().ToInt()) {
- block = GetContainingLoop(code(), block);
- }
+ do {
+ const InstructionBlock* loop = GetContainingLoop(code(), block);
+ if (loop == nullptr ||
+ loop->rpo_number().ToInt() <= start_block->rpo_number().ToInt()) {
+ // No more loops or loop starts before the lifetime start.
+ break;
+ }
+ block = loop;
+ } while (true);
// We did not find any suitable outer loop. Split at the latest possible
// position unless end_block is a loop header itself.
@@ -2965,7 +3091,7 @@ void SpillSlotLocator::LocateSpillSlots() {
}
} else {
TopLevelLiveRange::SpillMoveInsertionList* spills =
- range->spill_move_insertion_locations();
+ range->GetSpillMoveInsertionLocations();
DCHECK_NOT_NULL(spills);
for (; spills != nullptr; spills = spills->next) {
code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
@@ -3032,12 +3158,10 @@ void OperandAssigner::CommitAssignment() {
// connecting move when a successor child range is spilled - because the
// spilled range picks up its value from the slot which was assigned at
// definition. For ranges that are determined to spill only in deferred
- // blocks, we let ConnectLiveRanges and ResolveControlFlow insert such
- // moves between ranges. Because of how the ranges are split around
- // deferred blocks, this amounts to spilling and filling inside such
- // blocks.
- if (!top_range->TryCommitSpillInDeferredBlock(data()->code(),
- spill_operand)) {
+ // blocks, we let ConnectLiveRanges and ResolveControlFlow find the blocks
+ // where a spill operand is expected, and then finalize by inserting the
+      // spills in the deferred blocks' dominators.
+ if (!top_range->IsSpilledOnlyInDeferredBlocks()) {
// Spill at definition if the range isn't spilled only in deferred
// blocks.
top_range->CommitSpillMoves(
@@ -3188,171 +3312,6 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
}
-namespace {
-
-class LiveRangeBound {
- public:
- explicit LiveRangeBound(const LiveRange* range, bool skip)
- : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
- DCHECK(!range->IsEmpty());
- }
-
- bool CanCover(LifetimePosition position) {
- return start_ <= position && position < end_;
- }
-
- const LiveRange* const range_;
- const LifetimePosition start_;
- const LifetimePosition end_;
- const bool skip_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
-};
-
-
-struct FindResult {
- const LiveRange* cur_cover_;
- const LiveRange* pred_cover_;
-};
-
-
-class LiveRangeBoundArray {
- public:
- LiveRangeBoundArray() : length_(0), start_(nullptr) {}
-
- bool ShouldInitialize() { return start_ == nullptr; }
-
- void Initialize(Zone* zone, const TopLevelLiveRange* const range) {
- length_ = range->GetChildCount();
-
- start_ = zone->NewArray<LiveRangeBound>(length_);
- LiveRangeBound* curr = start_;
- // Normally, spilled ranges do not need connecting moves, because the spill
- // location has been assigned at definition. For ranges spilled in deferred
- // blocks, that is not the case, so we need to connect the spilled children.
- bool spilled_in_blocks = range->IsSpilledOnlyInDeferredBlocks();
- for (const LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
- new (curr) LiveRangeBound(i, !spilled_in_blocks && i->spilled());
- }
- }
-
- LiveRangeBound* Find(const LifetimePosition position) const {
- size_t left_index = 0;
- size_t right_index = length_;
- while (true) {
- size_t current_index = left_index + (right_index - left_index) / 2;
- DCHECK(right_index > current_index);
- LiveRangeBound* bound = &start_[current_index];
- if (bound->start_ <= position) {
- if (position < bound->end_) return bound;
- DCHECK(left_index < current_index);
- left_index = current_index;
- } else {
- right_index = current_index;
- }
- }
- }
-
- LiveRangeBound* FindPred(const InstructionBlock* pred) {
- LifetimePosition pred_end =
- LifetimePosition::InstructionFromInstructionIndex(
- pred->last_instruction_index());
- return Find(pred_end);
- }
-
- LiveRangeBound* FindSucc(const InstructionBlock* succ) {
- LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
- succ->first_instruction_index());
- return Find(succ_start);
- }
-
- bool FindConnectableSubranges(const InstructionBlock* block,
- const InstructionBlock* pred,
- FindResult* result) const {
- LifetimePosition pred_end =
- LifetimePosition::InstructionFromInstructionIndex(
- pred->last_instruction_index());
- LiveRangeBound* bound = Find(pred_end);
- result->pred_cover_ = bound->range_;
- LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
- block->first_instruction_index());
-
- if (bound->CanCover(cur_start)) {
- // Both blocks are covered by the same range, so there is nothing to
- // connect.
- return false;
- }
- bound = Find(cur_start);
- if (bound->skip_) {
- return false;
- }
- result->cur_cover_ = bound->range_;
- DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
- return (result->cur_cover_ != result->pred_cover_);
- }
-
- private:
- size_t length_;
- LiveRangeBound* start_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
-};
-
-
-class LiveRangeFinder {
- public:
- explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
- : data_(data),
- bounds_length_(static_cast<int>(data_->live_ranges().size())),
- bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
- zone_(zone) {
- for (int i = 0; i < bounds_length_; ++i) {
- new (&bounds_[i]) LiveRangeBoundArray();
- }
- }
-
- LiveRangeBoundArray* ArrayFor(int operand_index) {
- DCHECK(operand_index < bounds_length_);
- TopLevelLiveRange* range = data_->live_ranges()[operand_index];
- DCHECK(range != nullptr && !range->IsEmpty());
- LiveRangeBoundArray* array = &bounds_[operand_index];
- if (array->ShouldInitialize()) {
- array->Initialize(zone_, range);
- }
- return array;
- }
-
- private:
- const RegisterAllocationData* const data_;
- const int bounds_length_;
- LiveRangeBoundArray* const bounds_;
- Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
-};
-
-
-typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
-
-
-struct DelayedInsertionMapCompare {
- bool operator()(const DelayedInsertionMapKey& a,
- const DelayedInsertionMapKey& b) const {
- if (a.first == b.first) {
- return a.second.Compare(b.second);
- }
- return a.first < b.first;
- }
-};
-
-
-typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
- DelayedInsertionMapCompare> DelayedInsertionMap;
-
-} // namespace
-
-
LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
: data_(data) {}
@@ -3383,6 +3342,41 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
if (pred_op.Equals(cur_op)) continue;
+ if (!pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) {
+ // We're doing a reload.
+ // We don't need to, if:
+ // 1) there's no register use in this block, and
+ // 2) the range ends before the block does, and
+ // 3) we don't have a successor, or the successor is spilled.
+ LifetimePosition block_start =
+ LifetimePosition::GapFromInstructionIndex(block->code_start());
+ LifetimePosition block_end =
+ LifetimePosition::GapFromInstructionIndex(block->code_end());
+ const LiveRange* current = result.cur_cover_;
+ const LiveRange* successor = current->next();
+ if (current->End() < block_end &&
+ (successor == nullptr || successor->spilled())) {
+          // Verify point 1: no register use. We can go to the end of the
+ // range, since it's all within the block.
+
+ bool uses_reg = false;
+ for (const UsePosition* use = current->NextUsePosition(block_start);
+ use != nullptr; use = use->next()) {
+ if (use->operand()->IsAnyRegister()) {
+ uses_reg = true;
+ break;
+ }
+ }
+ if (!uses_reg) continue;
+ }
+ if (current->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
+ pred_block->IsDeferred()) {
+ // The spill location should be defined in pred_block, so add
+ // pred_block to the list of blocks requiring a spill operand.
+ current->TopLevel()->GetListOfBlocksRequiringSpillOperands()->Add(
+ pred_block->rpo_number().ToInt());
+ }
+ }
int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
USE(move_loc);
DCHECK_IMPLIES(
@@ -3393,6 +3387,16 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
iterator.Advance();
}
}
+
+  // At this stage, we have collected the blocks needing a spill operand from
+ // ConnectRanges and from ResolveControlFlow. Time to commit the spills for
+ // deferred blocks.
+ for (TopLevelLiveRange* top : data()->live_ranges()) {
+ if (top == nullptr || top->IsEmpty() ||
+ !top->IsSpilledOnlyInDeferredBlocks())
+ continue;
+ CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()), local_zone);
+ }
}
@@ -3430,7 +3434,7 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
LifetimePosition pos = second_range->Start();
// Add gap move if the two live ranges touch and there is no block
// boundary.
- if (!connect_spilled && second_range->spilled()) continue;
+ if (second_range->spilled()) continue;
if (first_range->End() != pos) continue;
if (data()->IsBlockBoundary(pos) &&
!CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
@@ -3442,6 +3446,16 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
bool delay_insertion = false;
Instruction::GapPosition gap_pos;
int gap_index = pos.ToInstructionIndex();
+ if (connect_spilled && !prev_operand.IsAnyRegister() &&
+ cur_operand.IsAnyRegister()) {
+ const InstructionBlock* block = code()->GetInstructionBlock(gap_index);
+ DCHECK(block->IsDeferred());
+ // Performing a reload in this block, meaning the spill operand must
+ // be defined here.
+ top_range->GetListOfBlocksRequiringSpillOperands()->Add(
+ block->rpo_number().ToInt());
+ }
+
if (pos.IsGapPosition()) {
gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
} else {
@@ -3452,7 +3466,7 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
}
gap_pos = delay_insertion ? Instruction::END : Instruction::START;
}
- // Fills or spills for spilled in deferred blocks ranges must happen
+      // Reloads or spills for ranges spilled in deferred blocks must happen
// only in deferred blocks.
DCHECK_IMPLIES(
connect_spilled &&
@@ -3503,6 +3517,73 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
}
+void LiveRangeConnector::CommitSpillsInDeferredBlocks(
+ TopLevelLiveRange* range, LiveRangeBoundArray* array, Zone* temp_zone) {
+ DCHECK(range->IsSpilledOnlyInDeferredBlocks());
+ DCHECK(!range->spilled());
+
+ InstructionSequence* code = data()->code();
+ InstructionOperand spill_operand = range->GetSpillRangeOperand();
+
+ TRACE("Live Range %d will be spilled only in deferred blocks.\n",
+ range->vreg());
+ // If we have ranges that aren't spilled but require the operand on the stack,
+ // make sure we insert the spill.
+ for (const LiveRange* child = range; child != nullptr;
+ child = child->next()) {
+ for (const UsePosition* pos = child->first_pos(); pos != nullptr;
+ pos = pos->next()) {
+ if (pos->type() != UsePositionType::kRequiresSlot && !child->spilled())
+ continue;
+ range->AddBlockRequiringSpillOperand(
+ code->GetInstructionBlock(pos->pos().ToInstructionIndex())
+ ->rpo_number());
+ }
+ }
+
+ ZoneQueue<int> worklist(temp_zone);
+
+ for (BitVector::Iterator iterator(
+ range->GetListOfBlocksRequiringSpillOperands());
+ !iterator.Done(); iterator.Advance()) {
+ worklist.push(iterator.Current());
+ }
+
+ // Seek the deferred blocks that dominate locations requiring spill operands,
+ // and spill there. We only need to spill at the start of such blocks.
+ BitVector done_blocks(
+ range->GetListOfBlocksRequiringSpillOperands()->length(), temp_zone);
+ while (!worklist.empty()) {
+ int block_id = worklist.front();
+ worklist.pop();
+ if (done_blocks.Contains(block_id)) continue;
+ done_blocks.Add(block_id);
+ const InstructionBlock* spill_block =
+ code->InstructionBlockAt(RpoNumber::FromInt(block_id));
+
+ for (const RpoNumber& pred : spill_block->predecessors()) {
+ const InstructionBlock* pred_block = code->InstructionBlockAt(pred);
+
+ if (pred_block->IsDeferred()) {
+ worklist.push(pred_block->rpo_number().ToInt());
+ } else {
+ LifetimePosition pred_end =
+ LifetimePosition::InstructionFromInstructionIndex(
+ pred_block->last_instruction_index());
+
+ LiveRangeBound* bound = array->Find(pred_end);
+
+ InstructionOperand pred_op = bound->range_->GetAssignedOperand();
+
+ data()->AddGapMove(spill_block->first_instruction_index(),
+ Instruction::GapPosition::START, pred_op,
+ spill_operand);
+ }
+ }
+ }
+}
+
+
} // namespace compiler
} // namespace internal
} // namespace v8
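
CommitSpillsInDeferredBlocks above is essentially a backwards worklist walk:
starting from the blocks known to need the value on the stack, it follows
predecessor edges through deferred code, and whenever it reaches an edge
coming from non-deferred code it has found an entry into the deferred region,
so a spill is committed at the start of that block. A minimal sketch of the
walk (illustrative, not V8 code):

    #include <queue>
    #include <set>
    #include <vector>

    struct Block { bool deferred; std::vector<int> preds; };

    std::set<int> FindSpillPoints(const std::vector<Block>& blocks,
                                  std::queue<int> worklist) {
      std::set<int> done, spill_blocks;
      while (!worklist.empty()) {
        int id = worklist.front();
        worklist.pop();
        if (!done.insert(id).second) continue;  // already visited
        for (int pred : blocks[id].preds) {
          if (blocks[pred].deferred) {
            worklist.push(pred);      // keep walking inside deferred code
          } else {
            spill_blocks.insert(id);  // deferred-region entry: spill here
          }
        }
      }
      return spill_blocks;
    }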
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index b96a43ccec..38fad05ed3 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -579,14 +579,17 @@ class TopLevelLiveRange final : public LiveRange {
// and instead let the LiveRangeConnector perform the spills within the
// deferred blocks. If so, we insert here spills for non-spilled ranges
// with slot use positions.
- void MarkSpilledInDeferredBlock() {
+ void TreatAsSpilledInDeferredBlock(Zone* zone, int total_block_count) {
spill_start_index_ = -1;
spilled_in_deferred_blocks_ = true;
spill_move_insertion_locations_ = nullptr;
+ list_of_blocks_requiring_spill_operands_ =
+ new (zone) BitVector(total_block_count, zone);
}
- bool TryCommitSpillInDeferredBlock(InstructionSequence* code,
- const InstructionOperand& spill_operand);
+ void CommitSpillInDeferredBlocks(RegisterAllocationData* data,
+ const InstructionOperand& spill_operand,
+ BitVector* necessary_spill_points);
TopLevelLiveRange* splintered_from() const { return splintered_from_; }
bool IsSplinter() const { return splintered_from_ != nullptr; }
@@ -617,7 +620,8 @@ class TopLevelLiveRange final : public LiveRange {
struct SpillMoveInsertionList;
- SpillMoveInsertionList* spill_move_insertion_locations() const {
+ SpillMoveInsertionList* GetSpillMoveInsertionLocations() const {
+ DCHECK(!IsSpilledOnlyInDeferredBlocks());
return spill_move_insertion_locations_;
}
TopLevelLiveRange* splinter() const { return splinter_; }
@@ -634,6 +638,16 @@ class TopLevelLiveRange final : public LiveRange {
void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
bool has_preassigned_slot() const { return has_preassigned_slot_; }
+ void AddBlockRequiringSpillOperand(RpoNumber block_id) {
+ DCHECK(IsSpilledOnlyInDeferredBlocks());
+ GetListOfBlocksRequiringSpillOperands()->Add(block_id.ToInt());
+ }
+
+ BitVector* GetListOfBlocksRequiringSpillOperands() const {
+ DCHECK(IsSpilledOnlyInDeferredBlocks());
+ return list_of_blocks_requiring_spill_operands_;
+ }
+
private:
void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
@@ -650,7 +664,12 @@ class TopLevelLiveRange final : public LiveRange {
InstructionOperand* spill_operand_;
SpillRange* spill_range_;
};
- SpillMoveInsertionList* spill_move_insertion_locations_;
+
+ union {
+ SpillMoveInsertionList* spill_move_insertion_locations_;
+ BitVector* list_of_blocks_requiring_spill_operands_;
+ };
+
// TODO(mtrofin): generalize spilling after definition, currently specialized
// just for spill in a single deferred block.
bool spilled_in_deferred_blocks_;
@@ -1125,6 +1144,7 @@ class ReferenceMapPopulator final : public ZoneObject {
};
+class LiveRangeBoundArray;
// Insert moves of the form
//
// Operand(child_(k+1)) = Operand(child_k)
@@ -1157,6 +1177,10 @@ class LiveRangeConnector final : public ZoneObject {
const InstructionBlock* pred,
const InstructionOperand& pred_op);
+ void CommitSpillsInDeferredBlocks(TopLevelLiveRange* range,
+ LiveRangeBoundArray* array,
+ Zone* temp_zone);
+
RegisterAllocationData* const data_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeConnector);
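
The union introduced in TopLevelLiveRange above is a space optimization: a
range either records spill-move insertion points (spilled at definition) or
a bit vector of blocks requiring spill operands (spilled only in deferred
blocks), never both. The spilled_in_deferred_blocks_ flag acts as the
discriminant, and the DCHECKs in the two accessors enforce that each caller
reads the matching side. Sketched abstractly (not the V8 declaration):

    // Illustrative shape of the discriminated union.
    struct SpillData {
      bool spilled_in_deferred_blocks;  // discriminant
      union {
        void* spill_move_insertion_locations;           // valid when false
        void* list_of_blocks_requiring_spill_operands;  // valid when true
      };
    };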
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 5dab60f6a3..2f7720beb3 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -97,7 +97,6 @@ bool Truncation::LessGeneral(TruncationKind rep1, TruncationKind rep2) {
namespace {
-// TODO(titzer): should Word64 also be implicitly convertable to others?
bool IsWord(MachineRepresentation rep) {
return rep == MachineRepresentation::kWord8 ||
rep == MachineRepresentation::kWord16 ||
@@ -146,6 +145,9 @@ Node* RepresentationChanger::GetRepresentationFor(
return GetWord32RepresentationFor(node, output_rep, output_type);
case MachineRepresentation::kWord64:
return GetWord64RepresentationFor(node, output_rep, output_type);
+ case MachineRepresentation::kSimd128: // Fall through.
+ // TODO(bbudge) Handle conversions between tagged and untagged.
+ break;
case MachineRepresentation::kNone:
return node;
}
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 8af8bdfaa1..ed7fe9d14b 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -142,6 +142,7 @@ UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
return UseInfo::TruncatingWord32();
case MachineRepresentation::kBit:
return UseInfo::Bool();
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
break;
}
@@ -199,6 +200,9 @@ bool MachineRepresentationIsSubtype(MachineRepresentation r1,
case MachineRepresentation::kFloat64:
return r2 == MachineRepresentation::kFloat64 ||
r2 == MachineRepresentation::kTagged;
+ case MachineRepresentation::kSimd128:
+ return r2 == MachineRepresentation::kSimd128 ||
+ r2 == MachineRepresentation::kTagged;
case MachineRepresentation::kTagged:
return r2 == MachineRepresentation::kTagged;
}
@@ -1245,13 +1249,16 @@ class RepresentationSelector {
case IrOpcode::kObjectIsNumber: {
ProcessInput(node, 0, UseInfo::AnyTagged());
SetOutput(node, NodeOutputInfo::Bool());
- if (lower()) lowering->DoObjectIsNumber(node);
+ break;
+ }
+ case IrOpcode::kObjectIsReceiver: {
+ ProcessInput(node, 0, UseInfo::AnyTagged());
+ SetOutput(node, NodeOutputInfo::Bool());
break;
}
case IrOpcode::kObjectIsSmi: {
ProcessInput(node, 0, UseInfo::AnyTagged());
SetOutput(node, NodeOutputInfo::Bool());
- if (lower()) lowering->DoObjectIsSmi(node);
break;
}
@@ -1396,6 +1403,7 @@ class RepresentationSelector {
case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
+ case IrOpcode::kFloat64RoundUp:
return VisitUnop(node, UseInfo::Float64(), NodeOutputInfo::Float64());
case IrOpcode::kFloat64Equal:
case IrOpcode::kFloat64LessThan:
@@ -1410,6 +1418,7 @@ class RepresentationSelector {
NodeOutputInfo::Float64());
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
+ case IrOpcode::kLoadParentFramePointer:
return VisitLeaf(node, NodeOutputInfo::Pointer());
case IrOpcode::kStateValues:
VisitStateValues(node);
@@ -1587,42 +1596,6 @@ void SimplifiedLowering::DoStoreBuffer(Node* node) {
}
-void SimplifiedLowering::DoObjectIsNumber(Node* node) {
- Node* input = NodeProperties::GetValueInput(node, 0);
- // TODO(bmeurer): Optimize somewhat based on input type.
- Node* check =
- graph()->NewNode(machine()->WordEqual(),
- graph()->NewNode(machine()->WordAnd(), input,
- jsgraph()->IntPtrConstant(kSmiTagMask)),
- jsgraph()->IntPtrConstant(kSmiTag));
- Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = jsgraph()->Int32Constant(1);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = graph()->NewNode(
- machine()->WordEqual(),
- graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), input,
- jsgraph()->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag),
- graph()->start(), if_false),
- jsgraph()->HeapConstant(isolate()->factory()->heap_number_map()));
- Node* control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- node->ReplaceInput(0, vtrue);
- node->AppendInput(graph()->zone(), vfalse);
- node->AppendInput(graph()->zone(), control);
- NodeProperties::ChangeOp(node, common()->Phi(MachineRepresentation::kBit, 2));
-}
-
-
-void SimplifiedLowering::DoObjectIsSmi(Node* node) {
- node->ReplaceInput(0,
- graph()->NewNode(machine()->WordAnd(), node->InputAt(0),
- jsgraph()->IntPtrConstant(kSmiTagMask)));
- node->AppendInput(graph()->zone(), jsgraph()->IntPtrConstant(kSmiTag));
- NodeProperties::ChangeOp(node, machine()->WordEqual());
-}
-
-
Node* SimplifiedLowering::StringComparison(Node* node) {
Operator::Properties properties = node->op()->properties();
Callable callable = CodeFactory::StringCompare(isolate());
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 056837ab87..358bd97f9c 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -36,8 +36,6 @@ class SimplifiedLowering final {
void DoLoadBuffer(Node* node, MachineRepresentation rep,
RepresentationChanger* changer);
void DoStoreBuffer(Node* node);
- void DoObjectIsNumber(Node* node);
- void DoObjectIsSmi(Node* node);
void DoShift(Node* node, Operator const* op, Type* rhs_type);
void DoStringEqual(Node* node);
void DoStringLessThan(Node* node);
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 1eaa287fee..c7abe9c96e 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -7,7 +7,7 @@
#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/types-inl.h"
+#include "src/types.h"
namespace v8 {
namespace internal {
@@ -187,6 +187,7 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
V(ChangeBoolToBit, Operator::kNoProperties, 1) \
V(ChangeBitToBool, Operator::kNoProperties, 1) \
V(ObjectIsNumber, Operator::kNoProperties, 1) \
+ V(ObjectIsReceiver, Operator::kNoProperties, 1) \
V(ObjectIsSmi, Operator::kNoProperties, 1)
#define NO_THROW_OP_LIST(V) \
@@ -253,7 +254,6 @@ NO_THROW_OP_LIST(GET_FROM_CACHE)
const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
- // TODO(titzer): What about the type parameter?
return new (zone()) Operator(IrOpcode::kReferenceEqual,
Operator::kCommutative | Operator::kPure,
"ReferenceEqual", 2, 0, 0, 1, 0, 0);
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 3821a6de57..2ed4b5fed0 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -15,10 +15,7 @@ namespace v8 {
namespace internal {
// Forward declarations.
-template <class>
-class TypeImpl;
-struct ZoneTypeConfig;
-typedef TypeImpl<ZoneTypeConfig> Type;
+class Type;
class Zone;
@@ -168,6 +165,7 @@ class SimplifiedOperatorBuilder final : public ZoneObject {
const Operator* ChangeBitToBool();
const Operator* ObjectIsNumber();
+ const Operator* ObjectIsReceiver();
const Operator* ObjectIsSmi();
const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index c1f816d34b..9679513219 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -29,10 +29,8 @@ class Typer::Decorator final : public GraphDecorator {
Typer* const typer_;
};
-
Typer::Typer(Isolate* isolate, Graph* graph, Flags flags,
- CompilationDependencies* dependencies,
- Type::FunctionType* function_type)
+ CompilationDependencies* dependencies, FunctionType* function_type)
: isolate_(isolate),
graph_(graph),
flags_(flags),
@@ -243,11 +241,14 @@ class Typer::Visitor : public Reducer {
static Type* NumberToInt32(Type*, Typer*);
static Type* NumberToUint32(Type*, Typer*);
- static Type* JSAddRanger(Type::RangeType*, Type::RangeType*, Typer*);
- static Type* JSSubtractRanger(Type::RangeType*, Type::RangeType*, Typer*);
- static Type* JSMultiplyRanger(Type::RangeType*, Type::RangeType*, Typer*);
- static Type* JSDivideRanger(Type::RangeType*, Type::RangeType*, Typer*);
- static Type* JSModulusRanger(Type::RangeType*, Type::RangeType*, Typer*);
+ static Type* ObjectIsNumber(Type*, Typer*);
+ static Type* ObjectIsReceiver(Type*, Typer*);
+ static Type* ObjectIsSmi(Type*, Typer*);
+
+ static Type* JSAddRanger(RangeType*, RangeType*, Typer*);
+ static Type* JSSubtractRanger(RangeType*, RangeType*, Typer*);
+ static Type* JSDivideRanger(RangeType*, RangeType*, Typer*);
+ static Type* JSModulusRanger(RangeType*, RangeType*, Typer*);
static ComparisonOutcome JSCompareTyper(Type*, Type*, Typer*);
@@ -508,15 +509,37 @@ Type* Typer::Visitor::NumberToUint32(Type* type, Typer* t) {
}
-// -----------------------------------------------------------------------------
+// Type checks.
-// Control operators.
+Type* Typer::Visitor::ObjectIsNumber(Type* type, Typer* t) {
+ if (type->Is(Type::Number())) return t->singleton_true_;
+ if (!type->Maybe(Type::Number())) return t->singleton_false_;
+ return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::ObjectIsReceiver(Type* type, Typer* t) {
+ if (type->Is(Type::Receiver())) return t->singleton_true_;
+ if (!type->Maybe(Type::Receiver())) return t->singleton_false_;
+ return Type::Boolean();
+}
+
+
+Type* Typer::Visitor::ObjectIsSmi(Type* type, Typer* t) {
+ if (type->Is(Type::TaggedSigned())) return t->singleton_true_;
+ if (type->Is(Type::TaggedPointer())) return t->singleton_false_;
+ return Type::Boolean();
+}
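+
+The predicates above share a three-valued pattern: if the static type proves
+membership, return the cached true singleton; if it refutes it, return the
+false singleton; otherwise the answer is only known to be some boolean.
+(ObjectIsSmi refutes via Is(TaggedPointer) rather than a Maybe query.)
+Sketched abstractly (illustrative, not V8 code):
+
+    enum class Outcome { kAlwaysTrue, kAlwaysFalse, kUnknown };
+
+    // provably_in corresponds to type->Is(T); possibly_in to type->Maybe(T).
+    Outcome TypePredicate(bool provably_in, bool possibly_in) {
+      if (provably_in) return Outcome::kAlwaysTrue;    // singleton_true_
+      if (!possibly_in) return Outcome::kAlwaysFalse;  // singleton_false_
+      return Outcome::kUnknown;                        // Type::Boolean()
+    }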
-Type* Typer::Visitor::TypeStart(Node* node) { return Type::Internal(zone()); }
+// -----------------------------------------------------------------------------
+// Control operators.
+
+Type* Typer::Visitor::TypeStart(Node* node) { return Type::Internal(); }
+
Type* Typer::Visitor::TypeIfException(Node* node) { return Type::Any(); }
@@ -524,7 +547,7 @@ Type* Typer::Visitor::TypeIfException(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeParameter(Node* node) {
- if (Type::FunctionType* function_type = typer_->function_type()) {
+ if (FunctionType* function_type = typer_->function_type()) {
int const index = ParameterIndexOf(node->op());
if (index >= 0 && index < function_type->Arity()) {
return function_type->Parameter(index);
@@ -578,7 +601,7 @@ Type* Typer::Visitor::TypeHeapConstant(Node* node) {
Type* Typer::Visitor::TypeExternalConstant(Node* node) {
- return Type::Internal(zone());
+ return Type::Internal();
}
@@ -627,22 +650,15 @@ Type* Typer::Visitor::TypeFinishRegion(Node* node) { return Operand(node, 0); }
Type* Typer::Visitor::TypeFrameState(Node* node) {
// TODO(rossberg): Ideally FrameState wouldn't have a value output.
- return Type::Internal(zone());
-}
-
-
-Type* Typer::Visitor::TypeStateValues(Node* node) {
- return Type::Internal(zone());
+ return Type::Internal();
}
+Type* Typer::Visitor::TypeStateValues(Node* node) { return Type::Internal(); }
-Type* Typer::Visitor::TypeObjectState(Node* node) {
- return Type::Internal(zone());
-}
-
+Type* Typer::Visitor::TypeObjectState(Node* node) { return Type::Internal(); }
Type* Typer::Visitor::TypeTypedStateValues(Node* node) {
- return Type::Internal(zone());
+ return Type::Internal();
}
@@ -650,7 +666,12 @@ Type* Typer::Visitor::TypeCall(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeProjection(Node* node) {
- // TODO(titzer): use the output type of the input to determine the bounds.
+ Type* const type = Operand(node, 0);
+ if (type->Is(Type::None())) return Type::None();
+ int const index = static_cast<int>(ProjectionIndexOf(node->op()));
+ if (type->IsTuple() && index < type->AsTuple()->Arity()) {
+ return type->AsTuple()->Element(index);
+ }
return Type::Any();
}
@@ -950,9 +971,7 @@ static double array_max(double a[], size_t n) {
return x == 0 ? 0 : x; // -0 -> 0
}
-
-Type* Typer::Visitor::JSAddRanger(Type::RangeType* lhs, Type::RangeType* rhs,
- Typer* t) {
+Type* Typer::Visitor::JSAddRanger(RangeType* lhs, RangeType* rhs, Typer* t) {
double results[4];
results[0] = lhs->Min() + rhs->Min();
results[1] = lhs->Min() + rhs->Max();
@@ -998,9 +1017,8 @@ Type* Typer::Visitor::JSAddTyper(Type* lhs, Type* rhs, Typer* t) {
return Type::Number();
}
-
-Type* Typer::Visitor::JSSubtractRanger(Type::RangeType* lhs,
- Type::RangeType* rhs, Typer* t) {
+Type* Typer::Visitor::JSSubtractRanger(RangeType* lhs, RangeType* rhs,
+ Typer* t) {
double results[4];
results[0] = lhs->Min() - rhs->Min();
results[1] = lhs->Min() - rhs->Max();
@@ -1037,41 +1055,38 @@ Type* Typer::Visitor::JSSubtractTyper(Type* lhs, Type* rhs, Typer* t) {
}
-Type* Typer::Visitor::JSMultiplyRanger(Type::RangeType* lhs,
- Type::RangeType* rhs, Typer* t) {
- double results[4];
- double lmin = lhs->Min();
- double lmax = lhs->Max();
- double rmin = rhs->Min();
- double rmax = rhs->Max();
- results[0] = lmin * rmin;
- results[1] = lmin * rmax;
- results[2] = lmax * rmin;
- results[3] = lmax * rmax;
- // If the result may be nan, we give up on calculating a precise type, because
- // the discontinuity makes it too complicated. Note that even if none of the
- // "results" above is nan, the actual result may still be, so we have to do a
- // different check:
- bool maybe_nan = (lhs->Maybe(t->cache_.kSingletonZero) &&
- (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
- (rhs->Maybe(t->cache_.kSingletonZero) &&
- (lmin == -V8_INFINITY || lmax == +V8_INFINITY));
- if (maybe_nan) return t->cache_.kIntegerOrMinusZeroOrNaN; // Giving up.
- bool maybe_minuszero = (lhs->Maybe(t->cache_.kSingletonZero) && rmin < 0) ||
- (rhs->Maybe(t->cache_.kSingletonZero) && lmin < 0);
- Type* range =
- Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
- return maybe_minuszero ? Type::Union(range, Type::MinusZero(), t->zone())
- : range;
-}
-
-
Type* Typer::Visitor::JSMultiplyTyper(Type* lhs, Type* rhs, Typer* t) {
lhs = Rangify(ToNumber(lhs, t), t);
rhs = Rangify(ToNumber(rhs, t), t);
if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
if (lhs->IsRange() && rhs->IsRange()) {
- return JSMultiplyRanger(lhs->AsRange(), rhs->AsRange(), t);
+ double results[4];
+ double lmin = lhs->AsRange()->Min();
+ double lmax = lhs->AsRange()->Max();
+ double rmin = rhs->AsRange()->Min();
+ double rmax = rhs->AsRange()->Max();
+ results[0] = lmin * rmin;
+ results[1] = lmin * rmax;
+ results[2] = lmax * rmin;
+ results[3] = lmax * rmax;
+ // If the result may be nan, we give up on calculating a precise type,
+ // because the discontinuity makes it too complicated. Note that even if
+ // none of the "results" above is nan, the actual result may still be, so
+ // we have to do a different check:
+ bool maybe_nan = (lhs->Maybe(t->cache_.kSingletonZero) &&
+ (rmin == -V8_INFINITY || rmax == +V8_INFINITY)) ||
+ (rhs->Maybe(t->cache_.kSingletonZero) &&
+ (lmin == -V8_INFINITY || lmax == +V8_INFINITY));
+ if (maybe_nan) return t->cache_.kIntegerOrMinusZeroOrNaN; // Giving up.
+ bool maybe_minuszero = (lhs->Maybe(t->cache_.kSingletonZero) && rmin < 0) ||
+ (rhs->Maybe(t->cache_.kSingletonZero) && lmin < 0);
+ Type* range =
+ Type::Range(array_min(results, 4), array_max(results, 4), t->zone());
+ return maybe_minuszero ? Type::Union(range, Type::MinusZero(), t->zone())
+ : range;
}
return Type::Number();
}
@@ -1090,9 +1105,8 @@ Type* Typer::Visitor::JSDivideTyper(Type* lhs, Type* rhs, Typer* t) {
return maybe_nan ? Type::Number() : Type::OrderedNumber();
}
-
-Type* Typer::Visitor::JSModulusRanger(Type::RangeType* lhs,
- Type::RangeType* rhs, Typer* t) {
+Type* Typer::Visitor::JSModulusRanger(RangeType* lhs, RangeType* rhs,
+ Typer* t) {
double lmin = lhs->Min();
double lmax = lhs->Max();
double rmin = rhs->Min();
@@ -1286,8 +1300,8 @@ Type* Typer::Visitor::TypeJSLoadNamed(Node* node) {
} else if (receiver->IsClass() &&
receiver->AsClass()->Map()->IsJSFunctionMap()) {
Handle<Map> map = receiver->AsClass()->Map();
- return map->has_non_instance_prototype() ? Type::Primitive(zone())
- : Type::Receiver(zone());
+ return map->has_non_instance_prototype() ? Type::Primitive()
+ : Type::Receiver();
}
}
return Type::Any();
@@ -1335,8 +1349,8 @@ Type* Typer::Visitor::Weaken(Node* node, Type* current_type,
// Only weaken if there is range involved; we should converge quickly
// for all other types (the exception is a union of many constants,
// but we currently do not increase the number of constants in unions).
- Type::RangeType* previous = previous_integer->GetRange();
- Type::RangeType* current = current_integer->GetRange();
+ Type* previous = previous_integer->GetRange();
+ Type* current = current_integer->GetRange();
if (current == nullptr || previous == nullptr) {
return current_type;
}
@@ -1397,19 +1411,12 @@ Type* Typer::Visitor::TypeJSStoreGlobal(Node* node) {
Type* Typer::Visitor::TypeJSDeleteProperty(Node* node) {
- return Type::Boolean(zone());
-}
-
-
-Type* Typer::Visitor::TypeJSHasProperty(Node* node) {
- return Type::Boolean(zone());
+ return Type::Boolean();
}
+Type* Typer::Visitor::TypeJSHasProperty(Node* node) { return Type::Boolean(); }
-Type* Typer::Visitor::TypeJSInstanceOf(Node* node) {
- return Type::Boolean(zone());
-}
-
+Type* Typer::Visitor::TypeJSInstanceOf(Node* node) { return Type::Boolean(); }
// JS context operators.
@@ -1430,9 +1437,6 @@ Type* Typer::Visitor::TypeJSStoreContext(Node* node) {
}
-Type* Typer::Visitor::TypeJSLoadDynamic(Node* node) { return Type::Any(); }
-
-
Type* Typer::Visitor::WrapContextTypeForInput(Node* node) {
Type* outer = TypeOrNone(NodeProperties::GetContextInput(node));
if (outer->Is(Type::None())) {
@@ -1525,8 +1529,14 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
case kMathClz32:
return t->cache_.kZeroToThirtyTwo;
// String functions.
+ case kStringCharCodeAt:
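+ // charCodeAt returns a UTF-16 code unit in [0, 0xFFFF], or NaN for an
+ // out-of-bounds index.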
+ return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
+ t->zone());
case kStringCharAt:
+ case kStringConcat:
case kStringFromCharCode:
+ case kStringToLowerCase:
+ case kStringToUpperCase:
return Type::String();
// Array functions.
case kArrayIndexOf:
@@ -1550,15 +1560,15 @@ Type* Typer::Visitor::TypeJSCallFunction(Node* node) {
Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
switch (CallRuntimeParametersOf(node->op()).id()) {
+ case Runtime::kInlineIsJSReceiver:
+ return TypeUnaryOp(node, ObjectIsReceiver);
case Runtime::kInlineIsSmi:
+ return TypeUnaryOp(node, ObjectIsSmi);
case Runtime::kInlineIsArray:
case Runtime::kInlineIsDate:
case Runtime::kInlineIsTypedArray:
- case Runtime::kInlineIsMinusZero:
- case Runtime::kInlineIsFunction:
case Runtime::kInlineIsRegExp:
- case Runtime::kInlineIsJSReceiver:
- return Type::Boolean(zone());
+ return Type::Boolean();
case Runtime::kInlineDoubleLo:
case Runtime::kInlineDoubleHi:
return Type::Signed32();
@@ -1576,6 +1586,7 @@ Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
case Runtime::kInlineRegExpConstructResult:
return Type::OtherObject();
case Runtime::kInlineSubString:
+ case Runtime::kInlineStringCharFromCode:
return Type::String();
case Runtime::kInlineToInteger:
return TypeUnaryOp(node, ToInteger);
@@ -1613,15 +1624,16 @@ Type* Typer::Visitor::TypeJSForInNext(Node* node) {
Type* Typer::Visitor::TypeJSForInPrepare(Node* node) {
- // TODO(bmeurer): Return a tuple type here.
- return Type::Any();
-}
-
-
-Type* Typer::Visitor::TypeJSForInDone(Node* node) {
- return Type::Boolean(zone());
+ STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
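+ // JSForInPrepare produces a (cache_type, cache_array, cache_length) triple
+ // for the for-in loop to consume, so type it as the corresponding tuple.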
+ Factory* const f = isolate()->factory();
+ Type* const cache_type = Type::Union(
+ typer_->cache_.kSmi, Type::Class(f->meta_map(), zone()), zone());
+ Type* const cache_array = Type::Class(f->fixed_array_map(), zone());
+ Type* const cache_length = typer_->cache_.kFixedArrayLengthType;
+ return Type::Tuple(cache_type, cache_array, cache_length, zone());
}
+Type* Typer::Visitor::TypeJSForInDone(Node* node) { return Type::Boolean(); }
Type* Typer::Visitor::TypeJSForInStep(Node* node) {
STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
@@ -1643,82 +1655,57 @@ Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
// Simplified operators.
-
-Type* Typer::Visitor::TypeBooleanNot(Node* node) {
- return Type::Boolean(zone());
-}
-
+Type* Typer::Visitor::TypeBooleanNot(Node* node) { return Type::Boolean(); }
Type* Typer::Visitor::TypeBooleanToNumber(Node* node) {
return TypeUnaryOp(node, ToNumber);
}
+Type* Typer::Visitor::TypeNumberEqual(Node* node) { return Type::Boolean(); }
-Type* Typer::Visitor::TypeNumberEqual(Node* node) {
- return Type::Boolean(zone());
-}
-
-
-Type* Typer::Visitor::TypeNumberLessThan(Node* node) {
- return Type::Boolean(zone());
-}
-
+Type* Typer::Visitor::TypeNumberLessThan(Node* node) { return Type::Boolean(); }
Type* Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) {
- return Type::Boolean(zone());
-}
-
-
-Type* Typer::Visitor::TypeNumberAdd(Node* node) { return Type::Number(zone()); }
-
-
-Type* Typer::Visitor::TypeNumberSubtract(Node* node) {
- return Type::Number(zone());
-}
-
-
-Type* Typer::Visitor::TypeNumberMultiply(Node* node) {
- return Type::Number(zone());
+ return Type::Boolean();
}
+Type* Typer::Visitor::TypeNumberAdd(Node* node) { return Type::Number(); }
-Type* Typer::Visitor::TypeNumberDivide(Node* node) {
- return Type::Number(zone());
-}
+Type* Typer::Visitor::TypeNumberSubtract(Node* node) { return Type::Number(); }
+Type* Typer::Visitor::TypeNumberMultiply(Node* node) { return Type::Number(); }
-Type* Typer::Visitor::TypeNumberModulus(Node* node) {
- return Type::Number(zone());
-}
+Type* Typer::Visitor::TypeNumberDivide(Node* node) { return Type::Number(); }
+Type* Typer::Visitor::TypeNumberModulus(Node* node) { return Type::Number(); }
Type* Typer::Visitor::TypeNumberBitwiseOr(Node* node) {
- return Type::Signed32(zone());
+ return Type::Signed32();
}
Type* Typer::Visitor::TypeNumberBitwiseXor(Node* node) {
- return Type::Signed32(zone());
+ return Type::Signed32();
}
Type* Typer::Visitor::TypeNumberBitwiseAnd(Node* node) {
- return Type::Signed32(zone());
+ return Type::Signed32();
}
Type* Typer::Visitor::TypeNumberShiftLeft(Node* node) {
- return Type::Signed32(zone());
+ return Type::Signed32();
}
Type* Typer::Visitor::TypeNumberShiftRight(Node* node) {
- return Type::Signed32(zone());
+ return Type::Signed32();
}
Type* Typer::Visitor::TypeNumberShiftRightLogical(Node* node) {
- return Type::Unsigned32(zone());
+ return Type::Unsigned32();
}
@@ -1733,7 +1720,7 @@ Type* Typer::Visitor::TypeNumberToUint32(Node* node) {
Type* Typer::Visitor::TypeNumberIsHoleNaN(Node* node) {
- return Type::Boolean(zone());
+ return Type::Boolean();
}
@@ -1755,19 +1742,12 @@ Type* Typer::Visitor::TypeReferenceEqual(Node* node) {
return TypeBinaryOp(node, ReferenceEqualTyper);
}
+Type* Typer::Visitor::TypeStringEqual(Node* node) { return Type::Boolean(); }
-Type* Typer::Visitor::TypeStringEqual(Node* node) {
- return Type::Boolean(zone());
-}
-
-
-Type* Typer::Visitor::TypeStringLessThan(Node* node) {
- return Type::Boolean(zone());
-}
-
+Type* Typer::Visitor::TypeStringLessThan(Node* node) { return Type::Boolean(); }
Type* Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
- return Type::Boolean(zone());
+ return Type::Boolean();
}
@@ -1931,20 +1911,17 @@ Type* Typer::Visitor::TypeStoreElement(Node* node) {
Type* Typer::Visitor::TypeObjectIsNumber(Node* node) {
- Type* arg = Operand(node, 0);
- if (arg->Is(Type::None())) return Type::None();
- if (arg->Is(Type::Number())) return typer_->singleton_true_;
- if (!arg->Maybe(Type::Number())) return typer_->singleton_false_;
- return Type::Boolean();
+ return TypeUnaryOp(node, ObjectIsNumber);
+}
+
+
+Type* Typer::Visitor::TypeObjectIsReceiver(Node* node) {
+ return TypeUnaryOp(node, ObjectIsReceiver);
}
Type* Typer::Visitor::TypeObjectIsSmi(Node* node) {
- Type* arg = Operand(node, 0);
- if (arg->Is(Type::None())) return Type::None();
- if (arg->Is(Type::TaggedSigned())) return typer_->singleton_true_;
- if (arg->Is(Type::TaggedPointer())) return typer_->singleton_false_;
- return Type::Boolean();
+ return TypeUnaryOp(node, ObjectIsSmi);
}
@@ -1952,6 +1929,7 @@ Type* Typer::Visitor::TypeObjectIsSmi(Node* node) {
Type* Typer::Visitor::TypeLoad(Node* node) { return Type::Any(); }
+Type* Typer::Visitor::TypeStackSlot(Node* node) { return Type::Any(); }
Type* Typer::Visitor::TypeStore(Node* node) {
UNREACHABLE();
@@ -1989,6 +1967,11 @@ Type* Typer::Visitor::TypeWord32Clz(Node* node) { return Type::Integral32(); }
Type* Typer::Visitor::TypeWord32Ctz(Node* node) { return Type::Integral32(); }
+Type* Typer::Visitor::TypeWord32ReverseBits(Node* node) {
+ return Type::Integral32();
+}
+
+
Type* Typer::Visitor::TypeWord32Popcnt(Node* node) {
return Type::Integral32();
}
@@ -2021,6 +2004,11 @@ Type* Typer::Visitor::TypeWord64Clz(Node* node) { return Type::Internal(); }
Type* Typer::Visitor::TypeWord64Ctz(Node* node) { return Type::Internal(); }
+Type* Typer::Visitor::TypeWord64ReverseBits(Node* node) {
+ return Type::Internal();
+}
+
+
Type* Typer::Visitor::TypeWord64Popcnt(Node* node) { return Type::Internal(); }
@@ -2145,6 +2133,17 @@ Type* Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
}
+Type* Typer::Visitor::TypeTruncateFloat32ToInt32(Node* node) {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedIntegral32(), zone());
+}
+
+
+Type* Typer::Visitor::TypeTruncateFloat32ToUint32(Node* node) {
+ return Type::Intersect(Type::Unsigned32(), Type::UntaggedIntegral32(),
+ zone());
+}
+
+
Type* Typer::Visitor::TypeTryTruncateFloat32ToInt64(Node* node) {
return Type::Internal();
}
@@ -2200,6 +2199,11 @@ Type* Typer::Visitor::TypeTruncateInt64ToInt32(Node* node) {
}
+Type* Typer::Visitor::TypeRoundInt32ToFloat32(Node* node) {
+ return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
+}
+
+
Type* Typer::Visitor::TypeRoundInt64ToFloat32(Node* node) {
return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
}
@@ -2210,6 +2214,11 @@ Type* Typer::Visitor::TypeRoundInt64ToFloat64(Node* node) {
}
+Type* Typer::Visitor::TypeRoundUint32ToFloat32(Node* node) {
+ return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
+}
+
+
Type* Typer::Visitor::TypeRoundUint64ToFloat32(Node* node) {
return Type::Intersect(Type::PlainNumber(), Type::UntaggedFloat32(), zone());
}
@@ -2406,6 +2415,9 @@ Type* Typer::Visitor::TypeLoadFramePointer(Node* node) {
return Type::Internal();
}
+Type* Typer::Visitor::TypeLoadParentFramePointer(Node* node) {
+ return Type::Internal();
+}
Type* Typer::Visitor::TypeCheckedLoad(Node* node) { return Type::Any(); }
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index 41770266c8..0982b28ade 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -30,7 +30,7 @@ class Typer {
Typer(Isolate* isolate, Graph* graph, Flags flags = kNoFlags,
CompilationDependencies* dependencies = nullptr,
- Type::FunctionType* function_type = nullptr);
+ FunctionType* function_type = nullptr);
~Typer();
void Run();
@@ -46,13 +46,13 @@ class Typer {
Isolate* isolate() const { return isolate_; }
Flags flags() const { return flags_; }
CompilationDependencies* dependencies() const { return dependencies_; }
- Type::FunctionType* function_type() const { return function_type_; }
+ FunctionType* function_type() const { return function_type_; }
Isolate* const isolate_;
Graph* const graph_;
Flags const flags_;
CompilationDependencies* const dependencies_;
- Type::FunctionType* function_type_;
+ FunctionType* function_type_;
Decorator* decorator_;
TypeCache const& cache_;
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 1a3ef8e783..99480ca2ed 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -22,7 +22,6 @@
#include "src/compiler/schedule.h"
#include "src/compiler/simplified-operator.h"
#include "src/ostreams.h"
-#include "src/types-inl.h"
namespace v8 {
namespace internal {
@@ -428,13 +427,20 @@ void Verifier::Visitor::Check(Node* node) {
}
break;
}
- case IrOpcode::kFrameState:
+ case IrOpcode::kFrameState: {
// TODO(jarin): what are the constraints on these?
CHECK_EQ(5, value_count);
CHECK_EQ(0, control_count);
CHECK_EQ(0, effect_count);
CHECK_EQ(6, input_count);
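+ // The first three value inputs (parameters, locals, and the expression
+ // stack) must be StateValues or TypedStateValues nodes.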
+ for (int i = 0; i < 3; ++i) {
+ CHECK(NodeProperties::GetValueInput(node, i)->opcode() ==
+ IrOpcode::kStateValues ||
+ NodeProperties::GetValueInput(node, i)->opcode() ==
+ IrOpcode::kTypedStateValues);
+ }
break;
+ }
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
case IrOpcode::kTypedStateValues:
@@ -553,7 +559,6 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kJSLoadContext:
- case IrOpcode::kJSLoadDynamic:
// Type can be anything.
CheckUpperIs(node, Type::Any());
break;
@@ -707,6 +712,7 @@ void Verifier::Visitor::Check(Node* node) {
break;
}
case IrOpcode::kObjectIsNumber:
+ case IrOpcode::kObjectIsReceiver:
case IrOpcode::kObjectIsSmi:
CheckValueInputIs(node, 0, Type::Any());
CheckUpperIs(node, Type::Boolean());
@@ -824,6 +830,7 @@ void Verifier::Visitor::Check(Node* node) {
// -----------------------
case IrOpcode::kLoad:
case IrOpcode::kStore:
+ case IrOpcode::kStackSlot:
case IrOpcode::kWord32And:
case IrOpcode::kWord32Or:
case IrOpcode::kWord32Xor:
@@ -834,6 +841,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kWord32Equal:
case IrOpcode::kWord32Clz:
case IrOpcode::kWord32Ctz:
+ case IrOpcode::kWord32ReverseBits:
case IrOpcode::kWord32Popcnt:
case IrOpcode::kWord64And:
case IrOpcode::kWord64Or:
@@ -845,6 +853,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kWord64Clz:
case IrOpcode::kWord64Popcnt:
case IrOpcode::kWord64Ctz:
+ case IrOpcode::kWord64ReverseBits:
case IrOpcode::kWord64Equal:
case IrOpcode::kInt32Add:
case IrOpcode::kInt32AddWithOverflow:
@@ -907,8 +916,10 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
case IrOpcode::kTruncateInt64ToInt32:
+ case IrOpcode::kRoundInt32ToFloat32:
case IrOpcode::kRoundInt64ToFloat32:
case IrOpcode::kRoundInt64ToFloat64:
+ case IrOpcode::kRoundUint32ToFloat32:
case IrOpcode::kRoundUint64ToFloat64:
case IrOpcode::kRoundUint64ToFloat32:
case IrOpcode::kTruncateFloat64ToFloat32:
@@ -924,6 +935,8 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kChangeFloat32ToFloat64:
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
+ case IrOpcode::kTruncateFloat32ToInt32:
+ case IrOpcode::kTruncateFloat32ToUint32:
case IrOpcode::kTryTruncateFloat32ToInt64:
case IrOpcode::kTryTruncateFloat64ToInt64:
case IrOpcode::kTryTruncateFloat32ToUint64:
@@ -934,6 +947,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kFloat64InsertHighWord32:
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
+ case IrOpcode::kLoadParentFramePointer:
case IrOpcode::kCheckedLoad:
case IrOpcode::kCheckedStore:
// TODO(rossberg): Check.
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 17065d61b4..9c3858dd43 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -15,6 +15,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/instruction-selector.h"
+#include "src/compiler/int64-lowering.h"
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
@@ -29,6 +30,9 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
+#include "src/factory.h"
+#include "src/log-inl.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/wasm/ast-decoder.h"
#include "src/wasm/wasm-module.h"
@@ -105,6 +109,9 @@ class WasmTrapHelper : public ZoneObject {
// Make the current control path trap to unreachable.
void Unreachable() { ConnectTrap(kTrapUnreachable); }
+ // Always trap with the given reason.
+ void TrapAlways(TrapReason reason) { ConnectTrap(reason); }
+
// Add a check that traps if {node} is equal to {val}.
Node* TrapIfEq32(TrapReason reason, Node* node, int32_t val) {
Int32Matcher m(node);
@@ -165,6 +172,28 @@ class WasmTrapHelper : public ZoneObject {
*effect_ptr = before;
}
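+ // Build a dummy return value (a 0xdeadbeef bit pattern of the matching
+ // type) for control paths that unconditionally trap.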
+ Node* GetTrapValue(wasm::FunctionSig* sig) {
+ if (sig->return_count() > 0) {
+ switch (sig->GetReturn()) {
+ case wasm::kAstI32:
+ return jsgraph()->Int32Constant(0xdeadbeef);
+ case wasm::kAstI64:
+ return jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
+ case wasm::kAstF32:
+ return jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
+ case wasm::kAstF64:
+ return jsgraph()->Float64Constant(
+ bit_cast<double>(0xdeadbeefdeadbeef));
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+ } else {
+ return jsgraph()->Int32Constant(0xdeadbeef);
+ }
+ }
+
private:
WasmGraphBuilder* builder_;
JSGraph* jsgraph_;
@@ -197,7 +226,7 @@ class WasmTrapHelper : public ZoneObject {
*effect_ptr = effects_[reason] =
graph()->NewNode(common()->EffectPhi(1), *effect_ptr, *control_ptr);
- if (module && !module->context.is_null()) {
+ if (module && !module->instance->context.is_null()) {
// Use the module context to call the runtime to throw an exception.
Runtime::FunctionId f = Runtime::kThrow;
const Runtime::Function* fun = Runtime::FunctionForId(f);
@@ -210,7 +239,7 @@ class WasmTrapHelper : public ZoneObject {
jsgraph()->ExternalConstant(
ExternalReference(f, jsgraph()->isolate())), // ref
jsgraph()->Int32Constant(fun->nargs), // arity
- jsgraph()->Constant(module->context), // context
+ jsgraph()->Constant(module->instance->context), // context
*effect_ptr,
*control_ptr};
@@ -227,29 +256,7 @@ class WasmTrapHelper : public ZoneObject {
end = thrw;
} else {
// End the control flow by returning a 0xdeadbeef dummy value
- Node* ret_value;
- if (builder_->GetFunctionSignature()->return_count() > 0) {
- switch (builder_->GetFunctionSignature()->GetReturn()) {
- case wasm::kAstI32:
- ret_value = jsgraph()->Int32Constant(0xdeadbeef);
- break;
- case wasm::kAstI64:
- ret_value = jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
- break;
- case wasm::kAstF32:
- ret_value = jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
- break;
- case wasm::kAstF64:
- ret_value = jsgraph()->Float64Constant(
- bit_cast<double>(0xdeadbeefdeadbeef));
- break;
- default:
- UNREACHABLE();
- ret_value = nullptr;
- }
- } else {
- ret_value = jsgraph()->Int32Constant(0xdeadbeef);
- }
+ Node* ret_value = GetTrapValue(builder_->GetFunctionSignature());
end = graph()->NewNode(jsgraph()->common()->Return(), ret_value,
*effect_ptr, *control_ptr);
}
@@ -475,6 +482,9 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
op = m->Uint32LessThanOrEqual();
std::swap(left, right);
break;
+ case wasm::kExprI64And:
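+ // Now available on 32-bit platforms as well, via the int64 lowering pass.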
+ op = m->Word64And();
+ break;
#if WASM_64
// Opcodes only supported on 64-bit platforms.
// TODO(titzer): query the machine operator builder here instead of #ifdef.
@@ -525,9 +535,6 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left,
op = m->Uint64Mod();
return graph()->NewNode(op, left, right,
trap_->ZeroCheck64(kTrapRemByZero, right));
- case wasm::kExprI64And:
- op = m->Word64And();
- break;
case wasm::kExprI64Ior:
op = m->Word64Or();
break;
@@ -696,14 +703,10 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
op = m->ChangeUint32ToFloat64();
break;
case wasm::kExprF32SConvertI32:
- op = m->ChangeInt32ToFloat64(); // TODO(titzer): two conversions
- input = graph()->NewNode(op, input);
- op = m->TruncateFloat64ToFloat32();
+ op = m->RoundInt32ToFloat32();
break;
case wasm::kExprF32UConvertI32:
- op = m->ChangeUint32ToFloat64();
- input = graph()->NewNode(op, input);
- op = m->TruncateFloat64ToFloat32();
+ op = m->RoundUint32ToFloat32();
break;
case wasm::kExprI32SConvertF32:
return BuildI32SConvertF32(input);
@@ -725,6 +728,10 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
if (m->Word32Ctz().IsSupported()) {
op = m->Word32Ctz().op();
break;
+ } else if (m->Word32ReverseBits().IsSupported()) {
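+ // No native ctz: compute ctz(x) as clz(reverse_bits(x)).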
+ Node* reversed = graph()->NewNode(m->Word32ReverseBits().op(), input);
+ Node* result = graph()->NewNode(m->Word32Clz(), reversed);
+ return result;
} else {
return BuildI32Ctz(input);
}
@@ -738,84 +745,53 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
}
}
case wasm::kExprF32Floor: {
- if (m->Float32RoundDown().IsSupported()) {
- op = m->Float32RoundDown().op();
- break;
- } else {
- op = UnsupportedOpcode(opcode);
- break;
- }
+ if (!m->Float32RoundDown().IsSupported()) return BuildF32Floor(input);
+ op = m->Float32RoundDown().op();
+ break;
}
case wasm::kExprF32Ceil: {
- if (m->Float32RoundUp().IsSupported()) {
- op = m->Float32RoundUp().op();
- break;
- } else {
- op = UnsupportedOpcode(opcode);
- break;
- }
+ if (!m->Float32RoundUp().IsSupported()) return BuildF32Ceil(input);
+ op = m->Float32RoundUp().op();
+ break;
}
case wasm::kExprF32Trunc: {
- if (m->Float32RoundTruncate().IsSupported()) {
- op = m->Float32RoundTruncate().op();
- break;
- } else {
- op = UnsupportedOpcode(opcode);
- break;
- }
+ if (!m->Float32RoundTruncate().IsSupported()) return BuildF32Trunc(input);
+ op = m->Float32RoundTruncate().op();
+ break;
}
case wasm::kExprF32NearestInt: {
- if (m->Float32RoundTiesEven().IsSupported()) {
- op = m->Float32RoundTiesEven().op();
- break;
- } else {
- op = UnsupportedOpcode(opcode);
- break;
- }
+ if (!m->Float32RoundTiesEven().IsSupported())
+ return BuildF32NearestInt(input);
+ op = m->Float32RoundTiesEven().op();
+ break;
}
case wasm::kExprF64Floor: {
- if (m->Float64RoundDown().IsSupported()) {
- op = m->Float64RoundDown().op();
- break;
- } else {
- op = UnsupportedOpcode(opcode);
- break;
- }
+ if (!m->Float64RoundDown().IsSupported()) return BuildF64Floor(input);
+ op = m->Float64RoundDown().op();
+ break;
}
case wasm::kExprF64Ceil: {
- if (m->Float64RoundUp().IsSupported()) {
- op = m->Float64RoundUp().op();
- break;
- } else {
- op = UnsupportedOpcode(opcode);
- break;
- }
+ if (!m->Float64RoundUp().IsSupported()) return BuildF64Ceil(input);
+ op = m->Float64RoundUp().op();
+ break;
}
case wasm::kExprF64Trunc: {
- if (m->Float64RoundTruncate().IsSupported()) {
- op = m->Float64RoundTruncate().op();
- break;
- } else {
- op = UnsupportedOpcode(opcode);
- break;
- }
+ if (!m->Float64RoundTruncate().IsSupported()) return BuildF64Trunc(input);
+ op = m->Float64RoundTruncate().op();
+ break;
}
case wasm::kExprF64NearestInt: {
- if (m->Float64RoundTiesEven().IsSupported()) {
- op = m->Float64RoundTiesEven().op();
- break;
- } else {
- op = UnsupportedOpcode(opcode);
- break;
- }
+ if (!m->Float64RoundTiesEven().IsSupported())
+ return BuildF64NearestInt(input);
+ op = m->Float64RoundTiesEven().op();
+ break;
}
-
-#if WASM_64
- // Opcodes only supported on 64-bit platforms.
- // TODO(titzer): query the machine operator builder here instead of #ifdef.
case wasm::kExprI32ConvertI64:
op = m->TruncateInt64ToInt32();
break;
+#if WASM_64
+ // Opcodes only supported on 64-bit platforms.
+ // TODO(titzer): query the machine operator builder here instead of #ifdef.
case wasm::kExprI64SConvertI32:
op = m->ChangeInt32ToInt64();
break;
@@ -883,6 +859,10 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input) {
if (m->Word64Ctz().IsSupported()) {
op = m->Word64Ctz().op();
break;
+ } else if (m->Word64ReverseBits().IsSupported()) {
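+ // No native ctz: compute ctz(x) as clz(reverse_bits(x)).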
+ Node* reversed = graph()->NewNode(m->Word64ReverseBits().op(), input);
+ Node* result = graph()->NewNode(m->Word64Clz(), reversed);
+ return result;
} else {
return BuildI64Ctz(input);
}
@@ -1061,8 +1041,12 @@ Node* WasmGraphBuilder::BuildF32Min(Node* left, Node* right) {
return left_le_right.Phi(
wasm::kAstF32, left,
- right_lt_left.Phi(wasm::kAstF32, right,
- left_is_not_nan.Phi(wasm::kAstF32, right, left)));
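+ // On the NaN path, multiply the result by 1.0 to canonicalize the NaN.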
+ right_lt_left.Phi(
+ wasm::kAstF32, right,
+ left_is_not_nan.Phi(
+ wasm::kAstF32,
+ Binop(wasm::kExprF32Mul, right, Float32Constant(1.0)),
+ Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
}
@@ -1078,8 +1062,12 @@ Node* WasmGraphBuilder::BuildF32Max(Node* left, Node* right) {
return left_ge_right.Phi(
wasm::kAstF32, left,
- right_gt_left.Phi(wasm::kAstF32, right,
- left_is_not_nan.Phi(wasm::kAstF32, right, left)));
+ right_gt_left.Phi(
+ wasm::kAstF32, right,
+ left_is_not_nan.Phi(
+ wasm::kAstF32,
+ Binop(wasm::kExprF32Mul, right, Float32Constant(1.0)),
+ Binop(wasm::kExprF32Mul, left, Float32Constant(1.0)))));
}
@@ -1095,8 +1083,12 @@ Node* WasmGraphBuilder::BuildF64Min(Node* left, Node* right) {
return left_le_right.Phi(
wasm::kAstF64, left,
- right_lt_left.Phi(wasm::kAstF64, right,
- left_is_not_nan.Phi(wasm::kAstF64, right, left)));
+ right_lt_left.Phi(
+ wasm::kAstF64, right,
+ left_is_not_nan.Phi(
+ wasm::kAstF64,
+ Binop(wasm::kExprF64Mul, right, Float64Constant(1.0)),
+ Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
}
@@ -1112,8 +1104,12 @@ Node* WasmGraphBuilder::BuildF64Max(Node* left, Node* right) {
return left_ge_right.Phi(
wasm::kAstF64, left,
- right_gt_left.Phi(wasm::kAstF64, right,
- left_is_not_nan.Phi(wasm::kAstF64, right, left)));
+ right_gt_left.Phi(
+ wasm::kAstF64, right,
+ left_is_not_nan.Phi(
+ wasm::kAstF64,
+ Binop(wasm::kExprF64Mul, right, Float64Constant(1.0)),
+ Binop(wasm::kExprF64Mul, left, Float64Constant(1.0)))));
}
@@ -1121,14 +1117,12 @@ Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input) {
MachineOperatorBuilder* m = jsgraph()->machine();
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF32Trunc, input);
- // TODO(titzer): two conversions
- Node* f64_trunc = graph()->NewNode(m->ChangeFloat32ToFloat64(), trunc);
- Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), f64_trunc);
+ Node* result = graph()->NewNode(m->TruncateFloat32ToInt32(), trunc);
- // Convert the result back to f64. If we end up at a different value than the
+ // Convert the result back to f32. If we end up at a different value than the
// truncated input value, then there has been an overflow and we trap.
- Node* check = Unop(wasm::kExprF64SConvertI32, result);
- Node* overflow = Binop(wasm::kExprF64Ne, f64_trunc, check);
+ Node* check = Unop(wasm::kExprF32SConvertI32, result);
+ Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
return result;
@@ -1137,6 +1131,10 @@ Node* WasmGraphBuilder::BuildI32SConvertF32(Node* input) {
Node* WasmGraphBuilder::BuildI32SConvertF64(Node* input) {
MachineOperatorBuilder* m = jsgraph()->machine();
+ if (module_ && module_->asm_js) {
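+ // asm.js requires JavaScript ToInt32 semantics (wrap on overflow, NaN
+ // becomes 0) and must not trap.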
+ return graph()->NewNode(
+ m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
+ }
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF64Trunc, input);
Node* result = graph()->NewNode(m->ChangeFloat64ToInt32(), trunc);
@@ -1155,14 +1153,12 @@ Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input) {
MachineOperatorBuilder* m = jsgraph()->machine();
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF32Trunc, input);
- // TODO(titzer): two conversions
- Node* f64_trunc = graph()->NewNode(m->ChangeFloat32ToFloat64(), trunc);
- Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), f64_trunc);
+ Node* result = graph()->NewNode(m->TruncateFloat32ToUint32(), trunc);
- // Convert the result back to f64. If we end up at a different value than the
+ // Convert the result back to f32. If we end up at a different value than the
// truncated input value, then there has been an overflow and we trap.
- Node* check = Unop(wasm::kExprF64UConvertI32, result);
- Node* overflow = Binop(wasm::kExprF64Ne, f64_trunc, check);
+ Node* check = Unop(wasm::kExprF32UConvertI32, result);
+ Node* overflow = Binop(wasm::kExprF32Ne, trunc, check);
trap_->AddTrapIfTrue(kTrapFloatUnrepresentable, overflow);
return result;
@@ -1171,6 +1167,10 @@ Node* WasmGraphBuilder::BuildI32UConvertF32(Node* input) {
Node* WasmGraphBuilder::BuildI32UConvertF64(Node* input) {
MachineOperatorBuilder* m = jsgraph()->machine();
+ if (module_ && module_->asm_js) {
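+ // Same for unsigned conversions: use JavaScript truncation semantics
+ // instead of trapping.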
+ return graph()->NewNode(
+ m->TruncateFloat64ToInt32(TruncationMode::kJavaScript), input);
+ }
// Truncation of the input value is needed for the overflow check later.
Node* trunc = Unop(wasm::kExprF64Trunc, input);
Node* result = graph()->NewNode(m->ChangeFloat64ToUint32(), trunc);
@@ -1360,6 +1360,117 @@ Node* WasmGraphBuilder::BuildI64Popcnt(Node* input) {
return result;
}
+Node* WasmGraphBuilder::BuildF32Trunc(Node* input) {
+ MachineType type = MachineType::Float32();
+ ExternalReference ref =
+ ExternalReference::f32_trunc_wrapper_function(jsgraph()->isolate());
+ return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF32Floor(Node* input) {
+ MachineType type = MachineType::Float32();
+ ExternalReference ref =
+ ExternalReference::f32_floor_wrapper_function(jsgraph()->isolate());
+ return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF32Ceil(Node* input) {
+ MachineType type = MachineType::Float32();
+ ExternalReference ref =
+ ExternalReference::f32_ceil_wrapper_function(jsgraph()->isolate());
+ return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF32NearestInt(Node* input) {
+ MachineType type = MachineType::Float32();
+ ExternalReference ref =
+ ExternalReference::f32_nearest_int_wrapper_function(jsgraph()->isolate());
+ return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF64Trunc(Node* input) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_trunc_wrapper_function(jsgraph()->isolate());
+ return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF64Floor(Node* input) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_floor_wrapper_function(jsgraph()->isolate());
+ return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF64Ceil(Node* input) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_ceil_wrapper_function(jsgraph()->isolate());
+ return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildF64NearestInt(Node* input) {
+ MachineType type = MachineType::Float64();
+ ExternalReference ref =
+ ExternalReference::f64_nearest_int_wrapper_function(jsgraph()->isolate());
+ return BuildRoundingInstruction(input, ref, type);
+}
+
+Node* WasmGraphBuilder::BuildRoundingInstruction(Node* input,
+ ExternalReference ref,
+ MachineType type) {
+ // We implement these rounding operations by calling out to a C function
+ // that computes the result for us. The input is passed to the C function
+ // through memory to avoid floating-point parameters in the C signature: we
+ // reserve a slot on the stack, store the parameter in that slot, pass a
+ // pointer to the slot to the C function, and after the call we load the
+ // return value back from the stack slot.
+
+ Node* stack_slot_param =
+ graph()->NewNode(jsgraph()->machine()->StackSlot(type.representation()));
+
+ const Operator* store_op = jsgraph()->machine()->Store(
+ StoreRepresentation(type.representation(), kNoWriteBarrier));
+ *effect_ =
+ graph()->NewNode(store_op, stack_slot_param, jsgraph()->Int32Constant(0),
+ input, *effect_, *control_);
+
+ Signature<MachineType>::Builder sig_builder(jsgraph()->zone(), 0, 1);
+ sig_builder.AddParam(MachineType::Pointer());
+ Node* function = graph()->NewNode(jsgraph()->common()->ExternalConstant(ref));
+
+ Node* args[] = {function, stack_slot_param};
+
+ BuildCCall(sig_builder.Build(), args);
+
+ const Operator* load_op = jsgraph()->machine()->Load(type);
+
+ Node* load =
+ graph()->NewNode(load_op, stack_slot_param, jsgraph()->Int32Constant(0),
+ *effect_, *control_);
+ *effect_ = load;
+ return load;
+}
+
+Node* WasmGraphBuilder::BuildCCall(MachineSignature* sig, Node** args) {
+ const size_t params = sig->parameter_count();
+ const size_t extra = 2; // effect and control inputs.
+ const size_t count = 1 + params + extra;
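+ // Final input layout: [function, params..., effect, control].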
+
+ // Reallocate the buffer to make space for extra inputs.
+ args = Realloc(args, count);
+
+ // Add effect and control inputs.
+ args[params + 1] = *effect_;
+ args[params + 2] = *control_;
+
+ CallDescriptor* desc =
+ Linkage::GetSimplifiedCDescriptor(jsgraph()->zone(), sig);
+
+ const Operator* op = jsgraph()->common()->Call(desc);
+ Node* call = graph()->NewNode(op, static_cast<int>(count), args);
+ *effect_ = call;
+ return call;
+}
Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args) {
const size_t params = sig->parameter_count();
@@ -1373,8 +1484,9 @@ Node* WasmGraphBuilder::BuildWasmCall(wasm::FunctionSig* sig, Node** args) {
args[params + 1] = *effect_;
args[params + 2] = *control_;
- const Operator* op = jsgraph()->common()->Call(
- module_->GetWasmCallDescriptor(jsgraph()->zone(), sig));
+ CallDescriptor* descriptor =
+ wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
+ const Operator* op = jsgraph()->common()->Call(descriptor);
Node* call = graph()->NewNode(op, static_cast<int>(count), args);
*effect_ = call;
@@ -1392,23 +1504,38 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, Node** args) {
return BuildWasmCall(sig, args);
}
+Node* WasmGraphBuilder::CallImport(uint32_t index, Node** args) {
+ DCHECK_NULL(args[0]);
+
+ // Add code object as constant.
+ args[0] = Constant(module_->GetImportCode(index));
+ wasm::FunctionSig* sig = module_->GetImportSignature(index);
+
+ return BuildWasmCall(sig, args);
+}
Node* WasmGraphBuilder::CallIndirect(uint32_t index, Node** args) {
DCHECK_NOT_NULL(args[0]);
+ DCHECK(module_ && module_->instance);
MachineOperatorBuilder* machine = jsgraph()->machine();
// Compute the code object by loading it from the function table.
Node* key = args[0];
- Node* table = FunctionTable();
// Bounds check the index.
int table_size = static_cast<int>(module_->FunctionTableSize());
- {
+ if (table_size > 0) {
+ // Bounds check against the table size.
Node* size = Int32Constant(static_cast<int>(table_size));
Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
trap_->AddTrapIfFalse(kTrapFuncInvalid, in_bounds);
+ } else {
+ // No function table. Generate a trap and return a constant.
+ trap_->AddTrapIfFalse(kTrapFuncInvalid, Int32Constant(0));
+ return trap_->GetTrapValue(module_->GetSignature(index));
}
+ Node* table = FunctionTable();
// Load signature from the table and check.
// The table is a FixedArray; signatures are encoded as SMIs.
@@ -1546,7 +1673,8 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
args[pos++] = *control_;
// Call the WASM code.
- CallDescriptor* desc = module_->GetWasmCallDescriptor(jsgraph()->zone(), sig);
+ CallDescriptor* desc =
+ wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
Node* jsval =
ToJS(call, context,
@@ -1631,18 +1759,23 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSFunction> function,
Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
+ DCHECK(module_ && module_->instance);
if (offset == 0) {
- if (!mem_buffer_)
- mem_buffer_ = jsgraph()->IntPtrConstant(module_->mem_start);
+ if (!mem_buffer_) {
+ mem_buffer_ = jsgraph()->IntPtrConstant(
+ reinterpret_cast<uintptr_t>(module_->instance->mem_start));
+ }
return mem_buffer_;
} else {
- return jsgraph()->IntPtrConstant(module_->mem_start + offset);
+ return jsgraph()->IntPtrConstant(
+ reinterpret_cast<uintptr_t>(module_->instance->mem_start + offset));
}
}
Node* WasmGraphBuilder::MemSize(uint32_t offset) {
- int32_t size = static_cast<int>(module_->mem_end - module_->mem_start);
+ DCHECK(module_ && module_->instance);
+ uint32_t size = static_cast<uint32_t>(module_->instance->mem_size);
if (offset == 0) {
if (!mem_size_) mem_size_ = jsgraph()->Int32Constant(size);
return mem_size_;
@@ -1653,18 +1786,21 @@ Node* WasmGraphBuilder::MemSize(uint32_t offset) {
Node* WasmGraphBuilder::FunctionTable() {
+ DCHECK(module_ && module_->instance &&
+ !module_->instance->function_table.is_null());
if (!function_table_) {
- DCHECK(!module_->function_table.is_null());
- function_table_ = jsgraph()->Constant(module_->function_table);
+ function_table_ = jsgraph()->Constant(module_->instance->function_table);
}
return function_table_;
}
Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
+ DCHECK(module_ && module_->instance && module_->instance->globals_start);
MachineType mem_type = module_->GetGlobalType(index);
Node* addr = jsgraph()->IntPtrConstant(
- module_->globals_area + module_->module->globals->at(index).offset);
+ reinterpret_cast<uintptr_t>(module_->instance->globals_start +
+ module_->module->globals->at(index).offset));
const Operator* op = jsgraph()->machine()->Load(mem_type);
Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), *effect_,
*control_);
@@ -1674,9 +1810,11 @@ Node* WasmGraphBuilder::LoadGlobal(uint32_t index) {
Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
+ DCHECK(module_ && module_->instance && module_->instance->globals_start);
MachineType mem_type = module_->GetGlobalType(index);
Node* addr = jsgraph()->IntPtrConstant(
- module_->globals_area + module_->module->globals->at(index).offset);
+ reinterpret_cast<uintptr_t>(module_->instance->globals_start +
+ module_->module->globals->at(index).offset));
const Operator* op = jsgraph()->machine()->Store(
StoreRepresentation(mem_type.representation(), kNoWriteBarrier));
Node* node = graph()->NewNode(op, addr, jsgraph()->Int32Constant(0), val,
@@ -1689,12 +1827,11 @@ Node* WasmGraphBuilder::StoreGlobal(uint32_t index, Node* val) {
void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
uint32_t offset) {
// TODO(turbofan): fold bounds checks for constant indexes.
- CHECK_GE(module_->mem_end, module_->mem_start);
- ptrdiff_t size = module_->mem_end - module_->mem_start;
+ DCHECK(module_ && module_->instance);
+ size_t size = module_->instance->mem_size;
byte memsize = wasm::WasmOpcodes::MemSize(memtype);
Node* cond;
- if (static_cast<ptrdiff_t>(offset) >= size ||
- static_cast<ptrdiff_t>(offset + memsize) > size) {
+ if (offset >= size || (static_cast<uint64_t>(offset) + memsize) > size) {
// The access will always throw.
cond = jsgraph()->Int32Constant(0);
} else {
@@ -1782,6 +1919,35 @@ Node* WasmGraphBuilder::String(const char* string) {
Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
+void WasmGraphBuilder::Int64LoweringForTesting() {
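+ // On 32-bit platforms, lower all i64 operations to pairs of i32 operations
+ // before instruction selection.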
+ if (kPointerSize == 4) {
+ Int64Lowering r(jsgraph()->graph(), jsgraph()->machine(),
+ jsgraph()->common(), jsgraph()->zone(),
+ function_signature_);
+ r.LowerGraph();
+ }
+}
+
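+// Notify the logger and CPU profiler about newly compiled WASM code so that
+// it shows up in code-event logs and profiles.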
+static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+ CompilationInfo* info,
+ const char* message, uint32_t index,
+ const char* func_name) {
+ Isolate* isolate = info->isolate();
+ if (isolate->logger()->is_logging_code_events() ||
+ isolate->cpu_profiler()->is_profiling()) {
+ ScopedVector<char> buffer(128);
+ SNPrintF(buffer, "%s#%d:%s", message, index, func_name);
+ Handle<String> name_str =
+ isolate->factory()->NewStringFromAsciiChecked(buffer.start());
+ Handle<String> script_str =
+ isolate->factory()->NewStringFromAsciiChecked("(WASM)");
+ Handle<Code> code = info->code();
+ Handle<SharedFunctionInfo> shared =
+ isolate->factory()->NewSharedFunctionInfo(name_str, code, false);
+ PROFILE(isolate,
+ CodeCreateEvent(tag, *code, *shared, info, *script_str, 0, 0));
+ }
+}
Handle<JSFunction> CompileJSToWasmWrapper(
Isolate* isolate, wasm::ModuleEnv* module, Handle<String> name,
@@ -1849,38 +2015,42 @@ Handle<JSFunction> CompileJSToWasmWrapper(
module->GetFunctionSignature(index)->parameter_count());
CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
&zone, false, params + 1, CallDescriptor::kNoFlags);
- CompilationInfo info("js-to-wasm", isolate, &zone);
// TODO(titzer): this is technically a WASM wrapper, not a wasm function.
- info.set_output_code_kind(Code::WASM_FUNCTION);
+ Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+ bool debugging =
+#if DEBUG
+ true;
+#else
+ FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
+#endif
+ const char* func_name = "js-to-wasm";
+
+ static unsigned id = 0;
+ Vector<char> buffer;
+ if (debugging) {
+ buffer = Vector<char>::New(128);
+ SNPrintF(buffer, "js-to-wasm#%d", id);
+ func_name = buffer.start();
+ }
+
+ CompilationInfo info(func_name, isolate, &zone, flags);
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
-
-#ifdef ENABLE_DISASSEMBLER
- // Disassemble the wrapper code for debugging.
- if (!code.is_null() && FLAG_print_opt_code) {
- Vector<char> buffer;
- const char* name = "";
- if (func->name_offset > 0) {
- const byte* ptr = module->module->module_start + func->name_offset;
- name = reinterpret_cast<const char*>(ptr);
- }
- SNPrintF(buffer, "JS->WASM function wrapper #%d:%s", index, name);
- OFStream os(stdout);
- code->Disassemble(buffer.start(), os);
+ if (debugging) {
+ buffer.Dispose();
}
-#endif
+
+ RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "js-to-wasm", index,
+ module->module->GetName(func->name_offset));
// Set the JSFunction's machine code.
function->set_code(*code);
}
return function;
}
-
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
Handle<JSFunction> function,
- uint32_t index) {
- wasm::WasmFunction* func = &module->module->functions->at(index);
-
+ wasm::FunctionSig* sig, const char* name) {
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
@@ -1894,11 +2064,11 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
Node* control = nullptr;
Node* effect = nullptr;
- WasmGraphBuilder builder(&zone, &jsgraph, func->sig);
+ WasmGraphBuilder builder(&zone, &jsgraph, sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.set_module(module);
- builder.BuildWasmToJSWrapper(function, func->sig);
+ builder.BuildWasmToJSWrapper(function, sig);
Handle<Code> code = Handle<Code>::null();
{
@@ -1923,26 +2093,33 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
}
// Schedule and compile to machine code.
- CallDescriptor* incoming = module->GetWasmCallDescriptor(&zone, func->sig);
- CompilationInfo info("wasm-to-js", isolate, &zone);
+ CallDescriptor* incoming =
+ wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
// TODO(titzer): this is technically a WASM wrapper, not a wasm function.
- info.set_output_code_kind(Code::WASM_FUNCTION);
- code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+ Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+ bool debugging =
+#if DEBUG
+ true;
+#else
+ FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
+#endif
+ const char* func_name = "wasm-to-js";
+ static unsigned id = 0;
+ Vector<char> buffer;
+ if (debugging) {
+ buffer = Vector<char>::New(128);
+ SNPrintF(buffer, "wasm-to-js#%d", id);
+ func_name = buffer.start();
+ }
-#ifdef ENABLE_DISASSEMBLER
- // Disassemble the wrapper code for debugging.
- if (!code.is_null() && FLAG_print_opt_code) {
- Vector<char> buffer;
- const char* name = "";
- if (func->name_offset > 0) {
- const byte* ptr = module->module->module_start + func->name_offset;
- name = reinterpret_cast<const char*>(ptr);
- }
- SNPrintF(buffer, "WASM->JS function wrapper #%d:%s", index, name);
- OFStream os(stdout);
- code->Disassemble(buffer.start(), os);
+ CompilationInfo info(func_name, isolate, &zone, flags);
+ code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+ if (debugging) {
+ buffer.Dispose();
}
-#endif
+
+ RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, "wasm-to-js", 0,
+ name);
}
return code;
}
@@ -1951,25 +2128,21 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
// Helper function to compile a single function.
Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
wasm::ModuleEnv* module_env,
- const wasm::WasmFunction& function,
- int index) {
+ const wasm::WasmFunction& function) {
if (FLAG_trace_wasm_compiler || FLAG_trace_wasm_decode_time) {
- // TODO(titzer): clean me up a bit.
OFStream os(stdout);
- os << "Compiling WASM function #" << index << ":";
- if (function.name_offset > 0) {
- os << module_env->module->GetName(function.name_offset);
- }
+ os << "Compiling WASM function "
+ << wasm::WasmFunctionName(&function, module_env) << std::endl;
os << std::endl;
}
// Initialize the function environment for decoding.
wasm::FunctionEnv env;
env.module = module_env;
env.sig = function.sig;
- env.local_int32_count = function.local_int32_count;
- env.local_int64_count = function.local_int64_count;
- env.local_float32_count = function.local_float32_count;
- env.local_float64_count = function.local_float64_count;
+ env.local_i32_count = function.local_i32_count;
+ env.local_i64_count = function.local_i64_count;
+ env.local_f32_count = function.local_f32_count;
+ env.local_f64_count = function.local_f64_count;
env.SumLocals();
// Create a TF graph during decoding.
@@ -1993,35 +2166,49 @@ Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
os << "Compilation failed: " << result << std::endl;
}
// Add the function as another context for the exception
- Vector<char> buffer;
- SNPrintF(buffer, "Compiling WASM function #%d:%s failed:", index,
+ ScopedVector<char> buffer(128);
+ SNPrintF(buffer, "Compiling WASM function #%d:%s failed:",
+ function.func_index,
module_env->module->GetName(function.name_offset));
thrower.Failed(buffer.start(), result);
return Handle<Code>::null();
}
// Run the compiler pipeline to generate machine code.
- CallDescriptor* descriptor = const_cast<CallDescriptor*>(
- module_env->GetWasmCallDescriptor(&zone, function.sig));
- CompilationInfo info("wasm", isolate, &zone);
- info.set_output_code_kind(Code::WASM_FUNCTION);
+ CallDescriptor* descriptor =
+ wasm::ModuleEnv::GetWasmCallDescriptor(&zone, function.sig);
+ if (kPointerSize == 4) {
+ descriptor = module_env->GetI32WasmCallDescriptor(&zone, descriptor);
+ }
+ Code::Flags flags = Code::ComputeFlags(Code::WASM_FUNCTION);
+ // Add flags here if a meaningful name is helpful for debugging.
+ bool debugging =
+#if DEBUG
+ true;
+#else
+ FLAG_print_opt_code || FLAG_trace_turbo || FLAG_trace_turbo_graph;
+#endif
+ const char* func_name = "wasm";
+ Vector<char> buffer;
+ if (debugging) {
+ buffer = Vector<char>::New(128);
+ SNPrintF(buffer, "WASM_function_#%d:%s", function.func_index,
+ module_env->module->GetName(function.name_offset));
+ func_name = buffer.start();
+ }
+ CompilationInfo info(func_name, isolate, &zone, flags);
+
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, descriptor, &graph);
-
-#ifdef ENABLE_DISASSEMBLER
- // Disassemble the code for debugging.
- if (!code.is_null() && FLAG_print_opt_code) {
- Vector<char> buffer;
- const char* name = "";
- if (function.name_offset > 0) {
- const byte* ptr = module_env->module->module_start + function.name_offset;
- name = reinterpret_cast<const char*>(ptr);
- }
- SNPrintF(buffer, "WASM function #%d:%s", index, name);
- OFStream os(stdout);
- code->Disassemble(buffer.start(), os);
+ if (debugging) {
+ buffer.Dispose();
}
-#endif
+ if (!code.is_null()) {
+ RecordFunctionCompilation(
+ Logger::FUNCTION_TAG, &info, "WASM_function", function.func_index,
+ module_env->module->GetName(function.name_offset));
+ }
+
return code;
}
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 1a17a832e4..2e86b5600e 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -35,12 +35,12 @@ namespace compiler {
// Compiles a single function, producing a code object.
Handle<Code> CompileWasmFunction(wasm::ErrorThrower& thrower, Isolate* isolate,
wasm::ModuleEnv* module_env,
- const wasm::WasmFunction& function, int index);
+ const wasm::WasmFunction& function);
// Wraps a JS function, producing a code object that can be called from WASM.
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, wasm::ModuleEnv* module,
Handle<JSFunction> function,
- uint32_t index);
+ wasm::FunctionSig* sig, const char* name);
// Wraps a given wasm code object, producing a JSFunction that can be called
// from JavaScript.
@@ -100,6 +100,7 @@ class WasmGraphBuilder {
Node* Unreachable();
Node* CallDirect(uint32_t index, Node** args);
+ Node* CallImport(uint32_t index, Node** args);
Node* CallIndirect(uint32_t index, Node** args);
void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
void BuildWasmToJSWrapper(Handle<JSFunction> function,
@@ -132,6 +133,8 @@ class WasmGraphBuilder {
wasm::FunctionSig* GetFunctionSignature() { return function_signature_; }
+ void Int64LoweringForTesting();
+
private:
static const int kDefaultBufferSize = 16;
friend class WasmTrapHelper;
@@ -159,6 +162,7 @@ class WasmGraphBuilder {
Node* MemBuffer(uint32_t offset);
void BoundsCheckMem(MachineType memtype, Node* index, uint32_t offset);
+ Node* BuildCCall(MachineSignature* sig, Node** args);
Node* BuildWasmCall(wasm::FunctionSig* sig, Node** args);
Node* BuildF32Neg(Node* input);
Node* BuildF64Neg(Node* input);
@@ -176,6 +180,16 @@ class WasmGraphBuilder {
Node* BuildI32Popcnt(Node* input);
Node* BuildI64Ctz(Node* input);
Node* BuildI64Popcnt(Node* input);
+ Node* BuildRoundingInstruction(Node* input, ExternalReference ref,
+ MachineType type);
+ Node* BuildF32Trunc(Node* input);
+ Node* BuildF32Floor(Node* input);
+ Node* BuildF32Ceil(Node* input);
+ Node* BuildF32NearestInt(Node* input);
+ Node* BuildF64Trunc(Node* input);
+ Node* BuildF64Floor(Node* input);
+ Node* BuildF64Ceil(Node* input);
+ Node* BuildF64NearestInt(Node* input);
Node** Realloc(Node** buffer, size_t count) {
Node** buf = Buffer(count);
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index 92363dd430..3176fd3e2a 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -58,7 +58,7 @@ LinkageLocation stackloc(int i) {
// ===========================================================================
// == ia32 ===================================================================
// ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi, edi
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
#define GP_RETURN_REGISTERS eax, edx
#define FP_PARAM_REGISTERS xmm1, xmm2, xmm3, xmm4, xmm5, xmm6
#define FP_RETURN_REGISTERS xmm1, xmm2
@@ -76,7 +76,7 @@ LinkageLocation stackloc(int i) {
// ===========================================================================
// == x87 ====================================================================
// ===========================================================================
-#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi, edi
+#define GP_PARAM_REGISTERS eax, edx, ecx, ebx, esi
#define GP_RETURN_REGISTERS eax, edx
#define FP_RETURN_REGISTERS stX_0
@@ -191,15 +191,7 @@ struct Allocator {
};
} // namespace
-
-// General code uses the above configuration data.
-CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
- FunctionSig* fsig) {
- MachineSignature::Builder msig(zone, fsig->return_count(),
- fsig->parameter_count());
- LocationSignature::Builder locations(zone, fsig->return_count(),
- fsig->parameter_count());
-
+static Allocator GetReturnRegisters() {
#ifdef GP_RETURN_REGISTERS
static const Register kGPReturnRegisters[] = {GP_RETURN_REGISTERS};
static const int kGPReturnRegistersCount =
@@ -221,14 +213,10 @@ CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
Allocator rets(kGPReturnRegisters, kGPReturnRegistersCount,
kFPReturnRegisters, kFPReturnRegistersCount);
- // Add return location(s).
- const int return_count = static_cast<int>(locations.return_count_);
- for (int i = 0; i < return_count; i++) {
- LocalType ret = fsig->GetReturn(i);
- msig.AddReturn(MachineTypeFor(ret));
- locations.AddReturn(rets.Next(ret));
- }
+ return rets;
+}
+static Allocator GetParameterRegisters() {
#ifdef GP_PARAM_REGISTERS
static const Register kGPParamRegisters[] = {GP_PARAM_REGISTERS};
static const int kGPParamRegistersCount =
@@ -250,6 +238,29 @@ CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
Allocator params(kGPParamRegisters, kGPParamRegistersCount, kFPParamRegisters,
kFPParamRegistersCount);
+ return params;
+}
+
+// General code uses the above configuration data.
+CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
+ FunctionSig* fsig) {
+ MachineSignature::Builder msig(zone, fsig->return_count(),
+ fsig->parameter_count());
+ LocationSignature::Builder locations(zone, fsig->return_count(),
+ fsig->parameter_count());
+
+ Allocator rets = GetReturnRegisters();
+
+ // Add return location(s).
+ const int return_count = static_cast<int>(locations.return_count_);
+ for (int i = 0; i < return_count; i++) {
+ LocalType ret = fsig->GetReturn(i);
+ msig.AddReturn(MachineTypeFor(ret));
+ locations.AddReturn(rets.Next(ret));
+ }
+
+ Allocator params = GetParameterRegisters();
+
// Add register and/or stack parameter(s).
const int parameter_count = static_cast<int>(fsig->parameter_count());
for (int i = 0; i < parameter_count; i++) {
@@ -264,6 +275,7 @@ CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
// The target for WASM calls is always a code object.
MachineType target_type = MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
target_type, // target MachineType
@@ -275,8 +287,82 @@ CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
kCalleeSaveRegisters, // callee-saved registers
kCalleeSaveFPRegisters, // callee-saved fp regs
CallDescriptor::kUseNativeStack, // flags
- "c-call");
+ "wasm-call");
}
+
+CallDescriptor* ModuleEnv::GetI32WasmCallDescriptor(
+ Zone* zone, CallDescriptor* descriptor) {
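+ // On 32-bit targets every i64 value is lowered to a pair of i32 values, so
+ // rewrite the descriptor to give each Int64 slot two Int32 slots.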
+ const MachineSignature* signature = descriptor->GetMachineSignature();
+ size_t parameter_count = signature->parameter_count();
+ size_t return_count = signature->return_count();
+ for (size_t i = 0; i < signature->parameter_count(); i++) {
+ if (signature->GetParam(i) == MachineType::Int64()) {
+ // For each int64 input we get two int32 inputs.
+ parameter_count++;
+ }
+ }
+ for (size_t i = 0; i < signature->return_count(); i++) {
+ if (signature->GetReturn(i) == MachineType::Int64()) {
+ // For each int64 return we get two int32 returns.
+ return_count++;
+ }
+ }
+ if (parameter_count == signature->parameter_count() &&
+ return_count == signature->return_count()) {
+ // If there is no int64 parameter or return value, we can just return the
+ // original descriptor.
+ return descriptor;
+ }
+
+ MachineSignature::Builder msig(zone, return_count, parameter_count);
+ LocationSignature::Builder locations(zone, return_count, parameter_count);
+
+ Allocator rets = GetReturnRegisters();
+
+ for (size_t i = 0; i < signature->return_count(); i++) {
+ if (signature->GetReturn(i) == MachineType::Int64()) {
+ // For each int64 return we get two int32 returns.
+ msig.AddReturn(MachineType::Int32());
+ msig.AddReturn(MachineType::Int32());
+ locations.AddReturn(rets.Next(MachineRepresentation::kWord32));
+ locations.AddReturn(rets.Next(MachineRepresentation::kWord32));
+ } else {
+ msig.AddReturn(signature->GetReturn(i));
+ locations.AddReturn(rets.Next(signature->GetReturn(i).representation()));
+ }
+ }
+
+ Allocator params = GetParameterRegisters();
+
+ for (size_t i = 0; i < signature->parameter_count(); i++) {
+ if (signature->GetParam(i) == MachineType::Int64()) {
+ // For each int64 input we get two int32 inputs.
+ msig.AddParam(MachineType::Int32());
+ msig.AddParam(MachineType::Int32());
+ locations.AddParam(params.Next(MachineRepresentation::kWord32));
+ locations.AddParam(params.Next(MachineRepresentation::kWord32));
+ } else {
+ msig.AddParam(signature->GetParam(i));
+ locations.AddParam(params.Next(signature->GetParam(i).representation()));
+ }
+ }
+
+ return new (zone) CallDescriptor( // --
+ descriptor->kind(), // kind
+ descriptor->GetInputType(0), // target MachineType
+ descriptor->GetInputLocation(0), // target location
+ msig.Build(), // machine_sig
+ locations.Build(), // location_sig
+ params.stack_offset, // stack_parameter_count
+ descriptor->properties(), // properties
+ descriptor->CalleeSavedRegisters(), // callee-saved registers
+ descriptor->CalleeSavedFPRegisters(), // callee-saved fp regs
+ descriptor->flags(), // flags
+ descriptor->debug_name());
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
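// A worked example of the lowering GetI32WasmCallDescriptor performs on
// 32-bit targets (illustrative; the signature below is hypothetical):
//
//   i64 f(i64 a, f32 b)
//
// is rewritten so the register allocator sees one (low, high) pair of i32
// values per 64-bit value:
//
//   (i32, i32) f(i32 a_lo, i32 a_hi, f32 b)
//
// Non-i64 parameters and returns pass through unchanged, and when no i64
// occurs at all the original descriptor is returned as-is.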
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index be406fbad2..510c0c6a0c 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -209,15 +209,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
- if (mode_ > RecordWriteMode::kValueIsMap) {
- __ CheckPageFlag(value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingMask, zero,
- exit());
- }
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ exit());
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+ : OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- EMIT_REMEMBERED_SET, save_fp_mode);
+ remembered_set_action, save_fp_mode);
__ leap(scratch1_, operand_);
__ CallStub(&stub);
}
@@ -261,6 +262,32 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
} \
} while (0)
+#define ASSEMBLE_COMPARE(asm_instr) \
+ do { \
+ if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
+ size_t index = 0; \
+ Operand left = i.MemoryOperand(&index); \
+ if (HasImmediateInput(instr, index)) { \
+ __ asm_instr(left, i.InputImmediate(index)); \
+ } else { \
+ __ asm_instr(left, i.InputRegister(index)); \
+ } \
+ } else { \
+ if (HasImmediateInput(instr, 1)) { \
+ if (instr->InputAt(0)->IsRegister()) { \
+ __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
+ } else { \
+ __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
+ } \
+ } else { \
+ if (instr->InputAt(1)->IsRegister()) { \
+ __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
+ } else { \
+ __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
+ } \
+ } \
+ } \
+ } while (0)
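// Sketch of the operand shapes ASSEMBLE_COMPARE selects between
// (pseudo-assembly; registers and constants are made up):
//
//   memory, immediate:    cmpl [rbx+0x10], 42
//   memory, register:     cmpl [rbx+0x10], eax
//   register, immediate:  cmpl eax, 42
//   register, register:   cmpl eax, ebx      (or cmpl eax, [rsp+8])
//
// The addressing-mode field encoded in the instruction opcode selects the
// memory forms; the immediate checks pick among the remaining ones.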
#define ASSEMBLE_MULT(asm_instr) \
do { \
@@ -654,11 +681,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
- case kArchLazyBailout: {
- EnsureSpaceForLazyDeopt();
- RecordCallPosition(instr);
- break;
- }
case kArchPrepareCallCFunction: {
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
@@ -712,6 +734,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchFramePointer:
__ movq(i.OutputRegister(), rbp);
break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->frame()->needs_frame()) {
+ __ movq(i.OutputRegister(), Operand(rbp, 0));
+ } else {
+ __ movq(i.OutputRegister(), rbp);
+ }
+ break;
case kArchTruncateDoubleToI: {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
@@ -740,6 +769,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ bind(ool->exit());
break;
}
+ case kArchStackSlot: {
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ Register base;
+ if (offset.from_stack_pointer()) {
+ base = rsp;
+ } else {
+ base = rbp;
+ }
+ __ leaq(i.OutputRegister(), Operand(base, offset.offset()));
+ break;
+ }
case kX64Add32:
ASSEMBLE_BINOP(addl);
break;
@@ -759,16 +800,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_BINOP(andq);
break;
case kX64Cmp32:
- ASSEMBLE_BINOP(cmpl);
+ ASSEMBLE_COMPARE(cmpl);
break;
case kX64Cmp:
- ASSEMBLE_BINOP(cmpq);
+ ASSEMBLE_COMPARE(cmpq);
break;
case kX64Test32:
- ASSEMBLE_BINOP(testl);
+ ASSEMBLE_COMPARE(testl);
break;
case kX64Test:
- ASSEMBLE_BINOP(testq);
+ ASSEMBLE_COMPARE(testq);
break;
case kX64Imul32:
ASSEMBLE_MULT(imull);
@@ -947,6 +988,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
+ case kSSEFloat32ToInt32:
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Cvttss2si(i.OutputRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttss2si(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+ case kSSEFloat32ToUint32: {
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ Cvttss2siq(i.OutputRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
+ }
+ __ AssertZeroExtended(i.OutputRegister());
+ break;
+ }
case kSSEFloat64Cmp:
ASSEMBLE_SSE_BINOP(Ucomisd);
break;
@@ -1197,6 +1254,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
+ case kSSEInt32ToFloat32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Cvtlsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ Cvtlsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
case kSSEInt64ToFloat32:
if (instr->InputAt(0)->IsRegister()) {
__ Cvtqsi2ss(i.OutputDoubleRegister(), i.InputRegister(0));
@@ -1237,6 +1301,14 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ Cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
break;
+ case kSSEUint32ToFloat32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movl(kScratchRegister, i.InputRegister(0));
+ } else {
+ __ movl(kScratchRegister, i.InputOperand(0));
+ }
+ __ Cvtqsi2ss(i.OutputDoubleRegister(), kScratchRegister);
+ break;
case kSSEFloat64ExtractLowWord32:
if (instr->InputAt(0)->IsDoubleStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0));
@@ -1828,8 +1900,6 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- // TODO(titzer): cannot address target function == local #-1
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
stack_shrink_slots -=
static_cast<int>(OsrHelper(info()).UnoptimizedFrameSlots());
}
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 8e8e7652c3..6d5e77ccee 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -63,6 +63,8 @@ namespace compiler {
V(SSEFloat32Max) \
V(SSEFloat32Min) \
V(SSEFloat32ToFloat64) \
+ V(SSEFloat32ToInt32) \
+ V(SSEFloat32ToUint32) \
V(SSEFloat32Round) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
@@ -84,11 +86,13 @@ namespace compiler {
V(SSEFloat32ToUint64) \
V(SSEFloat64ToUint64) \
V(SSEInt32ToFloat64) \
+ V(SSEInt32ToFloat32) \
V(SSEInt64ToFloat32) \
V(SSEInt64ToFloat64) \
V(SSEUint64ToFloat32) \
V(SSEUint64ToFloat64) \
V(SSEUint32ToFloat64) \
+ V(SSEUint32ToFloat32) \
V(SSEFloat64ExtractLowWord32) \
V(SSEFloat64ExtractHighWord32) \
V(SSEFloat64InsertLowWord32) \
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index f8537c879c..1f10b51bcc 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -79,6 +79,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat64Max:
case kSSEFloat64Min:
case kSSEFloat64ToFloat32:
+ case kSSEFloat32ToInt32:
+ case kSSEFloat32ToUint32:
case kSSEFloat64ToInt32:
case kSSEFloat64ToUint32:
case kSSEFloat64ToInt64:
@@ -86,11 +88,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat64ToUint64:
case kSSEFloat32ToUint64:
case kSSEInt32ToFloat64:
+ case kSSEInt32ToFloat32:
case kSSEInt64ToFloat32:
case kSSEInt64ToFloat64:
case kSSEUint64ToFloat32:
case kSSEUint64ToFloat64:
case kSSEUint32ToFloat64:
+ case kSSEUint32ToFloat32:
case kSSEFloat64ExtractLowWord32:
case kSSEFloat64ExtractHighWord32:
case kSSEFloat64InsertLowWord32:
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index c47a42eefe..ac0c7f7bf2 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -133,6 +133,7 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord64:
opcode = kX64Movq;
break;
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -219,6 +220,7 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord64:
opcode = kX64Movq;
break;
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -264,8 +266,9 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- case MachineRepresentation::kBit:
- case MachineRepresentation::kTagged:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -316,8 +319,9 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- case MachineRepresentation::kBit:
- case MachineRepresentation::kTagged:
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -622,6 +626,12 @@ void InstructionSelector::VisitWord32Ctz(Node* node) {
}
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitWord32Popcnt(Node* node) {
X64OperandGenerator g(this);
Emit(kX64Popcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -734,10 +744,11 @@ void VisitMulHigh(InstructionSelector* selector, Node* node,
if (selector->IsLive(left) && !selector->IsLive(right)) {
std::swap(left, right);
}
+ InstructionOperand temps[] = {g.TempRegister(rax)};
// TODO(turbofan): We use UseUniqueRegister here to improve register
// allocation.
selector->Emit(opcode, g.DefineAsFixed(node, rdx), g.UseFixed(left, rax),
- g.UseUniqueRegister(right));
+ g.UseUniqueRegister(right), arraysize(temps), temps);
}
@@ -857,6 +868,18 @@ void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
}
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat32ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat32ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
X64OperandGenerator g(this);
InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
@@ -1046,6 +1069,12 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
}
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEInt32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEInt64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -1058,6 +1087,12 @@ void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
}
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
@@ -1303,6 +1338,48 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
namespace {
+void VisitCompareWithMemoryOperand(InstructionSelector* selector,
+ InstructionCode opcode, Node* left,
+ InstructionOperand right,
+ FlagsContinuation* cont) {
+ DCHECK(left->opcode() == IrOpcode::kLoad);
+ X64OperandGenerator g(selector);
+ size_t input_count = 0;
+ InstructionOperand inputs[6];
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
+ opcode |= AddressingModeField::encode(addressing_mode);
+ opcode = cont->Encode(opcode);
+ inputs[input_count++] = right;
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ selector->Emit(opcode, 0, nullptr, input_count, inputs);
+ } else {
+ DCHECK(cont->IsSet());
+ InstructionOperand output = g.DefineAsRegister(cont->result());
+ selector->Emit(opcode, 1, &output, input_count, inputs);
+ }
+}
+
+// Determines if {input} of {node} can be replaced by a memory operand.
+bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
+ Node* node, Node* input) {
+ if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
+ return false;
+ }
+ MachineRepresentation rep =
+ LoadRepresentationOf(input->op()).representation();
+ if (rep == MachineRepresentation::kWord64 ||
+ rep == MachineRepresentation::kTagged) {
+ return opcode == kX64Cmp || opcode == kX64Test;
+ } else if (rep == MachineRepresentation::kWord32) {
+ return opcode == kX64Cmp32 || opcode == kX64Test32;
+ }
+ return false;
+}
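// Sketch of the optimization these two helpers enable (illustrative,
// pseudo-assembly): a load that is only used by a comparison is folded into
// the compare itself,
//
//   t = Load[rbx + 8]                    cmpl [rbx+8], 42
//   Word32Equal(t, 42)         =>        j.cc ...
//
// instead of a separate movl followed by cmpl. CanUseMemoryOperand also
// keeps the widths consistent: 64-bit/tagged loads may only fold into
// kX64Cmp/kX64Test, and 32-bit loads only into kX64Cmp32/kX64Test32.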
+
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
@@ -1330,26 +1407,41 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}
-
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
X64OperandGenerator g(selector);
- Node* const left = node->InputAt(0);
- Node* const right = node->InputAt(1);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
- // Match immediates on left or right side of comparison.
+ // If one of the two inputs is an immediate, make sure it's on the right.
+ if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ std::swap(left, right);
+ }
+
+ // Match immediates on right side of comparison.
if (g.CanBeImmediate(right)) {
- VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
- } else if (g.CanBeImmediate(left)) {
+ if (CanUseMemoryOperand(selector, opcode, node, left)) {
+ return VisitCompareWithMemoryOperand(selector, opcode, left,
+ g.UseImmediate(right), cont);
+ }
+ return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
+ cont);
+ }
+
+ if (g.CanBeBetterLeftOperand(right)) {
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
- } else {
- VisitCompare(selector, opcode, left, right, cont,
- node->op()->HasProperty(Operator::kCommutative));
+ std::swap(left, right);
}
-}
+ if (CanUseMemoryOperand(selector, opcode, node, left)) {
+ return VisitCompareWithMemoryOperand(selector, opcode, left,
+ g.UseRegister(right), cont);
+ }
+ return VisitCompare(selector, opcode, left, right, cont,
+ node->op()->HasProperty(Operator::kCommutative));
+}
// Shared routine for 64-bit word comparison operations.
void VisitWord64Compare(InstructionSelector* selector, Node* node,
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
index a7b7246d3d..15755703e0 100644
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -9,6 +9,7 @@
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
+#include "src/frames.h"
#include "src/x87/assembler-x87.h"
#include "src/x87/frames-x87.h"
#include "src/x87/macro-assembler-x87.h"
@@ -50,7 +51,7 @@ class X87OperandConverter : public InstructionOperandConverter {
Operand ToMaterializableOperand(int materializable_offset) {
FrameOffset offset = frame_access_state()->GetFrameOffset(
- Frame::FPOffsetToSlot(materializable_offset));
+ FPOffsetToFrameSlot(materializable_offset));
return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
}
@@ -245,15 +246,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
- if (mode_ > RecordWriteMode::kValueIsMap) {
- __ CheckPageFlag(value_, scratch0_,
- MemoryChunk::kPointersToHereAreInterestingMask, zero,
- exit());
- }
+ __ CheckPageFlag(value_, scratch0_,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero,
+ exit());
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
+ : OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
- EMIT_REMEMBERED_SET, save_fp_mode);
+ remembered_set_action, save_fp_mode);
__ lea(scratch1_, operand_);
__ CallStub(&stub);
}
@@ -462,14 +464,6 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
frame_access_state()->ClearSPDelta();
break;
}
- case kArchLazyBailout: {
- EnsureSpaceForLazyDeopt();
- RecordCallPosition(instr);
- // Lazy Bailout entry, need to re-initialize FPU state.
- __ fninit();
- __ fld1();
- break;
- }
case kArchPrepareCallCFunction: {
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
@@ -559,6 +553,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchStackPointer:
__ mov(i.OutputRegister(), esp);
break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->frame()->needs_frame()) {
+ __ mov(i.OutputRegister(), Operand(ebp, 0));
+ } else {
+ __ mov(i.OutputRegister(), ebp);
+ }
+ break;
case kArchTruncateDoubleToI: {
if (!instr->InputAt(0)->IsDoubleRegister()) {
__ fld_d(i.InputOperand(0));
@@ -587,6 +588,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ bind(ool->exit());
break;
}
+ case kArchStackSlot: {
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ Register base;
+ if (offset.from_stack_pointer()) {
+ base = esp;
+ } else {
+ base = ebp;
+ }
+ __ lea(i.OutputRegister(), Operand(base, offset.offset()));
+ break;
+ }
case kX87Add:
if (HasImmediateInput(instr, 1)) {
__ add(i.InputOperand(0), i.InputImmediate(1));
@@ -602,17 +615,37 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
case kX87Cmp:
- if (HasImmediateInput(instr, 1)) {
- __ cmp(i.InputOperand(0), i.InputImmediate(1));
+ if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ cmp(operand, i.InputImmediate(index));
+ } else {
+ __ cmp(operand, i.InputRegister(index));
+ }
} else {
- __ cmp(i.InputRegister(0), i.InputOperand(1));
+ if (HasImmediateInput(instr, 1)) {
+ __ cmp(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ cmp(i.InputRegister(0), i.InputOperand(1));
+ }
}
break;
case kX87Test:
- if (HasImmediateInput(instr, 1)) {
- __ test(i.InputOperand(0), i.InputImmediate(1));
+ if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ if (HasImmediateInput(instr, index)) {
+ __ test(operand, i.InputImmediate(index));
+ } else {
+ __ test(i.InputRegister(index), operand);
+ }
} else {
- __ test(i.InputRegister(0), i.InputOperand(1));
+ if (HasImmediateInput(instr, 1)) {
+ __ test(i.InputOperand(0), i.InputImmediate(1));
+ } else {
+ __ test(i.InputRegister(0), i.InputOperand(1));
+ }
}
break;
case kX87Imul:
@@ -1062,6 +1095,66 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ lea(esp, Operand(esp, kDoubleSize));
break;
}
+ case kX87Int32ToFloat32: {
+ InstructionOperand* input = instr->InputAt(0);
+ DCHECK(input->IsRegister() || input->IsStackSlot());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ if (input->IsRegister()) {
+ Register input_reg = i.InputRegister(0);
+ __ push(input_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(input_reg);
+ } else {
+ __ fild_s(i.InputOperand(0));
+ }
+ break;
+ }
+ case kX87Uint32ToFloat32: {
+ InstructionOperand* input = instr->InputAt(0);
+ DCHECK(input->IsRegister() || input->IsStackSlot());
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(1);
+ }
+ __ fstp(0);
+ Label msb_set_src;
+ Label jmp_return;
+ // Put the input integer into eax (temporarily).
+ __ push(eax);
+ if (input->IsRegister())
+ __ mov(eax, i.InputRegister(0));
+ else
+ __ mov(eax, i.InputOperand(0));
+
+ __ test(eax, eax);
+ __ j(sign, &msb_set_src, Label::kNear);
+ __ push(eax);
+ __ fild_s(Operand(esp, 0));
+ __ pop(eax);
+
+ __ jmp(&jmp_return, Label::kNear);
+ __ bind(&msb_set_src);
+ // Need another temp reg
+ __ push(ebx);
+ __ mov(ebx, eax);
+ __ shr(eax, 1);
+ // Recover the least significant bit to avoid rounding errors.
+ __ and_(ebx, Immediate(1));
+ __ or_(eax, ebx);
+ __ push(eax);
+ __ fild_s(Operand(esp, 0));
+ __ pop(eax);
+ __ fld(0);
+ __ faddp();
+ // Restore ebx.
+ __ pop(ebx);
+ __ bind(&jmp_return);
+ // Restore eax.
+ __ pop(eax);
+ break;
+ }
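// Scalar sketch of the kX87Uint32ToFloat32 sequence above (illustrative,
// not the emitted code):
//
//   #include <cstdint>
//   float Uint32ToFloat32(uint32_t x) {
//     if (static_cast<int32_t>(x) >= 0) {
//       // MSB clear: fild_s interprets the bits correctly as a signed load.
//       return static_cast<float>(static_cast<int32_t>(x));
//     }
//     // MSB set: halve the value, folding the lost LSB back in so the
//     // final round-to-nearest result matches a direct conversion.
//     uint32_t half = (x >> 1) | (x & 1);
//     float f = static_cast<float>(static_cast<int32_t>(half));
//     return f + f;  // fld(0); faddp: doubles the value back up
//   }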
case kX87Int32ToFloat64: {
InstructionOperand* input = instr->InputAt(0);
DCHECK(input->IsRegister() || input->IsStackSlot());
@@ -1104,6 +1197,36 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ LoadUint32NoSSE2(i.InputRegister(0));
break;
}
+ case kX87Float32ToInt32: {
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fld_s(i.InputOperand(0));
+ }
+ __ TruncateX87TOSToI(i.OutputRegister(0));
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fstp(0);
+ }
+ break;
+ }
+ case kX87Float32ToUint32: {
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fld_s(i.InputOperand(0));
+ }
+ Label success;
+ __ TruncateX87TOSToI(i.OutputRegister(0));
+ __ test(i.OutputRegister(0), i.OutputRegister(0));
+ __ j(positive, &success);
+ __ push(Immediate(INT32_MIN));
+ __ fild_s(Operand(esp, 0));
+ __ lea(esp, Operand(esp, kPointerSize));
+ __ faddp();
+ __ TruncateX87TOSToI(i.OutputRegister(0));
+ __ or_(i.OutputRegister(0), Immediate(0x80000000));
+ __ bind(&success);
+ if (!instr->InputAt(0)->IsDoubleRegister()) {
+ __ fstp(0);
+ }
+ break;
+ }
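// Scalar sketch of the kX87Float32ToUint32 sequence above (illustrative,
// not the emitted code):
//
//   #include <cstdint>
//   uint32_t Float32ToUint32(float f) {
//     int32_t r = static_cast<int32_t>(f);          // TruncateX87TOSToI
//     if (r >= 0) return static_cast<uint32_t>(r);  // fast path: in range
//     // Inputs >= 2^31 overflow the signed truncation; bias the input down
//     // by 2^31, truncate again, then restore the high bit.
//     r = static_cast<int32_t>(f + static_cast<float>(INT32_MIN));
//     return static_cast<uint32_t>(r) | 0x80000000u;
//   }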
case kX87Float64ToInt32: {
if (!instr->InputAt(0)->IsDoubleRegister()) {
__ fld_d(i.InputOperand(0));
@@ -1817,8 +1940,6 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
- // TODO(titzer): cannot address target function == local #-1
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
diff --git a/deps/v8/src/compiler/x87/instruction-codes-x87.h b/deps/v8/src/compiler/x87/instruction-codes-x87.h
index b498d9c59c..e5d0912910 100644
--- a/deps/v8/src/compiler/x87/instruction-codes-x87.h
+++ b/deps/v8/src/compiler/x87/instruction-codes-x87.h
@@ -53,10 +53,14 @@ namespace compiler {
V(X87Float64Max) \
V(X87Float64Min) \
V(X87Float64Abs) \
+ V(X87Int32ToFloat32) \
+ V(X87Uint32ToFloat32) \
V(X87Int32ToFloat64) \
V(X87Float32ToFloat64) \
V(X87Uint32ToFloat64) \
V(X87Float64ToInt32) \
+ V(X87Float32ToInt32) \
+ V(X87Float32ToUint32) \
V(X87Float64ToFloat32) \
V(X87Float64ToUint32) \
V(X87Float64ExtractHighWord32) \
@@ -84,7 +88,6 @@ namespace compiler {
V(X87Poke) \
V(X87StackCheck)
-
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index cff4aafb27..079d5d2026 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -151,7 +151,8 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kWord32:
opcode = kX87Movl;
break;
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -236,7 +237,8 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kWord32:
opcode = kX87Movl;
break;
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -288,9 +290,10 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedLoadFloat64;
break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -334,9 +337,10 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case MachineRepresentation::kFloat64:
opcode = kCheckedStoreFloat64;
break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kSimd128: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
return;
@@ -469,9 +473,10 @@ namespace {
void VisitMulHigh(InstructionSelector* selector, Node* node,
ArchOpcode opcode) {
X87OperandGenerator g(selector);
- selector->Emit(opcode, g.DefineAsFixed(node, edx),
- g.UseFixed(node->InputAt(0), eax),
- g.UseUniqueRegister(node->InputAt(1)));
+ InstructionOperand temps[] = {g.TempRegister(eax)};
+ selector->Emit(
+ opcode, g.DefineAsFixed(node, edx), g.UseFixed(node->InputAt(0), eax),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}
@@ -549,6 +554,9 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitWord32Popcnt(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -655,6 +663,20 @@ void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
}
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Int32ToFloat32, g.DefineAsFixed(node, stX_0),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Uint32ToFloat32, g.DefineAsFixed(node, stX_0),
+ g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Int32ToFloat64, g.DefineAsFixed(node, stX_0),
@@ -669,6 +691,18 @@ void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
}
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87Float32ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
X87OperandGenerator g(this);
Emit(kX87Float64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -959,6 +993,46 @@ bool InstructionSelector::IsTailCallAddressImmediate() { return true; }
namespace {
+void VisitCompareWithMemoryOperand(InstructionSelector* selector,
+ InstructionCode opcode, Node* left,
+ InstructionOperand right,
+ FlagsContinuation* cont) {
+ DCHECK(left->opcode() == IrOpcode::kLoad);
+ X87OperandGenerator g(selector);
+ size_t input_count = 0;
+ InstructionOperand inputs[6];
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
+ opcode |= AddressingModeField::encode(addressing_mode);
+ opcode = cont->Encode(opcode);
+ inputs[input_count++] = right;
+
+ if (cont->IsBranch()) {
+ inputs[input_count++] = g.Label(cont->true_block());
+ inputs[input_count++] = g.Label(cont->false_block());
+ selector->Emit(opcode, 0, nullptr, input_count, inputs);
+ } else {
+ DCHECK(cont->IsSet());
+ InstructionOperand output = g.DefineAsRegister(cont->result());
+ selector->Emit(opcode, 1, &output, input_count, inputs);
+ }
+}
+
+// Determines if {input} of {node} can be replaced by a memory operand.
+bool CanUseMemoryOperand(InstructionSelector* selector, InstructionCode opcode,
+ Node* node, Node* input) {
+ if (input->opcode() != IrOpcode::kLoad || !selector->CanCover(node, input)) {
+ return false;
+ }
+ MachineRepresentation load_representation =
+ LoadRepresentationOf(input->op()).representation();
+ if (load_representation == MachineRepresentation::kWord32 ||
+ load_representation == MachineRepresentation::kTagged) {
+ return opcode == kX87Cmp || opcode == kX87Test;
+ }
+ return false;
+}
+
// Shared routine for multiple compare operations.
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
@@ -1020,26 +1094,41 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
}
}
-
// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
InstructionCode opcode, FlagsContinuation* cont) {
X87OperandGenerator g(selector);
- Node* const left = node->InputAt(0);
- Node* const right = node->InputAt(1);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
- // Match immediates on left or right side of comparison.
+ // If one of the two inputs is an immediate, make sure it's on the right.
+ if (!g.CanBeImmediate(right) && g.CanBeImmediate(left)) {
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ std::swap(left, right);
+ }
+
+ // Match immediates on right side of comparison.
if (g.CanBeImmediate(right)) {
- VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
- } else if (g.CanBeImmediate(left)) {
+ if (CanUseMemoryOperand(selector, opcode, node, left)) {
+ return VisitCompareWithMemoryOperand(selector, opcode, left,
+ g.UseImmediate(right), cont);
+ }
+ return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
+ cont);
+ }
+
+ if (g.CanBeBetterLeftOperand(right)) {
if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
- } else {
- VisitCompare(selector, opcode, left, right, cont,
- node->op()->HasProperty(Operator::kCommutative));
+ std::swap(left, right);
}
-}
+ if (CanUseMemoryOperand(selector, opcode, node, left)) {
+ return VisitCompareWithMemoryOperand(selector, opcode, left,
+ g.UseRegister(right), cont);
+ }
+ return VisitCompare(selector, opcode, left, right, cont,
+ node->op()->HasProperty(Operator::kCommutative));
+}
void VisitWordCompare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
diff --git a/deps/v8/src/context-measure.cc b/deps/v8/src/context-measure.cc
index 0b87e39614..342362983d 100644
--- a/deps/v8/src/context-measure.cc
+++ b/deps/v8/src/context-measure.cc
@@ -30,7 +30,7 @@ bool ContextMeasure::IsShared(HeapObject* object) {
if (object->IsSharedFunctionInfo()) return true;
if (object->IsScopeInfo()) return true;
if (object->IsCode() && !Code::cast(object)->is_optimized_code()) return true;
- if (object->IsExecutableAccessorInfo()) return true;
+ if (object->IsAccessorInfo()) return true;
if (object->IsWeakCell()) return true;
return false;
}
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index 67257ae0d7..c26ce5bd47 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -33,7 +33,8 @@ void ScriptContextTable::set_used(int used) {
Handle<Context> ScriptContextTable::GetContext(Handle<ScriptContextTable> table,
int i) {
DCHECK(i < table->used());
- return Handle<Context>::cast(FixedArray::get(table, i + kFirstContextSlot));
+ return Handle<Context>::cast(
+ FixedArray::get(*table, i + kFirstContextSlot, table->GetIsolate()));
}
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 79a9e926a5..7549d20518 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -79,6 +79,15 @@ Context* Context::declaration_context() {
return current;
}
+Context* Context::closure_context() {
+ Context* current = this;
+ while (!current->IsFunctionContext() && !current->IsScriptContext() &&
+ !current->IsNativeContext()) {
+ current = current->previous();
+ DCHECK(current->closure() == closure());
+ }
+ return current;
+}
JSObject* Context::extension_object() {
DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext());
@@ -542,16 +551,6 @@ int Context::IntrinsicIndexForName(Handle<String> string) {
#undef COMPARE_NAME
-bool Context::IsJSBuiltin(Handle<Context> native_context,
- Handle<JSFunction> function) {
-#define COMPARE_FUNCTION(index, type, name) \
- if (*function == native_context->get(index)) return true;
- NATIVE_CONTEXT_JS_BUILTINS(COMPARE_FUNCTION);
-#undef COMPARE_FUNCTION
- return false;
-}
-
-
#ifdef DEBUG
bool Context::IsBootstrappingOrNativeContext(Isolate* isolate, Object* object) {
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 6c9e195075..38ebf64ae1 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -93,13 +93,8 @@ enum BindingFlags {
V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
- V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable)
-
-
-#define NATIVE_CONTEXT_JS_BUILTINS(V) \
- V(CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX, JSFunction, \
- concat_iterable_to_array_builtin)
-
+ V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
+ V(ORDINARY_HAS_INSTANCE_INDEX, JSFunction, ordinary_has_instance)
#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
@@ -145,7 +140,6 @@ enum BindingFlags {
V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
- V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
V(SET_ADD_METHOD_INDEX, JSFunction, set_add) \
@@ -154,13 +148,14 @@ enum BindingFlags {
V(STACK_OVERFLOW_BOILERPLATE_INDEX, JSObject, stack_overflow_boilerplate) \
V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
- V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
- NATIVE_CONTEXT_JS_BUILTINS(V)
+ V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function)
#define NATIVE_CONTEXT_FIELDS(V) \
V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
/* Below is alpha-sorted */ \
+ V(ACCESSOR_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, \
+ accessor_property_descriptor_map) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map) \
@@ -177,6 +172,7 @@ enum BindingFlags {
call_as_constructor_delegate) \
V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
+ V(DATA_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, data_property_descriptor_map) \
V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
@@ -188,7 +184,8 @@ enum BindingFlags {
V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \
V(FLOAT32X4_FUNCTION_INDEX, JSFunction, float32x4_function) \
V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
- V(FUNCTION_CACHE_INDEX, ObjectHashTable, function_cache) \
+ V(TEMPLATE_INSTANTIATIONS_CACHE_INDEX, ObjectHashTable, \
+ template_instantiations_cache) \
V(FUNCTION_FUNCTION_INDEX, JSFunction, function_function) \
V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
generator_function_function) \
@@ -453,6 +450,9 @@ class Context: public FixedArray {
Context* declaration_context();
bool is_declaration_context();
+ // Get the next closure's context on the context chain.
+ Context* closure_context();
+
// Returns a JSGlobalProxy object or null.
JSObject* global_proxy();
void set_global_proxy(JSObject* global);
@@ -502,9 +502,6 @@ class Context: public FixedArray {
static int ImportedFieldIndexForName(Handle<String> name);
static int IntrinsicIndexForName(Handle<String> name);
- static bool IsJSBuiltin(Handle<Context> native_context,
- Handle<JSFunction> function);
-
#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
inline void set_##name(type* value); \
inline bool is_##name(type* value); \
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 3e56799bc9..730e6477cb 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -127,6 +127,10 @@ uint32_t NumberToUint32(Object* number) {
return DoubleToUint32(number->Number());
}
+int64_t NumberToInt64(Object* number) {
+ if (number->IsSmi()) return Smi::cast(number)->value();
+ return static_cast<int64_t>(number->Number());
+}
bool TryNumberToSize(Isolate* isolate, Object* number, size_t* result) {
SealHandleScope shs(isolate);
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 9b6d83b6bb..29262e5306 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -172,7 +172,7 @@ inline bool IsUint32Double(double value);
// Convert from Number object to C integer.
inline int32_t NumberToInt32(Object* number);
inline uint32_t NumberToUint32(Object* number);
-
+inline int64_t NumberToInt64(Object* number);
double StringToDouble(UnicodeCache* unicode_cache, Handle<String> string,
int flags, double empty_string_val = 0.0);
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index e8dea2e073..a10494e165 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -4,6 +4,8 @@
#include "src/counters.h"
+#include <algorithm>
+#include <iomanip>
+#include <vector>
+
#include "src/base/platform/platform.h"
#include "src/isolate.h"
#include "src/log-inl.h"
@@ -193,5 +195,141 @@ void Counters::ResetHistograms() {
#undef HM
}
+class RuntimeCallStatEntries {
+ public:
+ void Print(std::ostream& os) {
+ if (total_call_count == 0) return;
+ std::sort(entries.rbegin(), entries.rend());
+ os << std::setw(50) << "Runtime Function/C++ Builtin" << std::setw(10)
+ << "Time" << std::setw(18) << "Count" << std::endl
+ << std::string(86, '=') << std::endl;
+ for (Entry& entry : entries) {
+ entry.SetTotal(total_time, total_call_count);
+ entry.Print(os);
+ }
+ os << std::string(86, '-') << std::endl;
+ Entry("Total", total_time, total_call_count).Print(os);
+ }
+
+ void Add(RuntimeCallCounter* counter) {
+ if (counter->count == 0) return;
+ entries.push_back(Entry(counter->name, counter->time, counter->count));
+ total_time += counter->time;
+ total_call_count += counter->count;
+ }
+
+ private:
+ class Entry {
+ public:
+ Entry(const char* name, base::TimeDelta time, uint64_t count)
+ : name_(name),
+ time_(time.InMilliseconds()),
+ count_(count),
+ time_percent_(100),
+ count_percent_(100) {}
+
+ bool operator<(const Entry& other) const {
+ if (time_ < other.time_) return true;
+ if (time_ > other.time_) return false;
+ return count_ < other.count_;
+ }
+
+ void Print(std::ostream& os) {
+ os.precision(2);
+ os << std::fixed;
+ os << std::setw(50) << name_;
+ os << std::setw(8) << time_ << "ms ";
+ os << std::setw(6) << time_percent_ << "%";
+ os << std::setw(10) << count_ << " ";
+ os << std::setw(6) << count_percent_ << "%";
+ os << std::endl;
+ }
+
+ void SetTotal(base::TimeDelta total_time, uint64_t total_count) {
+ if (total_time.InMilliseconds() == 0) {
+ time_percent_ = 0;
+ } else {
+ time_percent_ = 100.0 * time_ / total_time.InMilliseconds();
+ }
+ count_percent_ = 100.0 * count_ / total_count;
+ }
+
+ private:
+ const char* name_;
+ int64_t time_;
+ uint64_t count_;
+ double time_percent_;
+ double count_percent_;
+ };
+
+ uint64_t total_call_count = 0;
+ base::TimeDelta total_time;
+ std::vector<Entry> entries;
+};
+
+void RuntimeCallCounter::Reset() {
+ count = 0;
+ time = base::TimeDelta();
+}
+
+void RuntimeCallStats::Enter(RuntimeCallCounter* counter) {
+ Enter(new RuntimeCallTimer(counter, current_timer_));
+}
+
+void RuntimeCallStats::Enter(RuntimeCallTimer* timer) {
+ current_timer_ = timer;
+ current_timer_->Start();
+}
+
+void RuntimeCallStats::Leave() {
+ RuntimeCallTimer* timer = current_timer_;
+ Leave(timer);
+ delete timer;
+}
+
+void RuntimeCallStats::Leave(RuntimeCallTimer* timer) {
+ current_timer_ = timer->Stop();
+}
+
+void RuntimeCallStats::Print(std::ostream& os) {
+ RuntimeCallStatEntries entries;
+
+#define PRINT_COUNTER(name, nargs, ressize) entries.Add(&this->Runtime_##name);
+ FOR_EACH_INTRINSIC(PRINT_COUNTER)
+#undef PRINT_COUNTER
+
+#define PRINT_COUNTER(name, type) entries.Add(&this->Builtin_##name);
+ BUILTIN_LIST_C(PRINT_COUNTER)
+#undef PRINT_COUNTER
+
+ entries.Add(&this->ExternalCallback);
+ entries.Add(&this->UnexpectedStubMiss);
+
+ entries.Print(os);
+}
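// Shape of the table Print() produces (derived from the code above; no
// sample values): one row per non-zero counter, sorted by time descending,
//
//   <name, width 50> <ms, width 8>ms <time %> <count, width 10> <count %>
//
// under the "Runtime Function/C++ Builtin / Time / Count" header, followed
// by a dashed rule and a synthetic "Total" row.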
+
+void RuntimeCallStats::Reset() {
+#define RESET_COUNTER(name, nargs, ressize) this->Runtime_##name.Reset();
+ FOR_EACH_INTRINSIC(RESET_COUNTER)
+#undef RESET_COUNTER
+#define RESET_COUNTER(name, type) this->Builtin_##name.Reset();
+ BUILTIN_LIST_C(RESET_COUNTER)
+#undef RESET_COUNTER
+}
+
+RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
+ RuntimeCallCounter* counter)
+ : isolate_(isolate),
+ timer_(counter,
+ isolate->counters()->runtime_call_stats()->current_timer()) {
+ if (!FLAG_runtime_call_stats) return;
+ isolate->counters()->runtime_call_stats()->Enter(&timer_);
+}
+
+RuntimeCallTimerScope::~RuntimeCallTimerScope() {
+ if (!FLAG_runtime_call_stats) return;
+ isolate_->counters()->runtime_call_stats()->Leave(&timer_);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index d8a3f091f8..a417da3924 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -9,8 +9,10 @@
#include "src/allocation.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/time.h"
+#include "src/builtins.h"
#include "src/globals.h"
#include "src/objects.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
@@ -196,6 +198,8 @@ class Histogram {
lookup_done_ = false;
}
+ const char* name() { return name_; }
+
protected:
// Returns the handle to the histogram.
void* GetHistogram() {
@@ -206,7 +210,6 @@ class Histogram {
return histogram_;
}
- const char* name() { return name_; }
Isolate* isolate() const { return isolate_; }
private:
@@ -328,8 +331,9 @@ class AggregatableHistogramTimer : public Histogram {
base::TimeDelta time_;
};
-
-// A helper class for use with AggregatableHistogramTimer.
+// A helper class for use with AggregatableHistogramTimer. This is the
+// outer-most timer scope used with an AggregatableHistogramTimer. It will
+// aggregate the information from the inner AggregatedHistogramTimerScope.
class AggregatingHistogramTimerScope {
public:
explicit AggregatingHistogramTimerScope(AggregatableHistogramTimer* histogram)
@@ -342,8 +346,8 @@ class AggregatingHistogramTimerScope {
AggregatableHistogramTimer* histogram_;
};
-
-// A helper class for use with AggregatableHistogramTimer.
+// A helper class for use with AggregatableHistogramTimer, the "inner" scope
+// which defines the events to be timed.
class AggregatedHistogramTimerScope {
public:
explicit AggregatedHistogramTimerScope(AggregatableHistogramTimer* histogram)
@@ -475,6 +479,91 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
value * ((current_ms - last_ms_) / interval_ms);
}
+struct RuntimeCallCounter {
+ explicit RuntimeCallCounter(const char* name) : name(name) {}
+ void Reset();
+
+ const char* name;
+ int64_t count = 0;
+ base::TimeDelta time;
+};
+
+// RuntimeCallTimer is used to keep track of the stack of currently active
+// timers used for properly measuring the own time of a RuntimeCallCounter.
+class RuntimeCallTimer {
+ public:
+ RuntimeCallTimer(RuntimeCallCounter* counter, RuntimeCallTimer* parent)
+ : counter_(counter), parent_(parent) {}
+
+ inline void Start() {
+ timer_.Start();
+ counter_->count++;
+ }
+
+ inline RuntimeCallTimer* Stop() {
+ base::TimeDelta delta = timer_.Elapsed();
+ counter_->time += delta;
+ if (parent_ != NULL) {
+ parent_->AdjustForSubTimer(delta);
+ }
+ return parent_;
+ }
+
+ void AdjustForSubTimer(base::TimeDelta delta) { counter_->time -= delta; }
+
+ private:
+ RuntimeCallCounter* counter_;
+ RuntimeCallTimer* parent_;
+ base::ElapsedTimer timer_;
+};
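// Usage sketch (illustrative; the counters and function are hypothetical):
// nested timers keep per-counter "own time" by subtracting a child's
// elapsed time from its parent's counter when the child stops.
//
//   void TimedWork() {
//     static RuntimeCallCounter outer("Outer");
//     static RuntimeCallCounter inner("Inner");
//     RuntimeCallTimer outer_timer(&outer, nullptr);
//     outer_timer.Start();
//     // ... work attributed to "Outer" ...
//     RuntimeCallTimer inner_timer(&inner, &outer_timer);
//     inner_timer.Start();
//     // ... work attributed to "Inner" ...
//     inner_timer.Stop();   // adds delta to inner, subtracts it from outer
//     outer_timer.Stop();   // outer now holds only its own time
//   }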
+
+struct RuntimeCallStats {
+ // Dummy counter for the unexpected stub miss.
+ RuntimeCallCounter UnexpectedStubMiss =
+ RuntimeCallCounter("UnexpectedStubMiss");
+ // Counter for runtime callbacks into JavaScript.
+ RuntimeCallCounter ExternalCallback = RuntimeCallCounter("ExternalCallback");
+#define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
+ RuntimeCallCounter Runtime_##name = RuntimeCallCounter(#name);
+ FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
+#undef CALL_RUNTIME_COUNTER
+#define CALL_BUILTIN_COUNTER(name, type) \
+ RuntimeCallCounter Builtin_##name = RuntimeCallCounter(#name);
+ BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)
+#undef CALL_BUILTIN_COUNTER
+
+ // Counter to track recursive time events.
+ RuntimeCallTimer* current_timer_ = NULL;
+
+ // Start measuring the time for a function. This will establish the
+ // connection to the parent counter for properly calculating its own time.
+ void Enter(RuntimeCallCounter* counter);
+ void Enter(RuntimeCallTimer* timer);
+ // Leave a scope for a measured runtime function. This will properly add
+ // the time delta to the current counter and subtract the delta from its
+ // parent.
+ void Leave();
+ void Leave(RuntimeCallTimer* timer);
+
+ RuntimeCallTimer* current_timer() { return current_timer_; }
+
+ void Reset();
+ void Print(std::ostream& os);
+
+ RuntimeCallStats() { Reset(); }
+};
+
+// A RuntimeCallTimerScope wraps around a RuntimeCallTimer to measure the
+// time of a C++ scope.
+class RuntimeCallTimerScope {
+ public:
+ explicit RuntimeCallTimerScope(Isolate* isolate, RuntimeCallCounter* counter);
+ ~RuntimeCallTimerScope();
+
+ private:
+ Isolate* isolate_;
+ RuntimeCallTimer timer_;
+};
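// Usage sketch (illustrative; the function is hypothetical): construction
// enters a timer, destruction leaves it, both gated on --runtime-call-stats.
//
//   void DoExternalCallback(Isolate* isolate) {
//     RuntimeCallTimerScope timer_scope(
//         isolate,
//         &isolate->counters()->runtime_call_stats()->ExternalCallback);
//     // ... callback body is measured while the scope is alive ...
//   }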
#define HISTOGRAM_RANGE_LIST(HR) \
/* Generic range histograms */ \
@@ -563,7 +652,9 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
SC(global_handles, V8.GlobalHandles) \
/* OS Memory allocated */ \
SC(memory_allocated, V8.OsMemoryAllocated) \
- SC(normalized_maps, V8.NormalizedMaps) \
+ SC(maps_normalized, V8.MapsNormalized) \
+ SC(maps_created, V8.MapsCreated) \
+ SC(elements_transitions, V8.ObjectElementsTransitions) \
SC(props_to_dictionary, V8.ObjectPropertiesToDictionary) \
SC(elements_to_dictionary, V8.ObjectElementsToDictionary) \
SC(alive_after_last_gc, V8.AliveAfterLastGC) \
@@ -572,11 +663,7 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
SC(string_table_capacity, V8.StringTableCapacity) \
SC(number_of_symbols, V8.NumberOfSymbols) \
SC(script_wrappers, V8.ScriptWrappers) \
- SC(call_initialize_stubs, V8.CallInitializeStubs) \
- SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs) \
- SC(call_normal_stubs, V8.CallNormalStubs) \
- SC(call_megamorphic_stubs, V8.CallMegamorphicStubs) \
- SC(inlined_copied_elements, V8.InlinedCopiedElements) \
+ SC(inlined_copied_elements, V8.InlinedCopiedElements) \
SC(arguments_adaptors, V8.ArgumentsAdaptors) \
SC(compilation_cache_hits, V8.CompilationCacheHits) \
SC(compilation_cache_misses, V8.CompilationCacheMisses) \
@@ -588,8 +675,6 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
SC(total_parse_size, V8.TotalParseSize) \
/* Amount of source code skipped over using preparsing. */ \
SC(total_preparse_skipped, V8.TotalPreparseSkipped) \
- /* Number of symbol lookups skipped using preparsing */ \
- SC(total_preparse_symbols_skipped, V8.TotalPreparseSymbolSkipped) \
/* Amount of compiled source code. */ \
SC(total_compile_size, V8.TotalCompileSize) \
/* Amount of source code compiled with the full codegen. */ \
@@ -602,7 +687,6 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
SC(pc_to_code, V8.PcToCode) \
SC(pc_to_code_cached, V8.PcToCodeCached) \
/* The store-buffer implementation of the write barrier. */ \
- SC(store_buffer_compactions, V8.StoreBufferCompactions) \
SC(store_buffer_overflows, V8.StoreBufferOverflows)
@@ -619,40 +703,22 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
V8.GCCompactorCausedByOldspaceExhaustion) \
SC(gc_last_resort_from_js, V8.GCLastResortFromJS) \
SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles) \
- /* How is the generic keyed-load stub used? */ \
- SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
- SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
- SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
- SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
- SC(keyed_load_polymorphic_stubs, V8.KeyedLoadPolymorphicStubs) \
- SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
- /* How is the generic keyed-call stub used? */ \
- SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast) \
- SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict) \
- SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \
- SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict) \
- SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow) \
- SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad) \
- SC(named_load_global_stub, V8.NamedLoadGlobalStub) \
- SC(named_store_global_inline, V8.NamedStoreGlobalInline) \
- SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss) \
- SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs) \
- SC(keyed_store_external_array_slow, V8.KeyedStoreExternalArraySlow) \
- SC(store_normal_miss, V8.StoreNormalMiss) \
- SC(store_normal_hit, V8.StoreNormalHit) \
- SC(cow_arrays_created_stub, V8.COWArraysCreatedStub) \
+ SC(ic_keyed_load_generic_smi, V8.ICKeyedLoadGenericSmi) \
+ SC(ic_keyed_load_generic_symbol, V8.ICKeyedLoadGenericSymbol) \
+ SC(ic_keyed_load_generic_slow, V8.ICKeyedLoadGenericSlow) \
+ SC(ic_named_load_global_stub, V8.ICNamedLoadGlobalStub) \
+ SC(ic_store_normal_miss, V8.ICStoreNormalMiss) \
+ SC(ic_store_normal_hit, V8.ICStoreNormalHit) \
+ SC(ic_binary_op_miss, V8.ICBinaryOpMiss) \
+ SC(ic_compare_miss, V8.ICCompareMiss) \
+ SC(ic_call_miss, V8.ICCallMiss) \
+ SC(ic_keyed_call_miss, V8.ICKeyedCallMiss) \
+ SC(ic_load_miss, V8.ICLoadMiss) \
+ SC(ic_keyed_load_miss, V8.ICKeyedLoadMiss) \
+ SC(ic_store_miss, V8.ICStoreMiss) \
+ SC(ic_keyed_store_miss, V8.ICKeyedStoreMiss) \
SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime) \
SC(cow_arrays_converted, V8.COWArraysConverted) \
- SC(call_miss, V8.CallMiss) \
- SC(keyed_call_miss, V8.KeyedCallMiss) \
- SC(load_miss, V8.LoadMiss) \
- SC(keyed_load_miss, V8.KeyedLoadMiss) \
- SC(call_const, V8.CallConst) \
- SC(call_const_fast_api, V8.CallConstFastApi) \
- SC(call_const_interceptor, V8.CallConstInterceptor) \
- SC(call_const_interceptor_fast_api, V8.CallConstInterceptorFastApi) \
- SC(call_global_inline, V8.CallGlobalInline) \
- SC(call_global_inline_miss, V8.CallGlobalInlineMiss) \
SC(constructed_objects, V8.ConstructedObjects) \
SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime) \
SC(negative_lookups, V8.NegativeLookups) \
@@ -660,8 +726,6 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes) \
SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses) \
SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates) \
- SC(array_function_runtime, V8.ArrayFunctionRuntime) \
- SC(array_function_native, V8.ArrayFunctionNative) \
SC(enum_cache_hits, V8.EnumCacheHits) \
SC(enum_cache_misses, V8.EnumCacheMisses) \
SC(fast_new_closure_total, V8.FastNewClosureTotal) \
@@ -672,26 +736,26 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
SC(string_add_runtime_ext_to_one_byte, V8.StringAddRuntimeExtToOneByte) \
SC(sub_string_runtime, V8.SubStringRuntime) \
SC(sub_string_native, V8.SubStringNative) \
- SC(string_add_make_two_char, V8.StringAddMakeTwoChar) \
SC(string_compare_native, V8.StringCompareNative) \
SC(string_compare_runtime, V8.StringCompareRuntime) \
SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
SC(regexp_entry_native, V8.RegExpEntryNative) \
SC(number_to_string_native, V8.NumberToStringNative) \
SC(number_to_string_runtime, V8.NumberToStringRuntime) \
- SC(math_acos, V8.MathAcos) \
- SC(math_asin, V8.MathAsin) \
- SC(math_atan, V8.MathAtan) \
- SC(math_atan2, V8.MathAtan2) \
- SC(math_clz32, V8.MathClz32) \
- SC(math_exp, V8.MathExp) \
- SC(math_floor, V8.MathFloor) \
- SC(math_log, V8.MathLog) \
- SC(math_pow, V8.MathPow) \
- SC(math_round, V8.MathRound) \
- SC(math_sqrt, V8.MathSqrt) \
+ SC(math_acos_runtime, V8.MathAcosRuntime) \
+ SC(math_asin_runtime, V8.MathAsinRuntime) \
+ SC(math_atan_runtime, V8.MathAtanRuntime) \
+ SC(math_atan2_runtime, V8.MathAtan2Runtime) \
+ SC(math_clz32_runtime, V8.MathClz32Runtime) \
+ SC(math_exp_runtime, V8.MathExpRuntime) \
+ SC(math_floor_runtime, V8.MathFloorRuntime) \
+ SC(math_log_runtime, V8.MathLogRuntime) \
+ SC(math_pow_runtime, V8.MathPowRuntime) \
+ SC(math_round_runtime, V8.MathRoundRuntime) \
+ SC(math_sqrt_runtime, V8.MathSqrtRuntime) \
SC(stack_interrupts, V8.StackInterrupts) \
SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks) \
+ SC(runtime_calls, V8.RuntimeCalls) \
SC(bounds_checks_eliminated, V8.BoundsChecksEliminated) \
SC(bounds_checks_hoisted, V8.BoundsChecksHoisted) \
SC(soft_deopts_requested, V8.SoftDeoptsRequested) \
@@ -718,8 +782,11 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
SC(turbo_escape_allocs_replaced, V8.TurboEscapeAllocsReplaced) \
SC(crankshaft_escape_allocs_replaced, V8.CrankshaftEscapeAllocsReplaced) \
SC(turbo_escape_loads_replaced, V8.TurboEscapeLoadsReplaced) \
- SC(crankshaft_escape_loads_replaced, V8.CrankshaftEscapeLoadsReplaced)
-
+ SC(crankshaft_escape_loads_replaced, V8.CrankshaftEscapeLoadsReplaced) \
+ /* Total code size (including metadata) of baseline code or bytecode. */ \
+ SC(total_baseline_code_size, V8.TotalBaselineCodeSize) \
+ /* Total count of functions compiled using the baseline compiler. */ \
+ SC(total_baseline_compile_count, V8.TotalBaselineCompileCount)
// This file contains all the v8 counters that are in use.
class Counters {
@@ -831,6 +898,7 @@ class Counters {
void ResetCounters();
void ResetHistograms();
+ RuntimeCallStats* runtime_call_stats() { return &runtime_call_stats_; }
private:
#define HR(name, caption, min, max, num_buckets) Histogram name##_;
@@ -892,6 +960,8 @@ class Counters {
CODE_AGE_LIST_COMPLETE(SC)
#undef SC
+ RuntimeCallStats runtime_call_stats_;
+
friend class Isolate;
explicit Counters(Isolate* isolate);
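
The SC(name, caption) entries above are consumed as an X-macro: one authoritative list is expanded several times to declare the StatsCounter members, their accessors, and their initialization. A reduced sketch of that pattern, assuming a simplified StatsCounter (the real type, captions, and list live in src/counters.h):

#include <cstdio>

// Simplified stand-in for v8::internal::StatsCounter.
struct StatsCounter {
  const char* caption = nullptr;
  int value = 0;
  void Increment() { ++value; }
};

// One list, expanded three times below -- the same shape as the
// STATS_COUNTER_LIST above (these two entries are only examples).
#define COUNTER_LIST(SC)            \
  SC(ic_load_miss, "V8.ICLoadMiss") \
  SC(ic_store_miss, "V8.ICStoreMiss")

class Counters {
 public:
  Counters() {
#define SC(name, cap) name##_.caption = cap;
    COUNTER_LIST(SC)
#undef SC
  }

#define SC(name, cap) \
  StatsCounter* name() { return &name##_; }
  COUNTER_LIST(SC)
#undef SC

 private:
#define SC(name, cap) StatsCounter name##_;
  COUNTER_LIST(SC)
#undef SC
};

int main() {
  Counters counters;
  counters.ic_load_miss()->Increment();
  std::printf("%s = %d\n", counters.ic_load_miss()->caption,
              counters.ic_load_miss()->value);
  return 0;
}

Renaming or dropping a counter is therefore a one-line list edit, which is all the ic_-prefixed churn in this hunk amounts to.
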
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.cc b/deps/v8/src/crankshaft/arm/lithium-arm.cc
index cd736ecd8f..d5590f5c05 100644
--- a/deps/v8/src/crankshaft/arm/lithium-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.cc
@@ -383,8 +383,8 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip a slot if for a double-width slot.
- if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
- return spill_slot_count_++;
+ if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
+ return current_frame_slots_++;
}
@@ -1743,14 +1743,6 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
}
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
- HCompareMinusZeroAndBranch* instr) {
- LOperand* value = UseRegister(instr->value());
- LOperand* scratch = TempRegister();
- return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1819,12 +1811,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2449,8 +2435,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor =
- info()->code_stub()->GetCallInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor = graph()->descriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
@@ -2471,17 +2456,12 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
Retry(kTooManySpillSlotsNeededForOSR);
spill_index = 0;
}
+ spill_index += StandardFrameConstants::kFixedSlotCount;
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
// There are no real uses of the arguments object.
// arguments.length and element access are supported directly on
@@ -2622,15 +2602,5 @@ LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
return new(zone()) LStoreFrameContext(context);
}
-
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
- HAllocateBlockContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseRegisterAtStart(instr->function());
- LAllocateBlockContext* result =
- new(zone()) LAllocateBlockContext(context, function);
- return MarkAsCall(DefineFixed(result, cp), instr);
-}
-
} // namespace internal
} // namespace v8
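
The renamed counter states the invariant more directly: the chunk tracks frame slots, and a double-width value consumes two of them, with the returned index naming the second slot of the pair. A self-contained sketch of that allocation rule (FrameSlotAllocator is illustrative, not the real LPlatformChunk API):

#include <cassert>

enum RegisterKind { GENERAL_REGISTERS, DOUBLE_REGISTERS };

// Illustrative allocator: a double takes two adjacent frame slots and the
// returned index names the second of the pair.
class FrameSlotAllocator {
 public:
  int GetNextSpillIndex(RegisterKind kind) {
    if (kind == DOUBLE_REGISTERS) current_frame_slots_++;  // skip one slot
    return current_frame_slots_++;
  }
  int current_frame_slots() const { return current_frame_slots_; }

 private:
  int current_frame_slots_ = 0;
};

int main() {
  FrameSlotAllocator frame;
  int a = frame.GetNextSpillIndex(GENERAL_REGISTERS);  // slot 0
  int d = frame.GetNextSpillIndex(DOUBLE_REGISTERS);   // slots 1 and 2
  int b = frame.GetNextSpillIndex(GENERAL_REGISTERS);  // slot 3
  assert(a == 0 && d == 2 && b == 3);
  assert(frame.current_frame_slots() == 4);
  return 0;
}
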
diff --git a/deps/v8/src/crankshaft/arm/lithium-arm.h b/deps/v8/src/crankshaft/arm/lithium-arm.h
index 6329f36fb2..91435cf785 100644
--- a/deps/v8/src/crankshaft/arm/lithium-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-arm.h
@@ -21,7 +21,6 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
- V(AllocateBlockContext) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -35,7 +34,6 @@ class LCodeGen;
V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
- V(CallStub) \
V(CheckArrayBufferNotNeutered) \
V(CheckInstanceType) \
V(CheckNonSmi) \
@@ -47,7 +45,6 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
- V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -101,7 +98,6 @@ class LCodeGen;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
- V(MapEnumLength) \
V(MathAbs) \
V(MathClz32) \
V(MathExp) \
@@ -460,19 +456,6 @@ class LParameter final : public LTemplateInstruction<1, 0, 0> {
};
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallStub(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -992,22 +975,6 @@ class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
};
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
- public:
- LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
- "cmp-minus-zero-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1158,8 +1125,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
- Strength strength() { return hydrogen()->strength(); }
-
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1361,18 +1326,6 @@ class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
};
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1498,8 +1451,6 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- Strength strength() { return hydrogen()->strength(); }
-
private:
Token::Value op_;
};
@@ -2616,23 +2567,6 @@ class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
- public:
- LAllocateBlockContext(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
- DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
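
Retiring AllocateBlockContext, CallStub, CompareMinusZeroAndBranch, and MapEnumLength takes only the V(...) list edits above because the list is itself an X-macro: it generates the opcode enum, the mnemonic table, and the per-instruction declarations from one place. A reduced sketch of the mechanism with a hypothetical three-entry list:

#include <cstdio>

// One authoritative list; deleting a line here removes the opcode, its
// mnemonic, and its dispatch entry all at once.
#define LITHIUM_INSTRUCTION_LIST(V) \
  V(AddI)                           \
  V(Allocate)                       \
  V(Branch)

enum Opcode {
#define DECLARE_OPCODE(type) k##type,
  LITHIUM_INSTRUCTION_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
  kNumberOfOpcodes
};

static const char* const kMnemonics[] = {
#define DECLARE_NAME(type) #type,
    LITHIUM_INSTRUCTION_LIST(DECLARE_NAME)
#undef DECLARE_NAME
};

int main() {
  for (int i = 0; i < kNumberOfOpcodes; ++i) {
    std::printf("%d: %s\n", i, kMnemonics[i]);
  }
  return 0;
}
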
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
index 2bd0788232..7b2ebadf1f 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -59,7 +59,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
- code->set_stack_slots(GetStackSlotCount());
+ code->set_stack_slots(GetTotalFrameSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -103,13 +103,6 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
-#endif
-
// r1: Callee's JS function.
// cp: Callee's context.
// pp: Callee's constant pool pointer (if enabled)
@@ -382,7 +375,7 @@ bool LCodeGen::GenerateJumpTable() {
bool LCodeGen::GenerateSafepointTable() {
DCHECK(is_done());
- safepoints_.Emit(masm(), GetStackSlotCount());
+ safepoints_.Emit(masm(), GetTotalFrameSlotCount());
return !is_aborted();
}
@@ -554,7 +547,7 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- return MemOperand(fp, StackSlotOffset(op->index()));
+ return MemOperand(fp, FrameSlotToFPOffset(op->index()));
} else {
// Retrieve parameter without eager stack-frame relative to the
// stack-pointer.
@@ -566,7 +559,7 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
DCHECK(op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+ return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
} else {
// Retrieve parameter without eager stack-frame relative to the
// stack-pointer.
@@ -635,9 +628,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
if (op->IsStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
if (is_tagged) {
translation->StoreStackSlot(index);
} else if (is_uint32) {
@@ -647,9 +637,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -979,26 +966,6 @@ void LCodeGen::DoParameter(LParameter* instr) {
}
-void LCodeGen::DoCallStub(LCallStub* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(r0));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpExec: {
- RegExpExecStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
GenerateOsrPrologue();
}
@@ -1803,13 +1770,6 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
}
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLength(result, map);
-}
-
-
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -1946,8 +1906,14 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
// At this point, both left and right are either 0 or -0.
if (operation == HMathMinMax::kMathMin) {
// We could use a single 'vorr' instruction here if we had NEON support.
+ // The algorithm is: -((-L) + (-R)), which in case of L and R being
+ // different registers is most efficiently expressed as -((-L) - R).
__ vneg(left_reg, left_reg);
- __ vsub(result_reg, left_reg, right_reg);
+ if (left_reg.is(right_reg)) {
+ __ vadd(result_reg, left_reg, right_reg);
+ } else {
+ __ vsub(result_reg, left_reg, right_reg);
+ }
__ vneg(result_reg, result_reg);
} else {
// Since we operate on +0 and/or -0, vadd and vand have the same effect;
@@ -2014,8 +1980,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(r0));
DCHECK(ToRegister(instr->result()).is(r0));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
// Block literal pool emission to ensure nop indicating no inlined smi code
// is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
@@ -2260,8 +2225,9 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = EvalComparison(instr->op(), left_val, right_val) ?
- instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+ int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+ ? instr->TrueDestination(chunk_)
+ : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
@@ -2326,33 +2292,6 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
- Representation rep = instr->hydrogen()->value()->representation();
- DCHECK(!rep.IsInteger32());
- Register scratch = ToRegister(instr->temp());
-
- if (rep.IsDouble()) {
- DwVfpRegister value = ToDoubleRegister(instr->value());
- __ VFPCompareAndSetFlags(value, 0.0);
- EmitFalseBranch(instr, ne);
- __ VmovHigh(scratch, value);
- __ cmp(scratch, Operand(0x80000000));
- } else {
- Register value = ToRegister(instr->value());
- __ CheckMap(value,
- scratch,
- Heap::kHeapNumberMapRootIndex,
- instr->FalseLabel(chunk()),
- DO_SMI_CHECK);
- __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
- __ cmp(scratch, Operand(0x80000000));
- __ cmp(ip, Operand(0x00000000), eq);
- }
- EmitBranch(instr, eq);
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -2616,8 +2555,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined.
__ cmp(r0, Operand::Zero());
@@ -2707,9 +2645,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
- SLOPPY, PREMONOMORPHIC).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2804,10 +2742,10 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in r2.
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_INSIDE_TYPEOF,
+ instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -2952,6 +2890,9 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3103,8 +3044,8 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
}
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3718,21 +3659,22 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ HCallFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->function()).is(r1));
DCHECK(ToRegister(instr->result()).is(r0));
int arity = instr->arity();
- ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
- if (instr->hydrogen()->HasVectorAndSlot()) {
+ ConvertReceiverMode mode = hinstr->convert_mode();
+ if (hinstr->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
DCHECK(slot_register.is(r3));
DCHECK(vector_register.is(r2));
AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- int index = vector->GetIndex(instr->hydrogen()->slot());
+ Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+ int index = vector->GetIndex(hinstr->slot());
__ Move(vector_register, vector);
__ mov(slot_register, Operand(Smi::FromInt(index)));
@@ -4018,6 +3960,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -5271,8 +5216,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = eq;
} else if (String::Equals(type_name, factory->undefined_string())) {
- __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
- __ b(eq, true_label);
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
+ __ b(eq, false_label);
__ JumpIfSmi(input, false_label);
// Check for undetectable objects => true.
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -5450,17 +5395,8 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- __ SmiTst(r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
-
- STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
- __ CompareObjectType(r0, r1, r1, JS_PROXY_TYPE);
- DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
-
Label use_cache, call_runtime;
- Register null_value = r5;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ CheckEnumCache(null_value, &call_runtime);
+ __ CheckEnumCache(&call_runtime);
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
__ b(&use_cache);
@@ -5468,12 +5404,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(r0);
- CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r1, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ CallRuntime(Runtime::kForInEnumerate, instr);
__ bind(&use_cache);
}
@@ -5592,15 +5523,6 @@ void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
}
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
- Handle<ScopeInfo> scope_info = instr->scope_info();
- __ Push(scope_info);
- __ push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, instr);
- RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
#undef __
} // namespace internal
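
The DoMathMinMax hunk leans on an IEEE-754 identity: under round-to-nearest, x + y only yields -0 when both addends are -0, so min(L, R) over signed zeros equals -((-L) + (-R)). With distinct registers that is cheapest as -((-L) - R), but once left and right alias the same register the subtrahend has already been negated in place, so vsub would compute (-L) - (-L) = +0 regardless of sign; the new vadd path restores the identity. A quick portable check:

#include <cassert>
#include <cmath>

// min over signed zeros via the -((-L) + (-R)) identity the generated
// code uses; only meaningful when both inputs are +0.0 or -0.0.
double MinOfZeros(double l, double r) { return -((-l) + (-r)); }

int main() {
  const double pz = 0.0, nz = -0.0;
  assert(!std::signbit(MinOfZeros(pz, pz)));  // min(+0, +0) == +0
  assert(std::signbit(MinOfZeros(pz, nz)));   // min(+0, -0) == -0
  assert(std::signbit(MinOfZeros(nz, pz)));   // min(-0, +0) == -0
  assert(std::signbit(MinOfZeros(nz, nz)));   // min(-0, -0) == -0
  return 0;
}
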
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
index 24a083ff2f..67925ccdf6 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.h
@@ -44,10 +44,8 @@ class LCodeGen: public LCodeGenBase {
}
bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub() ||
- info()->requires_frame();
+ return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+ !info()->IsStub() || info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -153,7 +151,13 @@ class LCodeGen: public LCodeGenBase {
Register temporary,
Register temporary2);
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ bool HasAllocatedStackSlots() const {
+ return chunk()->HasAllocatedStackSlots();
+ }
+ int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+ int GetTotalFrameSlotCount() const {
+ return chunk()->GetTotalFrameSlotCount();
+ }
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
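
The header now separates two sizes the old GetStackSlotCount conflated: the spill slots the register allocator added, and the total frame slots (fixed frame plus spills) that FinishCode and the safepoint tables need. A loose sketch of the relationship, with an assumed constant standing in for StandardFrameConstants::kFixedSlotCount:

#include <cassert>

// Illustrative number only; the real constant is
// StandardFrameConstants::kFixedSlotCount.
constexpr int kFixedSlotCount = 4;

struct ChunkSlots {
  // Starts at the fixed frame size; spill allocation grows it from there.
  int current_frame_slots = kFixedSlotCount;

  int GetSpillSlotCount() const {
    return current_frame_slots - kFixedSlotCount;
  }
  int GetTotalFrameSlotCount() const { return current_frame_slots; }
  bool HasAllocatedStackSlots() const {
    return current_frame_slots != kFixedSlotCount;
  }
};

int main() {
  ChunkSlots chunk;
  chunk.current_frame_slots += 3;  // allocate three spill slots
  assert(chunk.GetSpillSlotCount() == 3);
  assert(chunk.GetTotalFrameSlotCount() == 7);
  assert(chunk.HasAllocatedStackSlots());
  return 0;
}
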
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
index 3f43338585..c5d42082bd 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.cc
@@ -527,11 +527,7 @@ LUnallocated* LChunkBuilder::TempDoubleRegister() {
return operand;
}
-
-int LPlatformChunk::GetNextSpillIndex() {
- return spill_slot_count_++;
-}
-
+int LPlatformChunk::GetNextSpillIndex() { return current_frame_slots_++; }
LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
int index = GetNextSpillIndex();
@@ -1084,12 +1080,6 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
}
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), x0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
instr->ReplayEnvironment(current_block_->last_environment());
@@ -1567,14 +1557,6 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
}
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
- HCompareMinusZeroAndBranch* instr) {
- LOperand* value = UseRegister(instr->value());
- LOperand* scratch = TempRegister();
- return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1743,12 +1725,6 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
DCHECK(instr->representation().IsInteger32());
DCHECK(instr->left()->representation().Equals(instr->representation()));
@@ -1966,8 +1942,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor =
- info()->code_stub()->GetCallInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor = graph()->descriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
@@ -2662,6 +2637,7 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
Retry(kTooManySpillSlotsNeededForOSR);
spill_index = 0;
}
+ spill_index += StandardFrameConstants::kFixedSlotCount;
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
@@ -2719,15 +2695,5 @@ LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
}
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
- HAllocateBlockContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseRegisterAtStart(instr->function());
- LAllocateBlockContext* result =
- new(zone()) LAllocateBlockContext(context, function);
- return MarkAsCall(DefineFixed(result, cp), instr);
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/arm64/lithium-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
index 1b627d13f8..14abeb0ba6 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-arm64.h
@@ -23,7 +23,6 @@ class LCodeGen;
V(AddI) \
V(AddS) \
V(Allocate) \
- V(AllocateBlockContext) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -37,7 +36,6 @@ class LCodeGen;
V(CallJSFunction) \
V(CallNewArray) \
V(CallRuntime) \
- V(CallStub) \
V(CallWithDescriptor) \
V(CheckArrayBufferNotNeutered) \
V(CheckInstanceType) \
@@ -55,7 +53,6 @@ class LCodeGen;
V(CmpMapAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpT) \
- V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(ConstantD) \
V(ConstantE) \
@@ -106,7 +103,6 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadRoot) \
- V(MapEnumLength) \
V(MathAbs) \
V(MathAbsTagged) \
V(MathClz32) \
@@ -718,8 +714,6 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- Strength strength() { return hydrogen()->strength(); }
-
private:
Token::Value op_;
};
@@ -887,19 +881,6 @@ class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
};
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallStub(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
class LCheckArrayBufferNotNeutered final
: public LTemplateInstruction<0, 1, 0> {
public:
@@ -1141,28 +1122,10 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
- Strength strength() { return hydrogen()->strength(); }
-
Token::Value op() const { return hydrogen()->token(); }
};
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
- public:
- LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
- "cmp-minus-zero-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
public:
LCompareNumericAndBranch(LOperand* left, LOperand* right) {
@@ -1791,18 +1754,6 @@ class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
};
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
template<int T>
class LUnaryMathOperation : public LTemplateInstruction<1, 1, T> {
public:
@@ -2949,23 +2900,6 @@ class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
- public:
- LAllocateBlockContext(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
- DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
public:
LWrapReceiver(LOperand* receiver, LOperand* function) {
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
index 571bc154af..6399a8bb09 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -277,9 +277,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
if (op->IsStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
if (is_tagged) {
translation->StoreStackSlot(index);
} else if (is_uint32) {
@@ -289,9 +286,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -366,21 +360,22 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ HCallFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->function()).Is(x1));
DCHECK(ToRegister(instr->result()).Is(x0));
int arity = instr->arity();
- ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
- if (instr->hydrogen()->HasVectorAndSlot()) {
+ ConvertReceiverMode mode = hinstr->convert_mode();
+ if (hinstr->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
DCHECK(slot_register.is(x3));
DCHECK(vector_register.is(x2));
AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- int index = vector->GetIndex(instr->hydrogen()->slot());
+ Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+ int index = vector->GetIndex(hinstr->slot());
__ Mov(vector_register, vector);
__ Mov(slot_register, Operand(Smi::FromInt(index)));
@@ -392,7 +387,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
__ Mov(x0, arity);
CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
}
- RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
+ RecordPushedArgumentsDelta(hinstr->argument_delta());
}
@@ -605,13 +600,6 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info()->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ Debug("stop-at", __LINE__, BREAK);
- }
-#endif
}
DCHECK(__ StackPointer().Is(jssp));
@@ -870,14 +858,14 @@ bool LCodeGen::GenerateSafepointTable() {
// We do not know how much data will be emitted for the safepoint table, so
// force emission of the veneer pool.
masm()->CheckVeneerPool(true, true);
- safepoints_.Emit(masm(), GetStackSlotCount());
+ safepoints_.Emit(masm(), GetTotalFrameSlotCount());
return !is_aborted();
}
void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
- code->set_stack_slots(GetStackSlotCount());
+ code->set_stack_slots(GetTotalFrameSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -1161,7 +1149,7 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- int fp_offset = StackSlotOffset(op->index());
+ int fp_offset = FrameSlotToFPOffset(op->index());
// Loads and stores have a bigger reach in positive offset than negative.
// We try to access using jssp (positive offset) first, then fall back to
// fp (negative offset) if that fails.
@@ -1178,8 +1166,8 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
if ((stack_mode == kCanUseStackPointer) &&
!info()->saves_caller_doubles()) {
int jssp_offset_to_fp =
- StandardFrameConstants::kFixedFrameSizeFromFp +
- (pushed_arguments_ + GetStackSlotCount()) * kPointerSize;
+ (pushed_arguments_ + GetTotalFrameSlotCount()) * kPointerSize -
+ StandardFrameConstants::kFixedFrameSizeAboveFp;
int jssp_offset = fp_offset + jssp_offset_to_fp;
if (masm()->IsImmLSScaled(jssp_offset, LSDoubleWord)) {
return MemOperand(masm()->StackPointer(), jssp_offset);
@@ -1673,8 +1661,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(x0));
DCHECK(ToRegister(instr->result()).is(x0));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -1998,27 +1985,6 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
}
-void LCodeGen::DoCallStub(LCallStub* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(x0));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpExec: {
- RegExpExecStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
- RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
-}
-
-
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
GenerateOsrPrologue();
}
@@ -2330,24 +2296,6 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
}
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
- Representation rep = instr->hydrogen()->value()->representation();
- DCHECK(!rep.IsInteger32());
- Register scratch = ToRegister(instr->temp());
-
- if (rep.IsDouble()) {
- __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
- instr->TrueLabel(chunk()));
- } else {
- Register value = ToRegister(instr->value());
- __ JumpIfNotHeapNumber(value, instr->FalseLabel(chunk()), DO_SMI_CHECK);
- __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
- __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
- }
- EmitGoto(instr->FalseDestination(chunk()));
-}
-
-
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
@@ -2360,8 +2308,9 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = EvalComparison(instr->op(), left_val, right_val) ?
- instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+ int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+ ? instr->TrueDestination(chunk_)
+ : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
@@ -2422,8 +2371,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->left()).Is(x1));
DCHECK(ToRegister(instr->right()).Is(x0));
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Signal that we don't inline smi code before this stub.
InlineSmiCheckInfo::EmitNotInlined(masm());
@@ -2715,20 +2663,12 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register object = ToRegister(instr->object());
- Register null_value = x5;
DCHECK(instr->IsMarkedAsCall());
DCHECK(object.Is(x0));
- DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
-
- STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
- __ CompareObjectType(object, x1, x1, JS_PROXY_TYPE);
- DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);
-
Label use_cache, call_runtime;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
+ __ CheckEnumCache(object, x5, x1, x2, x3, x4, &call_runtime);
__ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
__ B(&use_cache);
@@ -2736,12 +2676,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ Bind(&call_runtime);
__ Push(object);
- CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
- __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
- DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
- Deoptimizer::kWrongMap);
-
+ CallRuntime(Runtime::kForInEnumerate, instr);
__ Bind(&use_cache);
}
@@ -3084,9 +3019,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->result()).Is(x0));
__ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
- SLOPPY, PREMONOMORPHIC).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3194,6 +3129,9 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3346,8 +3284,8 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
}
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).Is(x0));
@@ -3401,10 +3339,10 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
__ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_INSIDE_TYPEOF,
+ instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).is(x0));
@@ -3417,13 +3355,6 @@ void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
}
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLengthSmi(result, map);
-}
-
-
void LCodeGen::DoMathAbs(LMathAbs* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
@@ -4889,6 +4820,9 @@ void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -5578,7 +5512,7 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
DCHECK(instr->temp1() != NULL);
Register scratch = ToRegister(instr->temp1());
- __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
+ __ JumpIfRoot(value, Heap::kNullValueRootIndex, false_label);
__ JumpIfSmi(value, false_label);
// Check for undetectable objects and jump to the true branch in this case.
__ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
@@ -5775,14 +5709,5 @@ void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
}
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
- Handle<ScopeInfo> scope_info = instr->scope_info();
- __ Push(scope_info);
- __ Push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, instr);
- RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
} // namespace internal
} // namespace v8
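
Both DoCompareNumericAndBranch hunks fold a comparison of two constant doubles at compile time, now calling the helper through the Token namespace. A self-contained sketch of that fold, with a reduced hypothetical signature:

#include <cassert>

enum class Op { LT, GT, EQ, LTE, GTE };

// Reduced stand-in for Token::EvalComparison: evaluate a numeric
// comparison whose operands are known at compile time.
bool EvalComparison(Op op, double left, double right) {
  switch (op) {
    case Op::LT:  return left < right;
    case Op::GT:  return left > right;
    case Op::EQ:  return left == right;
    case Op::LTE: return left <= right;
    case Op::GTE: return left >= right;
  }
  return false;
}

int main() {
  // With both operands constant, the conditional branch collapses into a
  // direct goto of one successor block.
  int true_block = 1, false_block = 2;
  int next_block =
      EvalComparison(Op::LT, 1.0, 2.0) ? true_block : false_block;
  assert(next_block == true_block);
  return 0;
}
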
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
index 18856da154..cf7de10394 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.h
@@ -49,10 +49,8 @@ class LCodeGen: public LCodeGenBase {
}
bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub() ||
- info()->requires_frame();
+ return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+ !info()->IsStub() || info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -253,7 +251,13 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode);
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ bool HasAllocatedStackSlots() const {
+ return chunk()->HasAllocatedStackSlots();
+ }
+ int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+ int GetTotalFrameSlotCount() const {
+ return chunk()->GetTotalFrameSlotCount();
+ }
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
diff --git a/deps/v8/src/crankshaft/hydrogen-alias-analysis.h b/deps/v8/src/crankshaft/hydrogen-alias-analysis.h
index de8d0bdbe5..1f32b7ac6e 100644
--- a/deps/v8/src/crankshaft/hydrogen-alias-analysis.h
+++ b/deps/v8/src/crankshaft/hydrogen-alias-analysis.h
@@ -44,7 +44,6 @@ class HAliasAnalyzer : public ZoneObject {
// Constant objects can be distinguished statically.
if (a->IsConstant()) {
- // TODO(titzer): DataEquals() is more efficient, but that's protected.
return a->Equals(b) ? kMustAlias : kNoAlias;
}
return kMayAlias;
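
Dropping the TODO leaves the rule itself intact: a constant can be distinguished statically, so a query against one answers must-alias or no-alias, and anything else stays may-alias. A compact restatement with simplified stand-in types:

#include <cassert>

enum AliasResult { kMustAlias, kMayAlias, kNoAlias };

// Stand-in for HValue; id models constant identity.
struct Value {
  bool is_constant;
  int id;
};

bool Equals(const Value& a, const Value& b) {
  return a.is_constant == b.is_constant && a.id == b.id;
}

// Mirrors the constant branch of HAliasAnalyzer::Query above.
AliasResult Query(const Value& a, const Value& b) {
  if (a.is_constant) {
    // Constant objects can be distinguished statically.
    return Equals(a, b) ? kMustAlias : kNoAlias;
  }
  return kMayAlias;
}

int main() {
  Value c1{true, 42}, c2{true, 42}, c3{true, 7}, v{false, 0};
  assert(Query(c1, c2) == kMustAlias);
  assert(Query(c1, c3) == kNoAlias);
  assert(Query(v, c1) == kMayAlias);
  return 0;
}
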
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.cc b/deps/v8/src/crankshaft/hydrogen-instructions.cc
index e2e026fb5f..a9c6228cd3 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.cc
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.cc
@@ -765,7 +765,6 @@ void HInstruction::Verify() {
bool HInstruction::CanDeoptimize() {
- // TODO(titzer): make this a virtual method?
switch (opcode()) {
case HValue::kAbnormalExit:
case HValue::kAccessArgumentsAt:
@@ -777,13 +776,11 @@ bool HInstruction::CanDeoptimize() {
case HValue::kBoundsCheckBaseIndexInformation:
case HValue::kCallFunction:
case HValue::kCallNewArray:
- case HValue::kCallStub:
case HValue::kCapturedObject:
case HValue::kClassOfTestAndBranch:
case HValue::kCompareGeneric:
case HValue::kCompareHoleAndBranch:
case HValue::kCompareMap:
- case HValue::kCompareMinusZeroAndBranch:
case HValue::kCompareNumericAndBranch:
case HValue::kCompareObjectEqAndBranch:
case HValue::kConstant:
@@ -811,7 +808,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kLoadNamedField:
case HValue::kLoadNamedGeneric:
case HValue::kLoadRoot:
- case HValue::kMapEnumLength:
case HValue::kMathMinMax:
case HValue::kParameter:
case HValue::kPhi:
@@ -832,7 +828,6 @@ bool HInstruction::CanDeoptimize() {
return false;
case HValue::kAdd:
- case HValue::kAllocateBlockContext:
case HValue::kApplyArguments:
case HValue::kBitwise:
case HValue::kBoundsCheck:
@@ -1107,12 +1102,6 @@ std::ostream& HAccessArgumentsAt::PrintDataTo(
}
-std::ostream& HAllocateBlockContext::PrintDataTo(
- std::ostream& os) const { // NOLINT
- return os << NameOf(context()) << " " << NameOf(function());
-}
-
-
std::ostream& HControlInstruction::PrintDataTo(
std::ostream& os) const { // NOLINT
os << " goto (";
@@ -1428,12 +1417,12 @@ HValue* HBitwise::Canonicalize() {
// static
HInstruction* HAdd::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right, Strength strength,
+ HValue* left, HValue* right,
ExternalAddType external_add_type) {
// For everything else, you should use the other factory method without
// ExternalAddType.
DCHECK_EQ(external_add_type, AddOfExternalAndTagged);
- return new (zone) HAdd(context, left, right, strength, external_add_type);
+ return new (zone) HAdd(context, left, right, external_add_type);
}
@@ -1733,12 +1722,6 @@ std::ostream& HCheckInstanceType::PrintDataTo(
}
-std::ostream& HCallStub::PrintDataTo(std::ostream& os) const { // NOLINT
- os << CodeStub::MajorName(major_key_) << " ";
- return HUnaryCall::PrintDataTo(os);
-}
-
-
std::ostream& HUnknownOSRValue::PrintDataTo(std::ostream& os) const { // NOLINT
const char* type = "expression";
if (environment_->is_local_index(index_)) type = "local";
@@ -2733,7 +2716,6 @@ HConstant::HConstant(Handle<Object> object, Representation r)
bit_field_, has_int32_value && Smi::IsValid(int32_value_));
double_value_ = n;
bit_field_ = HasDoubleValueField::update(bit_field_, true);
- // TODO(titzer): if this heap number is new space, tenure a new one.
}
Initialize(r);
@@ -2906,7 +2888,6 @@ bool HConstant::EmitAtUses() {
DCHECK(IsLinked());
if (block()->graph()->has_osr() &&
block()->graph()->IsStandardConstant(this)) {
- // TODO(titzer): this seems like a hack that should be fixed by custom OSR.
return true;
}
if (HasNoUses()) return true;
@@ -2963,6 +2944,8 @@ Maybe<HConstant*> HConstant::CopyToTruncatedNumber(Isolate* isolate,
res = new (zone) HConstant(std::numeric_limits<double>::quiet_NaN());
} else if (handle->IsNull()) {
res = new(zone) HConstant(0);
+ } else if (handle->IsString()) {
+ res = new(zone) HConstant(String::ToNumber(Handle<String>::cast(handle)));
}
return res != NULL ? Just(res) : Nothing<HConstant*>();
}
@@ -3327,31 +3310,6 @@ bool HCompareNumericAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
}
-bool HCompareMinusZeroAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
- if (FLAG_fold_constants && value()->IsConstant()) {
- HConstant* constant = HConstant::cast(value());
- if (constant->HasDoubleValue()) {
- *block = IsMinusZero(constant->DoubleValue())
- ? FirstSuccessor() : SecondSuccessor();
- return true;
- }
- }
- if (value()->representation().IsSmiOrInteger32()) {
- // A Smi or Integer32 cannot contain minus zero.
- *block = SecondSuccessor();
- return true;
- }
- *block = NULL;
- return false;
-}
-
-
-void HCompareMinusZeroAndBranch::InferRepresentation(
- HInferRepresentationPhase* h_infer) {
- ChangeRepresentation(value()->representation());
-}
-
-
std::ostream& HGoto::PrintDataTo(std::ostream& os) const { // NOLINT
return os << *SuccessorAt(0);
}
@@ -3388,7 +3346,7 @@ void HCompareNumericAndBranch::InferRepresentation(
// (false). Therefore, any comparisons other than ordered relational
// comparisons must cause a deopt when one of their arguments is undefined.
// See also v8:1434
- if (Token::IsOrderedRelationalCompareOp(token_) && !is_strong(strength())) {
+ if (Token::IsOrderedRelationalCompareOp(token_)) {
SetFlag(kAllowUndefinedAsNaN);
}
}
@@ -3955,24 +3913,23 @@ bool HStoreKeyed::NeedsCanonicalization() {
#define H_CONSTANT_DOUBLE(val) \
HConstant::New(isolate, zone, context, static_cast<double>(val))
-#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op) \
- HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context, \
- HValue* left, HValue* right, Strength strength) { \
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
- HConstant* c_left = HConstant::cast(left); \
- HConstant* c_right = HConstant::cast(right); \
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
- double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
- if (IsInt32Double(double_res)) { \
- return H_CONSTANT_INT(double_res); \
- } \
- return H_CONSTANT_DOUBLE(double_res); \
- } \
- } \
- return new (zone) HInstr(context, left, right, strength); \
+#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op) \
+ HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context, \
+ HValue* left, HValue* right) { \
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
+ HConstant* c_left = HConstant::cast(left); \
+ HConstant* c_right = HConstant::cast(right); \
+ if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
+ double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
+ if (IsInt32Double(double_res)) { \
+ return H_CONSTANT_INT(double_res); \
+ } \
+ return H_CONSTANT_DOUBLE(double_res); \
+ } \
+ } \
+ return new (zone) HInstr(context, left, right); \
}
-
DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HAdd, +)
DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HMul, *)
DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
@@ -4198,9 +4155,8 @@ HInstruction* HMathMinMax::New(Isolate* isolate, Zone* zone, HValue* context,
return new(zone) HMathMinMax(context, left, right, op);
}
-
HInstruction* HMod::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right, Strength strength) {
+ HValue* left, HValue* right) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -4219,12 +4175,11 @@ HInstruction* HMod::New(Isolate* isolate, Zone* zone, HValue* context,
}
}
}
- return new (zone) HMod(context, left, right, strength);
+ return new (zone) HMod(context, left, right);
}
-
HInstruction* HDiv::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right, Strength strength) {
+ HValue* left, HValue* right) {
// If left and right are constant values, try to return a constant value.
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
@@ -4243,13 +4198,11 @@ HInstruction* HDiv::New(Isolate* isolate, Zone* zone, HValue* context,
}
}
}
- return new (zone) HDiv(context, left, right, strength);
+ return new (zone) HDiv(context, left, right);
}
-
HInstruction* HBitwise::New(Isolate* isolate, Zone* zone, HValue* context,
- Token::Value op, HValue* left, HValue* right,
- Strength strength) {
+ Token::Value op, HValue* left, HValue* right) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -4274,24 +4227,22 @@ HInstruction* HBitwise::New(Isolate* isolate, Zone* zone, HValue* context,
return H_CONSTANT_INT(result);
}
}
- return new (zone) HBitwise(context, op, left, right, strength);
+ return new (zone) HBitwise(context, op, left, right);
}
-
-#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result) \
- HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context, \
- HValue* left, HValue* right, Strength strength) { \
- if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
- HConstant* c_left = HConstant::cast(left); \
- HConstant* c_right = HConstant::cast(right); \
- if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
- return H_CONSTANT_INT(result); \
- } \
- } \
- return new (zone) HInstr(context, left, right, strength); \
+#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result) \
+ HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context, \
+ HValue* left, HValue* right) { \
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
+ HConstant* c_left = HConstant::cast(left); \
+ HConstant* c_right = HConstant::cast(right); \
+ if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
+ return H_CONSTANT_INT(result); \
+ } \
+ } \
+ return new (zone) HInstr(context, left, right); \
}
-
DEFINE_NEW_H_BITWISE_INSTR(HSar,
c_left->NumberValueAsInteger32() >> (c_right->NumberValueAsInteger32() & 0x1f))
DEFINE_NEW_H_BITWISE_INSTR(HShl,
@@ -4299,9 +4250,8 @@ c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f))
#undef DEFINE_NEW_H_BITWISE_INSTR
-
HInstruction* HShr::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right, Strength strength) {
+ HValue* left, HValue* right) {
if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
@@ -4314,7 +4264,7 @@ HInstruction* HShr::New(Isolate* isolate, Zone* zone, HValue* context,
return H_CONSTANT_INT(static_cast<uint32_t>(left_val) >> right_val);
}
}
- return new (zone) HShr(context, left, right, strength);
+ return new (zone) HShr(context, left, right);
}
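
The reflowed folding macros and HShr::New reproduce ECMAScript shift semantics at compile time: the count is masked to five bits, and unsigned right shift reinterprets the left operand as uint32 (the real HShr::New also special-cases a zero count with a negative left value, whose result only fits a double). A standalone illustration, assuming arithmetic right shift for negative int32 as on all V8 targets:

#include <cassert>
#include <cstdint>

// ECMAScript >> : signed shift, count masked to 5 bits.
int32_t FoldSar(int32_t left, int32_t right) {
  return left >> (right & 0x1f);
}

// ECMAScript >>> : left operand reinterpreted as uint32 first.
uint32_t FoldShr(int32_t left, int32_t right) {
  return static_cast<uint32_t>(left) >> (right & 0x1f);
}

int main() {
  assert(FoldSar(-8, 1) == -4);           // sign-propagating shift
  assert(FoldSar(8, 33) == 4);            // count 33 masks to 1
  assert(FoldShr(-1, 0) == 0xffffffffu);  // -1 >>> 0 is 4294967295
  return 0;
}
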
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.h b/deps/v8/src/crankshaft/hydrogen-instructions.h
index 13ada8c606..22ed052ba3 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.h
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.h
@@ -48,7 +48,6 @@ class LChunkBuilder;
V(AbnormalExit) \
V(AccessArgumentsAt) \
V(Add) \
- V(AllocateBlockContext) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -64,7 +63,6 @@ class LChunkBuilder;
V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
- V(CallStub) \
V(CapturedObject) \
V(Change) \
V(CheckArrayBufferNotNeutered) \
@@ -79,7 +77,6 @@ class LChunkBuilder;
V(CompareNumericAndBranch) \
V(CompareHoleAndBranch) \
V(CompareGeneric) \
- V(CompareMinusZeroAndBranch) \
V(CompareObjectEqAndBranch) \
V(CompareMap) \
V(Constant) \
@@ -117,7 +114,6 @@ class LChunkBuilder;
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadRoot) \
- V(MapEnumLength) \
V(MathFloorOfDiv) \
V(MathMinMax) \
V(MaybeGrowElements) \
@@ -2402,7 +2398,9 @@ class HCallFunction final : public HBinaryCall {
HValue* context() const { return first(); }
HValue* function() const { return second(); }
- ConvertReceiverMode convert_mode() const { return convert_mode_; }
+ ConvertReceiverMode convert_mode() const {
+ return ConvertReceiverModeField::decode(bit_field_);
+ }
FeedbackVectorSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
@@ -2424,10 +2422,14 @@ class HCallFunction final : public HBinaryCall {
HCallFunction(HValue* context, HValue* function, int argument_count,
ConvertReceiverMode convert_mode)
: HBinaryCall(context, function, argument_count),
- convert_mode_(convert_mode) {}
+ bit_field_(ConvertReceiverModeField::encode(convert_mode)) {}
Handle<TypeFeedbackVector> feedback_vector_;
FeedbackVectorSlot slot_;
- ConvertReceiverMode convert_mode_;
+
+ class ConvertReceiverModeField : public BitField<ConvertReceiverMode, 0, 2> {
+ };
+
+ uint32_t bit_field_;
};
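
The HCallFunction change swaps a raw enum member for a packed bit_field_ via V8's BitField<T, shift, size> helper. A minimal re-implementation showing the encode/decode round trip (the real helper lives in src/utils.h and carries more checking):

#include <cassert>
#include <cstdint>

// Minimal version of the BitField helper behind ConvertReceiverModeField.
template <class T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t storage) {
    return static_cast<T>((storage & kMask) >> shift);
  }
  static uint32_t update(uint32_t storage, T value) {
    return (storage & ~kMask) | encode(value);
  }
};

enum class ConvertReceiverMode : uint32_t {
  kNullOrUndefined,
  kNotNullOrUndefined,
  kAny
};

int main() {
  using ModeField = BitField<ConvertReceiverMode, 0, 2>;
  uint32_t bits = ModeField::encode(ConvertReceiverMode::kAny);
  assert(ModeField::decode(bits) == ConvertReceiverMode::kAny);
  bits = ModeField::update(bits, ConvertReceiverMode::kNullOrUndefined);
  assert(ModeField::decode(bits) == ConvertReceiverMode::kNullOrUndefined);
  return 0;
}

Later fields can then share the remaining bits of the same word, which is the point of replacing the bare member.
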
@@ -2493,31 +2495,6 @@ class HCallRuntime final : public HCall<1> {
};
-class HMapEnumLength final : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HMapEnumLength, HValue*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength)
-
- protected:
- bool DataEquals(HValue* other) override { return true; }
-
- private:
- explicit HMapEnumLength(HValue* value)
- : HUnaryOperation(value, HType::Smi()) {
- set_representation(Representation::Smi());
- SetFlag(kUseGVN);
- SetDependsOnFlag(kMaps);
- }
-
- bool IsDeletable() const override { return true; }
-};
-
-
class HUnaryMathOperation final : public HTemplateInstruction<2> {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
@@ -2810,8 +2787,6 @@ class HCheckValue final : public HUnaryOperation {
bool in_new_space = isolate->heap()->InNewSpace(*func);
// NOTE: We create an uninitialized Unique and initialize it later.
// This is because a JSFunction can move due to GC during graph creation.
- // TODO(titzer): This is a migration crutch. Replace with some kind of
- // Uniqueness scope later.
Unique<JSFunction> target = Unique<JSFunction>::CreateUninitialized(func);
HCheckValue* check = new(zone) HCheckValue(value, target, in_new_space);
return check;
@@ -3341,7 +3316,6 @@ class HPhi final : public HValue {
Representation representation_from_non_phi_uses_ = Representation::None();
bool has_type_feedback_from_uses_ = false;
- // TODO(titzer): we can't eliminate the receiver for generating backtraces
bool IsDeletable() const override { return !IsReceiver(); }
};
@@ -3756,9 +3730,8 @@ class HConstant final : public HTemplateInstruction<0> {
class HBinaryOperation : public HTemplateInstruction<3> {
public:
HBinaryOperation(HValue* context, HValue* left, HValue* right,
- Strength strength, HType type = HType::Tagged())
+ HType type = HType::Tagged())
: HTemplateInstruction<3>(type),
- strength_(strength),
observed_output_representation_(Representation::None()) {
DCHECK(left != NULL && right != NULL);
SetOperandAt(0, context);
@@ -3771,7 +3744,6 @@ class HBinaryOperation : public HTemplateInstruction<3> {
HValue* context() const { return OperandAt(0); }
HValue* left() const { return OperandAt(1); }
HValue* right() const { return OperandAt(2); }
- Strength strength() const { return strength_; }
// True if switching left and right operands likely generates better code.
bool AreOperandsBetterSwitched() {
@@ -3847,13 +3819,10 @@ class HBinaryOperation : public HTemplateInstruction<3> {
return base::bits::IsPowerOfTwo32(static_cast<uint32_t>(value));
}
- Strength strength() { return strength_; }
-
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
private:
bool IgnoreObservedOutputRepresentation(Representation current_rep);
- Strength strength_;
Representation observed_input_representation_[2];
Representation observed_output_representation_;
@@ -4123,11 +4092,10 @@ class HBoundsCheckBaseIndexInformation final : public HTemplateInstruction<2> {
class HBitwiseBinaryOperation : public HBinaryOperation {
public:
HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right,
- Strength strength, HType type = HType::TaggedNumber())
- : HBinaryOperation(context, left, right, strength, type) {
+ HType type = HType::TaggedNumber())
+ : HBinaryOperation(context, left, right, type) {
SetFlag(kFlexibleRepresentation);
SetFlag(kTruncatingToInt32);
- if (!is_strong(strength)) SetFlag(kAllowUndefinedAsNaN);
SetAllSideEffects();
}
@@ -4182,7 +4150,7 @@ class HMathFloorOfDiv final : public HBinaryOperation {
private:
HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right, Strength::WEAK) {
+ : HBinaryOperation(context, left, right) {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
SetFlag(kCanOverflow);
@@ -4201,13 +4169,11 @@ class HMathFloorOfDiv final : public HBinaryOperation {
class HArithmeticBinaryOperation : public HBinaryOperation {
public:
- HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right,
- Strength strength)
- : HBinaryOperation(context, left, right, strength,
- HType::TaggedNumber()) {
+ HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
+ : HBinaryOperation(context, left, right, HType::TaggedNumber()) {
SetAllSideEffects();
SetFlag(kFlexibleRepresentation);
- if (!is_strong(strength)) SetFlag(kAllowUndefinedAsNaN);
+ SetFlag(kAllowUndefinedAsNaN);
}
void RepresentationChanged(Representation to) override {
@@ -4232,9 +4198,8 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
class HCompareGeneric final : public HBinaryOperation {
public:
static HCompareGeneric* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right, Token::Value token,
- Strength strength = Strength::WEAK) {
- return new (zone) HCompareGeneric(context, left, right, token, strength);
+ HValue* left, HValue* right, Token::Value token) {
+ return new (zone) HCompareGeneric(context, left, right, token);
}
Representation RequiredInputRepresentation(int index) override {
@@ -4250,8 +4215,8 @@ class HCompareGeneric final : public HBinaryOperation {
private:
HCompareGeneric(HValue* context, HValue* left, HValue* right,
- Token::Value token, Strength strength)
- : HBinaryOperation(context, left, right, strength, HType::Boolean()),
+ Token::Value token)
+ : HBinaryOperation(context, left, right, HType::Boolean()),
token_(token) {
DCHECK(Token::IsCompareOp(token));
set_representation(Representation::Tagged());
@@ -4268,17 +4233,9 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
HValue* context, HValue* left,
HValue* right, Token::Value token,
HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL,
- Strength strength = Strength::WEAK) {
- return new (zone) HCompareNumericAndBranch(left, right, token, true_target,
- false_target, strength);
- }
- static HCompareNumericAndBranch* New(Isolate* isolate, Zone* zone,
- HValue* context, HValue* left,
- HValue* right, Token::Value token,
- Strength strength) {
+ HBasicBlock* false_target = NULL) {
return new (zone)
- HCompareNumericAndBranch(left, right, token, NULL, NULL, strength);
+ HCompareNumericAndBranch(left, right, token, true_target, false_target);
}
HValue* left() const { return OperandAt(0); }
@@ -4302,8 +4259,6 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
bool KnownSuccessorBlock(HBasicBlock** block) override;
- Strength strength() const { return strength_; }
-
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
void SetOperandPositions(Zone* zone, SourcePosition left_pos,
@@ -4316,9 +4271,8 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
private:
HCompareNumericAndBranch(HValue* left, HValue* right, Token::Value token,
- HBasicBlock* true_target, HBasicBlock* false_target,
- Strength strength)
- : token_(token), strength_(strength) {
+ HBasicBlock* true_target, HBasicBlock* false_target)
+ : token_(token) {
SetFlag(kFlexibleRepresentation);
DCHECK(Token::IsCompareOp(token));
SetOperandAt(0, left);
@@ -4329,7 +4283,6 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
Representation observed_input_representation_[2];
Token::Value token_;
- Strength strength_;
};
@@ -4358,27 +4311,6 @@ class HCompareHoleAndBranch final : public HUnaryControlInstruction {
};
-class HCompareMinusZeroAndBranch final : public HUnaryControlInstruction {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HCompareMinusZeroAndBranch, HValue*);
-
- void InferRepresentation(HInferRepresentationPhase* h_infer) override;
-
- Representation RequiredInputRepresentation(int index) override {
- return representation();
- }
-
- bool KnownSuccessorBlock(HBasicBlock** block) override;
-
- DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch)
-
- private:
- explicit HCompareMinusZeroAndBranch(HValue* value)
- : HUnaryControlInstruction(value, NULL, NULL) {
- }
-};
-
-
class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HCompareObjectEqAndBranch, HValue*, HValue*);
@@ -4682,8 +4614,7 @@ class HInstanceOf final : public HBinaryOperation {
private:
HInstanceOf(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right, Strength::WEAK,
- HType::Boolean()) {
+ : HBinaryOperation(context, left, right, HType::Boolean()) {
set_representation(Representation::Tagged());
SetAllSideEffects();
}
@@ -4766,10 +4697,9 @@ enum ExternalAddType {
class HAdd final : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- Strength strength = Strength::WEAK);
+ HValue* left, HValue* right);
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right, Strength strength,
+ HValue* left, HValue* right,
ExternalAddType external_add_type);
// Add is only commutative if two integer values are added and not if two
@@ -4831,9 +4761,9 @@ class HAdd final : public HArithmeticBinaryOperation {
Range* InferRange(Zone* zone) override;
private:
- HAdd(HValue* context, HValue* left, HValue* right, Strength strength,
+ HAdd(HValue* context, HValue* left, HValue* right,
ExternalAddType external_add_type = NoExternalAdd)
- : HArithmeticBinaryOperation(context, left, right, strength),
+ : HArithmeticBinaryOperation(context, left, right),
external_add_type_(external_add_type) {
SetFlag(kCanOverflow);
switch (external_add_type_) {
@@ -4868,8 +4798,7 @@ class HAdd final : public HArithmeticBinaryOperation {
class HSub final : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- Strength strength = Strength::WEAK);
+ HValue* left, HValue* right);
HValue* Canonicalize() override;
@@ -4890,8 +4819,8 @@ class HSub final : public HArithmeticBinaryOperation {
Range* InferRange(Zone* zone) override;
private:
- HSub(HValue* context, HValue* left, HValue* right, Strength strength)
- : HArithmeticBinaryOperation(context, left, right, strength) {
+ HSub(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanOverflow);
}
};
@@ -4900,14 +4829,11 @@ class HSub final : public HArithmeticBinaryOperation {
class HMul final : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- Strength strength = Strength::WEAK);
+ HValue* left, HValue* right);
static HInstruction* NewImul(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- Strength strength = Strength::WEAK) {
- HInstruction* instr =
- HMul::New(isolate, zone, context, left, right, strength);
+ HValue* left, HValue* right) {
+ HInstruction* instr = HMul::New(isolate, zone, context, left, right);
if (!instr->IsMul()) return instr;
HMul* mul = HMul::cast(instr);
// TODO(mstarzinger): Prevent bailout on minus zero for imul.
@@ -4937,8 +4863,8 @@ class HMul final : public HArithmeticBinaryOperation {
Range* InferRange(Zone* zone) override;
private:
- HMul(HValue* context, HValue* left, HValue* right, Strength strength)
- : HArithmeticBinaryOperation(context, left, right, strength) {
+ HMul(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanOverflow);
}
};
@@ -4947,8 +4873,7 @@ class HMul final : public HArithmeticBinaryOperation {
class HMod final : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- Strength strength = Strength::WEAK);
+ HValue* left, HValue* right);
HValue* Canonicalize() override;
@@ -4967,8 +4892,8 @@ class HMod final : public HArithmeticBinaryOperation {
Range* InferRange(Zone* zone) override;
private:
- HMod(HValue* context, HValue* left, HValue* right, Strength strength)
- : HArithmeticBinaryOperation(context, left, right, strength) {
+ HMod(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanBeDivByZero);
SetFlag(kCanOverflow);
SetFlag(kLeftCanBeNegative);
@@ -4979,8 +4904,7 @@ class HMod final : public HArithmeticBinaryOperation {
class HDiv final : public HArithmeticBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- Strength strength = Strength::WEAK);
+ HValue* left, HValue* right);
HValue* Canonicalize() override;
@@ -4999,8 +4923,8 @@ class HDiv final : public HArithmeticBinaryOperation {
Range* InferRange(Zone* zone) override;
private:
- HDiv(HValue* context, HValue* left, HValue* right, Strength strength)
- : HArithmeticBinaryOperation(context, left, right, strength) {
+ HDiv(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
SetFlag(kCanBeDivByZero);
SetFlag(kCanOverflow);
}
@@ -5046,8 +4970,7 @@ class HMathMinMax final : public HArithmeticBinaryOperation {
private:
HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
- : HArithmeticBinaryOperation(context, left, right, Strength::WEAK),
- operation_(op) {}
+ : HArithmeticBinaryOperation(context, left, right), operation_(op) {}
Operation operation_;
};
@@ -5056,8 +4979,7 @@ class HMathMinMax final : public HArithmeticBinaryOperation {
class HBitwise final : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- Token::Value op, HValue* left, HValue* right,
- Strength strength = Strength::WEAK);
+ Token::Value op, HValue* left, HValue* right);
Token::Value op() const { return op_; }
@@ -5077,9 +4999,8 @@ class HBitwise final : public HBitwiseBinaryOperation {
Range* InferRange(Zone* zone) override;
private:
- HBitwise(HValue* context, Token::Value op, HValue* left, HValue* right,
- Strength strength)
- : HBitwiseBinaryOperation(context, left, right, strength), op_(op) {
+ HBitwise(HValue* context, Token::Value op, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right), op_(op) {
DCHECK(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR);
// BIT_AND with a smi-range positive value will always unset the
// entire sign-extension of the smi-sign.
@@ -5113,8 +5034,7 @@ class HBitwise final : public HBitwiseBinaryOperation {
class HShl final : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- Strength strength = Strength::WEAK);
+ HValue* left, HValue* right);
Range* InferRange(Zone* zone) override;
@@ -5135,16 +5055,15 @@ class HShl final : public HBitwiseBinaryOperation {
bool DataEquals(HValue* other) override { return true; }
private:
- HShl(HValue* context, HValue* left, HValue* right, Strength strength)
- : HBitwiseBinaryOperation(context, left, right, strength) {}
+ HShl(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) {}
};
class HShr final : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- Strength strength = Strength::WEAK);
+ HValue* left, HValue* right);
bool TryDecompose(DecompositionResult* decomposition) override {
if (right()->IsInteger32Constant()) {
@@ -5173,16 +5092,15 @@ class HShr final : public HBitwiseBinaryOperation {
bool DataEquals(HValue* other) override { return true; }
private:
- HShr(HValue* context, HValue* left, HValue* right, Strength strength)
- : HBitwiseBinaryOperation(context, left, right, strength) {}
+ HShr(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) {}
};
class HSar final : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- Strength strength = Strength::WEAK);
+ HValue* left, HValue* right);
bool TryDecompose(DecompositionResult* decomposition) override {
if (right()->IsInteger32Constant()) {
@@ -5211,17 +5129,16 @@ class HSar final : public HBitwiseBinaryOperation {
bool DataEquals(HValue* other) override { return true; }
private:
- HSar(HValue* context, HValue* left, HValue* right, Strength strength)
- : HBitwiseBinaryOperation(context, left, right, strength) {}
+ HSar(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) {}
};
class HRor final : public HBitwiseBinaryOperation {
public:
static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right,
- Strength strength = Strength::WEAK) {
- return new (zone) HRor(context, left, right, strength);
+ HValue* left, HValue* right) {
+ return new (zone) HRor(context, left, right);
}
void UpdateRepresentation(Representation new_rep,
@@ -5237,8 +5154,8 @@ class HRor final : public HBitwiseBinaryOperation {
bool DataEquals(HValue* other) override { return true; }
private:
- HRor(HValue* context, HValue* left, HValue* right, Strength strength)
- : HBitwiseBinaryOperation(context, left, right, strength) {
+ HRor(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) {
ChangeRepresentation(Representation::Integer32());
}
};
@@ -5316,27 +5233,6 @@ class HParameter final : public HTemplateInstruction<0> {
};
-class HCallStub final : public HUnaryCall {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallStub, CodeStub::Major, int);
- CodeStub::Major major_key() { return major_key_; }
-
- HValue* context() { return value(); }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub)
-
- private:
- HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
- : HUnaryCall(context, argument_count),
- major_key_(major_key) {
- }
-
- CodeStub::Major major_key_;
-};
-
-
class HUnknownOSRValue final : public HTemplateInstruction<0> {
public:
DECLARE_INSTRUCTION_FACTORY_P2(HUnknownOSRValue, HEnvironment*, int);
@@ -6027,6 +5923,11 @@ class HObjectAccess final {
Representation::UInteger8());
}
+ static HObjectAccess ForMapBitField3() {
+ return HObjectAccess(kInobject, Map::kBitField3Offset,
+ Representation::Integer32());
+ }
+
static HObjectAccess ForNameHashField() {
return HObjectAccess(kInobject,
Name::kHashFieldOffset,
@@ -6399,9 +6300,8 @@ class HLoadNamedField final : public HTemplateInstruction<2> {
class HLoadNamedGeneric final : public HTemplateInstruction<2> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadNamedGeneric, HValue*,
- Handle<Name>, LanguageMode,
- InlineCacheState);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadNamedGeneric, HValue*,
+ Handle<Name>, InlineCacheState);
HValue* context() const { return OperandAt(0); }
HValue* object() const { return OperandAt(1); }
@@ -6429,14 +6329,10 @@ class HLoadNamedGeneric final : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
- LanguageMode language_mode() const { return language_mode_; }
-
private:
HLoadNamedGeneric(HValue* context, HValue* object, Handle<Name> name,
- LanguageMode language_mode,
InlineCacheState initialization_state)
: name_(name),
- language_mode_(language_mode),
initialization_state_(initialization_state) {
SetOperandAt(0, context);
SetOperandAt(1, object);
@@ -6447,7 +6343,6 @@ class HLoadNamedGeneric final : public HTemplateInstruction<2> {
Handle<Name> name_;
Handle<TypeFeedbackVector> feedback_vector_;
FeedbackVectorSlot slot_;
- LanguageMode language_mode_;
InlineCacheState initialization_state_;
};
@@ -6690,9 +6585,8 @@ class HLoadKeyed final : public HTemplateInstruction<4>,
class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadKeyedGeneric, HValue*,
- HValue*, LanguageMode,
- InlineCacheState);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadKeyedGeneric, HValue*,
+ HValue*, InlineCacheState);
HValue* object() const { return OperandAt(0); }
HValue* key() const { return OperandAt(1); }
HValue* context() const { return OperandAt(2); }
@@ -6724,14 +6618,10 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
- LanguageMode language_mode() const { return language_mode_; }
-
private:
HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key,
- LanguageMode language_mode,
InlineCacheState initialization_state)
- : initialization_state_(initialization_state),
- language_mode_(language_mode) {
+ : initialization_state_(initialization_state) {
set_representation(Representation::Tagged());
SetOperandAt(0, obj);
SetOperandAt(1, key);
@@ -6742,7 +6632,6 @@ class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
Handle<TypeFeedbackVector> feedback_vector_;
FeedbackVectorSlot slot_;
InlineCacheState initialization_state_;
- LanguageMode language_mode_;
};
@@ -7298,7 +7187,7 @@ class HStringAdd final : public HBinaryOperation {
HStringAdd(HValue* context, HValue* left, HValue* right,
PretenureFlag pretenure_flag, StringAddFlags flags,
Handle<AllocationSite> allocation_site)
- : HBinaryOperation(context, left, right, Strength::WEAK, HType::String()),
+ : HBinaryOperation(context, left, right, HType::String()),
flags_(flags),
pretenure_flag_(pretenure_flag) {
set_representation(Representation::Tagged());
@@ -7778,36 +7667,6 @@ class HStoreFrameContext: public HUnaryOperation {
};
-class HAllocateBlockContext: public HTemplateInstruction<2> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P3(HAllocateBlockContext, HValue*,
- HValue*, Handle<ScopeInfo>);
- HValue* context() const { return OperandAt(0); }
- HValue* function() const { return OperandAt(1); }
- Handle<ScopeInfo> scope_info() const { return scope_info_; }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext)
-
- private:
- HAllocateBlockContext(HValue* context,
- HValue* function,
- Handle<ScopeInfo> scope_info)
- : scope_info_(scope_info) {
- SetOperandAt(0, context);
- SetOperandAt(1, function);
- set_representation(Representation::Tagged());
- }
-
- Handle<ScopeInfo> scope_info_;
-};
-
-
#undef DECLARE_INSTRUCTION
#undef DECLARE_CONCRETE_INSTRUCTION
diff --git a/deps/v8/src/crankshaft/hydrogen-load-elimination.cc b/deps/v8/src/crankshaft/hydrogen-load-elimination.cc
index da8d1864a6..88963fc18b 100644
--- a/deps/v8/src/crankshaft/hydrogen-load-elimination.cc
+++ b/deps/v8/src/crankshaft/hydrogen-load-elimination.cc
@@ -243,7 +243,6 @@ class HLoadEliminationTable : public ZoneObject {
if (instr->has_transition()) {
// A transition introduces a new field and alters the map of the object.
// Since the field in the object is new, it cannot alias existing entries.
- // TODO(titzer): introduce a constant for the new map and remember it.
KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
} else {
// Kill non-equivalent may-alias entries.
@@ -402,7 +401,6 @@ class HLoadEliminationTable : public ZoneObject {
// Compute the field index for the given in-object offset; -1 if not tracked.
int FieldOf(int offset) {
if (offset >= kMaxTrackedFields * kPointerSize) return -1;
- // TODO(titzer): track misaligned loads in a separate list?
if ((offset % kPointerSize) != 0) return -1; // Ignore misaligned accesses.
return offset / kPointerSize;
}
diff --git a/deps/v8/src/crankshaft/hydrogen-range-analysis.cc b/deps/v8/src/crankshaft/hydrogen-range-analysis.cc
index f5eba5e571..a489e014eb 100644
--- a/deps/v8/src/crankshaft/hydrogen-range-analysis.cc
+++ b/deps/v8/src/crankshaft/hydrogen-range-analysis.cc
@@ -71,12 +71,6 @@ void HRangeAnalysisPhase::Run() {
instr->to().IsSmiOrInteger32());
PropagateMinusZeroChecks(instr->value());
}
- } else if (value->IsCompareMinusZeroAndBranch()) {
- HCompareMinusZeroAndBranch* instr =
- HCompareMinusZeroAndBranch::cast(value);
- if (instr->value()->representation().IsSmiOrInteger32()) {
- PropagateMinusZeroChecks(instr->value());
- }
}
}
diff --git a/deps/v8/src/crankshaft/hydrogen-store-elimination.cc b/deps/v8/src/crankshaft/hydrogen-store-elimination.cc
index ba32c8ad6b..57c7880aa7 100644
--- a/deps/v8/src/crankshaft/hydrogen-store-elimination.cc
+++ b/deps/v8/src/crankshaft/hydrogen-store-elimination.cc
@@ -36,7 +36,6 @@ void HStoreEliminationPhase::Run() {
HInstruction* instr = it.Current();
if (instr->CheckFlag(HValue::kIsDead)) continue;
- // TODO(titzer): eliminate unobserved HStoreKeyed instructions too.
switch (instr->opcode()) {
case HValue::kStoreNamedField:
// Remove any unobserved stores overwritten by this store.
@@ -68,7 +67,6 @@ void HStoreEliminationPhase::ProcessStore(HStoreNamedField* store) {
prev->id(), store->id()));
unobserved_.Remove(i);
} else {
- // TODO(titzer): remove map word clearing from folded allocations.
i++;
}
}
diff --git a/deps/v8/src/crankshaft/hydrogen-types.cc b/deps/v8/src/crankshaft/hydrogen-types.cc
index 9c5e34194e..8c8562581a 100644
--- a/deps/v8/src/crankshaft/hydrogen-types.cc
+++ b/deps/v8/src/crankshaft/hydrogen-types.cc
@@ -4,39 +4,33 @@
#include "src/crankshaft/hydrogen-types.h"
+#include "src/field-type.h"
+#include "src/handles-inl.h"
#include "src/ostreams.h"
-#include "src/types-inl.h"
-
namespace v8 {
namespace internal {
// static
-template <class T>
-HType HType::FromType(typename T::TypeHandle type) {
- if (T::Any()->Is(type)) return HType::Any();
+HType HType::FromType(Type* type) {
+ if (Type::Any()->Is(type)) return HType::Any();
if (!type->IsInhabited()) return HType::None();
- if (type->Is(T::SignedSmall())) return HType::Smi();
- if (type->Is(T::Number())) return HType::TaggedNumber();
- if (type->Is(T::Null())) return HType::Null();
- if (type->Is(T::String())) return HType::String();
- if (type->Is(T::Boolean())) return HType::Boolean();
- if (type->Is(T::Undefined())) return HType::Undefined();
- if (type->Is(T::Object())) return HType::JSObject();
- if (type->Is(T::Receiver())) return HType::JSReceiver();
+ if (type->Is(Type::SignedSmall())) return HType::Smi();
+ if (type->Is(Type::Number())) return HType::TaggedNumber();
+ if (type->Is(Type::Null())) return HType::Null();
+ if (type->Is(Type::String())) return HType::String();
+ if (type->Is(Type::Boolean())) return HType::Boolean();
+ if (type->Is(Type::Undefined())) return HType::Undefined();
+ if (type->Is(Type::Object())) return HType::JSObject();
+ if (type->Is(Type::Receiver())) return HType::JSReceiver();
return HType::Tagged();
}
// static
-template
-HType HType::FromType<Type>(Type* type);
-
-
-// static
-template
-HType HType::FromType<HeapType>(Handle<HeapType> type);
-
+HType HType::FromFieldType(Handle<FieldType> type, Zone* temp_zone) {
+ return FromType(type->Convert(temp_zone));
+}
// static
HType HType::FromValue(Handle<Object> value) {
diff --git a/deps/v8/src/crankshaft/hydrogen-types.h b/deps/v8/src/crankshaft/hydrogen-types.h
index 87148ee4cd..0690ece34f 100644
--- a/deps/v8/src/crankshaft/hydrogen-types.h
+++ b/deps/v8/src/crankshaft/hydrogen-types.h
@@ -9,12 +9,14 @@
#include <iosfwd>
#include "src/base/macros.h"
+#include "src/types.h"
namespace v8 {
namespace internal {
// Forward declarations.
template <typename T> class Handle;
+class FieldType;
class Object;
#define HTYPE_LIST(V) \
@@ -62,8 +64,9 @@ class HType final {
HTYPE_LIST(DECLARE_IS_TYPE)
#undef DECLARE_IS_TYPE
- template <class T>
- static HType FromType(typename T::TypeHandle type) WARN_UNUSED_RESULT;
+ static HType FromType(Type* type) WARN_UNUSED_RESULT;
+ static HType FromFieldType(Handle<FieldType> type,
+ Zone* temp_zone) WARN_UNUSED_RESULT;
static HType FromValue(Handle<Object> value) WARN_UNUSED_RESULT;
friend std::ostream& operator<<(std::ostream& os, const HType& t);
diff --git a/deps/v8/src/crankshaft/hydrogen.cc b/deps/v8/src/crankshaft/hydrogen.cc
index 98337be052..b6fdd3a315 100644
--- a/deps/v8/src/crankshaft/hydrogen.cc
+++ b/deps/v8/src/crankshaft/hydrogen.cc
@@ -34,6 +34,7 @@
#include "src/crankshaft/hydrogen-uint32-analysis.h"
#include "src/crankshaft/lithium-allocator.h"
#include "src/crankshaft/typing.h"
+#include "src/field-type.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
@@ -687,33 +688,28 @@ HConstant* HGraph::GetConstantBool(bool value) {
return value ? GetConstantTrue() : GetConstantFalse();
}
-
-#define DEFINE_GET_CONSTANT(Name, name, type, htype, boolean_value) \
-HConstant* HGraph::GetConstant##Name() { \
- if (!constant_##name##_.is_set()) { \
- HConstant* constant = new(zone()) HConstant( \
- Unique<Object>::CreateImmovable(isolate()->factory()->name##_value()), \
- Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()), \
- false, \
- Representation::Tagged(), \
- htype, \
- true, \
- boolean_value, \
- false, \
- ODDBALL_TYPE); \
- constant->InsertAfter(entry_block()->first()); \
- constant_##name##_.set(constant); \
- } \
- return ReinsertConstantIfNecessary(constant_##name##_.get()); \
-}
-
-
-DEFINE_GET_CONSTANT(Undefined, undefined, undefined, HType::Undefined(), false)
-DEFINE_GET_CONSTANT(True, true, boolean, HType::Boolean(), true)
-DEFINE_GET_CONSTANT(False, false, boolean, HType::Boolean(), false)
-DEFINE_GET_CONSTANT(Hole, the_hole, the_hole, HType::None(), false)
-DEFINE_GET_CONSTANT(Null, null, null, HType::Null(), false)
-
+#define DEFINE_GET_CONSTANT(Name, name, type, htype, boolean_value, \
+ undetectable) \
+ HConstant* HGraph::GetConstant##Name() { \
+ if (!constant_##name##_.is_set()) { \
+ HConstant* constant = new (zone()) HConstant( \
+ Unique<Object>::CreateImmovable( \
+ isolate()->factory()->name##_value()), \
+ Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()), \
+ false, Representation::Tagged(), htype, true, boolean_value, \
+ undetectable, ODDBALL_TYPE); \
+ constant->InsertAfter(entry_block()->first()); \
+ constant_##name##_.set(constant); \
+ } \
+ return ReinsertConstantIfNecessary(constant_##name##_.get()); \
+ }
+
+DEFINE_GET_CONSTANT(Undefined, undefined, undefined, HType::Undefined(), false,
+ true)
+DEFINE_GET_CONSTANT(True, true, boolean, HType::Boolean(), true, false)
+DEFINE_GET_CONSTANT(False, false, boolean, HType::Boolean(), false, false)
+DEFINE_GET_CONSTANT(Hole, the_hole, the_hole, HType::None(), false, false)
+DEFINE_GET_CONSTANT(Null, null, null, HType::Null(), false, true)
#undef DEFINE_GET_CONSTANT
@@ -1185,7 +1181,7 @@ void HGraphBuilder::LoopBuilder::EndBody() {
HGraph* HGraphBuilder::CreateGraph() {
- graph_ = new(zone()) HGraph(info_);
+ graph_ = new (zone()) HGraph(info_, descriptor_);
if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
CompilationPhase phase("H_Block building", info_);
set_current_block(graph()->entry_block());
@@ -1275,6 +1271,14 @@ HValue* HGraphBuilder::BuildGetElementsKind(HValue* object) {
}
+HValue* HGraphBuilder::BuildEnumLength(HValue* map) {
+ NoObservableSideEffectsScope scope(this);
+ HValue* bit_field3 =
+ Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField3());
+ return BuildDecodeField<Map::EnumLengthBits>(bit_field3);
+}
+
+
HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
if (obj->type().IsHeapObject()) return obj;
return Add<HCheckHeapObject>(obj);
@@ -1671,10 +1675,10 @@ HValue* HGraphBuilder::BuildElementIndexHash(HValue* index) {
return AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
}
-
-HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
- HValue* receiver, HValue* elements, HValue* key, HValue* hash,
- LanguageMode language_mode) {
+HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
+ HValue* elements,
+ HValue* key,
+ HValue* hash) {
HValue* capacity =
Add<HLoadKeyed>(elements, Add<HConstant>(NameDictionary::kCapacityIndex),
nullptr, nullptr, FAST_ELEMENTS);
@@ -1717,11 +1721,8 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
// element == undefined means "not found". Call the runtime.
// TODO(jkummerow): walk the prototype chain instead.
Add<HPushArguments>(receiver, key);
- Push(Add<HCallRuntime>(
- Runtime::FunctionForId(is_strong(language_mode)
- ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty),
- 2));
+ Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kKeyedGetProperty),
+ 2));
}
if_undefined.Else();
{
@@ -1780,11 +1781,8 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
FAST_ELEMENTS));
details_compare.Else();
Add<HPushArguments>(receiver, key);
- Push(Add<HCallRuntime>(
- Runtime::FunctionForId(is_strong(language_mode)
- ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty),
- 2));
+ Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kKeyedGetProperty),
+ 2));
details_compare.End();
found_key_match.Else();
@@ -1819,7 +1817,7 @@ HValue* HGraphBuilder::BuildCreateIterResultObject(HValue* value,
// Allocate the JSIteratorResult object.
HValue* result =
Add<HAllocate>(Add<HConstant>(JSIteratorResult::kSize), HType::JSObject(),
- NOT_TENURED, JS_ITERATOR_RESULT_TYPE);
+ NOT_TENURED, JS_OBJECT_TYPE);
// Initialize the JSIteratorResult object.
HValue* native_context = BuildGetNativeContext();
@@ -2052,6 +2050,20 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
return Pop();
}
+HValue* HGraphBuilder::BuildToNumber(HValue* input) {
+ if (input->type().IsTaggedNumber()) {
+ return input;
+ }
+ Callable callable = CodeFactory::ToNumber(isolate());
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {context(), input};
+ HCallWithDescriptor* instr =
+ Add<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)));
+ instr->set_type(HType::TaggedNumber());
+ return instr;
+}
+
HValue* HGraphBuilder::BuildToObject(HValue* receiver) {
NoObservableSideEffectsScope scope(this);
@@ -2578,8 +2590,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HObjectAccess::ForFixedTypedArrayBaseExternalPointer());
HValue* base_pointer = Add<HLoadNamedField>(
elements, nullptr, HObjectAccess::ForFixedTypedArrayBaseBasePointer());
- HValue* backing_store = AddUncasted<HAdd>(
- external_pointer, base_pointer, Strength::WEAK, AddOfExternalAndTagged);
+ HValue* backing_store = AddUncasted<HAdd>(external_pointer, base_pointer,
+ AddOfExternalAndTagged);
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
NoObservableSideEffectsScope no_effects(this);
@@ -3171,37 +3183,24 @@ void HGraphBuilder::BuildCompareNil(HValue* value, Type* type,
HIfContinuation* continuation,
MapEmbedding map_embedding) {
IfBuilder if_nil(this);
- bool some_case_handled = false;
- bool some_case_missing = false;
-
- if (type->Maybe(Type::Null())) {
- if (some_case_handled) if_nil.Or();
- if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
- some_case_handled = true;
- } else {
- some_case_missing = true;
- }
-
- if (type->Maybe(Type::Undefined())) {
- if (some_case_handled) if_nil.Or();
- if_nil.If<HCompareObjectEqAndBranch>(value,
- graph()->GetConstantUndefined());
- some_case_handled = true;
- } else {
- some_case_missing = true;
- }
if (type->Maybe(Type::Undetectable())) {
- if (some_case_handled) if_nil.Or();
if_nil.If<HIsUndetectableAndBranch>(value);
- some_case_handled = true;
} else {
- some_case_missing = true;
- }
+ bool maybe_null = type->Maybe(Type::Null());
+ if (maybe_null) {
+ if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
+ }
+
+ if (type->Maybe(Type::Undefined())) {
+ if (maybe_null) if_nil.Or();
+ if_nil.If<HCompareObjectEqAndBranch>(value,
+ graph()->GetConstantUndefined());
+ }
- if (some_case_missing) {
if_nil.Then();
if_nil.Else();
+
if (type->NumClasses() == 1) {
BuildCheckHeapObject(value);
// For ICs, the map checked below is a sentinel map that gets replaced by
@@ -3547,14 +3546,14 @@ HValue* HGraphBuilder::AddLoadJSBuiltin(int context_index) {
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
- : HGraphBuilder(info),
+ : HGraphBuilder(info, CallInterfaceDescriptor()),
function_state_(NULL),
initial_function_state_(this, info, NORMAL_RETURN, 0),
ast_context_(NULL),
break_scope_(NULL),
inlined_count_(0),
globals_(10, info->zone()),
- osr_(new(info->zone()) HOsrBuilder(this)) {
+ osr_(new (info->zone()) HOsrBuilder(this)) {
// This is not initialized in the initializer list because the
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
@@ -3641,7 +3640,7 @@ std::ostream& operator<<(std::ostream& os, const HBasicBlock& b) {
}
-HGraph::HGraph(CompilationInfo* info)
+HGraph::HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor)
: isolate_(info->isolate()),
next_block_id_(0),
entry_block_(NULL),
@@ -3651,6 +3650,7 @@ HGraph::HGraph(CompilationInfo* info)
uint32_instructions_(NULL),
osr_(NULL),
info_(info),
+ descriptor_(descriptor),
zone_(info->zone()),
is_recursive_(false),
use_optimistic_licm_(false),
@@ -3660,10 +3660,9 @@ HGraph::HGraph(CompilationInfo* info)
no_side_effects_scope_count_(0),
disallow_adding_new_values_(false) {
if (info->IsStub()) {
- CallInterfaceDescriptor descriptor =
- info->code_stub()->GetCallInterfaceDescriptor();
- start_environment_ =
- new (zone_) HEnvironment(zone_, descriptor.GetRegisterParameterCount());
+ // For stubs, explicitly add the context to the environment.
+ start_environment_ = new (zone_)
+ HEnvironment(zone_, descriptor.GetRegisterParameterCount() + 1);
} else {
if (info->is_tracking_positions()) {
info->TraceInlinedFunction(info->shared_info(), SourcePosition::Unknown(),
@@ -4675,12 +4674,11 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
}
AddInstruction(arguments_object);
- graph()->SetArgumentsObject(arguments_object);
// Handle the arguments and arguments shadow variables specially (they do
// not have declarations).
if (scope->arguments() != NULL) {
- environment()->Bind(scope->arguments(), graph()->GetArgumentsObject());
+ environment()->Bind(scope->arguments(), arguments_object);
}
int rest_index;
@@ -4736,8 +4734,11 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
}
AddInstruction(function);
// Allocate a block context and store it to the stack frame.
- HInstruction* inner_context = Add<HAllocateBlockContext>(
- outer_context, function, scope->GetScopeInfo(isolate()));
+ HValue* scope_info = Add<HConstant>(scope->GetScopeInfo(isolate()));
+ Add<HPushArguments>(scope_info, function);
+ HInstruction* inner_context = Add<HCallRuntime>(
+ Runtime::FunctionForId(Runtime::kPushBlockContext), 2);
+ inner_context->SetFlag(HValue::kHasNoObservableSideEffects);
HInstruction* instr = Add<HStoreFrameContext>(inner_context);
set_scope(scope);
environment()->BindContext(inner_context);
@@ -4955,9 +4956,8 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
// will always evaluate to true, in a value context the return value needs
// to be a JSObject.
if (context->IsTest()) {
- TestContext* test = TestContext::cast(context);
CHECK_ALIVE(VisitForEffect(stmt->expression()));
- Goto(test->if_true(), state);
+ context->ReturnValue(graph()->GetConstantTrue());
} else if (context->IsEffect()) {
CHECK_ALIVE(VisitForEffect(stmt->expression()));
Goto(function_return(), state);
@@ -5283,10 +5283,6 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- if (!FLAG_optimize_for_in) {
- return Bailout(kForInStatementOptimizationIsDisabled);
- }
-
if (!stmt->each()->IsVariableProxy() ||
!stmt->each()->AsVariableProxy()->var()->IsStackLocal()) {
return Bailout(kForInStatementWithNonLocalEachVariable);
@@ -5312,57 +5308,73 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
Variable* each_var,
HValue* enumerable) {
- HInstruction* map;
- HInstruction* array;
- HInstruction* enum_length;
+ Handle<Map> meta_map = isolate()->factory()->meta_map();
bool fast = stmt->for_in_type() == ForInStatement::FAST_FOR_IN;
+ BuildCheckHeapObject(enumerable);
+ Add<HCheckInstanceType>(enumerable, HCheckInstanceType::IS_JS_RECEIVER);
+ Add<HSimulate>(stmt->ToObjectId());
if (fast) {
- map = Add<HForInPrepareMap>(enumerable);
- Add<HSimulate>(stmt->PrepareId());
+ HForInPrepareMap* map = Add<HForInPrepareMap>(enumerable);
+ Push(map);
+ Add<HSimulate>(stmt->EnumId());
+ Drop(1);
+ Add<HCheckMaps>(map, meta_map);
- array = Add<HForInCacheArray>(enumerable, map,
- DescriptorArray::kEnumCacheBridgeCacheIndex);
- enum_length = Add<HMapEnumLength>(map);
+ HForInCacheArray* array = Add<HForInCacheArray>(
+ enumerable, map, DescriptorArray::kEnumCacheBridgeCacheIndex);
+ HValue* enum_length = BuildEnumLength(map);
- HInstruction* index_cache = Add<HForInCacheArray>(
+ HForInCacheArray* index_cache = Add<HForInCacheArray>(
enumerable, map, DescriptorArray::kEnumCacheBridgeIndicesCacheIndex);
- HForInCacheArray::cast(array)
- ->set_index_cache(HForInCacheArray::cast(index_cache));
- } else {
- Add<HSimulate>(stmt->PrepareId());
- {
- NoObservableSideEffectsScope no_effects(this);
- BuildJSObjectCheck(enumerable, 0);
- }
- Add<HSimulate>(stmt->ToObjectId());
+ array->set_index_cache(index_cache);
- map = graph()->GetConstant1();
- Runtime::FunctionId function_id = Runtime::kGetPropertyNamesFast;
+ Push(map);
+ Push(array);
+ Push(enum_length);
+ Add<HSimulate>(stmt->PrepareId());
+ } else {
+ Runtime::FunctionId function_id = Runtime::kForInEnumerate;
Add<HPushArguments>(enumerable);
- array = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 1);
+ HCallRuntime* array =
+ Add<HCallRuntime>(Runtime::FunctionForId(function_id), 1);
Push(array);
Add<HSimulate>(stmt->EnumId());
Drop(1);
- Handle<Map> array_map = isolate()->factory()->fixed_array_map();
- HValue* check = Add<HCheckMaps>(array, array_map);
- enum_length = AddLoadFixedArrayLength(array, check);
- }
- HInstruction* start_index = Add<HConstant>(0);
+ IfBuilder if_fast(this);
+ if_fast.If<HCompareMap>(array, meta_map);
+ if_fast.Then();
+ {
+ HValue* cache_map = array;
+ HForInCacheArray* cache = Add<HForInCacheArray>(
+ enumerable, cache_map, DescriptorArray::kEnumCacheBridgeCacheIndex);
+ HValue* enum_length = BuildEnumLength(cache_map);
+ Push(cache_map);
+ Push(cache);
+ Push(enum_length);
+ Add<HSimulate>(stmt->PrepareId(), FIXED_SIMULATE);
+ }
+ if_fast.Else();
+ {
+ Push(graph()->GetConstant1());
+ Push(array);
+ Push(AddLoadFixedArrayLength(array));
+ Add<HSimulate>(stmt->PrepareId(), FIXED_SIMULATE);
+ }
+ }
- Push(map);
- Push(array);
- Push(enum_length);
- Push(start_index);
+ Push(graph()->GetConstant0());
HBasicBlock* loop_entry = BuildLoopEntry(stmt);
// Reload the values to ensure we have up-to-date values inside of the loop.
// This is relevant especially for OSR where the values don't come from the
// computation above, but from the OSR entry block.
- enumerable = environment()->ExpressionStackAt(4);
HValue* index = environment()->ExpressionStackAt(0);
HValue* limit = environment()->ExpressionStackAt(1);
+ HValue* array = environment()->ExpressionStackAt(2);
+ HValue* type = environment()->ExpressionStackAt(3);
+ enumerable = environment()->ExpressionStackAt(4);
// Check that we still have more keys.
HCompareNumericAndBranch* compare_index =
@@ -5382,32 +5394,67 @@ void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
set_current_block(loop_body);
- HValue* key =
- Add<HLoadKeyed>(environment()->ExpressionStackAt(2), // Enum cache.
- index, index, nullptr, FAST_ELEMENTS);
+ // Compute the next enumerated value.
+ HValue* key = Add<HLoadKeyed>(array, index, index, nullptr, FAST_ELEMENTS);
+ HBasicBlock* continue_block = nullptr;
if (fast) {
- // Check if the expected map still matches that of the enumerable.
- // If not just deoptimize.
- Add<HCheckMapValue>(enumerable, environment()->ExpressionStackAt(3));
- Bind(each_var, key);
- } else {
- Add<HPushArguments>(enumerable, key);
- Runtime::FunctionId function_id = Runtime::kForInFilter;
- key = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 2);
- Push(key);
+ // Check if expected map still matches that of the enumerable.
+ Add<HCheckMapValue>(enumerable, type);
Add<HSimulate>(stmt->FilterId());
+ } else {
+ // We need the continue block here to be able to skip over invalidated keys.
+ continue_block = graph()->CreateBasicBlock();
+
+ // We cannot use the IfBuilder here, since we need to be able to jump
+ // over the loop body in case of undefined result from %ForInFilter,
+ // and the poor soul that is the IfBuilder get's really confused about
+ // such "advanced control flow requirements".
+ HBasicBlock* if_fast = graph()->CreateBasicBlock();
+ HBasicBlock* if_slow = graph()->CreateBasicBlock();
+ HBasicBlock* if_slow_pass = graph()->CreateBasicBlock();
+ HBasicBlock* if_slow_skip = graph()->CreateBasicBlock();
+ HBasicBlock* if_join = graph()->CreateBasicBlock();
+
+ // Check if expected map still matches that of the enumerable.
+ HValue* enumerable_map =
+ Add<HLoadNamedField>(enumerable, nullptr, HObjectAccess::ForMap());
+ FinishCurrentBlock(
+ New<HCompareObjectEqAndBranch>(enumerable_map, type, if_fast, if_slow));
+ set_current_block(if_fast);
+ {
+ // The enum cache for enumerable is still valid, no need to check key.
+ Push(key);
+ Goto(if_join);
+ }
+ set_current_block(if_slow);
+ {
+ // Check if key is still valid for enumerable.
+ Add<HPushArguments>(enumerable, key);
+ Runtime::FunctionId function_id = Runtime::kForInFilter;
+ Push(Add<HCallRuntime>(Runtime::FunctionForId(function_id), 2));
+ Add<HSimulate>(stmt->FilterId());
+ FinishCurrentBlock(New<HCompareObjectEqAndBranch>(
+ Top(), graph()->GetConstantUndefined(), if_slow_skip, if_slow_pass));
+ }
+ set_current_block(if_slow_pass);
+ { Goto(if_join); }
+ set_current_block(if_slow_skip);
+ {
+ // The key is no longer valid for enumerable, skip it.
+ Drop(1);
+ Goto(continue_block);
+ }
+ if_join->SetJoinId(stmt->FilterId());
+ set_current_block(if_join);
key = Pop();
- Bind(each_var, key);
- IfBuilder if_undefined(this);
- if_undefined.If<HCompareObjectEqAndBranch>(key,
- graph()->GetConstantUndefined());
- if_undefined.ThenDeopt(Deoptimizer::kUndefined);
- if_undefined.End();
- Add<HSimulate>(stmt->AssignmentId());
}
+ Bind(each_var, key);
+ Add<HSimulate>(stmt->AssignmentId());
+
BreakAndContinueInfo break_info(stmt, scope(), 5);
+ break_info.set_continue_block(continue_block);
{
BreakAndContinueScope push(&break_info, this);
CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
@@ -5420,7 +5467,10 @@ void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
set_current_block(body_exit);
HValue* current_index = Pop();
- Push(AddUncasted<HAdd>(current_index, graph()->GetConstant1()));
+ HValue* increment =
+ AddUncasted<HAdd>(current_index, graph()->GetConstant1());
+ increment->ClearFlag(HValue::kCanOverflow);
+ Push(increment);
body_exit = current_block();
}
@@ -5638,7 +5688,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
Handle<Context> script_context = ScriptContextTable::GetContext(
script_contexts, lookup.context_index);
Handle<Object> current_value =
- FixedArray::get(script_context, lookup.slot_index);
+ FixedArray::get(*script_context, lookup.slot_index, isolate());
// If the values is not the hole, it will stay initialized,
// so no need to generate a check.
@@ -6069,9 +6119,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
for (int i = 0; i < length; i++) {
Expression* subexpr = subexprs->at(i);
- if (subexpr->IsSpread()) {
- return Bailout(kSpread);
- }
+ DCHECK(!subexpr->IsSpread());
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
@@ -6221,6 +6269,13 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
return instr;
}
+Handle<FieldType>
+HOptimizedGraphBuilder::PropertyAccessInfo::GetFieldTypeFromMap(
+ Handle<Map> map) const {
+ DCHECK(IsFound());
+ DCHECK(number_ < map->NumberOfOwnDescriptors());
+ return handle(map->instance_descriptors()->GetFieldType(number_), isolate());
+}
bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
PropertyAccessInfo* info) {
@@ -6316,15 +6371,15 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
Object* raw_accessor =
IsLoad() ? Handle<AccessorPair>::cast(accessors)->getter()
: Handle<AccessorPair>::cast(accessors)->setter();
- if (!raw_accessor->IsJSFunction()) return false;
- Handle<JSFunction> accessor = handle(JSFunction::cast(raw_accessor));
- if (accessor->shared()->IsApiFunction()) {
- CallOptimization call_optimization(accessor);
- if (call_optimization.is_simple_api_call()) {
- CallOptimization::HolderLookup holder_lookup;
- api_holder_ =
- call_optimization.LookupHolderOfExpectedType(map_, &holder_lookup);
- }
+ if (!raw_accessor->IsJSFunction() &&
+ !raw_accessor->IsFunctionTemplateInfo())
+ return false;
+ Handle<Object> accessor = handle(HeapObject::cast(raw_accessor));
+ CallOptimization call_optimization(accessor);
+ if (call_optimization.is_simple_api_call()) {
+ CallOptimization::HolderLookup holder_lookup;
+ api_holder_ =
+ call_optimization.LookupHolderOfExpectedType(map_, &holder_lookup);
}
accessor_ = accessor;
} else if (IsDataConstant()) {
@@ -6342,35 +6397,24 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadFieldMaps(
field_type_ = HType::Tagged();
// Figure out the field type from the accessor map.
- Handle<HeapType> field_type = GetFieldTypeFromMap(map);
+ Handle<FieldType> field_type = GetFieldTypeFromMap(map);
// Collect the (stable) maps from the field type.
- int num_field_maps = field_type->NumClasses();
- if (num_field_maps > 0) {
+ if (field_type->IsClass()) {
DCHECK(access_.representation().IsHeapObject());
- field_maps_.Reserve(num_field_maps, zone());
- HeapType::Iterator<Map> it = field_type->Classes();
- while (!it.Done()) {
- Handle<Map> field_map = it.Current();
- if (!field_map->is_stable()) {
- field_maps_.Clear();
- break;
- }
+ Handle<Map> field_map = field_type->AsClass();
+ if (field_map->is_stable()) {
field_maps_.Add(field_map, zone());
- it.Advance();
}
}
if (field_maps_.is_empty()) {
// Store is not safe if the field map was cleared.
- return IsLoad() || !field_type->Is(HeapType::None());
+ return IsLoad() || !field_type->IsNone();
}
- field_maps_.Sort();
- DCHECK_EQ(num_field_maps, field_maps_.length());
-
- // Determine field HType from field HeapType.
- field_type_ = HType::FromType<HeapType>(field_type);
+ // Determine field HType from field type.
+ field_type_ = HType::FromFieldType(field_type, zone());
DCHECK(field_type_.IsHeapObject());
// Add dependency on the map that introduced the field.
@@ -6381,6 +6425,10 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadFieldMaps(
bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
Handle<Map> map = this->map();
+ if (name_->IsPrivate()) {
+ NotFound();
+ return !map->has_hidden_prototype();
+ }
while (map->prototype()->IsJSObject()) {
holder_ = handle(JSObject::cast(map->prototype()));
@@ -6573,7 +6621,8 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
Push(value);
}
- if (info->NeedsWrappingFor(info->accessor())) {
+ if (info->accessor()->IsJSFunction() &&
+ info->NeedsWrappingFor(Handle<JSFunction>::cast(info->accessor()))) {
HValue* function = Add<HConstant>(info->accessor());
PushArgumentsFromEnvironment(argument_count);
return New<HCallFunction>(function, argument_count,
@@ -6587,7 +6636,12 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
}
PushArgumentsFromEnvironment(argument_count);
- return BuildCallConstantFunction(info->accessor(), argument_count);
+ if (!info->accessor()->IsJSFunction()) {
+ Bailout(kInliningBailedOut);
+ return nullptr;
+ }
+ return BuildCallConstantFunction(Handle<JSFunction>::cast(info->accessor()),
+ argument_count);
}
DCHECK(info->IsDataConstant());
@@ -6744,18 +6798,48 @@ void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
}
}
-
-static bool ComputeReceiverTypes(Expression* expr,
- HValue* receiver,
+static bool ComputeReceiverTypes(Expression* expr, HValue* receiver,
SmallMapList** t,
- Zone* zone) {
+ HOptimizedGraphBuilder* builder) {
+ Zone* zone = builder->zone();
SmallMapList* maps = expr->GetReceiverTypes();
*t = maps;
bool monomorphic = expr->IsMonomorphic();
if (maps != NULL && receiver->HasMonomorphicJSObjectType()) {
- Map* root_map = receiver->GetMonomorphicJSObjectMap()->FindRootMap();
- maps->FilterForPossibleTransitions(root_map);
- monomorphic = maps->length() == 1;
+ if (maps->length() > 0) {
+ Map* root_map = receiver->GetMonomorphicJSObjectMap()->FindRootMap();
+ maps->FilterForPossibleTransitions(root_map);
+ monomorphic = maps->length() == 1;
+ } else {
+ // No type feedback, see if we can infer the type. This is safely
+ // possible if the receiver had a known map at some point, and no
+ // map-changing stores have happened to it since.
+ Handle<Map> candidate_map = receiver->GetMonomorphicJSObjectMap();
+ if (candidate_map->is_observed()) return false;
+ for (HInstruction* current = builder->current_block()->last();
+ current != nullptr; current = current->previous()) {
+ if (current->IsBlockEntry()) break;
+ if (current->CheckChangesFlag(kMaps)) {
+ // Only allow map changes that store the candidate map. We don't
+ // need to care which object the map is being written into.
+ if (!current->IsStoreNamedField()) break;
+ HStoreNamedField* map_change = HStoreNamedField::cast(current);
+ if (!map_change->value()->IsConstant()) break;
+ HConstant* map_constant = HConstant::cast(map_change->value());
+ if (!map_constant->representation().IsTagged()) break;
+ Handle<Object> map = map_constant->handle(builder->isolate());
+ if (!map.is_identical_to(candidate_map)) break;
+ }
+ if (current == receiver) {
+ // We made it all the way back to the receiver without encountering
+ // a map change! So we can assume that the receiver still has the
+ // candidate_map we know about.
+ maps->Add(candidate_map, zone);
+ monomorphic = true;
+ break;
+ }
+ }
+ }
}
return monomorphic && CanInlinePropertyAccess(maps->first());
}
@@ -6846,7 +6930,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
ScriptContextTable::GetContext(script_contexts, lookup.context_index);
Handle<Object> current_value =
- FixedArray::get(script_context, lookup.slot_index);
+ FixedArray::get(*script_context, lookup.slot_index, isolate());
// If the values is not the hole, it will stay initialized,
// so no need to generate a check.
@@ -7246,14 +7330,14 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
// use a generic Keyed Load if we are using the type vector, because
// it has to share information with full code.
HConstant* key = Add<HConstant>(name);
- HLoadKeyedGeneric* result = New<HLoadKeyedGeneric>(
- object, key, function_language_mode(), PREMONOMORPHIC);
+ HLoadKeyedGeneric* result =
+ New<HLoadKeyedGeneric>(object, key, PREMONOMORPHIC);
result->SetVectorAndSlot(vector, slot);
return result;
}
- HLoadNamedGeneric* result = New<HLoadNamedGeneric>(
- object, name, function_language_mode(), PREMONOMORPHIC);
+ HLoadNamedGeneric* result =
+ New<HLoadNamedGeneric>(object, name, PREMONOMORPHIC);
result->SetVectorAndSlot(vector, slot);
return result;
} else {
@@ -7287,8 +7371,8 @@ HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
HValue* object, HValue* key, HValue* value) {
if (access_type == LOAD) {
InlineCacheState initial_state = expr->AsProperty()->GetInlineCacheState();
- HLoadKeyedGeneric* result = New<HLoadKeyedGeneric>(
- object, key, function_language_mode(), initial_state);
+ HLoadKeyedGeneric* result =
+ New<HLoadKeyedGeneric>(object, key, initial_state);
// HLoadKeyedGeneric with vector ics benefits from being encoded as
// MEGAMORPHIC because the vector/slot combo becomes unnecessary.
if (initial_state != MEGAMORPHIC) {
@@ -7370,8 +7454,8 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
static bool CanInlineElementAccess(Handle<Map> map) {
- return map->IsJSObjectMap() && !map->has_dictionary_elements() &&
- !map->has_sloppy_arguments_elements() &&
+ return map->IsJSObjectMap() &&
+ (map->has_fast_elements() || map->has_fixed_typed_array_elements()) &&
!map->has_indexed_interceptor() && !map->is_access_check_needed();
}
@@ -7664,7 +7748,7 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
HInstruction* instr = NULL;
SmallMapList* maps;
- bool monomorphic = ComputeReceiverTypes(expr, obj, &maps, zone());
+ bool monomorphic = ComputeReceiverTypes(expr, obj, &maps, this);
bool force_generic = false;
if (expr->GetKeyType() == PROPERTY) {
@@ -7790,7 +7874,7 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
result = New<HConstant>(argument_count);
}
} else {
- Push(graph()->GetArgumentsObject());
+ CHECK_ALIVE_OR_RETURN(VisitForValue(expr->obj(), ARGUMENTS_ALLOWED), true);
CHECK_ALIVE_OR_RETURN(VisitForValue(expr->key()), true);
HValue* key = Pop();
Drop(1); // Arguments object.
@@ -7821,7 +7905,7 @@ HValue* HOptimizedGraphBuilder::BuildNamedAccess(
Expression* expr, FeedbackVectorSlot slot, HValue* object,
Handle<Name> name, HValue* value, bool is_uninitialized) {
SmallMapList* maps;
- ComputeReceiverTypes(expr, object, &maps, zone());
+ ComputeReceiverTypes(expr, object, &maps, this);
DCHECK(maps != NULL);
if (maps->length() > 0) {
@@ -8341,10 +8425,12 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
CompilationInfo target_info(&parse_info);
Handle<SharedFunctionInfo> target_shared(target->shared());
- if (IsClassConstructor(target_shared->kind())) {
+ if (inlining_kind != CONSTRUCT_CALL_RETURN &&
+ IsClassConstructor(target_shared->kind())) {
TraceInline(target, caller, "target is classConstructor");
return false;
}
+
if (target_shared->HasDebugInfo()) {
TraceInline(target, caller, "target is being debugged");
return false;
@@ -8531,7 +8617,7 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
    // return value will always evaluate to true; in a value context the
// return value is the newly allocated receiver.
if (call_context()->IsTest()) {
- Goto(inlined_test_context()->if_true(), state);
+ inlined_test_context()->ReturnValue(graph()->GetConstantTrue());
} else if (call_context()->IsEffect()) {
Goto(function_return(), state);
} else {
@@ -8554,7 +8640,7 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
// Falling off the end of a normal inlined function. This basically means
// returning undefined.
if (call_context()->IsTest()) {
- Goto(inlined_test_context()->if_false(), state);
+ inlined_test_context()->ReturnValue(graph()->GetConstantFalse());
} else if (call_context()->IsEffect()) {
Goto(function_return(), state);
} else {
@@ -8617,24 +8703,25 @@ bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
CONSTRUCT_CALL_RETURN);
}
-
-bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
+bool HOptimizedGraphBuilder::TryInlineGetter(Handle<Object> getter,
Handle<Map> receiver_map,
BailoutId ast_id,
BailoutId return_id) {
if (TryInlineApiGetter(getter, receiver_map, ast_id)) return true;
- return TryInline(getter, 0, NULL, ast_id, return_id, GETTER_CALL_RETURN);
+ return getter->IsJSFunction() &&
+ TryInline(Handle<JSFunction>::cast(getter), 0, NULL, ast_id, return_id,
+ GETTER_CALL_RETURN);
}
-
-bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
+bool HOptimizedGraphBuilder::TryInlineSetter(Handle<Object> setter,
Handle<Map> receiver_map,
BailoutId id,
BailoutId assignment_id,
HValue* implicit_return_value) {
if (TryInlineApiSetter(setter, receiver_map, id)) return true;
- return TryInline(setter, 1, implicit_return_value, id, assignment_id,
- SETTER_CALL_RETURN);
+ return setter->IsJSFunction() &&
+ TryInline(Handle<JSFunction>::cast(setter), 1, implicit_return_value,
+ id, assignment_id, SETTER_CALL_RETURN);
}
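
TryInlineGetter and TryInlineSetter widen their accessor parameter from Handle<JSFunction> to Handle<Object>: an accessor slot found on a map may now hold a non-function object, presumably a function template for an API accessor (an inference from the TryInlineApi* paths below, which now accept Handle<Object> as well). The JS-level inline path is therefore taken only behind an explicit IsJSFunction() check followed by a cast. A sketch of that widen-then-check pattern with stand-in types, not the V8 object model:

    #include <cstdio>

    struct Object { virtual ~Object() = default; };
    struct JSFunctionLike : Object {};   // stand-in for JSFunction
    struct ApiAccessorLike : Object {};  // stand-in for non-function accessors

    bool TryInlineAccessor(Object* accessor) {
      // Widened parameter: callers no longer prove it is a function up front.
      auto* fn = dynamic_cast<JSFunctionLike*>(accessor);
      if (fn == nullptr) return false;  // e.g. an API accessor: take stub path
      // ... inline fn as an ordinary JS call ...
      return true;
    }

    int main() {
      JSFunctionLike js;
      ApiAccessorLike api;
      std::printf("%d %d\n", TryInlineAccessor(&js), TryInlineAccessor(&api));
      return 0;
    }
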
@@ -8694,7 +8781,8 @@ bool HOptimizedGraphBuilder::IsReadOnlyLengthDescriptor(
Isolate* isolate = jsarray_map->GetIsolate();
Handle<Name> length_string = isolate->factory()->length_string();
DescriptorArray* descriptors = jsarray_map->instance_descriptors();
- int number = descriptors->SearchWithCache(*length_string, *jsarray_map);
+ int number =
+ descriptors->SearchWithCache(isolate, *length_string, *jsarray_map);
DCHECK_NE(DescriptorArray::kNotFound, number);
return descriptors->GetDetails(number).IsReadOnly();
}
@@ -9058,6 +9146,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
case kArrayLastIndexOf: {
if (receiver_map.is_null()) return false;
if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
+ if (!receiver_map->prototype()->IsJSObject()) return false;
ElementsKind kind = receiver_map->elements_kind();
if (!IsFastElementsKind(kind)) return false;
if (receiver_map->is_observed()) return false;
@@ -9127,8 +9216,7 @@ bool HOptimizedGraphBuilder::TryInlineApiMethodCall(
kCallApiMethod);
}
-
-bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<JSFunction> function,
+bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<Object> function,
Handle<Map> receiver_map,
BailoutId ast_id) {
SmallMapList receiver_maps(1, zone());
@@ -9141,8 +9229,7 @@ bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<JSFunction> function,
kCallApiGetter);
}
-
-bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<JSFunction> function,
+bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<Object> function,
Handle<Map> receiver_map,
BailoutId ast_id) {
SmallMapList receiver_maps(1, zone());
@@ -9155,15 +9242,14 @@ bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<JSFunction> function,
kCallApiSetter);
}
-
-bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
- HValue* receiver,
- SmallMapList* receiver_maps,
- int argc,
- BailoutId ast_id,
- ApiCallType call_type) {
- if (function->context()->native_context() !=
- top_info()->closure()->context()->native_context()) {
+bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<Object> function,
+ HValue* receiver,
+ SmallMapList* receiver_maps,
+ int argc, BailoutId ast_id,
+ ApiCallType call_type) {
+ if (function->IsJSFunction() &&
+ Handle<JSFunction>::cast(function)->context()->native_context() !=
+ top_info()->closure()->context()->native_context()) {
return false;
}
CallOptimization optimization(function);
@@ -9178,8 +9264,11 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
// Cannot embed a direct reference to the global proxy map
    // as it may be dropped on deserialization.
CHECK(!isolate()->serializer_enabled());
+ DCHECK(function->IsJSFunction());
DCHECK_EQ(0, receiver_maps->length());
- receiver_maps->Add(handle(function->global_proxy()->map()), zone());
+ receiver_maps->Add(
+ handle(Handle<JSFunction>::cast(function)->global_proxy()->map()),
+ zone());
}
CallOptimization::HolderLookup holder_lookup =
CallOptimization::kHolderNotFound;
@@ -9259,7 +9348,8 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
HInstruction* call = nullptr;
if (!is_function) {
- CallApiAccessorStub stub(isolate(), is_store, call_data_undefined);
+ CallApiAccessorStub stub(isolate(), is_store, call_data_undefined,
+ !optimization.is_constant_call());
Handle<Code> code = stub.GetCode();
HConstant* code_value = Add<HConstant>(code);
ApiAccessorDescriptor descriptor(isolate());
@@ -9643,6 +9733,9 @@ bool HOptimizedGraphBuilder::CanBeFunctionApplyArguments(Call* expr) {
void HOptimizedGraphBuilder::VisitCall(Call* expr) {
+ if (expr->tail_call_mode() == TailCallMode::kAllow) {
+ return Bailout(kTailCall);
+ }
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
@@ -9657,7 +9750,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
HValue* receiver = Top();
SmallMapList* maps;
- ComputeReceiverTypes(expr, receiver, &maps, zone());
+ ComputeReceiverTypes(expr, receiver, &maps, this);
if (prop->key()->IsPropertyName() && maps->length() > 0) {
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
@@ -9852,7 +9945,7 @@ void HOptimizedGraphBuilder::BuildInlinedCallArray(
// Checks whether allocation using the given constructor can be inlined.
static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
return constructor->has_initial_map() &&
- !IsClassConstructor(constructor->shared()->kind()) &&
+ !IsSubclassConstructor(constructor->shared()->kind()) &&
constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
constructor->initial_map()->instance_size() <
HAllocate::kMaxInlineSize;
@@ -10105,31 +10198,6 @@ void HGraphBuilder::BuildArrayBufferViewInitialization(
}
-void HOptimizedGraphBuilder::GenerateDataViewInitialize(
- CallRuntime* expr) {
- ZoneList<Expression*>* arguments = expr->arguments();
-
- DCHECK(arguments->length()== 4);
- CHECK_ALIVE(VisitForValue(arguments->at(0)));
- HValue* obj = Pop();
-
- CHECK_ALIVE(VisitForValue(arguments->at(1)));
- HValue* buffer = Pop();
-
- CHECK_ALIVE(VisitForValue(arguments->at(2)));
- HValue* byte_offset = Pop();
-
- CHECK_ALIVE(VisitForValue(arguments->at(3)));
- HValue* byte_length = Pop();
-
- {
- NoObservableSideEffectsScope scope(this);
- BuildArrayBufferViewInitialization<JSDataView>(
- obj, buffer, byte_offset, byte_length);
- }
-}
-
-
HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
ExternalArrayType array_type,
bool is_zero_byte_offset,
@@ -10227,7 +10295,7 @@ HValue* HOptimizedGraphBuilder::BuildAllocateFixedTypedArray(
HValue* backing_store = AddUncasted<HAdd>(
Add<HConstant>(ExternalReference::fixed_typed_array_base_data_offset()),
- elements, Strength::WEAK, AddOfExternalAndTagged);
+ elements, AddOfExternalAndTagged);
HValue* key = builder.BeginBody(
Add<HConstant>(static_cast<int32_t>(0)),
@@ -10612,8 +10680,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement(
HConstant* delta = (expr->op() == Token::INC)
? graph()->GetConstant1()
: graph()->GetConstantMinus1();
- HInstruction* instr =
- AddUncasted<HAdd>(Top(), delta, strength(function_language_mode()));
+ HInstruction* instr = AddUncasted<HAdd>(Top(), delta);
if (instr->IsAdd()) {
HAdd* add = HAdd::cast(instr);
add->set_observed_input_representation(1, rep);
@@ -10855,7 +10922,7 @@ HValue* HGraphBuilder::TruncateToNumber(HValue* value, Type** expected) {
Maybe<HConstant*> number =
constant->CopyToTruncatedNumber(isolate(), zone());
if (number.IsJust()) {
- *expected = Type::Number(zone());
+ *expected = Type::Number();
return AddInstruction(number.FromJust());
}
}
@@ -10869,20 +10936,20 @@ HValue* HGraphBuilder::TruncateToNumber(HValue* value, Type** expected) {
// Separate the number type from the rest.
Type* expected_obj =
- Type::Intersect(expected_type, Type::NonNumber(zone()), zone());
+ Type::Intersect(expected_type, Type::NonNumber(), zone());
Type* expected_number =
- Type::Intersect(expected_type, Type::Number(zone()), zone());
+ Type::Intersect(expected_type, Type::Number(), zone());
// We expect to get a number.
  // (We need to check first, since Type::None->Is(Type::Any()) == true.)
if (expected_obj->Is(Type::None())) {
- DCHECK(!expected_number->Is(Type::None(zone())));
+ DCHECK(!expected_number->Is(Type::None()));
return value;
}
- if (expected_obj->Is(Type::Undefined(zone()))) {
+ if (expected_obj->Is(Type::Undefined())) {
// This is already done by HChange.
- *expected = Type::Union(expected_number, Type::Number(zone()), zone());
+ *expected = Type::Union(expected_number, Type::Number(), zone());
return value;
}
@@ -10907,8 +10974,7 @@ HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
}
HValue* result = HGraphBuilder::BuildBinaryOperation(
expr->op(), left, right, left_type, right_type, result_type,
- fixed_right_arg, allocation_mode, strength(function_language_mode()),
- expr->id());
+ fixed_right_arg, allocation_mode, expr->id());
// Add a simulate after instructions with observable side effects, and
// after phis, which are the result of BuildBinaryOperation when we
// inlined some complex subgraph.
@@ -10924,11 +10990,12 @@ HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
return result;
}
-
-HValue* HGraphBuilder::BuildBinaryOperation(
- Token::Value op, HValue* left, HValue* right, Type* left_type,
- Type* right_type, Type* result_type, Maybe<int> fixed_right_arg,
- HAllocationMode allocation_mode, Strength strength, BailoutId opt_id) {
+HValue* HGraphBuilder::BuildBinaryOperation(Token::Value op, HValue* left,
+ HValue* right, Type* left_type,
+ Type* right_type, Type* result_type,
+ Maybe<int> fixed_right_arg,
+ HAllocationMode allocation_mode,
+ BailoutId opt_id) {
bool maybe_string_add = false;
if (op == Token::ADD) {
// If we are adding constant string with something for which we don't have
@@ -10957,7 +11024,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
Add<HDeoptimize>(
Deoptimizer::kInsufficientTypeFeedbackForLHSOfBinaryOperation,
Deoptimizer::SOFT);
- left_type = Type::Any(zone());
+ left_type = Type::Any();
left_rep = RepresentationFor(left_type);
maybe_string_add = op == Token::ADD;
}
@@ -10966,12 +11033,12 @@ HValue* HGraphBuilder::BuildBinaryOperation(
Add<HDeoptimize>(
Deoptimizer::kInsufficientTypeFeedbackForRHSOfBinaryOperation,
Deoptimizer::SOFT);
- right_type = Type::Any(zone());
+ right_type = Type::Any();
right_rep = RepresentationFor(right_type);
maybe_string_add = op == Token::ADD;
}
- if (!maybe_string_add && !is_strong(strength)) {
+ if (!maybe_string_add) {
left = TruncateToNumber(left, &left_type);
right = TruncateToNumber(right, &right_type);
}
@@ -10979,43 +11046,36 @@ HValue* HGraphBuilder::BuildBinaryOperation(
// Special case for string addition here.
if (op == Token::ADD &&
(left_type->Is(Type::String()) || right_type->Is(Type::String()))) {
- if (is_strong(strength)) {
- // In strong mode, if the one side of an addition is a string,
- // the other side must be a string too.
+ // Validate type feedback for left argument.
+ if (left_type->Is(Type::String())) {
left = BuildCheckString(left);
- right = BuildCheckString(right);
- } else {
- // Validate type feedback for left argument.
- if (left_type->Is(Type::String())) {
- left = BuildCheckString(left);
- }
+ }
- // Validate type feedback for right argument.
- if (right_type->Is(Type::String())) {
- right = BuildCheckString(right);
- }
+ // Validate type feedback for right argument.
+ if (right_type->Is(Type::String())) {
+ right = BuildCheckString(right);
+ }
- // Convert left argument as necessary.
- if (left_type->Is(Type::Number())) {
- DCHECK(right_type->Is(Type::String()));
- left = BuildNumberToString(left, left_type);
- } else if (!left_type->Is(Type::String())) {
- DCHECK(right_type->Is(Type::String()));
- return AddUncasted<HStringAdd>(
- left, right, allocation_mode.GetPretenureMode(),
- STRING_ADD_CONVERT_LEFT, allocation_mode.feedback_site());
- }
+ // Convert left argument as necessary.
+ if (left_type->Is(Type::Number())) {
+ DCHECK(right_type->Is(Type::String()));
+ left = BuildNumberToString(left, left_type);
+ } else if (!left_type->Is(Type::String())) {
+ DCHECK(right_type->Is(Type::String()));
+ return AddUncasted<HStringAdd>(
+ left, right, allocation_mode.GetPretenureMode(),
+ STRING_ADD_CONVERT_LEFT, allocation_mode.feedback_site());
+ }
- // Convert right argument as necessary.
- if (right_type->Is(Type::Number())) {
- DCHECK(left_type->Is(Type::String()));
- right = BuildNumberToString(right, right_type);
- } else if (!right_type->Is(Type::String())) {
- DCHECK(left_type->Is(Type::String()));
- return AddUncasted<HStringAdd>(
- left, right, allocation_mode.GetPretenureMode(),
- STRING_ADD_CONVERT_RIGHT, allocation_mode.feedback_site());
- }
+ // Convert right argument as necessary.
+ if (right_type->Is(Type::Number())) {
+ DCHECK(left_type->Is(Type::String()));
+ right = BuildNumberToString(right, right_type);
+ } else if (!right_type->Is(Type::String())) {
+ DCHECK(left_type->Is(Type::String()));
+ return AddUncasted<HStringAdd>(
+ left, right, allocation_mode.GetPretenureMode(),
+ STRING_ADD_CONVERT_RIGHT, allocation_mode.feedback_site());
}
// Fast paths for empty constant strings.
@@ -11064,6 +11124,16 @@ HValue* HGraphBuilder::BuildBinaryOperation(
allocation_mode.feedback_site());
}
+ // Special case for +x here.
+ if (op == Token::MUL) {
+ if (left->EqualsInteger32Constant(1)) {
+ return BuildToNumber(right);
+ }
+ if (right->EqualsInteger32Constant(1)) {
+ return BuildToNumber(left);
+ }
+ }
+
if (graph()->info()->IsStub()) {
left = EnforceNumberType(left, left_type);
right = EnforceNumberType(right, right_type);
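
The new MUL special case above relies on 1 * x being observationally equal to ToNumber(x): under IEEE-754, multiplying a double by 1.0 preserves the value, the sign of zero, and NaN-ness, so once x is a number there is nothing left to do. The "+x" in the comment presumably refers to unary plus reaching Crankshaft as a multiplication by the literal 1; that reading is an inference from the comment, not something the diff states. A runnable check of the identity:

    #include <cmath>
    #include <cstdio>

    // 1.0 * x is the identity on doubles, including the edge cases that
    // usually break naive strength reductions: -0 and NaN.
    int main() {
      std::printf("%d\n", (int)std::signbit(1.0 * -0.0));  // 1: -0 preserved
      std::printf("%d\n", 1.0 * 2.5 == 2.5);               // 1: plain identity
      std::printf("%d\n", (int)std::isnan(1.0 * NAN));     // 1: NaN preserved
      return 0;
    }
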
@@ -11084,78 +11154,51 @@ HValue* HGraphBuilder::BuildBinaryOperation(
default:
UNREACHABLE();
case Token::ADD:
- function_id =
- is_strong(strength) ? Runtime::kAdd_Strong : Runtime::kAdd;
+ function_id = Runtime::kAdd;
break;
case Token::SUB:
- function_id = is_strong(strength) ? Runtime::kSubtract_Strong
- : Runtime::kSubtract;
+ function_id = Runtime::kSubtract;
break;
case Token::MUL:
- function_id = is_strong(strength) ? Runtime::kMultiply_Strong
- : Runtime::kMultiply;
+ function_id = Runtime::kMultiply;
break;
case Token::DIV:
- function_id =
- is_strong(strength) ? Runtime::kDivide_Strong : Runtime::kDivide;
+ function_id = Runtime::kDivide;
break;
case Token::MOD:
- function_id =
- is_strong(strength) ? Runtime::kModulus_Strong : Runtime::kModulus;
+ function_id = Runtime::kModulus;
break;
case Token::BIT_OR:
- function_id = is_strong(strength) ? Runtime::kBitwiseOr_Strong
- : Runtime::kBitwiseOr;
+ function_id = Runtime::kBitwiseOr;
break;
case Token::BIT_AND:
- function_id = is_strong(strength) ? Runtime::kBitwiseAnd_Strong
- : Runtime::kBitwiseAnd;
+ function_id = Runtime::kBitwiseAnd;
break;
case Token::BIT_XOR:
- function_id = is_strong(strength) ? Runtime::kBitwiseXor_Strong
- : Runtime::kBitwiseXor;
+ function_id = Runtime::kBitwiseXor;
break;
case Token::SAR:
- function_id = is_strong(strength) ? Runtime::kShiftRight_Strong
- : Runtime::kShiftRight;
+ function_id = Runtime::kShiftRight;
break;
case Token::SHR:
- function_id = is_strong(strength) ? Runtime::kShiftRightLogical_Strong
- : Runtime::kShiftRightLogical;
+ function_id = Runtime::kShiftRightLogical;
break;
case Token::SHL:
- function_id = is_strong(strength) ? Runtime::kShiftLeft_Strong
- : Runtime::kShiftLeft;
+ function_id = Runtime::kShiftLeft;
break;
}
Add<HPushArguments>(left, right);
instr = AddUncasted<HCallRuntime>(Runtime::FunctionForId(function_id), 2);
} else {
- if (is_strong(strength) && Token::IsBitOp(op)) {
- // TODO(conradw): This is not efficient, but is necessary to prevent
- // conversion of oddball values to numbers in strong mode. It would be
- // better to prevent the conversion rather than adding a runtime check.
- IfBuilder if_builder(this);
- if_builder.If<HHasInstanceTypeAndBranch>(left, ODDBALL_TYPE);
- if_builder.OrIf<HHasInstanceTypeAndBranch>(right, ODDBALL_TYPE);
- if_builder.Then();
- Add<HCallRuntime>(
- Runtime::FunctionForId(Runtime::kThrowStrongModeImplicitConversion),
- 0);
- if (!graph()->info()->IsStub()) {
- Add<HSimulate>(opt_id, REMOVABLE_SIMULATE);
- }
- if_builder.End();
- }
switch (op) {
case Token::ADD:
- instr = AddUncasted<HAdd>(left, right, strength);
+ instr = AddUncasted<HAdd>(left, right);
break;
case Token::SUB:
- instr = AddUncasted<HSub>(left, right, strength);
+ instr = AddUncasted<HSub>(left, right);
break;
case Token::MUL:
- instr = AddUncasted<HMul>(left, right, strength);
+ instr = AddUncasted<HMul>(left, right);
break;
case Token::MOD: {
if (fixed_right_arg.IsJust() &&
@@ -11168,38 +11211,38 @@ HValue* HGraphBuilder::BuildBinaryOperation(
if_same.ElseDeopt(Deoptimizer::kUnexpectedRHSOfBinaryOperation);
right = fixed_right;
}
- instr = AddUncasted<HMod>(left, right, strength);
+ instr = AddUncasted<HMod>(left, right);
break;
}
case Token::DIV:
- instr = AddUncasted<HDiv>(left, right, strength);
+ instr = AddUncasted<HDiv>(left, right);
break;
case Token::BIT_XOR:
case Token::BIT_AND:
- instr = AddUncasted<HBitwise>(op, left, right, strength);
+ instr = AddUncasted<HBitwise>(op, left, right);
break;
case Token::BIT_OR: {
HValue *operand, *shift_amount;
if (left_type->Is(Type::Signed32()) &&
right_type->Is(Type::Signed32()) &&
MatchRotateRight(left, right, &operand, &shift_amount)) {
- instr = AddUncasted<HRor>(operand, shift_amount, strength);
+ instr = AddUncasted<HRor>(operand, shift_amount);
} else {
- instr = AddUncasted<HBitwise>(op, left, right, strength);
+ instr = AddUncasted<HBitwise>(op, left, right);
}
break;
}
case Token::SAR:
- instr = AddUncasted<HSar>(left, right, strength);
+ instr = AddUncasted<HSar>(left, right);
break;
case Token::SHR:
- instr = AddUncasted<HShr>(left, right, strength);
+ instr = AddUncasted<HShr>(left, right);
if (instr->IsShr() && CanBeZero(right)) {
graph()->RecordUint32Instruction(instr);
}
break;
case Token::SHL:
- instr = AddUncasted<HShl>(left, right, strength);
+ instr = AddUncasted<HShl>(left, right);
break;
default:
UNREACHABLE();
@@ -11520,7 +11563,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
Add<HDeoptimize>(
Deoptimizer::kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,
Deoptimizer::SOFT);
- combined_type = left_type = right_type = Type::Any(zone());
+ combined_type = left_type = right_type = Type::Any();
}
Representation left_rep = RepresentationFor(left_type);
@@ -11657,8 +11700,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
return result;
} else {
if (combined_rep.IsTagged() || combined_rep.IsNone()) {
- HCompareGeneric* result = Add<HCompareGeneric>(
- left, right, op, strength(function_language_mode()));
+ HCompareGeneric* result = Add<HCompareGeneric>(left, right, op);
result->set_observed_input_representation(1, left_rep);
result->set_observed_input_representation(2, right_rep);
if (result->HasObservableSideEffects()) {
@@ -11674,8 +11716,8 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
HBranch* branch = New<HBranch>(result);
return branch;
} else {
- HCompareNumericAndBranch* result = New<HCompareNumericAndBranch>(
- left, right, op, strength(function_language_mode()));
+ HCompareNumericAndBranch* result =
+ New<HCompareNumericAndBranch>(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
if (top_info()->is_tracking_positions()) {
result->SetOperandPositions(zone(), left_position, right_position);
@@ -11706,7 +11748,8 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
} else {
DCHECK_EQ(Token::EQ, expr->op());
Type* type = expr->combined_type()->Is(Type::None())
- ? Type::Any(zone()) : expr->combined_type();
+ ? Type::Any()
+ : expr->combined_type();
HIfContinuation continuation;
BuildCompareNil(value, type, &continuation);
return ast_context()->ReturnContinuation(&continuation, expr->id());
@@ -12149,8 +12192,8 @@ void HOptimizedGraphBuilder::VisitExportDeclaration(
}
-void HOptimizedGraphBuilder::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* node) {
+void HOptimizedGraphBuilder::VisitRewritableExpression(
+ RewritableExpression* node) {
CHECK_ALIVE(Visit(node->expression()));
}
@@ -12178,25 +12221,6 @@ void HOptimizedGraphBuilder::GenerateIsJSReceiver(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HHasInstanceTypeAndBranch* result = New<HHasInstanceTypeAndBranch>(
- value, FIRST_FUNCTION_TYPE, LAST_FUNCTION_TYPE);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsMinusZero(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HCompareMinusZeroAndBranch* result = New<HCompareMinusZeroAndBranch>(value);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12264,6 +12288,30 @@ void HOptimizedGraphBuilder::GenerateToInteger(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateToName(CallRuntime* call) {
+ DCHECK_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* input = Pop();
+ if (input->type().IsSmi()) {
+ HValue* result = BuildNumberToString(input, Type::SignedSmall());
+ return ast_context()->ReturnValue(result);
+ } else if (input->type().IsTaggedNumber()) {
+ HValue* result = BuildNumberToString(input, Type::Number());
+ return ast_context()->ReturnValue(result);
+ } else if (input->type().IsString()) {
+ return ast_context()->ReturnValue(input);
+ } else {
+ Callable callable = CodeFactory::ToName(isolate());
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {context(), input};
+ HInstruction* result =
+ New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)));
+ return ast_context()->ReturnInstruction(result, call->id());
+ }
+}
+
+
void HOptimizedGraphBuilder::GenerateToObject(CallRuntime* call) {
DCHECK_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
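
The new GenerateToName above dispatches on the statically known HValue type: Smi and heap-number inputs go through the number-to-string path, string inputs are already valid property names, and only the untyped case pays for the generic ToName stub call. A self-contained model of that type-directed dispatch, with stand-in types rather than V8's:

    #include <cstdio>
    #include <string>

    enum class StaticType { kSmi, kNumber, kString, kUnknown };

    std::string ToNameSketch(StaticType t, double num, const std::string& str) {
      switch (t) {
        case StaticType::kSmi:
        case StaticType::kNumber:
          return std::to_string(num);     // number-to-string fast path
        case StaticType::kString:
          return str;                     // already a name: identity
        case StaticType::kUnknown:
        default:
          return "<generic stub call>";   // placeholder for the slow path
      }
    }

    int main() {
      std::printf("%s\n",
                  ToNameSketch(StaticType::kString, 0.0, "length").c_str());
      return 0;
    }
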
@@ -12276,11 +12324,11 @@ void HOptimizedGraphBuilder::GenerateToObject(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateToString(CallRuntime* call) {
DCHECK_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- Callable callable = CodeFactory::ToString(isolate());
HValue* input = Pop();
if (input->type().IsString()) {
return ast_context()->ReturnValue(input);
} else {
+ Callable callable = CodeFactory::ToString(isolate());
HValue* stub = Add<HConstant>(callable.code());
HValue* values[] = {context(), input};
HInstruction* result =
@@ -12310,16 +12358,13 @@ void HOptimizedGraphBuilder::GenerateToNumber(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
Callable callable = CodeFactory::ToNumber(isolate());
HValue* input = Pop();
- if (input->type().IsTaggedNumber()) {
- return ast_context()->ReturnValue(input);
- } else {
- HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {context(), input};
- HInstruction* result =
- New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
- Vector<HValue*>(values, arraysize(values)));
- return ast_context()->ReturnInstruction(result, call->id());
+ HValue* result = BuildToNumber(input);
+ if (result->HasObservableSideEffects()) {
+ if (!ast_context()->IsEffect()) Push(result);
+ Add<HSimulate>(call->id(), REMOVABLE_SIMULATE);
+ if (!ast_context()->IsEffect()) result = Pop();
}
+ return ast_context()->ReturnValue(result);
}
@@ -12372,48 +12417,6 @@ void HOptimizedGraphBuilder::GenerateHasFastPackedElements(CallRuntime* call) {
}
-// Support for arguments.length and arguments[?].
-void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 0);
- HInstruction* result = NULL;
- if (function_state()->outer() == NULL) {
- HInstruction* elements = Add<HArgumentsElements>(false);
- result = New<HArgumentsLength>(elements);
- } else {
- // Number of arguments without receiver.
- int argument_count = environment()->
- arguments_environment()->parameter_count() - 1;
- result = New<HConstant>(argument_count);
- }
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* index = Pop();
- HInstruction* result = NULL;
- if (function_state()->outer() == NULL) {
- HInstruction* elements = Add<HArgumentsElements>(false);
- HInstruction* length = Add<HArgumentsLength>(elements);
- HInstruction* checked_index = Add<HBoundsCheck>(index, length);
- result = New<HAccessArgumentsAt>(elements, length, checked_index);
- } else {
- EnsureArgumentsArePushedForAccess();
-
- // Number of arguments without receiver.
- HInstruction* elements = function_state()->arguments_elements();
- int argument_count = environment()->
- arguments_environment()->parameter_count() - 1;
- HInstruction* length = Add<HConstant>(argument_count);
- HInstruction* checked_key = Add<HBoundsCheck>(index, length);
- result = New<HAccessArgumentsAt>(elements, length, checked_key);
- }
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12442,27 +12445,6 @@ void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateJSValueGetValue(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HInstruction* result = Add<HLoadNamedField>(
- value, nullptr,
- HObjectAccess::ForObservableJSObjectOffset(JSValue::kValueOffset));
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HOptimizedGraphBuilder::GenerateIsDate(CallRuntime* call) {
- DCHECK_EQ(1, call->arguments()->length());
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HHasInstanceTypeAndBranch* result =
- New<HHasInstanceTypeAndBranch>(value, JS_DATE_TYPE);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
CallRuntime* call) {
DCHECK(call->arguments()->length() == 3);
@@ -12495,43 +12477,6 @@ void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
}
-void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 2);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* value = Pop();
- HValue* object = Pop();
-
- // Check if object is a JSValue.
- IfBuilder if_objectisvalue(this);
- if_objectisvalue.If<HHasInstanceTypeAndBranch>(object, JS_VALUE_TYPE);
- if_objectisvalue.Then();
- {
- // Create in-object property store to kValueOffset.
- Add<HStoreNamedField>(object,
- HObjectAccess::ForObservableJSObjectOffset(JSValue::kValueOffset),
- value);
- if (!ast_context()->IsEffect()) {
- Push(value);
- }
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- }
- if_objectisvalue.Else();
- {
- // Nothing to do in this case.
- if (!ast_context()->IsEffect()) {
- Push(value);
- }
- Add<HSimulate>(call->id(), FIXED_SIMULATE);
- }
- if_objectisvalue.End();
- if (!ast_context()->IsEffect()) {
- Drop(1);
- }
- return ast_context()->ReturnValue(value);
-}
-
-
// Fast support for charCodeAt(n).
void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
DCHECK(call->arguments()->length() == 2);
@@ -12568,25 +12513,18 @@ void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
}
-// Fast support for object equality testing.
-void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 2);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- HValue* right = Pop();
- HValue* left = Pop();
- HCompareObjectEqAndBranch* result =
- New<HCompareObjectEqAndBranch>(left, right);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
// Fast support for SubString.
void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
DCHECK_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitExpressions(call->arguments()));
PushArgumentsFromEnvironment(call->arguments()->length());
- HCallStub* result = New<HCallStub>(CodeStub::SubString, 3);
+ Callable callable = CodeFactory::SubString(isolate());
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {context()};
+ HInstruction* result = New<HCallWithDescriptor>(
+ stub, call->arguments()->length(), callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)));
+ result->set_type(HType::String());
return ast_context()->ReturnInstruction(result, call->id());
}
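
GenerateSubString (and GenerateRegExpExec just below) replace HCallStub with the pattern used throughout this diff: obtain a Callable from CodeFactory, pin its code object as an HConstant, and emit one HCallWithDescriptor that carries its own CallInterfaceDescriptor. That is what lets LCallStub, with its hard-coded switch over CodeStub majors, be deleted from the lithium files further down. A toy model of why a per-call descriptor removes the central switch; the types here are simplified stand-ins, not V8 declarations:

    #include <cstdio>

    struct Descriptor { int register_params; };
    struct Callable {
      int (*code)(int);       // stand-in for the stub's code object
      Descriptor descriptor;  // stand-in for CallInterfaceDescriptor
    };

    int CallWithDescriptor(const Callable& c, int arg) {
      return c.code(arg);  // one generic path; no switch over stub kinds
    }

    int main() {
      Callable sub_string{[](int n) { return n / 2; }, {1}};   // toy stubs
      Callable reg_exp_exec{[](int n) { return n + 1; }, {1}};
      std::printf("%d %d\n", CallWithDescriptor(sub_string, 10),
                  CallWithDescriptor(reg_exp_exec, 10));
      return 0;
    }
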
@@ -12596,7 +12534,12 @@ void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
DCHECK_EQ(4, call->arguments()->length());
CHECK_ALIVE(VisitExpressions(call->arguments()));
PushArgumentsFromEnvironment(call->arguments()->length());
- HCallStub* result = New<HCallStub>(CodeStub::RegExpExec, 4);
+ Callable callable = CodeFactory::RegExpExec(isolate());
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {context()};
+ HInstruction* result = New<HCallWithDescriptor>(
+ stub, call->arguments()->length(), callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)));
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -12669,7 +12612,7 @@ void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
DCHECK_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* number = Pop();
- HValue* result = BuildNumberToString(number, Type::Any(zone()));
+ HValue* result = BuildNumberToString(number, Type::Any());
return ast_context()->ReturnValue(result);
}
@@ -12929,16 +12872,6 @@ void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateFastOneByteArrayJoin(CallRuntime* call) {
- // Simply returning undefined here would be semantically correct and even
- // avoid the bailout. Nevertheless, some ancient benchmarks like SunSpider's
- // string-fasta would tank, because fullcode contains an optimized version.
- // Obviously the fullcode => Crankshaft => bailout => fullcode dance is
- // faster... *sigh*
- return Bailout(kInlinedRuntimeFunctionFastOneByteArrayJoin);
-}
-
-
void HOptimizedGraphBuilder::GenerateDebugBreakInOptimizedCode(
CallRuntime* call) {
Add<HDebugBreak>();
diff --git a/deps/v8/src/crankshaft/hydrogen.h b/deps/v8/src/crankshaft/hydrogen.h
index 40a18347be..ce0d0df6aa 100644
--- a/deps/v8/src/crankshaft/hydrogen.h
+++ b/deps/v8/src/crankshaft/hydrogen.h
@@ -297,11 +297,12 @@ class BoundsCheckTable;
class InductionVariableBlocksTable;
class HGraph final : public ZoneObject {
public:
- explicit HGraph(CompilationInfo* info);
+ explicit HGraph(CompilationInfo* info, CallInterfaceDescriptor descriptor);
Isolate* isolate() const { return isolate_; }
Zone* zone() const { return zone_; }
CompilationInfo* info() const { return info_; }
+ CallInterfaceDescriptor descriptor() const { return descriptor_; }
const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
@@ -345,13 +346,6 @@ class HGraph final : public ZoneObject {
bool IsStandardConstant(HConstant* constant);
HBasicBlock* CreateBasicBlock();
- HArgumentsObject* GetArgumentsObject() const {
- return arguments_object_.get();
- }
-
- void SetArgumentsObject(HArgumentsObject* object) {
- arguments_object_.set(object);
- }
int GetMaximumValueID() const { return values_.length(); }
int GetNextBlockID() { return next_block_id_++; }
@@ -481,11 +475,11 @@ class HGraph final : public ZoneObject {
SetOncePointer<HConstant> constant_the_hole_;
SetOncePointer<HConstant> constant_null_;
SetOncePointer<HConstant> constant_invalid_context_;
- SetOncePointer<HArgumentsObject> arguments_object_;
HOsrBuilder* osr_;
CompilationInfo* info_;
+ CallInterfaceDescriptor descriptor_;
Zone* zone_;
bool is_recursive_;
@@ -1006,8 +1000,10 @@ class HAllocationMode final BASE_EMBEDDED {
class HGraphBuilder {
public:
- explicit HGraphBuilder(CompilationInfo* info)
+ explicit HGraphBuilder(CompilationInfo* info,
+ CallInterfaceDescriptor descriptor)
: info_(info),
+ descriptor_(descriptor),
graph_(NULL),
current_block_(NULL),
scope_(info->scope()),
@@ -1294,6 +1290,8 @@ class HGraphBuilder {
HValue* BuildGetElementsKind(HValue* object);
+ HValue* BuildEnumLength(HValue* map);
+
HValue* BuildCheckHeapObject(HValue* object);
HValue* BuildCheckString(HValue* string);
HValue* BuildWrapReceiver(HValue* object, HValue* function);
@@ -1323,6 +1321,7 @@ class HGraphBuilder {
bool is_jsarray);
HValue* BuildNumberToString(HValue* object, Type* type);
+ HValue* BuildToNumber(HValue* input);
HValue* BuildToObject(HValue* receiver);
void BuildJSObjectCheck(HValue* receiver,
@@ -1349,8 +1348,7 @@ class HGraphBuilder {
HValue* BuildUncheckedDictionaryElementLoad(HValue* receiver,
HValue* elements, HValue* key,
- HValue* hash,
- LanguageMode language_mode);
+ HValue* hash);
// ES6 section 7.4.7 CreateIterResultObject ( value, done )
HValue* BuildCreateIterResultObject(HValue* value, HValue* done);
@@ -1429,7 +1427,6 @@ class HGraphBuilder {
Type* left_type, Type* right_type,
Type* result_type, Maybe<int> fixed_right_arg,
HAllocationMode allocation_mode,
- Strength strength,
BailoutId opt_id = BailoutId::None());
HLoadNamedField* AddLoadFixedArrayLength(HValue *object,
@@ -1912,6 +1909,7 @@ class HGraphBuilder {
}
CompilationInfo* info_;
+ CallInterfaceDescriptor descriptor_;
HGraph* graph_;
HBasicBlock* current_block_;
Scope* scope_;
@@ -2200,28 +2198,21 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(IsRegExp) \
F(IsJSProxy) \
F(Call) \
- F(ArgumentsLength) \
- F(Arguments) \
F(ValueOf) \
- F(SetValueOf) \
- F(IsDate) \
F(StringCharFromCode) \
F(StringCharAt) \
F(OneByteSeqStringSetChar) \
F(TwoByteSeqStringSetChar) \
- F(ObjectEquals) \
F(ToInteger) \
+ F(ToName) \
F(ToObject) \
F(ToString) \
F(ToLength) \
F(ToNumber) \
- F(IsFunction) \
F(IsJSReceiver) \
F(MathPow) \
- F(IsMinusZero) \
F(HasCachedArrayIndex) \
F(GetCachedArrayIndex) \
- F(FastOneByteArrayJoin) \
F(DebugBreakInOptimizedCode) \
F(StringCharCodeAt) \
F(SubString) \
@@ -2233,7 +2224,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(DebugIsActive) \
/* Typed Arrays */ \
F(TypedArrayInitialize) \
- F(DataViewInitialize) \
F(MaxSmi) \
F(TypedArrayMaxSizeInHeap) \
F(ArrayBufferViewGetByteLength) \
@@ -2262,9 +2252,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
/* ES6 Iterators */ \
F(CreateIterResultObject) \
/* Arrays */ \
- F(HasFastPackedElements) \
- /* JSValue */ \
- F(JSValueGetValue)
+ F(HasFastPackedElements)
#define GENERATOR_DECLARATION(Name) void Generate##Name(CallRuntime* call);
FOR_EACH_HYDROGEN_INTRINSIC(GENERATOR_DECLARATION)
@@ -2420,14 +2408,10 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
bool TryInlineCall(Call* expr);
bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
- bool TryInlineGetter(Handle<JSFunction> getter,
- Handle<Map> receiver_map,
- BailoutId ast_id,
- BailoutId return_id);
- bool TryInlineSetter(Handle<JSFunction> setter,
- Handle<Map> receiver_map,
- BailoutId id,
- BailoutId assignment_id,
+ bool TryInlineGetter(Handle<Object> getter, Handle<Map> receiver_map,
+ BailoutId ast_id, BailoutId return_id);
+ bool TryInlineSetter(Handle<Object> setter, Handle<Map> receiver_map,
+ BailoutId id, BailoutId assignment_id,
HValue* implicit_return_value);
bool TryInlineIndirectCall(Handle<JSFunction> function, Call* expr,
int arguments_count);
@@ -2445,18 +2429,13 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HValue* receiver,
SmallMapList* receiver_types);
bool TryInlineApiFunctionCall(Call* expr, HValue* receiver);
- bool TryInlineApiGetter(Handle<JSFunction> function,
- Handle<Map> receiver_map,
+ bool TryInlineApiGetter(Handle<Object> function, Handle<Map> receiver_map,
BailoutId ast_id);
- bool TryInlineApiSetter(Handle<JSFunction> function,
- Handle<Map> receiver_map,
+ bool TryInlineApiSetter(Handle<Object> function, Handle<Map> receiver_map,
BailoutId ast_id);
- bool TryInlineApiCall(Handle<JSFunction> function,
- HValue* receiver,
- SmallMapList* receiver_maps,
- int argc,
- BailoutId ast_id,
- ApiCallType call_type);
+ bool TryInlineApiCall(Handle<Object> function, HValue* receiver,
+ SmallMapList* receiver_maps, int argc, BailoutId ast_id,
+ ApiCallType call_type);
static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map);
static bool CanInlineArrayResizeOperation(Handle<Map> receiver_map);
@@ -2534,7 +2513,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
: builder_(builder),
access_type_(access_type),
map_(map),
- name_(name),
+ name_(isolate()->factory()->InternalizeName(name)),
field_type_(HType::Tagged()),
access_(HObjectAccess::ForMap()),
lookup_type_(NOT_FOUND),
@@ -2599,7 +2578,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
Isolate* isolate() const { return builder_->isolate(); }
Handle<JSObject> holder() { return holder_; }
- Handle<JSFunction> accessor() { return accessor_; }
+ Handle<Object> accessor() { return accessor_; }
Handle<Object> constant() { return constant_; }
Handle<Map> transition() { return transition_; }
SmallMapList* field_maps() { return &field_maps_; }
@@ -2636,12 +2615,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
Handle<Object> GetAccessorsFromMap(Handle<Map> map) const {
return GetConstantFromMap(map);
}
- Handle<HeapType> GetFieldTypeFromMap(Handle<Map> map) const {
- DCHECK(IsFound());
- DCHECK(number_ < map->NumberOfOwnDescriptors());
- return handle(map->instance_descriptors()->GetFieldType(number_),
- isolate());
- }
+ Handle<FieldType> GetFieldTypeFromMap(Handle<Map> map) const;
Handle<Map> GetFieldOwnerFromMap(Handle<Map> map) const {
DCHECK(IsFound());
DCHECK(number_ < map->NumberOfOwnDescriptors());
@@ -2657,7 +2631,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void LookupDescriptor(Map* map, Name* name) {
DescriptorArray* descriptors = map->instance_descriptors();
- int number = descriptors->SearchWithCache(name, map);
+ int number = descriptors->SearchWithCache(isolate(), name, map);
if (number == DescriptorArray::kNotFound) return NotFound();
lookup_type_ = DESCRIPTOR_TYPE;
details_ = descriptors->GetDetails(number);
@@ -2705,7 +2679,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
Handle<Map> map_;
Handle<Name> name_;
Handle<JSObject> holder_;
- Handle<JSFunction> accessor_;
+ Handle<Object> accessor_;
Handle<JSObject> api_holder_;
Handle<Object> constant_;
SmallMapList field_maps_;
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
index 4ec33ab146..a535153e18 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -74,7 +74,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
- code->set_stack_slots(GetStackSlotCount());
+ code->set_stack_slots(GetTotalFrameSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
@@ -131,13 +131,6 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
__ Move(edx, Immediate(kNoAlignmentPadding));
@@ -490,7 +483,7 @@ bool LCodeGen::GenerateSafepointTable() {
masm()->nop();
}
}
- safepoints_.Emit(masm(), GetStackSlotCount());
+ safepoints_.Emit(masm(), GetTotalFrameSlotCount());
return !is_aborted();
}
@@ -578,7 +571,7 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- return Operand(ebp, StackSlotOffset(op->index()));
+ return Operand(ebp, FrameSlotToFPOffset(op->index()));
} else {
    // Retrieve parameter without eager stack frame, relative to the
    // stack pointer.
@@ -590,7 +583,7 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
Operand LCodeGen::HighOperand(LOperand* op) {
DCHECK(op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+ return Operand(ebp, FrameSlotToFPOffset(op->index()) + kPointerSize);
} else {
    // Retrieve parameter without eager stack frame, relative to the
    // stack pointer.
@@ -659,9 +652,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
if (op->IsStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
if (is_tagged) {
translation->StoreStackSlot(index);
} else if (is_uint32) {
@@ -671,9 +661,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -983,26 +970,6 @@ void LCodeGen::DoParameter(LParameter* instr) {
}
-void LCodeGen::DoCallStub(LCallStub* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->result()).is(eax));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpExec: {
- RegExpExecStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
GenerateOsrPrologue();
}
@@ -1668,13 +1635,6 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
}
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLength(result, map);
-}
-
-
Operand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -1923,8 +1883,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(eax));
DCHECK(ToRegister(instr->result()).is(eax));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2167,8 +2126,9 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = EvalComparison(instr->op(), left_val, right_val) ?
- instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+ int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+ ? instr->TrueDestination(chunk_)
+ : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
@@ -2230,34 +2190,6 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
- Representation rep = instr->hydrogen()->value()->representation();
- DCHECK(!rep.IsInteger32());
- Register scratch = ToRegister(instr->temp());
-
- if (rep.IsDouble()) {
- XMMRegister value = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = double_scratch0();
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, value);
- EmitFalseBranch(instr, not_equal);
- __ movmskpd(scratch, value);
- __ test(scratch, Immediate(1));
- EmitBranch(instr, not_zero);
- } else {
- Register value = ToRegister(instr->value());
- Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
- __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
- __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
- Immediate(0x1));
- EmitFalseBranch(instr, no_overflow);
- __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
- Immediate(0x00000000));
- EmitBranch(instr, equal);
- }
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -2516,8 +2448,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2643,9 +2574,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ mov(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
- SLOPPY, PREMONOMORPHIC).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2760,10 +2691,10 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
__ mov(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_INSIDE_TYPEOF,
+ instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2877,6 +2808,9 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -2996,8 +2930,8 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
}
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3657,21 +3591,22 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ HCallFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->function()).is(edi));
DCHECK(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
- if (instr->hydrogen()->HasVectorAndSlot()) {
+ ConvertReceiverMode mode = hinstr->convert_mode();
+ if (hinstr->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
DCHECK(slot_register.is(edx));
DCHECK(vector_register.is(ebx));
AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- int index = vector->GetIndex(instr->hydrogen()->slot());
+ Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+ int index = vector->GetIndex(hinstr->slot());
__ mov(vector_register, vector);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
@@ -3947,6 +3882,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -5155,8 +5093,8 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
final_branch_condition = equal;
} else if (String::Equals(type_name, factory()->undefined_string())) {
- __ cmp(input, factory()->undefined_value());
- __ j(equal, true_label, true_distance);
+ __ cmp(input, factory()->null_value());
+ __ j(equal, false_label, false_distance);
__ JumpIfSmi(input, false_label, false_distance);
// Check for undetectable objects => true.
__ mov(input, FieldOperand(input, HeapObject::kMapOffset));
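
The typeof-"undefined" sequence above inverts its first test: instead of accepting the undefined sentinel directly, it rejects null and then relies on the undetectable map bit. The likely reason, inferred from the emitted checks rather than stated in the diff, is that null's map also carries the undetectable bit, so without the explicit exclusion typeof null would wrongly report "undefined". The predicate the branches implement, as a self-contained sketch:

    #include <cstdio>

    struct FakeMap { bool is_undetectable; };
    struct FakeValue {
      bool is_smi;
      bool is_null;
      const FakeMap* map;  // only meaningful when !is_smi
    };

    // Mirrors the branch order: null -> false, smi -> false,
    // otherwise the map's undetectable bit decides.
    bool TypeofIsUndefined(const FakeValue& v) {
      if (v.is_null) return false;
      if (v.is_smi) return false;
      return v.map->is_undetectable;
    }

    int main() {
      FakeMap undetectable{true};
      FakeValue undef{false, false, &undetectable};
      FakeValue null_v{false, true, &undetectable};
      std::printf("%d %d\n", TypeofIsUndefined(undef),
                  TypeofIsUndefined(null_v));  // 1 0
      return 0;
    }
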
@@ -5328,12 +5266,6 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- __ test(eax, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
-
- STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
- __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
- DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
__ CheckEnumCache(&call_runtime);
@@ -5344,11 +5276,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(eax);
- CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->meta_map());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ CallRuntime(Runtime::kForInEnumerate, instr);
__ bind(&use_cache);
}
@@ -5460,15 +5388,6 @@ void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
}
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
- Handle<ScopeInfo> scope_info = instr->scope_info();
- __ Push(scope_info);
- __ push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, instr);
- RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
index 06a3e10bf2..589ef2e05e 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.h
@@ -47,10 +47,8 @@ class LCodeGen: public LCodeGenBase {
}
bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub() ||
- info()->requires_frame();
+ return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+ !info()->IsStub() || info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -132,7 +130,13 @@ class LCodeGen: public LCodeGenBase {
Register temporary,
Register temporary2);
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ bool HasAllocatedStackSlots() const {
+ return chunk()->HasAllocatedStackSlots();
+ }
+ int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+ int GetTotalFrameSlotCount() const {
+ return chunk()->GetTotalFrameSlotCount();
+ }
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
index a0cb93975f..e2772d5ee3 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.cc
@@ -344,11 +344,11 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
  // Skip a slot for a double-width slot.
if (kind == DOUBLE_REGISTERS) {
- spill_slot_count_++;
- spill_slot_count_ |= 1;
+ current_frame_slots_++;
+ current_frame_slots_ |= 1;
num_double_slots_++;
}
- return spill_slot_count_++;
+ return current_frame_slots_++;
}
@@ -437,7 +437,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// Reserve the first spill slot for the state of dynamic alignment.
if (info()->IsOptimizing()) {
int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
- DCHECK_EQ(alignment_state_index, 0);
+ DCHECK_EQ(alignment_state_index, 4);
USE(alignment_state_index);
}
@@ -1524,14 +1524,22 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstant(instr->BetterRightOperand());
+ HValue* h_right = instr->BetterRightOperand();
+ LOperand* right = UseOrConstant(h_right);
LOperand* temp = NULL;
if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
temp = TempRegister();
}
LMulI* mul = new(zone()) LMulI(left, right, temp);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ int constant_value =
+ h_right->IsConstant() ? HConstant::cast(h_right)->Integer32Value() : 0;
+ // |needs_environment| must mirror the cases where LCodeGen::DoMulI calls
+ // |DeoptimizeIf|.
+ bool needs_environment =
+ instr->CheckFlag(HValue::kCanOverflow) ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+ (!right->IsConstantOperand() || constant_value <= 0));
+ if (needs_environment) {
AssignEnvironment(mul);
}
return DefineSameAsFirst(mul);
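
The reworked DoMul keeps the deopt environment only when LCodeGen::DoMulI can actually deoptimize. For the minus-zero case that means a constant right operand greater than zero is safe: a product can only become -0 when one factor is zero and the other is negative, or when the multiplier itself is zero and the left operand is negative. The IEEE-754 behavior that motivates the constant_value <= 0 test, runnable:

    #include <cmath>
    #include <cstdio>

    int main() {
      std::printf("%d\n", (int)std::signbit(-5.0 * 0.0));  // 1: -0, deopt needed
      std::printf("%d\n", (int)std::signbit(0.0 * -3.0));  // 1: -0, deopt needed
      std::printf("%d\n", (int)std::signbit(0.0 * 4.0));   // 0: +0, safe
      return 0;
    }
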
@@ -1701,14 +1709,6 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
}
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
- HCompareMinusZeroAndBranch* instr) {
- LOperand* value = UseRegister(instr->value());
- LOperand* scratch = TempRegister();
- return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
@@ -1780,12 +1780,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2492,8 +2486,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor =
- info()->code_stub()->GetCallInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor = graph()->descriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
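
DoParameter now reads the call descriptor off the graph instead of reaching back through info()->code_stub(); this works because, per the hydrogen.h hunks above, HGraph and HGraphBuilder now capture a CallInterfaceDescriptor at construction time. A minimal sketch of that capture-at-construction shape, with simplified stand-in types:

    #include <cstdio>

    struct CallInterfaceDescriptorLike { int register_param_count; };

    class GraphLike {
     public:
      explicit GraphLike(CallInterfaceDescriptorLike d) : descriptor_(d) {}
      const CallInterfaceDescriptorLike& descriptor() const {
        return descriptor_;
      }
     private:
      CallInterfaceDescriptorLike descriptor_;
    };

    int main() {
      GraphLike graph({/*register_param_count=*/3});
      // Lithium building asks the graph, not the code stub:
      std::printf("%d\n", graph.descriptor().register_param_count);
      return 0;
    }
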
@@ -2519,18 +2512,12 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
// The first local is saved at the end of the unoptimized frame.
spill_index = graph()->osr()->UnoptimizedFrameSlots();
}
+ spill_index += StandardFrameConstants::kFixedSlotCount;
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
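The OSR path gets the same renumbering: a value spilled at unoptimized-frame slot N now lands at absolute index N + kFixedSlotCount. Illustratively, with the four-slot header assumed above:

// spill_index == 2 in the unoptimized frame
// -> 2 + StandardFrameConstants::kFixedSlotCount == 6 in the new numbering,
//    consistent with GetNextSpillIndex() starting at index 4.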
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LCallStub* result = new(zone()) LCallStub(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
// There are no real uses of the arguments object.
// arguments.length and element access are supported directly on
@@ -2680,16 +2667,6 @@ LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
}
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
- HAllocateBlockContext* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* function = UseRegisterAtStart(instr->function());
- LAllocateBlockContext* result =
- new(zone()) LAllocateBlockContext(context, function);
- return MarkAsCall(DefineFixed(result, esi), instr);
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/ia32/lithium-ia32.h b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
index ab7a4b5516..e22ab437fc 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-ia32.h
+++ b/deps/v8/src/crankshaft/ia32/lithium-ia32.h
@@ -24,7 +24,6 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
- V(AllocateBlockContext) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -39,7 +38,6 @@ class LCodeGen;
V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
- V(CallStub) \
V(CheckArrayBufferNotNeutered) \
V(CheckInstanceType) \
V(CheckMaps) \
@@ -51,7 +49,6 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
- V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -105,7 +102,6 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadRoot) \
- V(MapEnumLength) \
V(MathAbs) \
V(MathClz32) \
V(MathExp) \
@@ -455,19 +451,6 @@ class LParameter final : public LTemplateInstruction<1, 0, 0> {
};
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallStub(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -975,22 +958,6 @@ class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
};
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
- public:
- LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
- "cmp-minus-zero-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1140,8 +1107,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
- Strength strength() { return hydrogen()->strength(); }
-
LOperand* context() { return inputs_[0]; }
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1338,18 +1303,6 @@ class LCmpMapAndBranch final : public LControlInstruction<1, 0> {
};
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1481,8 +1434,6 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- Strength strength() { return hydrogen()->strength(); }
-
private:
Token::Value op_;
};
@@ -2603,23 +2554,6 @@ class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
- public:
- LAllocateBlockContext(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
- DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/lithium-codegen.cc b/deps/v8/src/crankshaft/lithium-codegen.cc
index 5bd1e6a9b8..c5b7e9c470 100644
--- a/deps/v8/src/crankshaft/lithium-codegen.cc
+++ b/deps/v8/src/crankshaft/lithium-codegen.cc
@@ -154,7 +154,9 @@ void LCodeGenBase::Comment(const char* format, ...) {
void LCodeGenBase::DeoptComment(const Deoptimizer::DeoptInfo& deopt_info) {
- masm()->RecordDeoptReason(deopt_info.deopt_reason, deopt_info.position);
+ SourcePosition position = deopt_info.position;
+ int raw_position = position.IsUnknown() ? 0 : position.raw();
+ masm()->RecordDeoptReason(deopt_info.deopt_reason, raw_position);
}
diff --git a/deps/v8/src/crankshaft/lithium.cc b/deps/v8/src/crankshaft/lithium.cc
index 82ad6962be..677639095a 100644
--- a/deps/v8/src/crankshaft/lithium.cc
+++ b/deps/v8/src/crankshaft/lithium.cc
@@ -246,22 +246,9 @@ void LPointerMap::PrintTo(StringStream* stream) {
stream->Add("}");
}
-
-int StackSlotOffset(int index) {
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return -(index + 1) * kPointerSize -
- StandardFrameConstants::kFixedFrameSizeFromFp;
- } else {
- // Incoming parameter. Skip the return address.
- return -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
- }
-}
-
-
LChunk::LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
+ : base_frame_slots_(StandardFrameConstants::kFixedFrameSize / kPointerSize),
+ current_frame_slots_(base_frame_slots_),
info_(info),
graph_(graph),
instructions_(32, info->zone()),
@@ -270,7 +257,6 @@ LChunk::LChunk(CompilationInfo* info, HGraph* graph)
deprecation_dependencies_(32, info->zone()),
stability_dependencies_(8, info->zone()) {}
-
LLabel* LChunk::GetLabel(int block_id) const {
HBasicBlock* block = graph_->blocks()->at(block_id);
int first_instruction = block->first_instruction_index();
@@ -495,9 +481,9 @@ void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
while (!iterator.Done()) {
if (info()->saves_caller_doubles()) {
if (kDoubleSize == kPointerSize * 2) {
- spill_slot_count_ += 2;
+ current_frame_slots_ += 2;
} else {
- spill_slot_count_++;
+ current_frame_slots_++;
}
}
iterator.Advance();
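With StackSlotOffset gone, the chunk tracks two counters instead of a bare spill count. A compressed restatement of the bookkeeping (the member declarations appear in the lithium.h hunk below; the comments are editorial):

int base_frame_slots_;     // fixed frame header: kFixedFrameSize / kPointerSize
int current_frame_slots_;  // header plus every slot allocated so far
// set_allocated_double_registers() charges caller-saved doubles to the same
// counter; on 32-bit targets (kDoubleSize == 2 * kPointerSize) each saved
// double costs two slots, as the hunk above shows.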
diff --git a/deps/v8/src/crankshaft/lithium.h b/deps/v8/src/crankshaft/lithium.h
index 10e980e983..5cfc0c358a 100644
--- a/deps/v8/src/crankshaft/lithium.h
+++ b/deps/v8/src/crankshaft/lithium.h
@@ -638,7 +638,13 @@ class LChunk : public ZoneObject {
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
- int spill_slot_count() const { return spill_slot_count_; }
+ bool HasAllocatedStackSlots() const {
+ return current_frame_slots_ != base_frame_slots_;
+ }
+ int GetSpillSlotCount() const {
+ return current_frame_slots_ - base_frame_slots_;
+ }
+ int GetTotalFrameSlotCount() const { return current_frame_slots_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
Isolate* isolate() const { return graph_->isolate(); }
@@ -687,7 +693,8 @@ class LChunk : public ZoneObject {
protected:
LChunk(CompilationInfo* info, HGraph* graph);
- int spill_slot_count_;
+ int base_frame_slots_;
+ int current_frame_slots_;
private:
void CommitDependencies(Handle<Code> code) const;
@@ -757,8 +764,6 @@ class LChunkBuilderBase BASE_EMBEDDED {
};
-int StackSlotOffset(int index);
-
enum NumberUntagDMode {
NUMBER_CANDIDATE_IS_SMI,
NUMBER_CANDIDATE_IS_ANY_TAGGED
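A reading aid for the three new accessors; these identities follow directly from the definitions above and are stated here as a sketch, not shipped code:

// GetTotalFrameSlotCount() == base_frame_slots_ + GetSpillSlotCount()
// HasAllocatedStackSlots() == (GetSpillSlotCount() != 0)
// Call sites split accordingly: safepoint tables and Code::set_stack_slots
// now take the total, while spill-only arithmetic uses GetSpillSlotCount().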
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
index 2414f0d61c..8febb573b1 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -83,7 +83,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
- code->set_stack_slots(GetStackSlotCount());
+ code->set_stack_slots(GetTotalFrameSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -127,13 +127,6 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
-#endif
-
// a1: Callee's JS function.
// cp: Callee's context.
// fp: Caller's frame pointer.
@@ -379,7 +372,7 @@ bool LCodeGen::GenerateJumpTable() {
bool LCodeGen::GenerateSafepointTable() {
DCHECK(is_done());
- safepoints_.Emit(masm(), GetStackSlotCount());
+ safepoints_.Emit(masm(), GetTotalFrameSlotCount());
return !is_aborted();
}
@@ -552,7 +545,7 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- return MemOperand(fp, StackSlotOffset(op->index()));
+ return MemOperand(fp, FrameSlotToFPOffset(op->index()));
} else {
// Retrieve the parameter without an eager stack frame, relative to the
// stack pointer.
@@ -564,7 +557,7 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
DCHECK(op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+ return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
} else {
// Retrieve the parameter without an eager stack frame, relative to the
// stack pointer.
@@ -633,9 +626,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
if (op->IsStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
if (is_tagged) {
translation->StoreStackSlot(index);
} else if (is_uint32) {
@@ -645,9 +635,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -946,26 +933,6 @@ void LCodeGen::DoParameter(LParameter* instr) {
}
-void LCodeGen::DoCallStub(LCallStub* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(v0));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpExec: {
- RegExpExecStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
GenerateOsrPrologue();
}
@@ -1411,8 +1378,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (constant < 0) __ Subu(result, zero_reg, result);
} else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ sll(scratch, left, shift);
- __ Addu(result, scratch, left);
+ __ Lsa(result, left, left, shift);
// Correct the sign of the result if the constant is negative.
if (constant < 0) __ Subu(result, zero_reg, result);
} else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
@@ -1652,13 +1618,6 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
}
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLength(result, map);
-}
-
-
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -1804,8 +1763,14 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
// At this point, both left and right are either 0 or -0.
if (operation == HMathMinMax::kMathMin) {
+ // The algorithm is: -((-L) + (-R)), which, when L and R are in different
+ // registers, is most efficiently expressed as -((-L) - R).
__ neg_d(left_reg, left_reg);
- __ sub_d(result_reg, left_reg, right_reg);
+ if (left_reg.is(right_reg)) {
+ __ add_d(result_reg, left_reg, right_reg);
+ } else {
+ __ sub_d(result_reg, left_reg, right_reg);
+ }
__ neg_d(result_reg, result_reg);
} else {
__ add_d(result_reg, left_reg, right_reg);
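The aliasing check is needed because left_reg has just been negated in place: if right_reg is the same register, the second operand is already negated too. A sketch of the failure mode under IEEE-754 arithmetic, assuming both inputs are +0:

double L = 0.0;                   // left and right alias, both hold +0
double negL = -L;                 // in-place negation: -0.0
double via_sub = -(negL - negL);  // (-0) - (-0) == +0; negated -> -0  (wrong)
double via_add = -(negL + negL);  // (-0) + (-0) == -0; negated -> +0  (correct)
// min(+0, +0) must be +0, hence add_d when the registers alias.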
@@ -1877,8 +1842,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(a0));
DCHECK(ToRegister(instr->result()).is(v0));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
// Other architectures use a nop here to signal that there is no inlined
// patchable code. MIPS does not need the nop, since our marker
@@ -2159,8 +2123,9 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = EvalComparison(instr->op(), left_val, right_val) ?
- instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+ int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+ ? instr->TrueDestination(chunk_)
+ : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
@@ -2235,32 +2200,6 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
- Representation rep = instr->hydrogen()->value()->representation();
- DCHECK(!rep.IsInteger32());
- Register scratch = ToRegister(instr->temp());
-
- if (rep.IsDouble()) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
- __ FmoveHigh(scratch, value);
- __ li(at, 0x80000000);
- } else {
- Register value = ToRegister(instr->value());
- __ CheckMap(value,
- scratch,
- Heap::kHeapNumberMapRootIndex,
- instr->FalseLabel(chunk()),
- DO_SMI_CHECK);
- __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
- EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
- __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
- __ mov(at, zero_reg);
- }
- EmitBranch(instr, eq, scratch, Operand(at));
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -2530,8 +2469,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// On MIPS there is no need for a "no inlined smi code" marker (nop).
@@ -2577,8 +2515,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
Register reg = ToRegister(instr->parameter_count());
// The argument count parameter is a smi.
__ SmiUntag(reg);
- __ sll(at, reg, kPointerSizeLog2);
- __ Addu(sp, sp, at);
+ __ Lsa(sp, sp, reg, kPointerSizeLog2);
}
__ Jump(ra);
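This and the surrounding MIPS hunks fold the two-instruction sll/addu (or dsll/daddu) pairs into the macro assembler's Lsa/Dlsa helpers. From these call sites the semantics appear to be rd = base + (index << sa); a sketch of the equivalence as inferred here:

// Assumed semantics, inferred from the hunks in this patch:
//   __ Lsa(rd, base, index, sa);   // rd = base + (index << sa)
// replaces
//   __ sll(at, index, sa);
//   __ Addu(rd, base, at);
// On MIPS R6 this can lower to a single lsa instruction; otherwise the
// macro presumably expands back to the shift-and-add pair.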
@@ -2624,9 +2561,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
- SLOPPY, PREMONOMORPHIC).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2725,10 +2662,10 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in a2.
__ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_INSIDE_TYPEOF,
+ instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2780,8 +2717,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register index = ToRegister(instr->index());
__ li(at, Operand(const_length + 1));
__ Subu(result, at, index);
- __ sll(at, result, kPointerSizeLog2);
- __ Addu(at, arguments, at);
+ __ Lsa(at, arguments, result, kPointerSizeLog2);
__ lw(result, MemOperand(at));
}
} else if (instr->index()->IsConstantOperand()) {
@@ -2790,12 +2726,10 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
int loc = const_index - 1;
if (loc != 0) {
__ Subu(result, length, Operand(loc));
- __ sll(at, result, kPointerSizeLog2);
- __ Addu(at, arguments, at);
+ __ Lsa(at, arguments, result, kPointerSizeLog2);
__ lw(result, MemOperand(at));
} else {
- __ sll(at, length, kPointerSizeLog2);
- __ Addu(at, arguments, at);
+ __ Lsa(at, arguments, length, kPointerSizeLog2);
__ lw(result, MemOperand(at));
}
} else {
@@ -2803,8 +2737,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register index = ToRegister(instr->index());
__ Subu(result, length, index);
__ Addu(result, result, 1);
- __ sll(at, result, kPointerSizeLog2);
- __ Addu(at, arguments, at);
+ __ Lsa(at, arguments, result, kPointerSizeLog2);
__ lw(result, MemOperand(at));
}
}
@@ -2883,6 +2816,9 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -2913,8 +2849,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
key = ToRegister(instr->key());
int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
? (element_size_shift - kSmiTagSize) : element_size_shift;
- __ sll(at, key, shift_size);
- __ Addu(scratch, scratch, at);
+ __ Lsa(scratch, scratch, key, shift_size);
}
__ ldc1(result, MemOperand(scratch));
@@ -2945,11 +2880,9 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
if (instr->hydrogen()->key()->representation().IsSmi()) {
- __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(scratch, elements, scratch);
+ __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize);
} else {
- __ sll(scratch, key, kPointerSizeLog2);
- __ addu(scratch, elements, scratch);
+ __ Lsa(scratch, elements, key, kPointerSizeLog2);
}
}
__ lw(result, MemOperand(store_base, offset));
@@ -3042,8 +2975,8 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
}
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3683,21 +3616,22 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ HCallFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->function()).is(a1));
DCHECK(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
- ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
- if (instr->hydrogen()->HasVectorAndSlot()) {
+ ConvertReceiverMode mode = hinstr->convert_mode();
+ if (hinstr->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
DCHECK(slot_register.is(a3));
DCHECK(vector_register.is(a2));
AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- int index = vector->GetIndex(instr->hydrogen()->slot());
+ Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+ int index = vector->GetIndex(hinstr->slot());
__ li(vector_register, vector);
__ li(slot_register, Operand(Smi::FromInt(index)));
@@ -3944,8 +3878,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
address = external_pointer;
}
} else {
- __ sll(address, key, shift_size);
- __ Addu(address, external_pointer, address);
+ __ Lsa(address, external_pointer, key, shift_size);
}
if (elements_kind == FLOAT32_ELEMENTS) {
@@ -3985,6 +3918,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4062,11 +3998,9 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
if (instr->hydrogen()->key()->representation().IsSmi()) {
- __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(scratch, elements, scratch);
+ __ Lsa(scratch, elements, key, kPointerSizeLog2 - kSmiTagSize);
} else {
- __ sll(scratch, key, kPointerSizeLog2);
- __ addu(scratch, elements, scratch);
+ __ Lsa(scratch, elements, key, kPointerSizeLog2);
}
}
__ sw(value, MemOperand(store_base, offset));
@@ -4353,8 +4287,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
__ Branch(deferred->entry(), hi,
char_code, Operand(String::kMaxOneByteCharCode));
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ sll(scratch, char_code, kPointerSizeLog2);
- __ Addu(result, result, scratch);
+ __ Lsa(result, result, char_code, kPointerSizeLog2);
__ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ Branch(deferred->entry(), eq, result, Operand(scratch));
@@ -5294,8 +5227,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = eq;
} else if (String::Equals(type_name, factory->undefined_string())) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input));
// The first instruction of JumpIfSmi is an And - it is safe in the delay
// slot.
__ JumpIfSmi(input, false_label);
@@ -5482,19 +5415,10 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register result = ToRegister(instr->result());
Register object = ToRegister(instr->object());
- __ And(at, object, kSmiTagMask);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
-
- STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
- __ GetObjectType(object, a1, a1);
- DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
- Operand(JS_PROXY_TYPE));
Label use_cache, call_runtime;
DCHECK(object.is(a0));
- Register null_value = t1;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ CheckEnumCache(null_value, &call_runtime);
+ __ CheckEnumCache(&call_runtime);
__ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
__ Branch(&use_cache);
@@ -5502,12 +5426,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(object);
- CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
- __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- DCHECK(result.is(v0));
- __ LoadRoot(at, Heap::kMetaMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
+ CallRuntime(Runtime::kForInEnumerate, instr);
__ bind(&use_cache);
}
@@ -5622,15 +5541,6 @@ void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
}
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
- Handle<ScopeInfo> scope_info = instr->scope_info();
- __ li(at, scope_info);
- __ Push(at, ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, instr);
- RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
index 160ab9a665..df72b2e93c 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.h
@@ -44,10 +44,8 @@ class LCodeGen: public LCodeGenBase {
}
bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub() ||
- info()->requires_frame();
+ return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+ !info()->IsStub() || info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -153,7 +151,13 @@ class LCodeGen: public LCodeGenBase {
Register temporary,
Register temporary2);
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ bool HasAllocatedStackSlots() const {
+ return chunk()->HasAllocatedStackSlots();
+ }
+ int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+ int GetTotalFrameSlotCount() const {
+ return chunk()->GetTotalFrameSlotCount();
+ }
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.cc b/deps/v8/src/crankshaft/mips/lithium-mips.cc
index a9978e1068..a7c5488d04 100644
--- a/deps/v8/src/crankshaft/mips/lithium-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.cc
@@ -390,8 +390,8 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip a slot for a double-width slot.
- if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
- return spill_slot_count_++;
+ if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
+ return current_frame_slots_++;
}
@@ -1690,14 +1690,6 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
}
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
- HCompareMinusZeroAndBranch* instr) {
- LOperand* value = UseRegister(instr->value());
- LOperand* scratch = TempRegister();
- return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
@@ -1766,12 +1758,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2396,8 +2382,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor =
- info()->code_stub()->GetCallInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor = graph()->descriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
@@ -2418,17 +2403,12 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
Retry(kTooManySpillSlotsNeededForOSR);
spill_index = 0;
}
+ spill_index += StandardFrameConstants::kFixedSlotCount;
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
// There are no real uses of the arguments object.
// arguments.length and element access are supported directly on
@@ -2570,16 +2550,6 @@ LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
return new(zone()) LStoreFrameContext(context);
}
-
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
- HAllocateBlockContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseRegisterAtStart(instr->function());
- LAllocateBlockContext* result =
- new(zone()) LAllocateBlockContext(context, function);
- return MarkAsCall(DefineFixed(result, cp), instr);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/mips/lithium-mips.h b/deps/v8/src/crankshaft/mips/lithium-mips.h
index 880d243312..8b36c5d055 100644
--- a/deps/v8/src/crankshaft/mips/lithium-mips.h
+++ b/deps/v8/src/crankshaft/mips/lithium-mips.h
@@ -21,7 +21,6 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
- V(AllocateBlockContext) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -35,7 +34,6 @@ class LCodeGen;
V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
- V(CallStub) \
V(CheckArrayBufferNotNeutered) \
V(CheckInstanceType) \
V(CheckMaps) \
@@ -47,7 +45,6 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
- V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -101,7 +98,6 @@ class LCodeGen;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
- V(MapEnumLength) \
V(MathAbs) \
V(MathExp) \
V(MathClz32) \
@@ -457,19 +453,6 @@ class LParameter final : public LTemplateInstruction<1, 0, 0> {
};
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallStub(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -970,22 +953,6 @@ class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
};
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
- public:
- LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
- "cmp-minus-zero-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1136,8 +1103,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
- Strength strength() { return hydrogen()->strength(); }
-
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1324,18 +1289,6 @@ class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
};
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1461,8 +1414,6 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- Strength strength() { return hydrogen()->strength(); }
-
private:
Token::Value op_;
};
@@ -2575,23 +2526,6 @@ class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
- public:
- LAllocateBlockContext(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
- DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
index 29d19ee809..ddf908d9ed 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -58,7 +58,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
- code->set_stack_slots(GetStackSlotCount());
+ code->set_stack_slots(GetTotalFrameSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -102,13 +102,6 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
-#endif
-
// a1: Callee's JS function.
// cp: Callee's context.
// fp: Caller's frame pointer.
@@ -362,7 +355,7 @@ bool LCodeGen::GenerateJumpTable() {
bool LCodeGen::GenerateSafepointTable() {
DCHECK(is_done());
- safepoints_.Emit(masm(), GetStackSlotCount());
+ safepoints_.Emit(masm(), GetTotalFrameSlotCount());
return !is_aborted();
}
@@ -537,7 +530,7 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- return MemOperand(fp, StackSlotOffset(op->index()));
+ return MemOperand(fp, FrameSlotToFPOffset(op->index()));
} else {
// Retrieve the parameter without an eager stack frame, relative to the
// stack pointer.
@@ -549,8 +542,8 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
DCHECK(op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- // return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
- return MemOperand(fp, StackSlotOffset(op->index()) + kIntSize);
+ // return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
+ return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kIntSize);
} else {
// Retrieve the parameter without an eager stack frame, relative to the
// stack pointer.
@@ -621,9 +614,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
if (op->IsStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
if (is_tagged) {
translation->StoreStackSlot(index);
} else if (is_uint32) {
@@ -633,9 +623,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -935,26 +922,6 @@ void LCodeGen::DoParameter(LParameter* instr) {
}
-void LCodeGen::DoCallStub(LCallStub* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(v0));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpExec: {
- RegExpExecStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
GenerateOsrPrologue();
}
@@ -1410,8 +1377,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
if (constant < 0) __ Dsubu(result, zero_reg, result);
} else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ dsll(scratch, left, shift);
- __ Daddu(result, scratch, left);
+ __ Dlsa(result, left, left, shift);
// Correct the sign of the result if the constant is negative.
if (constant < 0) __ Dsubu(result, zero_reg, result);
} else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
@@ -1512,8 +1478,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (constant < 0) __ Subu(result, zero_reg, result);
} else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
int32_t shift = WhichPowerOf2(constant_abs - 1);
- __ sll(scratch, left, shift);
- __ addu(result, scratch, left);
+ __ Lsa(result, left, left, shift);
// Correct the sign of the result if the constant is negative.
if (constant < 0) __ Subu(result, zero_reg, result);
} else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
@@ -1749,13 +1714,6 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
}
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLength(result, map);
-}
-
-
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -1922,8 +1880,14 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
// At this point, both left and right are either 0 or -0.
if (operation == HMathMinMax::kMathMin) {
+ // The algorithm is: -((-L) + (-R)), which, when L and R are in different
+ // registers, is most efficiently expressed as -((-L) - R).
__ neg_d(left_reg, left_reg);
- __ sub_d(result_reg, left_reg, right_reg);
+ if (left_reg.is(right_reg)) {
+ __ add_d(result_reg, left_reg, right_reg);
+ } else {
+ __ sub_d(result_reg, left_reg, right_reg);
+ }
__ neg_d(result_reg, result_reg);
} else {
__ add_d(result_reg, left_reg, right_reg);
@@ -1995,8 +1959,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(a0));
DCHECK(ToRegister(instr->result()).is(v0));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
// Other architectures use a nop here to signal that there is no inlined
// patchable code. MIPS does not need the nop, since our marker
@@ -2277,8 +2240,9 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = EvalComparison(instr->op(), left_val, right_val) ?
- instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+ int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+ ? instr->TrueDestination(chunk_)
+ : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
@@ -2353,35 +2317,6 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
- Representation rep = instr->hydrogen()->value()->representation();
- DCHECK(!rep.IsInteger32());
- Register scratch = ToRegister(instr->temp());
-
- if (rep.IsDouble()) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
- __ FmoveHigh(scratch, value);
- // Only use low 32-bits of value.
- __ dsll32(scratch, scratch, 0);
- __ dsrl32(scratch, scratch, 0);
- __ li(at, 0x80000000);
- } else {
- Register value = ToRegister(instr->value());
- __ CheckMap(value,
- scratch,
- Heap::kHeapNumberMapRootIndex,
- instr->FalseLabel(chunk()),
- DO_SMI_CHECK);
- __ lwu(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
- EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
- __ lwu(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
- __ mov(at, zero_reg);
- }
- EmitBranch(instr, eq, scratch, Operand(at));
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -2655,8 +2590,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// On MIPS there is no need for a "no inlined smi code" marker (nop).
@@ -2702,8 +2636,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
Register reg = ToRegister(instr->parameter_count());
// The argument count parameter is a smi.
__ SmiUntag(reg);
- __ dsll(at, reg, kPointerSizeLog2);
- __ Daddu(sp, sp, at);
+ __ Dlsa(sp, sp, reg, kPointerSizeLog2);
}
__ Jump(ra);
@@ -2749,9 +2682,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
- SLOPPY, PREMONOMORPHIC).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2865,10 +2798,10 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in a2.
__ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_INSIDE_TYPEOF,
+ instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2920,8 +2853,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register index = ToRegister(instr->index());
__ li(at, Operand(const_length + 1));
__ Dsubu(result, at, index);
- __ dsll(at, result, kPointerSizeLog2);
- __ Daddu(at, arguments, at);
+ __ Dlsa(at, arguments, result, kPointerSizeLog2);
__ ld(result, MemOperand(at));
}
} else if (instr->index()->IsConstantOperand()) {
@@ -2930,12 +2862,10 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
int loc = const_index - 1;
if (loc != 0) {
__ Dsubu(result, length, Operand(loc));
- __ dsll(at, result, kPointerSizeLog2);
- __ Daddu(at, arguments, at);
+ __ Dlsa(at, arguments, result, kPointerSizeLog2);
__ ld(result, MemOperand(at));
} else {
- __ dsll(at, length, kPointerSizeLog2);
- __ Daddu(at, arguments, at);
+ __ Dlsa(at, arguments, length, kPointerSizeLog2);
__ ld(result, MemOperand(at));
}
} else {
@@ -2943,8 +2873,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register index = ToRegister(instr->index());
__ Dsubu(result, length, index);
__ Daddu(result, result, 1);
- __ dsll(at, result, kPointerSizeLog2);
- __ Daddu(at, arguments, at);
+ __ Dlsa(at, arguments, result, kPointerSizeLog2);
__ ld(result, MemOperand(at));
}
}
@@ -3033,6 +2962,9 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3106,8 +3038,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ SmiScale(scratch, key, kPointerSizeLog2);
__ daddu(scratch, elements, scratch);
} else {
- __ dsll(scratch, key, kPointerSizeLog2);
- __ daddu(scratch, elements, scratch);
+ __ Dlsa(scratch, elements, key, kPointerSizeLog2);
}
}
@@ -3224,8 +3155,8 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
}
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3888,21 +3819,22 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ HCallFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->function()).is(a1));
DCHECK(ToRegister(instr->result()).is(v0));
int arity = instr->arity();
- ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
- if (instr->hydrogen()->HasVectorAndSlot()) {
+ ConvertReceiverMode mode = hinstr->convert_mode();
+ if (hinstr->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
DCHECK(slot_register.is(a3));
DCHECK(vector_register.is(a2));
AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- int index = vector->GetIndex(instr->hydrogen()->slot());
+ Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+ int index = vector->GetIndex(hinstr->slot());
__ li(vector_register, vector);
__ li(slot_register, Operand(Smi::FromInt(index)));
@@ -4208,6 +4140,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4281,8 +4216,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
__ SmiScale(scratch, key, kPointerSizeLog2);
__ daddu(store_base, elements, scratch);
} else {
- __ dsll(scratch, key, kPointerSizeLog2);
- __ daddu(store_base, elements, scratch);
+ __ Dlsa(store_base, elements, key, kPointerSizeLog2);
}
}
@@ -4587,8 +4521,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
__ Branch(deferred->entry(), hi,
char_code, Operand(String::kMaxOneByteCharCode));
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
- __ dsll(scratch, char_code, kPointerSizeLog2);
- __ Daddu(result, result, scratch);
+ __ Dlsa(result, result, char_code, kPointerSizeLog2);
__ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ Branch(deferred->entry(), eq, result, Operand(scratch));
@@ -5499,8 +5432,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = eq;
} else if (String::Equals(type_name, factory->undefined_string())) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(USE_DELAY_SLOT, false_label, eq, at, Operand(input));
// The first instruction of JumpIfSmi is an And - it is safe in the delay
// slot.
__ JumpIfSmi(input, false_label);
@@ -5689,19 +5622,9 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register result = ToRegister(instr->result());
Register object = ToRegister(instr->object());
- __ And(at, object, kSmiTagMask);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
-
- STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
- __ GetObjectType(object, a1, a1);
- DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
- Operand(JS_PROXY_TYPE));
-
Label use_cache, call_runtime;
DCHECK(object.is(a0));
- Register null_value = a5;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ CheckEnumCache(null_value, &call_runtime);
+ __ CheckEnumCache(&call_runtime);
__ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
__ Branch(&use_cache);
@@ -5709,12 +5632,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(object);
- CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
- __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- DCHECK(result.is(v0));
- __ LoadRoot(at, Heap::kMetaMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
+ CallRuntime(Runtime::kForInEnumerate, instr);
__ bind(&use_cache);
}
@@ -5827,15 +5745,6 @@ void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
}
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
- Handle<ScopeInfo> scope_info = instr->scope_info();
- __ li(at, scope_info);
- __ Push(at, ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, instr);
- RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
#undef __
} // namespace internal
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
index efadb0f26b..2f1cefae76 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -44,10 +44,8 @@ class LCodeGen: public LCodeGenBase {
}
bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub() ||
- info()->requires_frame();
+ return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+ !info()->IsStub() || info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -155,7 +153,13 @@ class LCodeGen: public LCodeGenBase {
Register temporary,
Register temporary2);
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ bool HasAllocatedStackSlots() const {
+ return chunk()->HasAllocatedStackSlots();
+ }
+ int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+ int GetTotalFrameSlotCount() const {
+ return chunk()->GetTotalFrameSlotCount();
+ }
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
index 129f61587f..b66e8ba18a 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.cc
@@ -390,8 +390,8 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip a slot for a double-width slot.
- if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
- return spill_slot_count_++;
+ if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
+ return current_frame_slots_++;
}
@@ -1696,14 +1696,6 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
}
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
- HCompareMinusZeroAndBranch* instr) {
- LOperand* value = UseRegister(instr->value());
- LOperand* scratch = TempRegister();
- return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
@@ -1772,12 +1764,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2401,8 +2387,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor =
- info()->code_stub()->GetCallInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor = graph()->descriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
@@ -2423,17 +2408,12 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
Retry(kTooManySpillSlotsNeededForOSR);
spill_index = 0;
}
+ spill_index += StandardFrameConstants::kFixedSlotCount;
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
// There are no real uses of the arguments object.
// arguments.length and element access are supported directly on
@@ -2575,16 +2555,6 @@ LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
}
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
- HAllocateBlockContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseRegisterAtStart(instr->function());
- LAllocateBlockContext* result =
- new(zone()) LAllocateBlockContext(context, function);
- return MarkAsCall(DefineFixed(result, cp), instr);
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/crankshaft/mips64/lithium-mips64.h b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
index 01dc234c5a..8d2324f717 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-mips64.h
+++ b/deps/v8/src/crankshaft/mips64/lithium-mips64.h
@@ -23,7 +23,6 @@ class LCodeGen;
V(AddI) \
V(AddS) \
V(Allocate) \
- V(AllocateBlockContext) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -37,7 +36,6 @@ class LCodeGen;
V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
- V(CallStub) \
V(CheckArrayBufferNotNeutered) \
V(CheckInstanceType) \
V(CheckMaps) \
@@ -49,7 +47,6 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
- V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -103,7 +100,6 @@ class LCodeGen;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
- V(MapEnumLength) \
V(MathAbs) \
V(MathExp) \
V(MathClz32) \
@@ -460,19 +456,6 @@ class LParameter final : public LTemplateInstruction<1, 0, 0> {
};
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallStub(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -988,22 +971,6 @@ class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
};
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
- public:
- LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
- "cmp-minus-zero-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1154,8 +1121,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
- Strength strength() { return hydrogen()->strength(); }
-
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1356,18 +1321,6 @@ class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
};
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1523,8 +1476,6 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- Strength strength() { return hydrogen()->strength(); }
-
private:
Token::Value op_;
};
@@ -2621,23 +2572,6 @@ class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
- public:
- LAllocateBlockContext(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
- DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
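
The classes deleted from this header all derive from LTemplateInstruction<R, I, T>, whose template arguments encode the operand shape — R results, I inputs, T temps — so LCallStub was <1, 1, 0>: one result, the context input, no temps. A minimal analogue of that pattern, not the V8 class itself:

    #include <array>

    struct LOperand;

    // Analogue of LTemplateInstruction<R, I, T>: R results, I inputs,
    // T temps. std::array handles the zero-sized cases (e.g. T == 0).
    template <int R, int I, int T>
    class LTemplateInstructionSketch {
     protected:
      std::array<LOperand*, R> results_;
      std::array<LOperand*, I> inputs_;
      std::array<LOperand*, T> temps_;
    };
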
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
index 936b8a76ef..921d9b69eb 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -60,7 +60,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
- code->set_stack_slots(GetStackSlotCount());
+ code->set_stack_slots(GetTotalFrameSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -104,13 +104,6 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
-#endif
-
// r4: Callee's JS function.
// cp: Callee's context.
// pp: Callee's constant pool pointer (if enabled)
@@ -372,7 +365,7 @@ bool LCodeGen::GenerateJumpTable() {
bool LCodeGen::GenerateSafepointTable() {
DCHECK(is_done());
- safepoints_.Emit(masm(), GetStackSlotCount());
+ safepoints_.Emit(masm(), GetTotalFrameSlotCount());
return !is_aborted();
}
@@ -524,7 +517,7 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- return MemOperand(fp, StackSlotOffset(op->index()));
+ return MemOperand(fp, FrameSlotToFPOffset(op->index()));
} else {
// Retrieve parameter without an eager stack frame, relative to the
// stack pointer.
@@ -536,7 +529,7 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
DCHECK(op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+ return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
} else {
// Retrieve parameter without an eager stack frame, relative to the
// stack pointer.
@@ -600,9 +593,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
if (op->IsStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
if (is_tagged) {
translation->StoreStackSlot(index);
} else if (is_uint32) {
@@ -612,9 +602,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -897,26 +884,6 @@ void LCodeGen::DoParameter(LParameter* instr) {
}
-void LCodeGen::DoCallStub(LCallStub* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->result()).is(r3));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpExec: {
- RegExpExecStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
GenerateOsrPrologue();
}
@@ -1806,13 +1773,6 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
}
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLength(result, map);
-}
-
-
MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
String::Encoding encoding) {
if (index->IsConstantOperand()) {
@@ -1974,14 +1934,18 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ bne(&return_left); // left == right != 0.
// At this point, both left and right are either 0 or -0.
- // N.B. The following works because +0 + -0 == +0
if (operation == HMathMinMax::kMathMin) {
- // For min we want logical-or of sign bit: -(-L + -R)
+ // Min: The algorithm is: -((-L) + (-R)), which in case of L and R being
+ // different registers is most efficiently expressed as -((-L) - R).
__ fneg(left_reg, left_reg);
- __ fsub(result_reg, left_reg, right_reg);
+ if (left_reg.is(right_reg)) {
+ __ fadd(result_reg, left_reg, right_reg);
+ } else {
+ __ fsub(result_reg, left_reg, right_reg);
+ }
__ fneg(result_reg, result_reg);
} else {
- // For max we want logical-and of sign bit: (L + R)
+ // Max: The following works because +0 + -0 == +0
__ fadd(result_reg, left_reg, right_reg);
}
__ b(&done);
@@ -2044,8 +2008,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(r3));
DCHECK(ToRegister(instr->result()).is(r3));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2295,7 +2258,7 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = EvalComparison(instr->op(), left_val, right_val)
+ int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
? instr->TrueDestination(chunk_)
: instr->FalseDestination(chunk_);
EmitGoto(next_block);
@@ -2388,46 +2351,6 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
- Representation rep = instr->hydrogen()->value()->representation();
- DCHECK(!rep.IsInteger32());
- Register scratch = ToRegister(instr->temp());
-
- if (rep.IsDouble()) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- __ fcmpu(value, kDoubleRegZero);
- EmitFalseBranch(instr, ne);
-#if V8_TARGET_ARCH_PPC64
- __ MovDoubleToInt64(scratch, value);
-#else
- __ MovDoubleHighToInt(scratch, value);
-#endif
- __ cmpi(scratch, Operand::Zero());
- EmitBranch(instr, lt);
- } else {
- Register value = ToRegister(instr->value());
- __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
- instr->FalseLabel(chunk()), DO_SMI_CHECK);
-#if V8_TARGET_ARCH_PPC64
- __ LoadP(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
- __ li(ip, Operand(1));
- __ rotrdi(ip, ip, 1); // ip = 0x80000000_00000000
- __ cmp(scratch, ip);
-#else
- __ lwz(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ lwz(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
- Label skip;
- __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
- __ cmp(scratch, r0);
- __ bne(&skip);
- __ cmpi(ip, Operand::Zero());
- __ bind(&skip);
-#endif
- EmitBranch(instr, eq);
- }
-}
-
-
Condition LCodeGen::EmitIsString(Register input, Register temp1,
Label* is_not_string,
SmiCheck check_needed = INLINE_SMI_CHECK) {
@@ -2688,8 +2611,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Token::Value op = instr->op();
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// This instruction also signals no smi code inlined
__ cmpi(r3, Operand::Zero());
@@ -2790,9 +2712,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
- SLOPPY, PREMONOMORPHIC).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2907,10 +2829,10 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
// Name is always in r5.
__ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_INSIDE_TYPEOF,
+ instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3093,6 +3015,9 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3271,8 +3196,8 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
}
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3707,13 +3632,8 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// If the input is +0.5, the result is 1.
__ bgt(&convert); // Out of [-0.5, +0.5].
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-#if V8_TARGET_ARCH_PPC64
- __ MovDoubleToInt64(scratch1, input);
-#else
- __ MovDoubleHighToInt(scratch1, input);
-#endif
- __ cmpi(scratch1, Operand::Zero());
- // [-0.5, -0].
+ // [-0.5, -0] (negative) yields minus zero.
+ __ TestDoubleSign(input, scratch1);
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
}
__ fcmpu(input, dot_five);
@@ -3934,21 +3854,22 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ HCallFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(cp));
DCHECK(ToRegister(instr->function()).is(r4));
DCHECK(ToRegister(instr->result()).is(r3));
int arity = instr->arity();
- ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
- if (instr->hydrogen()->HasVectorAndSlot()) {
+ ConvertReceiverMode mode = hinstr->convert_mode();
+ if (hinstr->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
DCHECK(slot_register.is(r6));
DCHECK(vector_register.is(r5));
AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- int index = vector->GetIndex(instr->hydrogen()->slot());
+ Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+ int index = vector->GetIndex(hinstr->slot());
__ Move(vector_register, vector);
__ LoadSmiLiteral(slot_register, Smi::FromInt(index));
@@ -4278,6 +4199,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4923,17 +4847,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// load heap number
__ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
-#if V8_TARGET_ARCH_PPC64
- __ MovDoubleToInt64(scratch, result_reg);
- // rotate left by one for simple compare.
- __ rldicl(scratch, scratch, 1, 0);
- __ cmpi(scratch, Operand(1));
-#else
- __ MovDoubleToInt64(scratch, ip, result_reg);
- __ cmpi(ip, Operand::Zero());
- __ bne(&done);
- __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
-#endif
+ __ TestDoubleIsMinusZero(result_reg, scratch, ip);
DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
__ b(&done);
@@ -5022,10 +4936,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmpi(input_reg, Operand::Zero());
__ bne(&done);
- __ lwz(scratch1,
- FieldMemOperand(scratch2, HeapNumber::kValueOffset +
- Register::kExponentOffset));
- __ cmpwi(scratch1, Operand::Zero());
+ __ TestHeapNumberSign(scratch2, scratch1);
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
}
}
@@ -5100,12 +5011,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Label done;
__ cmpi(result_reg, Operand::Zero());
__ bne(&done);
-#if V8_TARGET_ARCH_PPC64
- __ MovDoubleToInt64(scratch1, double_input);
-#else
- __ MovDoubleHighToInt(scratch1, double_input);
-#endif
- __ cmpi(scratch1, Operand::Zero());
+ __ TestDoubleSign(double_input, scratch1);
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
@@ -5130,12 +5036,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
Label done;
__ cmpi(result_reg, Operand::Zero());
__ bne(&done);
-#if V8_TARGET_ARCH_PPC64
- __ MovDoubleToInt64(scratch1, double_input);
-#else
- __ MovDoubleHighToInt(scratch1, double_input);
-#endif
- __ cmpi(scratch1, Operand::Zero());
+ __ TestDoubleSign(double_input, scratch1);
DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
@@ -5550,8 +5451,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
final_branch_condition = eq;
} else if (String::Equals(type_name, factory->undefined_string())) {
- __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
- __ beq(true_label);
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
+ __ beq(false_label);
__ JumpIfSmi(input, false_label);
// Check for undetectable objects => true.
__ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
@@ -5727,17 +5628,8 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
- __ TestIfSmi(r3, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
-
- STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
- __ CompareObjectType(r3, r4, r4, JS_PROXY_TYPE);
- DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
-
Label use_cache, call_runtime;
- Register null_value = r8;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ CheckEnumCache(null_value, &call_runtime);
+ __ CheckEnumCache(&call_runtime);
__ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
__ b(&use_cache);
@@ -5745,12 +5637,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(r3);
- CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
- __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r4, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ CallRuntime(Runtime::kForInEnumerate, instr);
__ bind(&use_cache);
}
@@ -5862,15 +5749,6 @@ void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
}
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
- Handle<ScopeInfo> scope_info = instr->scope_info();
- __ Push(scope_info);
- __ push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, instr);
- RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
#undef __
} // namespace internal
} // namespace v8
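
Two things in this file merit a gloss. First, the DoMathMinMax rewrite: when both operands are signed zeros, IEEE 754 addition ANDs the sign bits (+0 + -0 == +0), so max falls out of a plain fadd, while min needs the sign-bit OR, computed as -((-L) + (-R)). The new alias branch matters because once fneg has negated left_reg in place, an aliased right_reg also reads as -L, and the old unconditional fsub then produced -0 for min(+0, +0); fadd keeps the identity intact. Second, the TestDoubleSign, TestHeapNumberSign, and TestDoubleIsMinusZero hunks fold the former V8_TARGET_ARCH_PPC64 #ifdef sequences into macro-assembler helpers. A standalone C++ check of the signed-zero algebra (illustrative, not V8 code):

    #include <cassert>
    #include <cmath>

    int main() {
      double pz = 0.0, nz = -0.0;
      // Max: addition ANDs the sign bits under default rounding.
      assert(!std::signbit(pz + nz));  // +0 + -0 == +0
      assert(std::signbit(nz + nz));   // -0 + -0 == -0
      // Min: -((-l) + (-r)) ORs the sign bits.
      assert(std::signbit(-((-pz) + (-nz))));   // min(+0, -0) == -0
      assert(!std::signbit(-((-pz) + (-pz))));  // min(+0, +0) == +0
      return 0;
    }
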
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
index b0f016d309..1b72bf82dc 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -44,7 +44,7 @@ class LCodeGen : public LCodeGenBase {
}
bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 || info()->is_non_deferred_calling() ||
+ return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
!info()->IsStub() || info()->requires_frame();
}
bool NeedsDeferredFrame() const {
@@ -141,7 +141,13 @@ class LCodeGen : public LCodeGenBase {
Handle<String> class_name, Register input,
Register temporary, Register temporary2);
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ bool HasAllocatedStackSlots() const {
+ return chunk()->HasAllocatedStackSlots();
+ }
+ int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+ int GetTotalFrameSlotCount() const {
+ return chunk()->GetTotalFrameSlotCount();
+ }
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
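
The header makes the slot-accounting split explicit: GetStackSlotCount() still reports only allocated spill slots, while safepoint emission and Code::set_stack_slots now take GetTotalFrameSlotCount(), which also covers the fixed frame header. The assumed relationship between the two counts, restated as a sketch:

    // Sketch of the invariant behind the renamed accessors (the header size
    // is illustrative).
    struct FrameSlots {
      int fixed_header_slots;  // saved fp, return address, context, function
      int spill_slots;         // what GetSpillSlotCount() reports
      bool HasAllocatedStackSlots() const { return spill_slots > 0; }
      int GetTotalFrameSlotCount() const {
        return fixed_header_slots + spill_slots;
      }
    };
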
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
index 63aead7a3c..2a04d9926c 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.cc
@@ -396,8 +396,8 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip a slot if allocating a double-width slot.
- if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
- return spill_slot_count_++;
+ if (kind == DOUBLE_REGISTERS) current_frame_slots_++;
+ return current_frame_slots_++;
}
@@ -1706,14 +1706,6 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
}
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
- HCompareMinusZeroAndBranch* instr) {
- LOperand* value = UseRegister(instr->value());
- LOperand* scratch = TempRegister();
- return new (zone()) LCompareMinusZeroAndBranch(value, scratch);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1782,12 +1774,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new (zone()) LMapEnumLength(map));
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2402,8 +2388,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor =
- info()->code_stub()->GetCallInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor = graph()->descriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
@@ -2424,17 +2409,12 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
Retry(kTooManySpillSlotsNeededForOSR);
spill_index = 0;
}
+ spill_index += StandardFrameConstants::kFixedSlotCount;
}
return DefineAsSpilled(new (zone()) LUnknownOSRValue, spill_index);
}
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new (zone()) LCallStub(context), r3), instr);
-}
-
-
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
// There are no real uses of the arguments object.
// arguments.length and element access are supported directly on
@@ -2574,14 +2554,5 @@ LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
return new (zone()) LStoreFrameContext(context);
}
-
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
- HAllocateBlockContext* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- LOperand* function = UseRegisterAtStart(instr->function());
- LAllocateBlockContext* result =
- new (zone()) LAllocateBlockContext(context, function);
- return MarkAsCall(DefineFixed(result, cp), instr);
-}
} // namespace internal
} // namespace v8
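
Two builder changes here: GetNextSpillIndex now advances current_frame_slots_, the renamed counter that feeds GetTotalFrameSlotCount, and DoParameter reads the CallInterfaceDescriptor from graph()->descriptor() rather than reaching back into the CompilationInfo's code stub. A sketch of the double-width allocation step:

    // Sketch of the allocator step: a double needs two pointer-size slots on
    // this target, so burn one extra slot and return the pair's base index.
    struct SpillAllocator {
      int current_frame_slots_ = 0;
      int GetNextSpillIndex(bool is_double) {
        if (is_double) current_frame_slots_++;
        return current_frame_slots_++;
      }
    };
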
diff --git a/deps/v8/src/crankshaft/ppc/lithium-ppc.h b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
index e86edc9afc..0dfde053b7 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-ppc.h
+++ b/deps/v8/src/crankshaft/ppc/lithium-ppc.h
@@ -21,7 +21,6 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
- V(AllocateBlockContext) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -35,7 +34,6 @@ class LCodeGen;
V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
- V(CallStub) \
V(CheckArrayBufferNotNeutered) \
V(CheckInstanceType) \
V(CheckNonSmi) \
@@ -47,7 +45,6 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
- V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -101,7 +98,6 @@ class LCodeGen;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
- V(MapEnumLength) \
V(MathAbs) \
V(MathClz32) \
V(MathExp) \
@@ -452,17 +448,6 @@ class LParameter final : public LTemplateInstruction<1, 0, 0> {
};
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallStub(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -954,22 +939,6 @@ class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
};
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
- public:
- LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
- "cmp-minus-zero-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1113,8 +1082,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
- Strength strength() { return hydrogen()->strength(); }
-
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1314,16 +1281,6 @@ class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
};
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1444,8 +1401,6 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- Strength strength() { return hydrogen()->strength(); }
-
private:
Token::Value op_;
};
@@ -2500,23 +2455,6 @@ class LStoreFrameContext : public LTemplateInstruction<0, 1, 0> {
};
-class LAllocateBlockContext : public LTemplateInstruction<1, 2, 0> {
- public:
- LAllocateBlockContext(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
- DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/crankshaft/typing.cc b/deps/v8/src/crankshaft/typing.cc
index df50f81167..69d7efed63 100644
--- a/deps/v8/src/crankshaft/typing.cc
+++ b/deps/v8/src/crankshaft/typing.cc
@@ -45,7 +45,7 @@ AstTyper::AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
Effect AstTyper::ObservedOnStack(Object* value) {
Type* lower = Type::NowOf(value, zone());
- return Effect(Bounds(lower, Type::Any(zone())));
+ return Effect(Bounds(lower, Type::Any()));
}
@@ -393,7 +393,7 @@ void AstTyper::VisitLiteral(Literal* expr) {
void AstTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
// TODO(rossberg): Reintroduce RegExp type.
- NarrowType(expr, Bounds(Type::Object(zone())));
+ NarrowType(expr, Bounds(Type::Object()));
}
@@ -421,7 +421,7 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
RECURSE(Visit(prop->value()));
}
- NarrowType(expr, Bounds(Type::Object(zone())));
+ NarrowType(expr, Bounds(Type::Object()));
}
@@ -432,7 +432,7 @@ void AstTyper::VisitArrayLiteral(ArrayLiteral* expr) {
RECURSE(Visit(value));
}
- NarrowType(expr, Bounds(Type::Object(zone())));
+ NarrowType(expr, Bounds(Type::Object()));
}
@@ -485,7 +485,7 @@ void AstTyper::VisitThrow(Throw* expr) {
RECURSE(Visit(expr->exception()));
// TODO(rossberg): is it worth having a non-termination effect?
- NarrowType(expr, Bounds(Type::None(zone())));
+ NarrowType(expr, Bounds(Type::None()));
}
@@ -569,7 +569,7 @@ void AstTyper::VisitCallNew(CallNew* expr) {
RECURSE(Visit(arg));
}
- NarrowType(expr, Bounds(Type::None(zone()), Type::Receiver(zone())));
+ NarrowType(expr, Bounds(Type::None(), Type::Receiver()));
}
@@ -596,13 +596,13 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::NOT:
case Token::DELETE:
- NarrowType(expr, Bounds(Type::Boolean(zone())));
+ NarrowType(expr, Bounds(Type::Boolean()));
break;
case Token::VOID:
- NarrowType(expr, Bounds(Type::Undefined(zone())));
+ NarrowType(expr, Bounds(Type::Undefined()));
break;
case Token::TYPEOF:
- NarrowType(expr, Bounds(Type::InternalizedString(zone())));
+ NarrowType(expr, Bounds(Type::InternalizedString()));
break;
default:
UNREACHABLE();
@@ -624,7 +624,7 @@ void AstTyper::VisitCountOperation(CountOperation* expr) {
RECURSE(Visit(expr->expression()));
- NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
+ NarrowType(expr, Bounds(Type::SignedSmall(), Type::Number()));
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsStackAllocated()) {
@@ -679,8 +679,8 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
RECURSE(Visit(expr->right()));
Type* upper = Type::Union(
expr->left()->bounds().upper, expr->right()->bounds().upper, zone());
- if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
- Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
+ if (!upper->Is(Type::Signed32())) upper = Type::Signed32();
+ Type* lower = Type::Intersect(Type::SignedSmall(), upper, zone());
NarrowType(expr, Bounds(lower, upper));
break;
}
@@ -689,8 +689,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::SAR:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr,
- Bounds(Type::SignedSmall(zone()), Type::Signed32(zone())));
+ NarrowType(expr, Bounds(Type::SignedSmall(), Type::Signed32()));
break;
case Token::SHR:
RECURSE(Visit(expr->left()));
@@ -698,7 +697,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
// TODO(rossberg): The upper bound would be Unsigned32, but since there
// is no 'positive Smi' type for the lower bound, we use the smallest
// union of Smi and Unsigned32 as upper bound instead.
- NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
+ NarrowType(expr, Bounds(Type::SignedSmall(), Type::Number()));
break;
case Token::ADD: {
RECURSE(Visit(expr->left()));
@@ -706,17 +705,19 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
Bounds l = expr->left()->bounds();
Bounds r = expr->right()->bounds();
Type* lower =
- !l.lower->IsInhabited() || !r.lower->IsInhabited() ?
- Type::None(zone()) :
- l.lower->Is(Type::String()) || r.lower->Is(Type::String()) ?
- Type::String(zone()) :
- l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ?
- Type::SignedSmall(zone()) : Type::None(zone());
+ !l.lower->IsInhabited() || !r.lower->IsInhabited()
+ ? Type::None()
+ : l.lower->Is(Type::String()) || r.lower->Is(Type::String())
+ ? Type::String()
+ : l.lower->Is(Type::Number()) && r.lower->Is(Type::Number())
+ ? Type::SignedSmall()
+ : Type::None();
Type* upper =
- l.upper->Is(Type::String()) || r.upper->Is(Type::String()) ?
- Type::String(zone()) :
- l.upper->Is(Type::Number()) && r.upper->Is(Type::Number()) ?
- Type::Number(zone()) : Type::NumberOrString(zone());
+ l.upper->Is(Type::String()) || r.upper->Is(Type::String())
+ ? Type::String()
+ : l.upper->Is(Type::Number()) && r.upper->Is(Type::Number())
+ ? Type::Number()
+ : Type::NumberOrString();
NarrowType(expr, Bounds(lower, upper));
break;
}
@@ -726,7 +727,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
case Token::MOD:
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
+ NarrowType(expr, Bounds(Type::SignedSmall(), Type::Number()));
break;
default:
UNREACHABLE();
@@ -748,11 +749,11 @@ void AstTyper::VisitCompareOperation(CompareOperation* expr) {
RECURSE(Visit(expr->left()));
RECURSE(Visit(expr->right()));
- NarrowType(expr, Bounds(Type::Boolean(zone())));
+ NarrowType(expr, Bounds(Type::Boolean()));
}
-void AstTyper::VisitSpread(Spread* expr) { RECURSE(Visit(expr->expression())); }
+void AstTyper::VisitSpread(Spread* expr) { UNREACHABLE(); }
void AstTyper::VisitEmptyParentheses(EmptyParentheses* expr) {
@@ -760,8 +761,7 @@ void AstTyper::VisitEmptyParentheses(EmptyParentheses* expr) {
}
-void AstTyper::VisitThisFunction(ThisFunction* expr) {
-}
+void AstTyper::VisitThisFunction(ThisFunction* expr) {}
void AstTyper::VisitSuperPropertyReference(SuperPropertyReference* expr) {}
@@ -770,8 +770,7 @@ void AstTyper::VisitSuperPropertyReference(SuperPropertyReference* expr) {}
void AstTyper::VisitSuperCallReference(SuperCallReference* expr) {}
-void AstTyper::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* expr) {
+void AstTyper::VisitRewritableExpression(RewritableExpression* expr) {
Visit(expr->expression());
}
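
The typing.cc churn is mostly the Type API dropping its zone parameter: bitset types such as Type::Object() and Type::SignedSmall() are constants that never allocate, so only structured types still thread a zone through. VisitSpread becoming UNREACHABLE suggests spread expressions are rewritten before typing runs in this version. The reflowed '+' rule is the one substantive piece; a self-contained sketch of its bounds logic (not V8's Type machinery):

    // Sketch of the ADD typing rule from VisitBinaryOperation.
    enum class T { None, SignedSmall, Number, String, NumberOrString };

    // Lower bound: what the '+' result is guaranteed to be.
    T AddLower(bool inhabited, bool l_str, bool r_str, bool l_num, bool r_num) {
      if (!inhabited) return T::None;             // an operand type is empty
      if (l_str || r_str) return T::String;       // definitely concatenation
      if (l_num && r_num) return T::SignedSmall;  // definitely numeric add
      return T::None;
    }

    // Upper bound: everything the '+' result could be.
    T AddUpper(bool l_str, bool r_str, bool l_num, bool r_num) {
      if (l_str || r_str) return T::String;  // one side can only be String
      if (l_num && r_num) return T::Number;  // both sides can only be Number
      return T::NumberOrString;
    }
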
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
index 3f7e9ba825..849b4b33c6 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -65,7 +65,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
- code->set_stack_slots(GetStackSlotCount());
+ code->set_stack_slots(GetTotalFrameSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
@@ -118,13 +118,6 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
}
info()->set_prologue_offset(masm_->pc_offset());
@@ -413,7 +406,7 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateSafepointTable() {
DCHECK(is_done());
- safepoints_.Emit(masm(), GetStackSlotCount());
+ safepoints_.Emit(masm(), GetTotalFrameSlotCount());
return !is_aborted();
}
@@ -514,7 +507,7 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
// representable as an Operand.
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- return Operand(rbp, StackSlotOffset(op->index()));
+ return Operand(rbp, FrameSlotToFPOffset(op->index()));
} else {
// Retrieve parameter without an eager stack frame, relative to the
// stack pointer.
@@ -582,9 +575,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
if (op->IsStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
if (is_tagged) {
translation->StoreStackSlot(index);
} else if (is_uint32) {
@@ -594,9 +584,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -913,26 +900,6 @@ void LCodeGen::DoParameter(LParameter* instr) {
}
-void LCodeGen::DoCallStub(LCallStub* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- DCHECK(ToRegister(instr->result()).is(rax));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpExec: {
- RegExpExecStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
GenerateOsrPrologue();
}
@@ -1660,13 +1627,6 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
}
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLength(result, map);
-}
-
-
Operand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -1961,8 +1921,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(rax));
DCHECK(ToRegister(instr->result()).is(rax));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2208,8 +2167,9 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = EvalComparison(instr->op(), left_val, right_val) ?
- instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+ int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+ ? instr->TrueDestination(chunk_)
+ : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
@@ -2296,33 +2256,6 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
- Representation rep = instr->hydrogen()->value()->representation();
- DCHECK(!rep.IsInteger32());
-
- if (rep.IsDouble()) {
- XMMRegister value = ToDoubleRegister(instr->value());
- XMMRegister xmm_scratch = double_scratch0();
- __ Xorpd(xmm_scratch, xmm_scratch);
- __ Ucomisd(xmm_scratch, value);
- EmitFalseBranch(instr, not_equal);
- __ Movmskpd(kScratchRegister, value);
- __ testl(kScratchRegister, Immediate(1));
- EmitBranch(instr, not_zero);
- } else {
- Register value = ToRegister(instr->value());
- Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
- __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
- __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
- Immediate(0x1));
- EmitFalseBranch(instr, no_overflow);
- __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
- Immediate(0x00000000));
- EmitBranch(instr, equal);
- }
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -2569,8 +2502,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
Token::Value op = instr->op();
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
@@ -2658,9 +2590,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ Move(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
- SLOPPY, PREMONOMORPHIC).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2775,10 +2707,10 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
__ Move(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_INSIDE_TYPEOF,
+ instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -2909,6 +2841,9 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3064,8 +2999,8 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
}
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3740,21 +3675,23 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ HCallFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->function()).is(rdi));
DCHECK(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
- ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
- if (instr->hydrogen()->HasVectorAndSlot()) {
+
+ ConvertReceiverMode mode = hinstr->convert_mode();
+ if (hinstr->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
DCHECK(slot_register.is(rdx));
DCHECK(vector_register.is(rbx));
AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- int index = vector->GetIndex(instr->hydrogen()->slot());
+ Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+ int index = vector->GetIndex(hinstr->slot());
__ Move(vector_register, vector);
__ Move(slot_register, Smi::FromInt(index));
@@ -4107,6 +4044,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -5348,8 +5288,8 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
final_branch_condition = equal;
} else if (String::Equals(type_name, factory->undefined_string())) {
- __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
- __ j(equal, true_label, true_distance);
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
+ __ j(equal, false_label, false_distance);
__ JumpIfSmi(input, false_label, false_distance);
// Check for undetectable objects => true.
__ movp(input, FieldOperand(input, HeapObject::kMapOffset));
@@ -5519,17 +5459,8 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- Condition cc = masm()->CheckSmi(rax);
- DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
-
- STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
- __ CmpObjectType(rax, JS_PROXY_TYPE, rcx);
- DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
-
Label use_cache, call_runtime;
- Register null_value = rdi;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ CheckEnumCache(null_value, &call_runtime);
+ __ CheckEnumCache(&call_runtime);
__ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ jmp(&use_cache, Label::kNear);
@@ -5537,11 +5468,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ Push(rax);
- CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kMetaMapRootIndex);
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ CallRuntime(Runtime::kForInEnumerate, instr);
__ bind(&use_cache);
}
@@ -5653,15 +5580,6 @@ void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
}
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
- Handle<ScopeInfo> scope_info = instr->scope_info();
- __ Push(scope_info);
- __ Push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, instr);
- RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
#undef __
} // namespace internal
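
The EmitTypeofIs hunk flips the "undefined" test from comparing against the undefined value to checking the undetectable map bit with null excluded first. This only works if, as in V8 of this era, both the undefined and null oddball maps carry the undetectable bit (so document.all-style objects compare like undefined), leaving null to be filtered out so it still answers "object". The DoForInPrepareMap hunk likewise moves the Smi, proxy, and meta-map guards behind Runtime::kForInEnumerate. The typeof test, restated as a sketch:

    // Restating the new typeof-"undefined" test (sketch, not V8's macro
    // assembler; the Value fields are illustrative).
    struct Value {
      bool is_null;
      bool is_smi;
      bool map_is_undetectable;  // set on undefined and document.all-style
    };
    bool TypeofIsUndefined(const Value& v) {
      if (v.is_null) return false;  // null is undetectable too, but "object"
      if (v.is_smi) return false;
      return v.map_is_undetectable;
    }
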
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
index 6fb918bf84..873a3dd1ac 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.h
@@ -45,10 +45,8 @@ class LCodeGen: public LCodeGenBase {
}
bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub() ||
- info()->requires_frame();
+ return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+ !info()->IsStub() || info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -128,7 +126,13 @@ class LCodeGen: public LCodeGenBase {
Register temporary,
Register scratch);
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ bool HasAllocatedStackSlots() const {
+ return chunk()->HasAllocatedStackSlots();
+ }
+ int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+ int GetTotalFrameSlotCount() const {
+ return chunk()->GetTotalFrameSlotCount();
+ }
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
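
NeedsEagerFrame is the one consumer of the new HasAllocatedStackSlots(); the reflowed predicate, restated:

    // Sketch: only a stub with no spill slots, no non-deferred calls, and no
    // explicit frame requirement may run frameless.
    bool NeedsEagerFrame(bool has_spill_slots, bool non_deferred_calling,
                         bool is_stub, bool requires_frame) {
      return has_spill_slots || non_deferred_calling || !is_stub ||
             requires_frame;
    }
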
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.cc b/deps/v8/src/crankshaft/x64/lithium-x64.cc
index 3c932a24ab..6be40931de 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.cc
@@ -334,15 +334,15 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
if (kind == DOUBLE_REGISTERS && kDoubleSize == 2 * kPointerSize) {
// Skip a slot if for a double-width slot for x32 port.
- spill_slot_count_++;
+ current_frame_slots_++;
// The spill slot's address is at rbp - (index + 1) * kPointerSize -
// StandardFrameConstants::kFixedFrameSizeFromFp. kFixedFrameSizeFromFp is
// 2 * kPointerSize, if rbp is aligned at 8-byte boundary, the below "|= 1"
// will make sure the spilled doubles are aligned at 8-byte boundary.
// TODO(haitao): make sure rbp is aligned at 8-byte boundary for x32 port.
- spill_slot_count_ |= 1;
+ current_frame_slots_ |= 1;
}
- return spill_slot_count_++;
+ return current_frame_slots_++;
}
@@ -1511,10 +1511,18 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstant(instr->BetterRightOperand());
+ HValue* h_right = instr->BetterRightOperand();
+ LOperand* right = UseOrConstant(h_right);
LMulI* mul = new(zone()) LMulI(left, right);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ int constant_value =
+ h_right->IsConstant() ? HConstant::cast(h_right)->Integer32Value() : 0;
+ // |needs_environment| must mirror the cases where LCodeGen::DoMulI calls
+ // |DeoptimizeIf|.
+ bool needs_environment =
+ instr->CheckFlag(HValue::kCanOverflow) ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+ (!right->IsConstantOperand() || constant_value <= 0));
+ if (needs_environment) {
AssignEnvironment(mul);
}
return DefineSameAsFirst(mul);
@@ -1694,13 +1702,6 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
}
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
- HCompareMinusZeroAndBranch* instr) {
- LOperand* value = UseRegister(instr->value());
- return new(zone()) LCompareMinusZeroAndBranch(value);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -1773,12 +1774,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2485,8 +2480,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor =
- info()->code_stub()->GetCallInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor = graph()->descriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
@@ -2507,18 +2501,12 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
Retry(kTooManySpillSlotsNeededForOSR);
spill_index = 0;
}
+ spill_index += StandardFrameConstants::kFixedSlotCount;
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LCallStub* result = new(zone()) LCallStub(context);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
// There are no real uses of the arguments object.
// arguments.length and element access are supported directly on
@@ -2669,16 +2657,6 @@ LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
}
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
- HAllocateBlockContext* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LOperand* function = UseRegisterAtStart(instr->function());
- LAllocateBlockContext* result =
- new(zone()) LAllocateBlockContext(context, function);
- return MarkAsCall(DefineFixed(result, rsi), instr);
-}
-
-
} // namespace internal
} // namespace v8
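
The DoMul change narrows when a deopt environment is attached: per the new comment, needs_environment mirrors exactly the DeoptimizeIf sites in DoMulI. Overflow always needs one; the minus-zero bailout is unreachable when the right operand is a constant greater than zero, since the product is then zero only for a non-negative left operand. A sketch of the predicate:

    // Sketch of the predicate from DoMul; it mirrors the DeoptimizeIf sites
    // in LCodeGen::DoMulI. A strictly positive constant multiplier can never
    // yield -0, so the minus-zero environment is dropped for that case.
    bool MulNeedsEnvironment(bool can_overflow, bool bailout_on_minus_zero,
                             bool right_is_constant, int right_constant) {
      return can_overflow ||
             (bailout_on_minus_zero &&
              (!right_is_constant || right_constant <= 0));
    }
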
diff --git a/deps/v8/src/crankshaft/x64/lithium-x64.h b/deps/v8/src/crankshaft/x64/lithium-x64.h
index ebe1ef9e5d..406159b1ff 100644
--- a/deps/v8/src/crankshaft/x64/lithium-x64.h
+++ b/deps/v8/src/crankshaft/x64/lithium-x64.h
@@ -21,7 +21,6 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(Allocate) \
- V(AllocateBlockContext) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -35,7 +34,6 @@ class LCodeGen;
V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
- V(CallStub) \
V(CheckArrayBufferNotNeutered) \
V(CheckInstanceType) \
V(CheckMaps) \
@@ -47,7 +45,6 @@ class LCodeGen;
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTestAndBranch) \
- V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -101,7 +98,6 @@ class LCodeGen;
V(LoadKeyedGeneric) \
V(LoadNamedField) \
V(LoadNamedGeneric) \
- V(MapEnumLength) \
V(MathAbs) \
V(MathClz32) \
V(MathExp) \
@@ -464,19 +460,6 @@ class LParameter final : public LTemplateInstruction<1, 0, 0> {
};
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallStub(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -968,20 +951,6 @@ class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
};
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LCompareMinusZeroAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
- "cmp-minus-zero-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1136,8 +1105,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
- Strength strength() { return hydrogen()->strength(); }
-
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1330,18 +1297,6 @@ class LCmpMapAndBranch final : public LControlInstruction<1, 0> {
};
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1472,8 +1427,6 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- Strength strength() { return hydrogen()->strength(); }
-
private:
Token::Value op_;
};
@@ -2587,23 +2540,6 @@ class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
- public:
- LAllocateBlockContext(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
- DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
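
The same bookkeeping repeats across all five backends: each Lithium opcode lives in the V(...) X-macro list, so retiring an instruction (CallStub, MapEnumLength, CompareMinusZeroAndBranch, AllocateBlockContext) means removing its V(Name) entry, its L-class, and the DoName methods in both the chunk builder and the code generator, as this patch does file by file. A minimal analogue of the X-macro expansion (list and macro names illustrative, not the exact V8 ones):

    #define INSTRUCTION_LIST(V) \
      V(AddI)                   \
      V(Allocate)               \
      V(ArgumentsLength)

    #define DECLARE_OPCODE(Name) k##Name,
    enum Opcode { INSTRUCTION_LIST(DECLARE_OPCODE) kNumberOfOpcodes };
    #undef DECLARE_OPCODE
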
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
index fe2baa5bb8..a8f22be732 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -20,7 +20,6 @@
namespace v8 {
namespace internal {
-
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator final : public CallWrapper {
@@ -75,7 +74,7 @@ bool LCodeGen::GenerateCode() {
void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
- code->set_stack_slots(GetStackSlotCount());
+ code->set_stack_slots(GetTotalFrameSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
if (info()->ShouldEnsureSpaceForLazyDeopt()) {
@@ -100,13 +99,6 @@ bool LCodeGen::GeneratePrologue() {
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
// Move state of dynamic frame alignment into edx.
__ Move(edx, Immediate(kNoAlignmentPadding));
@@ -493,7 +485,7 @@ bool LCodeGen::GenerateSafepointTable() {
masm()->nop();
}
}
- safepoints_.Emit(masm(), GetStackSlotCount());
+ safepoints_.Emit(masm(), GetTotalFrameSlotCount());
return !is_aborted();
}
@@ -846,7 +838,7 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- return Operand(ebp, StackSlotOffset(op->index()));
+ return Operand(ebp, FrameSlotToFPOffset(op->index()));
} else {
// Retrieve parameter without an eager stack frame, relative to the
// stack pointer.
@@ -858,7 +850,7 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
Operand LCodeGen::HighOperand(LOperand* op) {
DCHECK(op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+ return Operand(ebp, FrameSlotToFPOffset(op->index()) + kPointerSize);
} else {
// Retrieve parameter without an eager stack frame, relative to the
// stack pointer.
@@ -931,9 +923,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
if (op->IsStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
if (is_tagged) {
translation->StoreStackSlot(index);
} else if (is_uint32) {
@@ -943,9 +932,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
int index = op->index();
- if (index >= 0) {
- index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
- }
translation->StoreDoubleStackSlot(index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
@@ -1284,26 +1270,6 @@ void LCodeGen::DoParameter(LParameter* instr) {
}
-void LCodeGen::DoCallStub(LCallStub* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- DCHECK(ToRegister(instr->result()).is(eax));
- switch (instr->hydrogen()->major_key()) {
- case CodeStub::RegExpExec: {
- RegExpExecStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- case CodeStub::SubString: {
- SubStringStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
GenerateOsrPrologue();
}
@@ -1945,13 +1911,6 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
}
-void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
- Register result = ToRegister(instr->result());
- Register map = ToRegister(instr->value());
- __ EnumLength(result, map);
-}
-
-
Operand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
@@ -2121,7 +2080,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ fstp_s(MemOperand(esp, 0));
__ fstp_s(MemOperand(esp, kPointerSize));
__ pop(scratch_reg);
- __ xor_(MemOperand(esp, 0), scratch_reg);
+ __ or_(MemOperand(esp, 0), scratch_reg);
X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand);
__ pop(scratch_reg); // restore esp
} else {
@@ -2200,8 +2159,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->right()).is(eax));
DCHECK(ToRegister(instr->result()).is(eax));
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
@@ -2443,8 +2401,9 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
- int next_block = EvalComparison(instr->op(), left_val, right_val) ?
- instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+ int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
+ ? instr->TrueDestination(chunk_)
+ : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
@@ -2518,29 +2477,6 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
}
-void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
- Representation rep = instr->hydrogen()->value()->representation();
- DCHECK(!rep.IsInteger32());
-
- if (rep.IsDouble()) {
- X87Register input = ToX87Register(instr->value());
- X87LoadForUsage(input);
- __ FXamMinusZero();
- EmitBranch(instr, equal);
- } else {
- Register value = ToRegister(instr->value());
- Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
- __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
- __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
- Immediate(0x1));
- EmitFalseBranch(instr, no_overflow);
- __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
- Immediate(0x00000000));
- EmitBranch(instr, equal);
- }
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -2799,8 +2735,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2924,9 +2859,9 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
__ mov(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
- SLOPPY, PREMONOMORPHIC).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), instr->typeof_mode(), PREMONOMORPHIC)
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3035,10 +2970,10 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
__ mov(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(
- isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_INSIDE_TYPEOF,
+ instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3150,6 +3085,9 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3268,8 +3206,8 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
}
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
- isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->initialization_state()).code();
+ isolate(), instr->hydrogen()->initialization_state())
+ .code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4010,21 +3948,22 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
void LCodeGen::DoCallFunction(LCallFunction* instr) {
+ HCallFunction* hinstr = instr->hydrogen();
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->function()).is(edi));
DCHECK(ToRegister(instr->result()).is(eax));
int arity = instr->arity();
- ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
- if (instr->hydrogen()->HasVectorAndSlot()) {
+ ConvertReceiverMode mode = hinstr->convert_mode();
+ if (hinstr->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
DCHECK(slot_register.is(edx));
DCHECK(vector_register.is(ebx));
AllowDeferredHandleDereference vector_structure_check;
- Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
- int index = vector->GetIndex(instr->hydrogen()->slot());
+ Handle<TypeFeedbackVector> vector = hinstr->feedback_vector();
+ int index = vector->GetIndex(hinstr->slot());
__ mov(vector_register, vector);
__ mov(slot_register, Immediate(Smi::FromInt(index)));
@@ -4320,6 +4259,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
UNREACHABLE();
break;
}
@@ -5713,8 +5655,8 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
final_branch_condition = equal;
} else if (String::Equals(type_name, factory()->undefined_string())) {
- __ cmp(input, factory()->undefined_value());
- __ j(equal, true_label, true_distance);
+ __ cmp(input, factory()->null_value());
+ __ j(equal, false_label, false_distance);
__ JumpIfSmi(input, false_label, false_distance);
// Check for undetectable objects => true.
__ mov(input, FieldOperand(input, HeapObject::kMapOffset));
@@ -5888,12 +5830,6 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- __ test(eax, Immediate(kSmiTagMask));
- DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
-
- STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
- __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
- DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
__ CheckEnumCache(&call_runtime);
@@ -5904,11 +5840,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(eax);
- CallRuntime(Runtime::kGetPropertyNamesFast, instr);
-
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- isolate()->factory()->meta_map());
- DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+ CallRuntime(Runtime::kForInEnumerate, instr);
__ bind(&use_cache);
}
@@ -6021,15 +5953,6 @@ void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
}
-void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
- Handle<ScopeInfo> scope_info = instr->scope_info();
- __ Push(scope_info);
- __ push(ToRegister(instr->function()));
- CallRuntime(Runtime::kPushBlockContext, instr);
- RecordSafepoint(Safepoint::kNoLazyDeopt);
-}
-
-
#undef __
} // namespace internal
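
The hunks above replace StackSlotOffset with FrameSlotToFPOffset and drop the kFixedFrameSize bias from AddToTranslation, so a spill index now encodes its full frame position on its own. A standalone sketch of that style of slot-to-ebp-offset mapping, assuming 32-bit pointers and a four-slot fixed frame; the constant values and helper name are illustrative, not the V8 definitions:

#include <cassert>
#include <cstdio>

constexpr int kPointerSize = 4;     // ia32/x87 word size
constexpr int kFixedSlotCount = 4;  // assumed fixed-frame slots

// Hypothetical mapping: indices below kFixedSlotCount address the fixed
// frame; larger indices address spill slots growing downwards from ebp.
int SlotToFPOffset(int slot_index) {
  assert(slot_index >= 0);
  return (kFixedSlotCount - 1 - slot_index) * kPointerSize;
}

int main() {
  std::printf("slot 4 -> ebp%+d\n", SlotToFPOffset(4));  // first spill slot
}
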
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
index 6346344883..0cfbf70388 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.h
@@ -50,10 +50,8 @@ class LCodeGen: public LCodeGenBase {
}
bool NeedsEagerFrame() const {
- return GetStackSlotCount() > 0 ||
- info()->is_non_deferred_calling() ||
- !info()->IsStub() ||
- info()->requires_frame();
+ return HasAllocatedStackSlots() || info()->is_non_deferred_calling() ||
+ !info()->IsStub() || info()->requires_frame();
}
bool NeedsDeferredFrame() const {
return !NeedsEagerFrame() && info()->is_deferred_calling();
@@ -165,7 +163,13 @@ class LCodeGen: public LCodeGenBase {
Register temporary,
Register temporary2);
- int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+ bool HasAllocatedStackSlots() const {
+ return chunk()->HasAllocatedStackSlots();
+ }
+ int GetStackSlotCount() const { return chunk()->GetSpillSlotCount(); }
+ int GetTotalFrameSlotCount() const {
+ return chunk()->GetTotalFrameSlotCount();
+ }
void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.cc b/deps/v8/src/crankshaft/x87/lithium-x87.cc
index b422e1235b..f770509076 100644
--- a/deps/v8/src/crankshaft/x87/lithium-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.cc
@@ -355,11 +355,11 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
// Skip a slot when allocating a double-width slot.
if (kind == DOUBLE_REGISTERS) {
- spill_slot_count_++;
- spill_slot_count_ |= 1;
+ current_frame_slots_++;
+ current_frame_slots_ |= 1;
num_double_slots_++;
}
- return spill_slot_count_++;
+ return current_frame_slots_++;
}
@@ -448,7 +448,7 @@ LPlatformChunk* LChunkBuilder::Build() {
// Reserve the first spill slot for the state of dynamic alignment.
if (info()->IsOptimizing()) {
int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
- DCHECK_EQ(alignment_state_index, 0);
+ DCHECK_EQ(alignment_state_index, 4);
USE(alignment_state_index);
}
@@ -1534,14 +1534,22 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
- LOperand* right = UseOrConstant(instr->BetterRightOperand());
+ HValue* h_right = instr->BetterRightOperand();
+ LOperand* right = UseOrConstant(h_right);
LOperand* temp = NULL;
if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
temp = TempRegister();
}
LMulI* mul = new(zone()) LMulI(left, right, temp);
- if (instr->CheckFlag(HValue::kCanOverflow) ||
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ int constant_value =
+ h_right->IsConstant() ? HConstant::cast(h_right)->Integer32Value() : 0;
+ // |needs_environment| must mirror the cases where LCodeGen::DoMulI calls
+ // |DeoptimizeIf|.
+ bool needs_environment =
+ instr->CheckFlag(HValue::kCanOverflow) ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+ (!right->IsConstantOperand() || constant_value <= 0));
+ if (needs_environment) {
AssignEnvironment(mul);
}
return DefineSameAsFirst(mul);
@@ -1707,13 +1715,6 @@ LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
}
-LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
- HCompareMinusZeroAndBranch* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
- return new (zone()) LCompareMinusZeroAndBranch(value);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
@@ -1785,12 +1786,6 @@ LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
}
-LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
- LOperand* map = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LMapEnumLength(map));
-}
-
-
LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
LOperand* string = UseRegisterAtStart(instr->string());
LOperand* index = UseRegisterOrConstantAtStart(instr->index());
@@ -2496,8 +2491,7 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index);
} else {
DCHECK(info()->IsStub());
- CallInterfaceDescriptor descriptor =
- info()->code_stub()->GetCallInterfaceDescriptor();
+ CallInterfaceDescriptor descriptor = graph()->descriptor();
int index = static_cast<int>(instr->index());
Register reg = descriptor.GetRegisterParameter(index);
return DefineFixed(result, reg);
@@ -2523,18 +2517,12 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
// The first local is saved at the end of the unoptimized frame.
spill_index = graph()->osr()->UnoptimizedFrameSlots();
}
+ spill_index += StandardFrameConstants::kFixedSlotCount;
}
return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
}
-LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LCallStub* result = new(zone()) LCallStub(context);
- return MarkAsCall(DefineFixed(result, eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
// There are no real uses of the arguments object.
// arguments.length and element access are supported directly on
@@ -2684,16 +2672,6 @@ LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
}
-LInstruction* LChunkBuilder::DoAllocateBlockContext(
- HAllocateBlockContext* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* function = UseRegisterAtStart(instr->function());
- LAllocateBlockContext* result =
- new(zone()) LAllocateBlockContext(context, function);
- return MarkAsCall(DefineFixed(result, esi), instr);
-}
-
-
} // namespace internal
} // namespace v8
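
DoMul above now assigns an environment only when LCodeGen::DoMulI can actually call DeoptimizeIf: on possible overflow, or on a minus-zero check whose right operand is not a known positive constant. The reasoning in miniature: an integer product of zero with a positive constant factor forces the other factor to be zero, and 0 * c for c > 0 is +0, so no bailout can fire. A self-checking sketch with illustrative names:

#include <cassert>

// Mirrors the needs_environment condition in the hunk above (sketch only).
bool NeedsMinusZeroDeopt(bool bailout_on_minus_zero, bool right_is_constant,
                         int constant_value) {
  return bailout_on_minus_zero &&
         (!right_is_constant || constant_value <= 0);
}

int main() {
  assert(!NeedsMinusZeroDeopt(true, true, 3));  // x * 3 == 0 implies +0
  assert(NeedsMinusZeroDeopt(true, true, -2));  // 0 * -2 is -0: keep check
  assert(NeedsMinusZeroDeopt(true, false, 0));  // unknown right operand
}
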
diff --git a/deps/v8/src/crankshaft/x87/lithium-x87.h b/deps/v8/src/crankshaft/x87/lithium-x87.h
index e033902617..0f2813f85a 100644
--- a/deps/v8/src/crankshaft/x87/lithium-x87.h
+++ b/deps/v8/src/crankshaft/x87/lithium-x87.h
@@ -24,7 +24,6 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
- V(AllocateBlockContext) \
V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -39,7 +38,6 @@ class LCodeGen;
V(CallFunction) \
V(CallNewArray) \
V(CallRuntime) \
- V(CallStub) \
V(CheckArrayBufferNotNeutered) \
V(CheckInstanceType) \
V(CheckMaps) \
@@ -52,7 +50,6 @@ class LCodeGen;
V(ClampTToUint8NoSSE2) \
V(ClassOfTestAndBranch) \
V(ClobberDoubles) \
- V(CompareMinusZeroAndBranch) \
V(CompareNumericAndBranch) \
V(CmpObjectEqAndBranch) \
V(CmpHoleAndBranch) \
@@ -106,7 +103,6 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedGeneric) \
V(LoadRoot) \
- V(MapEnumLength) \
V(MathAbs) \
V(MathClz32) \
V(MathExp) \
@@ -471,19 +467,6 @@ class LParameter final : public LTemplateInstruction<1, 0, 0> {
};
-class LCallStub final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LCallStub(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
- DECLARE_HYDROGEN_ACCESSOR(CallStub)
-};
-
-
class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
public:
bool HasInterestingComment(LCodeGen* gen) const override { return false; }
@@ -991,18 +974,6 @@ class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
};
-class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LCompareMinusZeroAndBranch(LOperand* value) { inputs_[0] = value; }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
- "cmp-minus-zero-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1152,8 +1123,6 @@ class LCmpT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
- Strength strength() { return hydrogen()->strength(); }
-
LOperand* context() { return inputs_[0]; }
Token::Value op() const { return hydrogen()->token(); }
};
@@ -1344,18 +1313,6 @@ class LCmpMapAndBranch final : public LControlInstruction<1, 0> {
};
-class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LMapEnumLength(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
-};
-
-
class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
public:
LSeqStringGetChar(LOperand* string, LOperand* index) {
@@ -1489,8 +1446,6 @@ class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
- Strength strength() { return hydrogen()->strength(); }
-
private:
Token::Value op_;
};
@@ -2617,23 +2572,6 @@ class LStoreFrameContext: public LTemplateInstruction<0, 1, 0> {
};
-class LAllocateBlockContext: public LTemplateInstruction<1, 2, 0> {
- public:
- LAllocateBlockContext(LOperand* context, LOperand* function) {
- inputs_[0] = context;
- inputs_[1] = function;
- }
-
- LOperand* context() { return inputs_[0]; }
- LOperand* function() { return inputs_[1]; }
-
- Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
-
- DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
- DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
-};
-
-
class LChunkBuilder;
class LPlatformChunk final : public LChunk {
public:
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index bd98b93dae..7c9a24f520 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -31,6 +31,7 @@
#endif
#include "src/d8.h"
+#include "src/ostreams.h"
#include "include/libplatform/libplatform.h"
#ifndef V8_SHARED
@@ -371,6 +372,7 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
bool report_exceptions, SourceType source_type) {
HandleScope handle_scope(isolate);
TryCatch try_catch(isolate);
+ try_catch.SetVerbose(true);
MaybeLocal<Value> maybe_result;
{
@@ -1243,6 +1245,10 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
return global_template;
}
+static void EmptyMessageCallback(Local<Message> message, Local<Value> error) {
+ // Nothing to be done here; exceptions thrown up to the shell are reported
+ // separately by {Shell::ReportException} after they are caught.
+}
void Shell::Initialize(Isolate* isolate) {
#ifndef V8_SHARED
@@ -1250,6 +1256,8 @@ void Shell::Initialize(Isolate* isolate) {
if (i::StrLength(i::FLAG_map_counters) != 0)
MapCounters(isolate, i::FLAG_map_counters);
#endif // !V8_SHARED
+ // Disable default message reporting.
+ isolate->AddMessageListener(EmptyMessageCallback);
}
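
The d8 change pairs a verbose TryCatch with a no-op message listener: installing any listener disables V8's built-in stderr reporting, so each uncaught error is reported exactly once by the shell. A minimal embedder fragment showing the same API pattern, assuming the public V8 5.0 headers; a sketch, not the d8 code itself:

#include "include/v8.h"

// A no-op listener: its presence alone suppresses default reporting.
static void IgnoreMessage(v8::Local<v8::Message>, v8::Local<v8::Value>) {}

void ConfigureQuietIsolate(v8::Isolate* isolate) {
  isolate->AddMessageListener(IgnoreMessage);
}

bool RunScript(v8::Isolate* isolate, v8::Local<v8::Context> context,
               v8::Local<v8::Script> script) {
  v8::TryCatch try_catch(isolate);
  try_catch.SetVerbose(true);  // uncaught errors reach the listener
  return !script->Run(context).IsEmpty();  // false: exception was caught
}
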
diff --git a/deps/v8/src/debug/arm/debug-arm.cc b/deps/v8/src/debug/arm/debug-arm.cc
index 2d4cbf13d7..5fdda4fedc 100644
--- a/deps/v8/src/debug/arm/debug-arm.cc
+++ b/deps/v8/src/debug/arm/debug-arm.cc
@@ -62,6 +62,10 @@ void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
patcher.masm()->blx(ip);
}
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+ Instr current_instr = Assembler::instr_at(pc);
+ return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode) {
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index c2b60a9326..3e4b67c938 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -74,6 +74,10 @@ void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
patcher.blr(ip0);
}
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+ Instruction* current_instr = reinterpret_cast<Instruction*>(pc);
+ return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
+}
void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode) {
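
On both ARM ports the new DebugBreakSlotIsPatched predicate is simply the inverse of a nop test: a slot is patched exactly when it no longer holds the DEBUG_BREAK_NOP filler. The shape of that check, detached from any real instruction encoding; the filler value here is an assumption:

#include <cstdint>

constexpr std::uint32_t kDebugBreakNopWord = 0xE320F000;  // assumed filler

// A debug-break slot starts life as a recognizable nop; once the debugger
// patches in a call sequence, the word differs, which is the whole test.
bool DebugBreakSlotIsPatchedSketch(std::uint32_t slot_word) {
  return slot_word != kDebugBreakNopWord;
}
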
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index e19b93eebe..1134c9dd68 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -111,7 +111,7 @@ MaybeHandle<Object> DebugEvaluate::Evaluate(
// Skip the global proxy as it has no properties and always delegates to the
// real global object.
if (result->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate, result);
+ PrototypeIterator iter(isolate, Handle<JSGlobalProxy>::cast(result));
// TODO(verwaest): This will crash when the global proxy is detached.
result = PrototypeIterator::GetCurrent<JSObject>(iter);
}
@@ -128,7 +128,7 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
inlined_jsframe_index_(inlined_jsframe_index) {
FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
Handle<JSFunction> local_function =
- handle(JSFunction::cast(frame_inspector.GetFunction()));
+ Handle<JSFunction>::cast(frame_inspector.GetFunction());
Handle<Context> outer_context(local_function->context());
native_context_ = Handle<Context>(outer_context->native_context());
Handle<JSFunction> global_function(native_context_->closure());
@@ -302,8 +302,7 @@ void DebugEvaluate::ContextBuilder::MaterializeArgumentsObject(
if (maybe.FromJust()) return;
// FunctionGetArguments can't throw an exception.
- Handle<JSObject> arguments =
- Handle<JSObject>::cast(Accessors::FunctionGetArguments(function));
+ Handle<JSObject> arguments = Accessors::FunctionGetArguments(function);
Handle<String> arguments_str = isolate_->factory()->arguments_string();
JSObject::SetOwnPropertyIgnoreAttributes(target, arguments_str, arguments,
NONE)
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index 012d291622..25634be8d2 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -15,6 +15,7 @@ FrameInspector::FrameInspector(JavaScriptFrame* frame,
has_adapted_arguments_ = frame_->has_adapted_arguments();
is_bottommost_ = inlined_jsframe_index == 0;
is_optimized_ = frame_->is_optimized();
+ is_interpreted_ = frame_->is_interpreted();
// Calculate the deoptimized frame.
if (frame->is_optimized()) {
// TODO(turbofan): Revisit once we support deoptimization.
@@ -44,33 +45,41 @@ int FrameInspector::GetParametersCount() {
: frame_->ComputeParametersCount();
}
-
-Object* FrameInspector::GetFunction() {
- return is_optimized_ ? deoptimized_frame_->GetFunction() : frame_->function();
+Handle<Object> FrameInspector::GetFunction() {
+ return is_optimized_ ? deoptimized_frame_->GetFunction()
+ : handle(frame_->function(), isolate_);
}
-
-Object* FrameInspector::GetParameter(int index) {
+Handle<Object> FrameInspector::GetParameter(int index) {
return is_optimized_ ? deoptimized_frame_->GetParameter(index)
- : frame_->GetParameter(index);
+ : handle(frame_->GetParameter(index), isolate_);
}
-
-Object* FrameInspector::GetExpression(int index) {
+Handle<Object> FrameInspector::GetExpression(int index) {
// TODO(turbofan): Revisit once we support deoptimization.
if (frame_->LookupCode()->is_turbofanned() &&
frame_->function()->shared()->asm_function() &&
!FLAG_turbo_asm_deoptimization) {
- return isolate_->heap()->undefined_value();
+ return isolate_->factory()->undefined_value();
}
return is_optimized_ ? deoptimized_frame_->GetExpression(index)
- : frame_->GetExpression(index);
+ : handle(frame_->GetExpression(index), isolate_);
}
int FrameInspector::GetSourcePosition() {
- return is_optimized_ ? deoptimized_frame_->GetSourcePosition()
- : frame_->LookupCode()->SourcePosition(frame_->pc());
+ if (is_optimized_) {
+ return deoptimized_frame_->GetSourcePosition();
+ } else if (is_interpreted_) {
+ InterpretedFrame* frame = reinterpret_cast<InterpretedFrame*>(frame_);
+ BytecodeArray* bytecode_array =
+ frame->function()->shared()->bytecode_array();
+ return bytecode_array->SourcePosition(frame->GetBytecodeOffset());
+ } else {
+ Code* code = frame_->LookupCode();
+ int offset = static_cast<int>(frame_->pc() - code->instruction_start());
+ return code->SourcePosition(offset);
+ }
}
@@ -80,9 +89,9 @@ bool FrameInspector::IsConstructor() {
: frame_->IsConstructor();
}
-
-Object* FrameInspector::GetContext() {
- return is_optimized_ ? deoptimized_frame_->GetContext() : frame_->context();
+Handle<Object> FrameInspector::GetContext() {
+ return is_optimized_ ? deoptimized_frame_->GetContext()
+ : handle(frame_->context(), isolate_);
}
@@ -92,6 +101,7 @@ void FrameInspector::SetArgumentsFrame(JavaScriptFrame* frame) {
DCHECK(has_adapted_arguments_);
frame_ = frame;
is_optimized_ = frame_->is_optimized();
+ is_interpreted_ = frame_->is_interpreted();
DCHECK(!is_optimized_);
}
@@ -109,10 +119,10 @@ void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
Handle<String> name(scope_info->ParameterName(i));
if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
- Handle<Object> value(i < GetParametersCount()
- ? GetParameter(i)
- : isolate_->heap()->undefined_value(),
- isolate_);
+ Handle<Object> value =
+ i < GetParametersCount()
+ ? GetParameter(i)
+ : Handle<Object>::cast(isolate_->factory()->undefined_value());
DCHECK(!value->IsTheHole());
JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
@@ -122,8 +132,7 @@ void FrameInspector::MaterializeStackLocals(Handle<JSObject> target,
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
if (scope_info->LocalIsSynthetic(i)) continue;
Handle<String> name(scope_info->StackLocalName(i));
- Handle<Object> value(GetExpression(scope_info->StackLocalIndex(i)),
- isolate_);
+ Handle<Object> value = GetExpression(scope_info->StackLocalIndex(i));
if (value->IsTheHole()) value = isolate_->factory()->undefined_value();
JSObject::SetOwnPropertyIgnoreAttributes(target, name, value, NONE).Check();
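
The FrameInspector accessors above now return Handle<Object> instead of raw Object*, because any allocation between fetching a raw pointer and using it can move the object during GC, while a handle is relocated along with it. The pattern in isolation, reusing the internal names visible in the diff; a sketch that assumes the V8-internal headers, not the actual V8 code:

// Assumes V8-internal types (Handle, Isolate, JavaScriptFrame); sketch only.
// Wrapping the raw pointer at the accessor boundary lets the caller
// allocate (and so trigger GC) before consuming the value.
Handle<Object> GetParameterSafe(JavaScriptFrame* frame, int index,
                                Isolate* isolate) {
  return handle(frame->GetParameter(index), isolate);
}
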
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index c0d20bbd1d..c04fd2b6bf 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -21,12 +21,12 @@ class FrameInspector {
~FrameInspector();
int GetParametersCount();
- Object* GetFunction();
- Object* GetParameter(int index);
- Object* GetExpression(int index);
+ Handle<Object> GetFunction();
+ Handle<Object> GetParameter(int index);
+ Handle<Object> GetExpression(int index);
int GetSourcePosition();
bool IsConstructor();
- Object* GetContext();
+ Handle<Object> GetContext();
JavaScriptFrame* GetArgumentsFrame() { return frame_; }
void SetArgumentsFrame(JavaScriptFrame* frame);
@@ -48,6 +48,7 @@ class FrameInspector {
DeoptimizedFrameInfo* deoptimized_frame_;
Isolate* isolate_;
bool is_optimized_;
+ bool is_interpreted_;
bool is_bottommost_;
bool has_adapted_arguments_;
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index 15a0594009..e785384a42 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -28,7 +28,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
return;
}
- context_ = Handle<Context>(Context::cast(frame_inspector->GetContext()));
+ context_ = Handle<Context>::cast(frame_inspector->GetContext());
// Catch the case when the debugger stops in an internal function.
Handle<JSFunction> function = GetFunction();
@@ -58,12 +58,8 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
// return, which requires a debug info to be available.
Handle<DebugInfo> debug_info(shared_info->GetDebugInfo());
- // PC points to the instruction after the current one, possibly a break
- // location as well. So the "- 1" to exclude it from the search.
- Address call_pc = GetFrame()->pc() - 1;
-
// Find the break point where execution has stopped.
- BreakLocation location = BreakLocation::FromAddress(debug_info, call_pc);
+ BreakLocation location = BreakLocation::FromFrame(debug_info, GetFrame());
ignore_nested_scopes = location.IsReturn();
}
@@ -462,7 +458,8 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeLocalScope() {
isolate_->factory()->NewJSObject(isolate_->object_function());
frame_inspector_->MaterializeStackLocals(local_scope, function);
- Handle<Context> frame_context(Context::cast(frame_inspector_->GetContext()));
+ Handle<Context> frame_context =
+ Handle<Context>::cast(frame_inspector_->GetContext());
HandleScope scope(isolate_);
Handle<SharedFunctionInfo> shared(function->shared());
@@ -471,7 +468,7 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeLocalScope() {
if (!scope_info->HasContext()) return local_scope;
// Third fill all context locals.
- Handle<Context> function_context(frame_context->declaration_context());
+ Handle<Context> function_context(frame_context->closure_context());
CopyContextLocalsToScopeObject(scope_info, function_context, local_scope);
// Finally copy any properties from the function context extension.
@@ -480,8 +477,8 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeLocalScope() {
function_context->has_extension() &&
!function_context->IsNativeContext()) {
bool success = CopyContextExtensionToScopeObject(
- handle(function_context->extension_object(), isolate_),
- local_scope, JSReceiver::INCLUDE_PROTOS);
+ handle(function_context->extension_object(), isolate_), local_scope,
+ INCLUDE_PROTOS);
if (!success) return MaybeHandle<JSObject>();
}
@@ -510,8 +507,7 @@ Handle<JSObject> ScopeIterator::MaterializeClosure() {
// be variables introduced by eval.
if (context->has_extension()) {
bool success = CopyContextExtensionToScopeObject(
- handle(context->extension_object(), isolate_), closure_scope,
- JSReceiver::OWN_ONLY);
+ handle(context->extension_object(), isolate_), closure_scope, OWN_ONLY);
DCHECK(success);
USE(success);
}
@@ -559,8 +555,7 @@ Handle<JSObject> ScopeIterator::MaterializeBlockScope() {
// Fill all extension variables.
if (context->extension_object() != nullptr) {
bool success = CopyContextExtensionToScopeObject(
- handle(context->extension_object()), block_scope,
- JSReceiver::OWN_ONLY);
+ handle(context->extension_object()), block_scope, OWN_ONLY);
DCHECK(success);
USE(success);
}
@@ -798,10 +793,9 @@ void ScopeIterator::CopyContextLocalsToScopeObject(
}
}
-
bool ScopeIterator::CopyContextExtensionToScopeObject(
Handle<JSObject> extension, Handle<JSObject> scope_object,
- JSReceiver::KeyCollectionType type) {
+ KeyCollectionType type) {
Handle<FixedArray> keys;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, keys, JSReceiver::GetKeys(extension, type, ENUMERABLE_STRINGS),
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index d4e335a2a5..fbdf632687 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -96,8 +96,7 @@ class ScopeIterator {
}
inline Handle<JSFunction> GetFunction() {
- return Handle<JSFunction>(
- JSFunction::cast(frame_inspector_->GetFunction()));
+ return Handle<JSFunction>::cast(frame_inspector_->GetFunction());
}
static bool InternalizedStringMatch(void* key1, void* key2) {
@@ -139,7 +138,7 @@ class ScopeIterator {
Handle<JSObject> scope_object);
bool CopyContextExtensionToScopeObject(Handle<JSObject> extension,
Handle<JSObject> scope_object,
- JSReceiver::KeyCollectionType type);
+ KeyCollectionType type);
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
};
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index bd45b71551..93c914c3f8 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -16,6 +16,8 @@
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter.h"
#include "src/isolate-inl.h"
#include "src/list.h"
#include "src/log.h"
@@ -58,29 +60,39 @@ static v8::Local<v8::Context> GetDebugEventContext(Isolate* isolate) {
return v8::Utils::ToLocal(native_context);
}
-
-BreakLocation::BreakLocation(Handle<DebugInfo> debug_info, RelocInfo* rinfo,
- int position, int statement_position)
+BreakLocation::BreakLocation(Handle<DebugInfo> debug_info, DebugBreakType type,
+ int code_offset, int position,
+ int statement_position)
: debug_info_(debug_info),
- pc_offset_(static_cast<int>(rinfo->pc() - debug_info->code()->entry())),
- rmode_(rinfo->rmode()),
- data_(rinfo->data()),
+ code_offset_(code_offset),
+ type_(type),
position_(position),
statement_position_(statement_position) {}
+BreakLocation::Iterator* BreakLocation::GetIterator(
+ Handle<DebugInfo> debug_info, BreakLocatorType type) {
+ if (debug_info->abstract_code()->IsBytecodeArray()) {
+ return new BytecodeArrayIterator(debug_info, type);
+ } else {
+ return new CodeIterator(debug_info, type);
+ }
+}
-BreakLocation::Iterator::Iterator(Handle<DebugInfo> debug_info,
- BreakLocatorType type)
+BreakLocation::Iterator::Iterator(Handle<DebugInfo> debug_info)
: debug_info_(debug_info),
- reloc_iterator_(debug_info->code(), GetModeMask(type)),
break_index_(-1),
position_(1),
- statement_position_(1) {
+ statement_position_(1) {}
+
+BreakLocation::CodeIterator::CodeIterator(Handle<DebugInfo> debug_info,
+ BreakLocatorType type)
+ : Iterator(debug_info),
+ reloc_iterator_(debug_info->abstract_code()->GetCode(),
+ GetModeMask(type)) {
if (!Done()) Next();
}
-
-int BreakLocation::Iterator::GetModeMask(BreakLocatorType type) {
+int BreakLocation::CodeIterator::GetModeMask(BreakLocatorType type) {
int mask = 0;
mask |= RelocInfo::ModeMask(RelocInfo::POSITION);
mask |= RelocInfo::ModeMask(RelocInfo::STATEMENT_POSITION);
@@ -93,13 +105,11 @@ int BreakLocation::Iterator::GetModeMask(BreakLocatorType type) {
return mask;
}
-
-void BreakLocation::Iterator::Next() {
+void BreakLocation::CodeIterator::Next() {
DisallowHeapAllocation no_gc;
DCHECK(!Done());
- // Iterate through reloc info for code and original code stopping at each
- // breakable code target.
+ // Iterate through the reloc info, stopping at each breakable code target.
bool first = break_index_ == -1;
while (!Done()) {
if (!first) reloc_iterator_.next();
@@ -141,43 +151,154 @@ void BreakLocation::Iterator::Next() {
break_index_++;
}
+BreakLocation BreakLocation::CodeIterator::GetBreakLocation() {
+ DebugBreakType type;
+ if (RelocInfo::IsDebugBreakSlotAtReturn(rmode())) {
+ type = DEBUG_BREAK_SLOT_AT_RETURN;
+ } else if (RelocInfo::IsDebugBreakSlotAtCall(rmode())) {
+ type = DEBUG_BREAK_SLOT_AT_CALL;
+ } else if (RelocInfo::IsDebuggerStatement(rmode())) {
+ type = DEBUGGER_STATEMENT;
+ } else if (RelocInfo::IsDebugBreakSlot(rmode())) {
+ type = DEBUG_BREAK_SLOT;
+ } else {
+ type = NOT_DEBUG_BREAK;
+ }
+ return BreakLocation(debug_info_, type, code_offset(), position(),
+ statement_position());
+}
+
+BreakLocation::BytecodeArrayIterator::BytecodeArrayIterator(
+ Handle<DebugInfo> debug_info, BreakLocatorType type)
+ : Iterator(debug_info),
+ source_position_iterator_(
+ debug_info->abstract_code()->GetBytecodeArray()),
+ break_locator_type_(type),
+ start_position_(debug_info->shared()->start_position()) {
+ if (!Done()) Next();
+}
+
+void BreakLocation::BytecodeArrayIterator::Next() {
+ DisallowHeapAllocation no_gc;
+ DCHECK(!Done());
+ bool first = break_index_ == -1;
+ while (!Done()) {
+ if (!first) source_position_iterator_.Advance();
+ first = false;
+ if (Done()) return;
+ position_ = source_position_iterator_.source_position() - start_position_;
+ if (source_position_iterator_.is_statement()) {
+ statement_position_ = position_;
+ }
+ DCHECK(position_ >= 0);
+ DCHECK(statement_position_ >= 0);
+ break_index_++;
+
+ enum DebugBreakType type = GetDebugBreakType();
+ if (type == NOT_DEBUG_BREAK) continue;
+
+ if (break_locator_type_ == ALL_BREAK_LOCATIONS) break;
+
+ DCHECK_EQ(CALLS_AND_RETURNS, break_locator_type_);
+ if (type == DEBUG_BREAK_SLOT_AT_CALL ||
+ type == DEBUG_BREAK_SLOT_AT_RETURN) {
+ break;
+ }
+ }
+}
+
+BreakLocation::DebugBreakType
+BreakLocation::BytecodeArrayIterator::GetDebugBreakType() {
+ BytecodeArray* bytecode_array = debug_info_->original_bytecode_array();
+ interpreter::Bytecode bytecode =
+ interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
+
+ if (bytecode == interpreter::Bytecode::kDebugger) {
+ return DEBUGGER_STATEMENT;
+ } else if (bytecode == interpreter::Bytecode::kReturn) {
+ return DEBUG_BREAK_SLOT_AT_RETURN;
+ } else if (interpreter::Bytecodes::IsCallOrNew(bytecode)) {
+ return DEBUG_BREAK_SLOT_AT_CALL;
+ } else if (source_position_iterator_.is_statement()) {
+ return DEBUG_BREAK_SLOT;
+ } else {
+ return NOT_DEBUG_BREAK;
+ }
+}
+
+BreakLocation BreakLocation::BytecodeArrayIterator::GetBreakLocation() {
+ return BreakLocation(debug_info_, GetDebugBreakType(), code_offset(),
+ position(), statement_position());
+}
// Find the break point at the supplied code offset, or the closest one
// before that offset.
-BreakLocation BreakLocation::FromAddress(Handle<DebugInfo> debug_info,
- Address pc) {
- Iterator it(debug_info, ALL_BREAK_LOCATIONS);
- it.SkipTo(BreakIndexFromAddress(debug_info, pc));
- return it.GetBreakLocation();
+BreakLocation BreakLocation::FromCodeOffset(Handle<DebugInfo> debug_info,
+ int offset) {
+ base::SmartPointer<Iterator> it(GetIterator(debug_info));
+ it->SkipTo(BreakIndexFromCodeOffset(debug_info, offset));
+ return it->GetBreakLocation();
}
+FrameSummary GetFirstFrameSummary(JavaScriptFrame* frame) {
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ frame->Summarize(&frames);
+ return frames.first();
+}
+
+int CallOffsetFromCodeOffset(int code_offset, bool is_interpreted) {
+ // Code offset points to the instruction after the call. Subtract 1 to
+ // exclude that instruction from the search. For bytecode, the code offset
+ // still points to the call.
+ return is_interpreted ? code_offset : code_offset - 1;
+}
+
+BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
+ JavaScriptFrame* frame) {
+ FrameSummary summary = GetFirstFrameSummary(frame);
+ int call_offset =
+ CallOffsetFromCodeOffset(summary.code_offset(), frame->is_interpreted());
+ return FromCodeOffset(debug_info, call_offset);
+}
// Collect all break locations in the same statement as the one closest
// to the supplied code offset.
-void BreakLocation::FromAddressSameStatement(Handle<DebugInfo> debug_info,
- Address pc,
- List<BreakLocation>* result_out) {
- int break_index = BreakIndexFromAddress(debug_info, pc);
- Iterator it(debug_info, ALL_BREAK_LOCATIONS);
- it.SkipTo(break_index);
- int statement_position = it.statement_position();
- while (!it.Done() && it.statement_position() == statement_position) {
- result_out->Add(it.GetBreakLocation());
- it.Next();
+void BreakLocation::FromCodeOffsetSameStatement(
+ Handle<DebugInfo> debug_info, int offset, List<BreakLocation>* result_out) {
+ int break_index = BreakIndexFromCodeOffset(debug_info, offset);
+ base::SmartPointer<Iterator> it(GetIterator(debug_info));
+ it->SkipTo(break_index);
+ int statement_position = it->statement_position();
+ while (!it->Done() && it->statement_position() == statement_position) {
+ result_out->Add(it->GetBreakLocation());
+ it->Next();
+ }
+}
+
+
+void BreakLocation::AllForStatementPosition(Handle<DebugInfo> debug_info,
+ int statement_position,
+ List<BreakLocation>* result_out) {
+ for (base::SmartPointer<Iterator> it(GetIterator(debug_info)); !it->Done();
+ it->Next()) {
+ if (it->statement_position() == statement_position) {
+ result_out->Add(it->GetBreakLocation());
+ }
}
}
-
-int BreakLocation::BreakIndexFromAddress(Handle<DebugInfo> debug_info,
- Address pc) {
+int BreakLocation::BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info,
+ int offset) {
// Run through all break points to locate the one closest to the offset.
int closest_break = 0;
int distance = kMaxInt;
- for (Iterator it(debug_info, ALL_BREAK_LOCATIONS); !it.Done(); it.Next()) {
+ DCHECK(0 <= offset && offset < debug_info->abstract_code()->Size());
+ for (base::SmartPointer<Iterator> it(GetIterator(debug_info)); !it->Done();
+ it->Next()) {
// Check if this break point is closer than what was previously found.
- if (it.pc() <= pc && pc - it.pc() < distance) {
- closest_break = it.break_index();
- distance = static_cast<int>(pc - it.pc());
+ if (it->code_offset() <= offset && offset - it->code_offset() < distance) {
+ closest_break = it->break_index();
+ distance = offset - it->code_offset();
// Stop early if we cannot get any closer.
if (distance == 0) break;
}
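
BreakLocation::GetIterator above hides two walkers behind one interface: a reloc-info iterator for machine code and a source-position iterator for bytecode, chosen by the form the function's code takes. The skeleton of that factory pattern, with empty walkers; names are illustrative:

#include <memory>

struct BreakIteratorSketch {
  virtual ~BreakIteratorSketch() = default;
  virtual bool Done() const = 0;
  virtual void Next() = 0;
};

struct CodeWalker final : BreakIteratorSketch {      // reloc-info driven
  bool Done() const override { return true; }
  void Next() override {}
};

struct BytecodeWalker final : BreakIteratorSketch {  // source-position driven
  bool Done() const override { return true; }
  void Next() override {}
};

std::unique_ptr<BreakIteratorSketch> MakeIterator(bool is_bytecode) {
  if (is_bytecode) return std::make_unique<BytecodeWalker>();
  return std::make_unique<CodeWalker>();
}
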
@@ -191,28 +312,26 @@ BreakLocation BreakLocation::FromPosition(Handle<DebugInfo> debug_info,
BreakPositionAlignment alignment) {
// Run through all break points to locate the one closest to the source
// position.
- int closest_break = 0;
int distance = kMaxInt;
-
- for (Iterator it(debug_info, ALL_BREAK_LOCATIONS); !it.Done(); it.Next()) {
+ base::SmartPointer<Iterator> it(GetIterator(debug_info));
+ BreakLocation closest_break = it->GetBreakLocation();
+ while (!it->Done()) {
int next_position;
if (alignment == STATEMENT_ALIGNED) {
- next_position = it.statement_position();
+ next_position = it->statement_position();
} else {
DCHECK(alignment == BREAK_POSITION_ALIGNED);
- next_position = it.position();
+ next_position = it->position();
}
if (position <= next_position && next_position - position < distance) {
- closest_break = it.break_index();
+ closest_break = it->GetBreakLocation();
distance = next_position - position;
// Stop early if we cannot get any closer.
if (distance == 0) break;
}
+ it->Next();
}
-
- Iterator it(debug_info, ALL_BREAK_LOCATIONS);
- it.SkipTo(closest_break);
- return it.GetBreakLocation();
+ return closest_break;
}
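
FromPosition above is now a single forward scan that keeps the tightest break location at or after the requested source position and exits early on an exact hit. The same search over plain integers; a sketch:

#include <climits>
#include <vector>

// Index of the smallest element >= position, preferring the tightest fit
// and stopping early on an exact match; -1 if none qualifies.
int ClosestAtOrAfter(const std::vector<int>& positions, int position) {
  int best = -1;
  int distance = INT_MAX;
  for (int i = 0; i < static_cast<int>(positions.size()); ++i) {
    if (position <= positions[i] && positions[i] - position < distance) {
      best = i;
      distance = positions[i] - position;
      if (distance == 0) break;  // cannot get any closer
    }
  }
  return best;
}
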
@@ -222,14 +341,14 @@ void BreakLocation::SetBreakPoint(Handle<Object> break_point_object) {
if (!HasBreakPoint()) SetDebugBreak();
DCHECK(IsDebugBreak() || IsDebuggerStatement());
// Set the break point information.
- DebugInfo::SetBreakPoint(debug_info_, pc_offset_, position_,
+ DebugInfo::SetBreakPoint(debug_info_, code_offset_, position_,
statement_position_, break_point_object);
}
void BreakLocation::ClearBreakPoint(Handle<Object> break_point_object) {
// Clear the break point information.
- DebugInfo::ClearBreakPoint(debug_info_, pc_offset_, break_point_object);
+ DebugInfo::ClearBreakPoint(debug_info_, code_offset_, break_point_object);
// If there are no more break points here remove the debug break.
if (!HasBreakPoint()) {
ClearDebugBreak();
@@ -280,11 +399,23 @@ void BreakLocation::SetDebugBreak() {
if (IsDebugBreak()) return;
DCHECK(IsDebugBreakSlot());
- Isolate* isolate = debug_info_->GetIsolate();
- Builtins* builtins = isolate->builtins();
- Handle<Code> target =
- IsReturn() ? builtins->Return_DebugBreak() : builtins->Slot_DebugBreak();
- DebugCodegen::PatchDebugBreakSlot(isolate, pc(), target);
+ if (abstract_code()->IsCode()) {
+ Code* code = abstract_code()->GetCode();
+ DCHECK(code->kind() == Code::FUNCTION);
+ Builtins* builtins = isolate()->builtins();
+ Handle<Code> target = IsReturn() ? builtins->Return_DebugBreak()
+ : builtins->Slot_DebugBreak();
+ Address pc = code->instruction_start() + code_offset();
+ DebugCodegen::PatchDebugBreakSlot(isolate(), pc, target);
+ } else {
+ BytecodeArray* bytecode_array = abstract_code()->GetBytecodeArray();
+ interpreter::Bytecode bytecode =
+ interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
+ interpreter::Bytecode debugbreak =
+ interpreter::Bytecodes::GetDebugBreak(bytecode);
+ bytecode_array->set(code_offset(),
+ interpreter::Bytecodes::ToByte(debugbreak));
+ }
DCHECK(IsDebugBreak());
}
@@ -294,7 +425,16 @@ void BreakLocation::ClearDebugBreak() {
if (IsDebuggerStatement()) return;
DCHECK(IsDebugBreakSlot());
- DebugCodegen::ClearDebugBreakSlot(debug_info_->GetIsolate(), pc());
+ if (abstract_code()->IsCode()) {
+ Code* code = abstract_code()->GetCode();
+ DCHECK(code->kind() == Code::FUNCTION);
+ Address pc = code->instruction_start() + code_offset();
+ DebugCodegen::ClearDebugBreakSlot(isolate(), pc);
+ } else {
+ BytecodeArray* bytecode_array = abstract_code()->GetBytecodeArray();
+ BytecodeArray* original = debug_info_->original_bytecode_array();
+ bytecode_array->set(code_offset(), original->get(code_offset()));
+ }
DCHECK(!IsDebugBreak());
}
@@ -302,15 +442,24 @@ void BreakLocation::ClearDebugBreak() {
bool BreakLocation::IsDebugBreak() const {
if (IsDebuggerStatement()) return false;
DCHECK(IsDebugBreakSlot());
- return rinfo().IsPatchedDebugBreakSlotSequence();
+ if (abstract_code()->IsCode()) {
+ Code* code = abstract_code()->GetCode();
+ DCHECK(code->kind() == Code::FUNCTION);
+ Address pc = code->instruction_start() + code_offset();
+ return DebugCodegen::DebugBreakSlotIsPatched(pc);
+ } else {
+ BytecodeArray* bytecode_array = abstract_code()->GetBytecodeArray();
+ interpreter::Bytecode bytecode =
+ interpreter::Bytecodes::FromByte(bytecode_array->get(code_offset()));
+ return interpreter::Bytecodes::IsDebugBreak(bytecode);
+ }
}
Handle<Object> BreakLocation::BreakPointObjects() const {
- return debug_info_->GetBreakPointObjects(pc_offset_);
+ return debug_info_->GetBreakPointObjects(code_offset_);
}
-
void DebugFeatureTracker::Track(DebugFeatureTracker::Feature feature) {
uint32_t mask = 1 << feature;
// Only count one sample per feature and isolate.
@@ -444,22 +593,16 @@ void Debug::Break(Arguments args, JavaScriptFrame* frame) {
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
// Find the break location where execution has stopped.
- // PC points to the instruction after the current one, possibly a break
- // location as well. So the "- 1" to exclude it from the search.
- Address call_pc = frame->pc() - 1;
- BreakLocation location = BreakLocation::FromAddress(debug_info, call_pc);
+ BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
// Find actual break points, if any, and trigger debug break event.
- if (break_points_active_ && location.HasBreakPoint()) {
- Handle<Object> break_point_objects = location.BreakPointObjects();
- Handle<Object> break_points_hit = CheckBreakPoints(break_point_objects);
- if (!break_points_hit->IsUndefined()) {
- // Clear all current stepping setup.
- ClearStepping();
- // Notify the debug event listeners.
- OnDebugBreak(break_points_hit, false);
- return;
- }
+ Handle<Object> break_points_hit = CheckBreakPoints(&location);
+ if (!break_points_hit->IsUndefined()) {
+ // Clear all current stepping setup.
+ ClearStepping();
+ // Notify the debug event listeners.
+ OnDebugBreak(break_points_hit, false);
+ return;
}
// No break point. Check for stepping.
@@ -480,11 +623,14 @@ void Debug::Break(Arguments args, JavaScriptFrame* frame) {
// Step next should not break in a deeper frame.
if (current_fp < target_fp) return;
// Fall through.
- case StepIn:
+ case StepIn: {
+ FrameSummary summary = GetFirstFrameSummary(frame);
+ int offset = summary.code_offset();
step_break = location.IsReturn() || (current_fp != last_fp) ||
(thread_local_.last_statement_position_ !=
- location.code()->SourceStatementPosition(frame->pc()));
+ location.abstract_code()->SourceStatementPosition(offset));
break;
+ }
case StepFrame:
step_break = current_fp != last_fp;
break;
@@ -503,12 +649,17 @@ void Debug::Break(Arguments args, JavaScriptFrame* frame) {
}
-// Check the break point objects for whether one or more are actually
-// triggered. This function returns a JSArray with the break point objects
-// which is triggered.
-Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
+// Find break point objects for this location, if any, and evaluate them.
+// Return an array of the break point objects that evaluated to true.
+Handle<Object> Debug::CheckBreakPoints(BreakLocation* location,
+ bool* has_break_points) {
Factory* factory = isolate_->factory();
+ bool has_break_points_to_check =
+ break_points_active_ && location->HasBreakPoint();
+ if (has_break_points) *has_break_points = has_break_points_to_check;
+ if (!has_break_points_to_check) return factory->undefined_value();
+ Handle<Object> break_point_objects = location->BreakPointObjects();
// Count the number of break points hit. If there are multiple break points
// they are in a FixedArray.
Handle<FixedArray> break_points_hit;
@@ -518,9 +669,9 @@ Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
break_points_hit = factory->NewFixedArray(array->length());
for (int i = 0; i < array->length(); i++) {
- Handle<Object> o(array->get(i), isolate_);
- if (CheckBreakPoint(o)) {
- break_points_hit->set(break_points_hit_count++, *o);
+ Handle<Object> break_point_object(array->get(i), isolate_);
+ if (CheckBreakPoint(break_point_object)) {
+ break_points_hit->set(break_points_hit_count++, *break_point_object);
}
}
} else {
@@ -529,25 +680,51 @@ Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
break_points_hit->set(break_points_hit_count++, *break_point_objects);
}
}
-
- // Return undefined if no break points were triggered.
- if (break_points_hit_count == 0) {
- return factory->undefined_value();
- }
- // Return break points hit as a JSArray.
+ if (break_points_hit_count == 0) return factory->undefined_value();
Handle<JSArray> result = factory->NewJSArrayWithElements(break_points_hit);
result->set_length(Smi::FromInt(break_points_hit_count));
return result;
}
+bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
+ // A break location is considered muted if break locations on the current
+ // statement have at least one break point, and all of these break points
+ // evaluate to false. Aside from not triggering a debug break event at the
+ // break location, we also do not trigger one for debugger statements, nor
+ // an exception event on exception at this location.
+ Object* fun = frame->function();
+ if (!fun->IsJSFunction()) return false;
+ JSFunction* function = JSFunction::cast(fun);
+ if (!function->shared()->HasDebugInfo()) return false;
+ HandleScope scope(isolate_);
+ Handle<DebugInfo> debug_info(function->shared()->GetDebugInfo());
+ // Enter the debugger.
+ DebugScope debug_scope(this);
+ if (debug_scope.failed()) return false;
+ BreakLocation current_position = BreakLocation::FromFrame(debug_info, frame);
+ List<BreakLocation> break_locations;
+ BreakLocation::AllForStatementPosition(
+ debug_info, current_position.statement_position(), &break_locations);
+ bool has_break_points_at_all = false;
+ for (int i = 0; i < break_locations.length(); i++) {
+ bool has_break_points;
+ Handle<Object> check_result =
+ CheckBreakPoints(&break_locations[i], &has_break_points);
+ has_break_points_at_all |= has_break_points;
+ if (has_break_points && !check_result->IsUndefined()) return false;
+ }
+ return has_break_points_at_all;
+}
+
+
MaybeHandle<Object> Debug::CallFunction(const char* name, int argc,
Handle<Object> args[]) {
PostponeInterruptsScope no_interrupts(isolate_);
AssertDebugContext();
Handle<Object> holder = isolate_->natives_utils_object();
Handle<JSFunction> fun = Handle<JSFunction>::cast(
- Object::GetProperty(isolate_, holder, name, STRICT).ToHandleChecked());
+ Object::GetProperty(isolate_, holder, name).ToHandleChecked());
Handle<Object> undefined = isolate_->factory()->undefined_value();
return Execution::TryCall(isolate_, fun, undefined, argc, args);
}
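
IsMutedAtCurrentLocation above treats a statement as muted when its break locations carry at least one break point and every one of those break points evaluated to false; a muted location then suppresses debug break, debugger-statement, and exception events alike. The rule as a pure predicate; a sketch:

#include <vector>

// One entry per break location on the statement.
struct LocationCheck {
  bool has_break_points;
  bool any_triggered;  // did any break point evaluate to true?
};

bool IsMutedSketch(const std::vector<LocationCheck>& checks) {
  bool has_any = false;
  for (const LocationCheck& c : checks) {
    has_any |= c.has_break_points;
    if (c.has_break_points && c.any_triggered) return false;
  }
  return has_any;  // muted only if break points exist and none fired
}
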
@@ -668,11 +845,8 @@ void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
Handle<BreakPointInfo>::cast(result);
Handle<DebugInfo> debug_info = node->debug_info();
- // Find the break point and clear it.
- Address pc =
- debug_info->code()->entry() + break_point_info->code_position();
-
- BreakLocation location = BreakLocation::FromAddress(debug_info, pc);
+ BreakLocation location = BreakLocation::FromCodeOffset(
+ debug_info, break_point_info->code_offset());
location.ClearBreakPoint(break_point_object);
// If there are no more break points left remove the debug info for this
@@ -694,9 +868,10 @@ void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
void Debug::ClearAllBreakPoints() {
for (DebugInfoListNode* node = debug_info_list_; node != NULL;
node = node->next()) {
- for (BreakLocation::Iterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
- !it.Done(); it.Next()) {
- it.GetBreakLocation().ClearDebugBreak();
+ for (base::SmartPointer<BreakLocation::Iterator> it(
+ BreakLocation::GetIterator(node->debug_info()));
+ !it->Done(); it->Next()) {
+ it->GetBreakLocation().ClearDebugBreak();
}
}
// Remove all debug info.
@@ -727,8 +902,10 @@ void Debug::FloodWithOneShot(Handle<JSFunction> function,
// Flood the function with break points.
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
- for (BreakLocation::Iterator it(debug_info, type); !it.Done(); it.Next()) {
- it.GetBreakLocation().SetOneShot();
+ for (base::SmartPointer<BreakLocation::Iterator> it(
+ BreakLocation::GetIterator(debug_info, type));
+ !it->Done(); it->Next()) {
+ it->GetBreakLocation().SetOneShot();
}
}
@@ -751,13 +928,6 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
}
-FrameSummary GetFirstFrameSummary(JavaScriptFrame* frame) {
- List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- frame->Summarize(&frames);
- return frames.first();
-}
-
-
void Debug::PrepareStepIn(Handle<JSFunction> function) {
if (!is_active()) return;
if (last_step_action() < StepIn) return;
@@ -779,8 +949,7 @@ void Debug::PrepareStepOnThrow() {
JavaScriptFrameIterator it(isolate_);
while (!it.done()) {
JavaScriptFrame* frame = it.frame();
- int stack_slots = 0; // The computed stack slot count is not used.
- if (frame->LookupExceptionHandlerInTable(&stack_slots, NULL) > 0) break;
+ if (frame->LookupExceptionHandlerInTable(nullptr, nullptr) > 0) break;
it.Advance();
}
@@ -843,18 +1012,21 @@ void Debug::PrepareStep(StepAction step_action) {
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
// Refresh frame summary if the code has been recompiled for debugging.
- if (shared->code() != *summary.code()) summary = GetFirstFrameSummary(frame);
+ if (AbstractCode::cast(shared->code()) != *summary.abstract_code()) {
+ summary = GetFirstFrameSummary(frame);
+ }
- // PC points to the instruction after the current one, possibly a break
- // location as well. So the "- 1" to exclude it from the search.
- Address call_pc = summary.pc() - 1;
- BreakLocation location = BreakLocation::FromAddress(debug_info, call_pc);
+ int call_offset =
+ CallOffsetFromCodeOffset(summary.code_offset(), frame->is_interpreted());
+ BreakLocation location =
+ BreakLocation::FromCodeOffset(debug_info, call_offset);
// At a return statement we will step out either way.
if (location.IsReturn()) step_action = StepOut;
thread_local_.last_statement_position_ =
- debug_info->code()->SourceStatementPosition(summary.pc());
+ debug_info->abstract_code()->SourceStatementPosition(
+ summary.code_offset());
thread_local_.last_fp_ = frame->UnpaddedFP();
switch (step_action) {
@@ -961,9 +1133,10 @@ void Debug::ClearOneShot() {
// removed from the list.
for (DebugInfoListNode* node = debug_info_list_; node != NULL;
node = node->next()) {
- for (BreakLocation::Iterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
- !it.Done(); it.Next()) {
- it.GetBreakLocation().ClearOneShot();
+ for (base::SmartPointer<BreakLocation::Iterator> it(
+ BreakLocation::GetIterator(node->debug_info()));
+ !it->Done(); it->Next()) {
+ it->GetBreakLocation().ClearOneShot();
}
}
}
@@ -1070,6 +1243,15 @@ class RedirectActiveFunctions : public ThreadVisitor {
if (frame->is_optimized()) continue;
if (!function->Inlines(shared_)) continue;
+ if (frame->is_interpreted()) {
+ InterpretedFrame* interpreted_frame =
+ reinterpret_cast<InterpretedFrame*>(frame);
+ BytecodeArray* debug_copy =
+ shared_->GetDebugInfo()->abstract_code()->GetBytecodeArray();
+ interpreted_frame->PatchBytecodeArray(debug_copy);
+ continue;
+ }
+
Code* frame_code = frame->LookupCode();
DCHECK(frame_code->kind() == Code::FUNCTION);
if (frame_code->has_debug_break_slots()) continue;
@@ -1127,11 +1309,15 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
// Make sure we abort incremental marking.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"prepare for break points");
+ bool is_interpreted = shared->HasBytecodeArray();
{
+ // TODO(yangguo): with bytecode, we still walk the heap to find all
+ // optimized code for the function to deoptimize. We can probably be
+ // smarter here and avoid the heap walk.
HeapIterator iterator(isolate_->heap());
HeapObject* obj;
- bool include_generators = shared->is_generator();
+ bool include_generators = !is_interpreted && shared->is_generator();
while ((obj = iterator.next())) {
if (obj->IsJSFunction()) {
@@ -1140,6 +1326,7 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
Deoptimizer::DeoptimizeFunction(function);
}
+ if (is_interpreted) continue;
if (function->shared() == *shared) functions.Add(handle(function));
} else if (include_generators && obj->IsJSGeneratorObject()) {
JSGeneratorObject* generator_obj = JSGeneratorObject::cast(obj);
@@ -1155,7 +1342,12 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
}
}
- if (!shared->HasDebugCode()) {
+ // We do not need to replace code to debug bytecode.
+ DCHECK(!is_interpreted || functions.length() == 0);
+ DCHECK(!is_interpreted || suspended_generators.length() == 0);
+
+ // We do not need to recompile to debug bytecode.
+ if (!is_interpreted && !shared->HasDebugCode()) {
DCHECK(functions.length() > 0);
if (!Compiler::CompileDebugCode(functions.first())) return false;
}
@@ -1326,10 +1518,16 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
return false;
}
- if (!PrepareFunctionForBreakPoints(shared)) return false;
-
- CreateDebugInfo(shared);
-
+ if (shared->HasBytecodeArray()) {
+ // To prepare bytecode for debugging, we already need to have the debug
+ // info (containing the debug copy) upfront, but since we do not recompile,
+ // preparing for break points cannot fail.
+ CreateDebugInfo(shared);
+ CHECK(PrepareFunctionForBreakPoints(shared));
+ } else {
+ if (!PrepareFunctionForBreakPoints(shared)) return false;
+ CreateDebugInfo(shared);
+ }
return true;
}
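
EnsureDebugInfo above flips the order of its two steps for interpreted functions: the debug bytecode copy lives inside the debug info, so the info must exist before break points are prepared, and since nothing is recompiled that preparation cannot fail. The control flow in isolation; a sketch:

#include <cassert>

bool PrepareForBreakPointsSketch(bool is_bytecode, bool compile_ok) {
  return is_bytecode || compile_ok;  // only recompilation can fail
}

bool EnsureDebugInfoSketch(bool is_bytecode, bool compile_ok,
                           bool* debug_info_created) {
  if (is_bytecode) {
    *debug_info_created = true;  // debug copy lives in the debug info
    bool ok = PrepareForBreakPointsSketch(true, compile_ok);
    assert(ok);                  // cannot fail without recompilation
    return ok;
  }
  if (!PrepareForBreakPointsSketch(false, compile_ok)) return false;
  *debug_info_created = true;    // only created once preparation succeeded
  return true;
}
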
@@ -1363,7 +1561,7 @@ void Debug::RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info) {
prev->set_next(current->next());
}
delete current;
- shared->set_debug_info(isolate_->heap()->undefined_value());
+ shared->set_debug_info(DebugInfo::uninitialized());
return;
}
// Move to next in list.
@@ -1374,14 +1572,25 @@ void Debug::RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info) {
UNREACHABLE();
}
-
-void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
- after_break_target_ = NULL;
-
- if (LiveEdit::SetAfterBreakTarget(this)) return; // LiveEdit did the job.
-
- // Continue just after the slot.
- after_break_target_ = frame->pc();
+Object* Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
+ if (frame->is_interpreted()) {
+ // Find the handler from the original bytecode array.
+ InterpretedFrame* interpreted_frame =
+ reinterpret_cast<InterpretedFrame*>(frame);
+ SharedFunctionInfo* shared = interpreted_frame->function()->shared();
+ BytecodeArray* bytecode_array = shared->bytecode_array();
+ int bytecode_offset = interpreted_frame->GetBytecodeOffset();
+ interpreter::Bytecode bytecode =
+ interpreter::Bytecodes::FromByte(bytecode_array->get(bytecode_offset));
+ return isolate_->interpreter()->GetBytecodeHandler(bytecode);
+ } else {
+ after_break_target_ = NULL;
+ if (!LiveEdit::SetAfterBreakTarget(this)) {
+ // Continue just after the slot.
+ after_break_target_ = frame->pc();
+ }
+ return isolate_->heap()->undefined_value();
+ }
}
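
For orientation (editorial sketch, hypothetical caller-side glue): the reworked SetAfterBreakTarget now has two return shapes:

    Object* result = debug->SetAfterBreakTarget(frame);
    if (frame->is_interpreted()) {
      // result is the Code* bytecode handler to re-dispatch to.
    } else {
      // result is undefined; the resume pc was stored in after_break_target_.
    }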
@@ -1394,21 +1603,14 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
// With no debug info there are no break points, so we can't be at a return.
if (!shared->HasDebugInfo()) return false;
- Handle<DebugInfo> debug_info(shared->GetDebugInfo());
- Handle<Code> code(debug_info->code());
-#ifdef DEBUG
- // Get the code which is actually executing.
- Handle<Code> frame_code(frame->LookupCode());
- DCHECK(frame_code.is_identical_to(code));
-#endif
- // Find the reloc info matching the start of the debug break slot.
- Address slot_pc = frame->pc() - Assembler::kDebugBreakSlotLength;
- int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
- for (RelocIterator it(*code, mask); !it.done(); it.next()) {
- if (it.rinfo()->pc() == slot_pc) return true;
- }
- return false;
+ DCHECK(!frame->is_optimized());
+ FrameSummary summary = GetFirstFrameSummary(frame);
+
+ Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+ BreakLocation location =
+ BreakLocation::FromCodeOffset(debug_info, summary.code_offset());
+ return location.IsReturn();
}
@@ -1466,16 +1668,18 @@ void Debug::GetStepinPositions(JavaScriptFrame* frame, StackFrame::Id frame_id,
Handle<DebugInfo> debug_info(shared->GetDebugInfo());
// Refresh frame summary if the code has been recompiled for debugging.
- if (shared->code() != *summary.code()) summary = GetFirstFrameSummary(frame);
+ if (AbstractCode::cast(shared->code()) != *summary.abstract_code()) {
+ summary = GetFirstFrameSummary(frame);
+ }
- // Find range of break points starting from the break point where execution
- // has stopped.
- Address call_pc = summary.pc() - 1;
+ int call_offset =
+ CallOffsetFromCodeOffset(summary.code_offset(), frame->is_interpreted());
List<BreakLocation> locations;
- BreakLocation::FromAddressSameStatement(debug_info, call_pc, &locations);
+ BreakLocation::FromCodeOffsetSameStatement(debug_info, call_offset,
+ &locations);
for (BreakLocation location : locations) {
- if (location.pc() <= summary.pc()) {
+ if (location.code_offset() <= summary.code_offset()) {
// The break point is near our pc. Could be a step-in possibility,
// that is currently taken by active debugger call.
if (break_frame_id() == StackFrame::NO_ID) {
@@ -1619,6 +1823,12 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
if (!break_on_exception_) return;
}
+ {
+ // Check whether the break location is muted.
+ JavaScriptFrameIterator it(isolate_);
+ if (!it.done() && IsMutedAtCurrentLocation(it.frame())) return;
+ }
+
DebugScope debug_scope(this);
if (debug_scope.failed()) return;
@@ -1636,8 +1846,7 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
}
-void Debug::OnDebugBreak(Handle<Object> break_points_hit,
- bool auto_continue) {
+void Debug::OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue) {
// The caller provided for DebugScope.
AssertDebugContext();
// Bail out if there is no listener for this event
@@ -2071,6 +2280,8 @@ void Debug::HandleDebugBreak() {
JSFunction::cast(fun)->context()->global_object();
// Don't stop in debugger functions.
if (IsDebugGlobal(global)) return;
+ // Don't stop if the break location is muted.
+ if (IsMutedAtCurrentLocation(it.frame())) return;
}
}
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 7dcc2b5e34..81db9e54af 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -16,6 +16,7 @@
#include "src/flags.h"
#include "src/frames.h"
#include "src/hashmap.h"
+#include "src/interpreter/source-position-table.h"
#include "src/runtime/runtime.h"
#include "src/string-stream.h"
#include "src/v8threads.h"
@@ -64,24 +65,32 @@ class BreakLocation {
public:
// Find the break point at the supplied address, or the closest one before
// the address.
- static BreakLocation FromAddress(Handle<DebugInfo> debug_info, Address pc);
+ static BreakLocation FromCodeOffset(Handle<DebugInfo> debug_info, int offset);
- static void FromAddressSameStatement(Handle<DebugInfo> debug_info, Address pc,
- List<BreakLocation>* result_out);
+ static BreakLocation FromFrame(Handle<DebugInfo> debug_info,
+ JavaScriptFrame* frame);
+
+ static void FromCodeOffsetSameStatement(Handle<DebugInfo> debug_info,
+ int offset,
+ List<BreakLocation>* result_out);
+
+ static void AllForStatementPosition(Handle<DebugInfo> debug_info,
+ int statement_position,
+ List<BreakLocation>* result_out);
static BreakLocation FromPosition(Handle<DebugInfo> debug_info, int position,
BreakPositionAlignment alignment);
bool IsDebugBreak() const;
- inline bool IsReturn() const {
- return RelocInfo::IsDebugBreakSlotAtReturn(rmode_);
- }
- inline bool IsCall() const {
- return RelocInfo::IsDebugBreakSlotAtCall(rmode_);
+ inline bool IsReturn() const { return type_ == DEBUG_BREAK_SLOT_AT_RETURN; }
+ inline bool IsCall() const { return type_ == DEBUG_BREAK_SLOT_AT_CALL; }
+ inline bool IsDebugBreakSlot() const { return type_ >= DEBUG_BREAK_SLOT; }
+ inline bool IsDebuggerStatement() const {
+ return type_ == DEBUGGER_STATEMENT;
}
inline bool HasBreakPoint() const {
- return debug_info_->HasBreakPoint(pc_offset_);
+ return debug_info_->HasBreakPoint(code_offset_);
}
Handle<Object> BreakPointObjects() const;
@@ -92,79 +101,118 @@ class BreakLocation {
void SetOneShot();
void ClearOneShot();
-
- inline RelocInfo rinfo() const {
- return RelocInfo(debug_info_->GetIsolate(), pc(), rmode(), data_, code());
- }
-
inline int position() const { return position_; }
inline int statement_position() const { return statement_position_; }
- inline Address pc() const { return code()->entry() + pc_offset_; }
+ inline int code_offset() const { return code_offset_; }
+ inline Isolate* isolate() { return debug_info_->GetIsolate(); }
- inline RelocInfo::Mode rmode() const { return rmode_; }
+ inline AbstractCode* abstract_code() const {
+ return debug_info_->abstract_code();
+ }
- inline Code* code() const { return debug_info_->code(); }
+ protected:
+ enum DebugBreakType {
+ NOT_DEBUG_BREAK,
+ DEBUGGER_STATEMENT,
+ DEBUG_BREAK_SLOT,
+ DEBUG_BREAK_SLOT_AT_CALL,
+ DEBUG_BREAK_SLOT_AT_RETURN
+ };
- private:
- BreakLocation(Handle<DebugInfo> debug_info, RelocInfo* rinfo, int position,
- int statement_position);
+ BreakLocation(Handle<DebugInfo> debug_info, DebugBreakType type,
+ int code_offset, int position, int statement_position);
class Iterator {
public:
- Iterator(Handle<DebugInfo> debug_info, BreakLocatorType type);
-
- BreakLocation GetBreakLocation() {
- return BreakLocation(debug_info_, rinfo(), position(),
- statement_position());
- }
+ virtual ~Iterator() {}
- inline bool Done() const { return reloc_iterator_.done(); }
- void Next();
+ virtual BreakLocation GetBreakLocation() = 0;
+ virtual bool Done() const = 0;
+ virtual void Next() = 0;
void SkipTo(int count) {
while (count-- > 0) Next();
}
- inline RelocInfo::Mode rmode() { return reloc_iterator_.rinfo()->rmode(); }
- inline RelocInfo* rinfo() { return reloc_iterator_.rinfo(); }
- inline Address pc() { return rinfo()->pc(); }
+ virtual int code_offset() = 0;
int break_index() const { return break_index_; }
inline int position() const { return position_; }
inline int statement_position() const { return statement_position_; }
- private:
- static int GetModeMask(BreakLocatorType type);
+ protected:
+ explicit Iterator(Handle<DebugInfo> debug_info);
Handle<DebugInfo> debug_info_;
- RelocIterator reloc_iterator_;
int break_index_;
int position_;
int statement_position_;
+ private:
DisallowHeapAllocation no_gc_;
-
DISALLOW_COPY_AND_ASSIGN(Iterator);
};
+ class CodeIterator : public Iterator {
+ public:
+ CodeIterator(Handle<DebugInfo> debug_info, BreakLocatorType type);
+ ~CodeIterator() override {}
+
+ BreakLocation GetBreakLocation() override;
+ bool Done() const override { return reloc_iterator_.done(); }
+ void Next() override;
+
+ int code_offset() override {
+ return static_cast<int>(
+ rinfo()->pc() -
+ debug_info_->abstract_code()->GetCode()->instruction_start());
+ }
+
+ private:
+ static int GetModeMask(BreakLocatorType type);
+ RelocInfo::Mode rmode() { return reloc_iterator_.rinfo()->rmode(); }
+ RelocInfo* rinfo() { return reloc_iterator_.rinfo(); }
+
+ RelocIterator reloc_iterator_;
+ DISALLOW_COPY_AND_ASSIGN(CodeIterator);
+ };
+
+ class BytecodeArrayIterator : public Iterator {
+ public:
+ BytecodeArrayIterator(Handle<DebugInfo> debug_info, BreakLocatorType type);
+ ~BytecodeArrayIterator() override {}
+
+ BreakLocation GetBreakLocation() override;
+ bool Done() const override { return source_position_iterator_.done(); }
+ void Next() override;
+
+ int code_offset() override {
+ return source_position_iterator_.bytecode_offset();
+ }
+
+ private:
+ DebugBreakType GetDebugBreakType();
+
+ interpreter::SourcePositionTableIterator source_position_iterator_;
+ BreakLocatorType break_locator_type_;
+ int start_position_;
+ DISALLOW_COPY_AND_ASSIGN(BytecodeArrayIterator);
+ };
+
+ static Iterator* GetIterator(Handle<DebugInfo> debug_info,
+ BreakLocatorType type = ALL_BREAK_LOCATIONS);
+
+ private:
friend class Debug;
- static int BreakIndexFromAddress(Handle<DebugInfo> debug_info, Address pc);
+ static int BreakIndexFromCodeOffset(Handle<DebugInfo> debug_info, int offset);
void SetDebugBreak();
void ClearDebugBreak();
- inline bool IsDebuggerStatement() const {
- return RelocInfo::IsDebuggerStatement(rmode_);
- }
- inline bool IsDebugBreakSlot() const {
- return RelocInfo::IsDebugBreakSlot(rmode_);
- }
-
Handle<DebugInfo> debug_info_;
- int pc_offset_;
- RelocInfo::Mode rmode_;
- intptr_t data_;
+ int code_offset_;
+ DebugBreakType type_;
int position_;
int statement_position_;
};
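
Editorial sketch: both concrete iterators report the same integer code_offset(), so callers can treat machine-code pcs and bytecode offsets uniformly. The helper below is hypothetical, and CALLS_AND_RETURNS is assumed to be the other BreakLocatorType value:

    // Offset of the first call/return break location, whether debug_info
    // holds full-codegen code or a bytecode array.
    int OffsetOfFirstCallOrReturn(Handle<DebugInfo> debug_info) {
      base::SmartPointer<BreakLocation::Iterator> it(
          BreakLocation::GetIterator(debug_info, CALLS_AND_RETURNS));
      return it->Done() ? -1 : it->code_offset();
    }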
@@ -383,7 +431,7 @@ class Debug {
// Internal logic
bool Load();
void Break(Arguments args, JavaScriptFrame*);
- void SetAfterBreakTarget(JavaScriptFrame* frame);
+ Object* SetAfterBreakTarget(JavaScriptFrame* frame);
// Scripts handling.
Handle<FixedArray> GetLoadedScripts();
@@ -555,7 +603,9 @@ class Debug {
void ClearOneShot();
void ActivateStepOut(StackFrame* frame);
void RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info);
- Handle<Object> CheckBreakPoints(Handle<Object> break_point);
+ Handle<Object> CheckBreakPoints(BreakLocation* location,
+ bool* has_break_points = nullptr);
+ bool IsMutedAtCurrentLocation(JavaScriptFrame* frame);
bool CheckBreakPoint(Handle<Object> break_point_object);
MaybeHandle<Object> CallFunction(const char* name, int argc,
Handle<Object> args[]);
@@ -739,6 +789,7 @@ class DebugCodegen : public AllStatic {
static void PatchDebugBreakSlot(Isolate* isolate, Address pc,
Handle<Code> code);
+ static bool DebugBreakSlotIsPatched(Address pc);
static void ClearDebugBreakSlot(Isolate* isolate, Address pc);
};
diff --git a/deps/v8/src/debug/debug.js b/deps/v8/src/debug/debug.js
index 1158f7dd38..6849bf5345 100644
--- a/deps/v8/src/debug/debug.js
+++ b/deps/v8/src/debug/debug.js
@@ -147,10 +147,8 @@ function BreakPoint(source_position, opt_script_break_point) {
} else {
this.number_ = next_break_point_number++;
}
- this.hit_count_ = 0;
this.active_ = true;
this.condition_ = null;
- this.ignoreCount_ = 0;
}
@@ -169,11 +167,6 @@ BreakPoint.prototype.source_position = function() {
};
-BreakPoint.prototype.hit_count = function() {
- return this.hit_count_;
-};
-
-
BreakPoint.prototype.active = function() {
if (this.script_break_point()) {
return this.script_break_point().active();
@@ -190,11 +183,6 @@ BreakPoint.prototype.condition = function() {
};
-BreakPoint.prototype.ignoreCount = function() {
- return this.ignoreCount_;
-};
-
-
BreakPoint.prototype.script_break_point = function() {
return this.script_break_point_;
};
@@ -215,11 +203,6 @@ BreakPoint.prototype.setCondition = function(condition) {
};
-BreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
- this.ignoreCount_ = ignoreCount;
-};
-
-
BreakPoint.prototype.isTriggered = function(exec_state) {
// Break point not active - not triggered.
if (!this.active()) return false;
@@ -239,18 +222,6 @@ BreakPoint.prototype.isTriggered = function(exec_state) {
}
}
- // Update the hit count.
- this.hit_count_++;
- if (this.script_break_point_) {
- this.script_break_point_.hit_count_++;
- }
-
- // If the break point has an ignore count it is not triggered.
- if (this.ignoreCount_ > 0) {
- this.ignoreCount_--;
- return false;
- }
-
// Break point triggered.
return true;
};
@@ -283,10 +254,8 @@ function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
this.groupId_ = opt_groupId;
this.position_alignment_ = IS_UNDEFINED(opt_position_alignment)
? Debug.BreakPositionAlignment.Statement : opt_position_alignment;
- this.hit_count_ = 0;
this.active_ = true;
this.condition_ = null;
- this.ignoreCount_ = 0;
this.break_points_ = [];
}
@@ -299,10 +268,8 @@ ScriptBreakPoint.prototype.cloneForOtherScript = function (other_script) {
copy.number_ = next_break_point_number++;
script_break_points.push(copy);
- copy.hit_count_ = this.hit_count_;
copy.active_ = this.active_;
copy.condition_ = this.condition_;
- copy.ignoreCount_ = this.ignoreCount_;
return copy;
};
@@ -362,11 +329,6 @@ ScriptBreakPoint.prototype.update_positions = function(line, column) {
};
-ScriptBreakPoint.prototype.hit_count = function() {
- return this.hit_count_;
-};
-
-
ScriptBreakPoint.prototype.active = function() {
return this.active_;
};
@@ -377,11 +339,6 @@ ScriptBreakPoint.prototype.condition = function() {
};
-ScriptBreakPoint.prototype.ignoreCount = function() {
- return this.ignoreCount_;
-};
-
-
ScriptBreakPoint.prototype.enable = function() {
this.active_ = true;
};
@@ -397,16 +354,6 @@ ScriptBreakPoint.prototype.setCondition = function(condition) {
};
-ScriptBreakPoint.prototype.setIgnoreCount = function(ignoreCount) {
- this.ignoreCount_ = ignoreCount;
-
- // Set ignore count on all break points created from this script break point.
- for (var i = 0; i < this.break_points_.length; i++) {
- this.break_points_[i].setIgnoreCount(ignoreCount);
- }
-};
-
-
// Check whether a script matches this script break point. Currently this is
// only based on script name.
ScriptBreakPoint.prototype.matchesScript = function(script) {
@@ -461,7 +408,6 @@ ScriptBreakPoint.prototype.set = function (script) {
// Create a break point object and set the break point.
var break_point = MakeBreakPoint(position, this);
- break_point.setIgnoreCount(this.ignoreCount());
var actual_position = %SetScriptBreakPoint(script, position,
this.position_alignment_,
break_point);
@@ -726,13 +672,6 @@ Debug.changeBreakPointCondition = function(break_point_number, condition) {
};
-Debug.changeBreakPointIgnoreCount = function(break_point_number, ignoreCount) {
- if (ignoreCount < 0) throw MakeError(kDebugger, 'Invalid argument');
- var break_point = this.findBreakPoint(break_point_number, false);
- break_point.setIgnoreCount(ignoreCount);
-};
-
-
Debug.clearBreakPoint = function(break_point_number) {
var break_point = this.findBreakPoint(break_point_number, true);
if (break_point) {
@@ -857,14 +796,6 @@ Debug.changeScriptBreakPointCondition = function(
};
-Debug.changeScriptBreakPointIgnoreCount = function(
- break_point_number, ignoreCount) {
- if (ignoreCount < 0) throw MakeError(kDebugger, 'Invalid argument');
- var script_break_point = this.findScriptBreakPoint(break_point_number, false);
- script_break_point.setIgnoreCount(ignoreCount);
-};
-
-
Debug.scriptBreakPoints = function() {
return script_break_points;
};
@@ -1503,7 +1434,6 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
var enabled = IS_UNDEFINED(request.arguments.enabled) ?
true : request.arguments.enabled;
var condition = request.arguments.condition;
- var ignoreCount = request.arguments.ignoreCount;
var groupId = request.arguments.groupId;
// Check for legal arguments.
@@ -1569,9 +1499,6 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
// Set additional break point properties.
var break_point = Debug.findBreakPoint(break_point_number);
- if (ignoreCount) {
- Debug.changeBreakPointIgnoreCount(break_point_number, ignoreCount);
- }
if (!enabled) {
Debug.disableBreakPoint(break_point_number);
}
@@ -1617,7 +1544,6 @@ DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(
var break_point = TO_NUMBER(request.arguments.breakpoint);
var enabled = request.arguments.enabled;
var condition = request.arguments.condition;
- var ignoreCount = request.arguments.ignoreCount;
// Check for legal arguments.
if (!break_point) {
@@ -1638,11 +1564,6 @@ DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(
if (!IS_UNDEFINED(condition)) {
Debug.changeBreakPointCondition(break_point, condition);
}
-
- // Change ignore count if supplied
- if (!IS_UNDEFINED(ignoreCount)) {
- Debug.changeBreakPointIgnoreCount(break_point, ignoreCount);
- }
};
@@ -1717,10 +1638,8 @@ DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(
line: break_point.line(),
column: break_point.column(),
groupId: break_point.groupId(),
- hit_count: break_point.hit_count(),
active: break_point.active(),
condition: break_point.condition(),
- ignoreCount: break_point.ignoreCount(),
actual_locations: break_point.actual_locations()
};
diff --git a/deps/v8/src/debug/ia32/debug-ia32.cc b/deps/v8/src/debug/ia32/debug-ia32.cc
index d489a01441..95f2bc6b68 100644
--- a/deps/v8/src/debug/ia32/debug-ia32.cc
+++ b/deps/v8/src/debug/ia32/debug-ia32.cc
@@ -50,6 +50,9 @@ void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
DCHECK_EQ(kSize, patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
}
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+ return !Assembler::IsNop(pc);
+}
void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode) {
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index f1f3f2391a..91c990d19b 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -1857,8 +1857,8 @@ bool LiveEdit::FindActiveGenerators(Handle<FixedArray> shared_info_array,
HandleScope scope(isolate);
for (int i = 0; i < len; i++) {
- Handle<JSValue> jsvalue =
- Handle<JSValue>::cast(FixedArray::get(shared_info_array, i));
+ Handle<JSValue> jsvalue = Handle<JSValue>::cast(
+ FixedArray::get(*shared_info_array, i, isolate));
Handle<SharedFunctionInfo> shared =
UnwrapSharedFunctionInfoFromJSValue(jsvalue);
diff --git a/deps/v8/src/debug/mips/debug-mips.cc b/deps/v8/src/debug/mips/debug-mips.cc
index c5c58d044b..1d9f7d6037 100644
--- a/deps/v8/src/debug/mips/debug-mips.cc
+++ b/deps/v8/src/debug/mips/debug-mips.cc
@@ -56,6 +56,10 @@ void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
patcher.masm()->Call(v8::internal::t9);
}
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+ Instr current_instr = Assembler::instr_at(pc);
+ return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode) {
diff --git a/deps/v8/src/debug/mips64/debug-mips64.cc b/deps/v8/src/debug/mips64/debug-mips64.cc
index 1d65fd9efd..0646a249f7 100644
--- a/deps/v8/src/debug/mips64/debug-mips64.cc
+++ b/deps/v8/src/debug/mips64/debug-mips64.cc
@@ -58,6 +58,10 @@ void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
patcher.masm()->Call(v8::internal::t9);
}
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+ Instr current_instr = Assembler::instr_at(pc);
+ return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode) {
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 1fd5fa9ecd..8b9dd02b6e 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -812,7 +812,7 @@ ObjectMirror.prototype.lookupProperty = function(value) {
// Skip properties which are defined through accessors.
var property = properties[i];
if (property.propertyType() != PropertyType.AccessorConstant) {
- if (%_ObjectEquals(property.value_, value.value_)) {
+ if (property.value_ === value.value_) {
return property;
}
}
diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc
index c5ddab8bc0..aab5399fee 100644
--- a/deps/v8/src/debug/ppc/debug-ppc.cc
+++ b/deps/v8/src/debug/ppc/debug-ppc.cc
@@ -64,6 +64,10 @@ void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
patcher.masm()->bctrl();
}
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+ Instr current_instr = Assembler::instr_at(pc);
+ return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode) {
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
index 0d56ea7521..f7fbe7691e 100644
--- a/deps/v8/src/debug/x64/debug-x64.cc
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -51,6 +51,9 @@ void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
DCHECK_EQ(kSize, patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
}
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+ return !Assembler::IsNop(pc);
+}
void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode) {
diff --git a/deps/v8/src/debug/x87/debug-x87.cc b/deps/v8/src/debug/x87/debug-x87.cc
index 8c04e02b89..8ddb82f39d 100644
--- a/deps/v8/src/debug/x87/debug-x87.cc
+++ b/deps/v8/src/debug/x87/debug-x87.cc
@@ -50,6 +50,9 @@ void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
DCHECK_EQ(kSize, patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
}
+bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
+ return !Assembler::IsNop(pc);
+}
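
Editorial note: all ports implement the same predicate; a sketch of the intended invariant against the existing patch/clear entry points:

    // A debug break slot starts out as the architecture's DEBUG_BREAK_NOP
    // sequence; patching overwrites it with a call, so "patched" simply
    // means "no longer a nop".
    DebugCodegen::PatchDebugBreakSlot(isolate, pc, code);
    DCHECK(DebugCodegen::DebugBreakSlotIsPatched(pc));
    DebugCodegen::ClearDebugBreakSlot(isolate, pc);
    DCHECK(!DebugCodegen::DebugBreakSlotIsPatched(pc));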
void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
DebugBreakCallHelperMode mode) {
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 4bdafbf1b4..e00e5ab538 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -11,8 +11,10 @@
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
+#include "src/interpreter/interpreter.h"
#include "src/macro-assembler.h"
#include "src/profiler/cpu-profiler.h"
+#include "src/tracing/trace-event.h"
#include "src/v8.h"
@@ -35,7 +37,6 @@ static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
: allocator_(allocator),
- deoptimized_frame_info_(NULL),
current_(NULL) {
for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) {
deopt_entry_code_entries_[i] = -1;
@@ -52,13 +53,6 @@ DeoptimizerData::~DeoptimizerData() {
}
-void DeoptimizerData::Iterate(ObjectVisitor* v) {
- if (deoptimized_frame_info_ != NULL) {
- deoptimized_frame_info_->Iterate(v);
- }
-}
-
-
Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
if (function_->IsHeapObject()) {
// Search all deoptimizing code in the native context of the function.
@@ -140,73 +134,27 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
int jsframe_index,
Isolate* isolate) {
CHECK(frame->is_optimized());
- CHECK(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL);
-
- // Get the function and code from the frame.
- JSFunction* function = frame->function();
- Code* code = frame->LookupCode();
-
- // Locate the deoptimization point in the code. As we are at a call the
- // return address must be at a place in the code with deoptimization support.
- SafepointEntry safepoint_entry = code->GetSafepointEntry(frame->pc());
- int deoptimization_index = safepoint_entry.deoptimization_index();
- CHECK_NE(deoptimization_index, Safepoint::kNoDeoptimizationIndex);
-
- // Always use the actual stack slots when calculating the fp to sp
- // delta adding two for the function and context.
- unsigned stack_slots = code->stack_slots();
- unsigned arguments_stack_height =
- Deoptimizer::ComputeOutgoingArgumentSize(code, deoptimization_index);
- unsigned fp_to_sp_delta = (stack_slots * kPointerSize) +
- StandardFrameConstants::kFixedFrameSizeFromFp +
- arguments_stack_height;
-
- Deoptimizer* deoptimizer = new Deoptimizer(isolate,
- function,
- Deoptimizer::DEBUGGER,
- deoptimization_index,
- frame->pc(),
- fp_to_sp_delta,
- code);
- Address tos = frame->fp() - fp_to_sp_delta;
- deoptimizer->FillInputFrame(tos, frame);
-
- // Calculate the output frames.
- Deoptimizer::ComputeOutputFrames(deoptimizer);
-
- // Create the GC safe output frame information and register it for GC
- // handling.
- CHECK_LT(jsframe_index, deoptimizer->jsframe_count());
-
- // Convert JS frame index into frame index.
- int frame_index = deoptimizer->ConvertJSFrameIndexToFrameIndex(jsframe_index);
- bool has_arguments_adaptor =
- frame_index > 0 &&
- deoptimizer->output_[frame_index - 1]->GetFrameType() ==
- StackFrame::ARGUMENTS_ADAPTOR;
-
- int construct_offset = has_arguments_adaptor ? 2 : 1;
- bool has_construct_stub =
- frame_index >= construct_offset &&
- deoptimizer->output_[frame_index - construct_offset]->GetFrameType() ==
- StackFrame::CONSTRUCT;
-
- DeoptimizedFrameInfo* info = new DeoptimizedFrameInfo(deoptimizer,
- frame_index,
- has_arguments_adaptor,
- has_construct_stub);
- isolate->deoptimizer_data()->deoptimized_frame_info_ = info;
-
- // Done with the GC-unsafe frame descriptions. This re-enables allocation.
- deoptimizer->DeleteFrameDescriptions();
-
- // Allocate a heap number for the doubles belonging to this frame.
- deoptimizer->MaterializeHeapNumbersForDebuggerInspectableFrame(
- frame_index, info->parameters_count(), info->expression_count(), info);
+ TranslatedState translated_values(frame);
+ translated_values.Prepare(false, frame->fp());
+
+ TranslatedState::iterator frame_it = translated_values.end();
+ int counter = jsframe_index;
+ for (auto it = translated_values.begin(); it != translated_values.end();
+ it++) {
+ if (it->kind() == TranslatedFrame::kFunction ||
+ it->kind() == TranslatedFrame::kInterpretedFunction) {
+ if (counter == 0) {
+ frame_it = it;
+ break;
+ }
+ counter--;
+ }
+ }
+ CHECK(frame_it != translated_values.end());
- // Finished using the deoptimizer instance.
- delete deoptimizer;
+ DeoptimizedFrameInfo* info =
+ new DeoptimizedFrameInfo(&translated_values, frame_it, isolate);
return info;
}
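
Editorial worked example for the selection loop above (illustrative frame mix):

    // Translated frames: [0] kFunction, [1] kArgumentsAdaptor,
    //                    [2] kInterpretedFunction.
    // jsframe_index == 1: [0] decrements counter to 0, [1] is skipped
    // (adaptors are not JavaScript frames), [2] matches with counter == 0,
    // so frame_it lands on the interpreted frame.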
@@ -214,9 +162,7 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info,
Isolate* isolate) {
- CHECK_EQ(isolate->deoptimizer_data()->deoptimized_frame_info_, info);
delete info;
- isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL;
}
@@ -394,8 +340,8 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
element = next;
}
- // TODO(titzer): we need a handle scope only because of the macro assembler,
- // which is only used in EnsureCodeForDeoptimizationEntry.
+ // We need a handle scope only because of the macro assembler,
+ // which is used in code patching in EnsureCodeForDeoptimizationEntry.
HandleScope scope(isolate);
// Now patch all the codes for deoptimization.
@@ -426,6 +372,8 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
+ TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
+ TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
@@ -443,6 +391,8 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
+ TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
+ TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimize marked code in all contexts]\n");
@@ -470,6 +420,8 @@ void Deoptimizer::MarkAllCodeForContext(Context* context) {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ TimerEventScope<TimerEventDeoptimizeCode> timer(function->GetIsolate());
+ TRACE_EVENT0("v8", "V8.DeoptimizeCode");
Code* code = function->code();
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
// Mark the code for deoptimization and unlink any functions that also
@@ -513,7 +465,6 @@ const char* Deoptimizer::MessageFor(BailoutType type) {
return NULL;
}
-
Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
BailoutType type, unsigned bailout_id, Address from,
int fp_to_sp_delta, Code* optimized_code)
@@ -524,11 +475,19 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
has_alignment_padding_(0),
+ deoptimizing_throw_(false),
+ catch_handler_data_(-1),
+ catch_handler_pc_offset_(-1),
input_(nullptr),
output_count_(0),
jsframe_count_(0),
output_(nullptr),
trace_scope_(nullptr) {
+ if (isolate->deoptimizer_lazy_throw()) {
+ isolate->set_deoptimizer_lazy_throw(false);
+ deoptimizing_throw_ = true;
+ }
+
// For COMPILED_STUBs called from builtins, the function pointer is a SMI
// indicating an internal frame.
if (function->IsSmi()) {
@@ -567,7 +526,11 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
PROFILE(isolate_, CodeDeoptEvent(compiled_code_, from_, fp_to_sp_delta_));
}
unsigned size = ComputeInputFrameSize();
- input_ = new(size) FrameDescription(size, function);
+ int parameter_count =
+ function == nullptr
+ ? 0
+ : (function->shared()->internal_formal_parameter_count() + 1);
+ input_ = new (size) FrameDescription(size, parameter_count);
input_->SetFrameType(frame_type);
}
@@ -702,6 +665,41 @@ int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
return length;
}
+namespace {
+
+int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
+ switch (translated_frame->kind()) {
+ case TranslatedFrame::kFunction: {
+ BailoutId node_id = translated_frame->node_id();
+ JSFunction* function =
+ JSFunction::cast(translated_frame->begin()->GetRawValue());
+ Code* non_optimized_code = function->shared()->code();
+ FixedArray* raw_data = non_optimized_code->deoptimization_data();
+ DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+ unsigned pc_and_state =
+ Deoptimizer::GetOutputInfo(data, node_id, function->shared());
+ unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+ HandlerTable* table =
+ HandlerTable::cast(non_optimized_code->handler_table());
+ HandlerTable::CatchPrediction prediction;
+ return table->LookupRange(pc_offset, data_out, &prediction);
+ }
+ case TranslatedFrame::kInterpretedFunction: {
+ int bytecode_offset = translated_frame->node_id().ToInt();
+ JSFunction* function =
+ JSFunction::cast(translated_frame->begin()->GetRawValue());
+ BytecodeArray* bytecode = function->shared()->bytecode_array();
+ HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
+ HandlerTable::CatchPrediction prediction;
+ return table->LookupRange(bytecode_offset, data_out, &prediction);
+ }
+ default:
+ break;
+ }
+ return -1;
+}
+
+} // namespace
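
Editorial sketch: both switch arms reduce to the same handler-table range lookup; only the offset space differs (full-codegen pc offset vs. bytecode offset):

    int data;  // receives handler data: stack height (full-code) or
               // context register index (bytecode)
    HandlerTable::CatchPrediction prediction;
    int handler = table->LookupRange(offset, &data, &prediction);
    // handler < 0 means no enclosing try block in this frame.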
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
@@ -742,6 +740,22 @@ void Deoptimizer::DoComputeOutputFrames() {
// Do the input frame to output frame(s) translation.
size_t count = translated_state_.frames().size();
+  // If we are supposed to go to the catch handler, find the catching frame
+  // and make sure we only deoptimize up to that frame.
+ if (deoptimizing_throw_) {
+ size_t catch_handler_frame_index = count;
+ for (size_t i = count; i-- > 0;) {
+ catch_handler_pc_offset_ = LookupCatchHandler(
+ &(translated_state_.frames()[i]), &catch_handler_data_);
+ if (catch_handler_pc_offset_ >= 0) {
+ catch_handler_frame_index = i;
+ break;
+ }
+ }
+ CHECK_LT(catch_handler_frame_index, count);
+ count = catch_handler_frame_index + 1;
+ }
+
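Editorial worked example of the truncation above:

    // count == 3, the only matching handler lives in frame 1:
    // i = 2 -> no handler; i = 1 -> handler found,
    // catch_handler_frame_index = 1, count = 2.
    // Frame 2 (the throwing frame) is dropped and frame 1 becomes topmost.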
DCHECK(output_ == NULL);
output_ = new FrameDescription*[count];
for (size_t i = 0; i < count; ++i) {
@@ -760,11 +774,12 @@ void Deoptimizer::DoComputeOutputFrames() {
int frame_index = static_cast<int>(i);
switch (translated_state_.frames()[i].kind()) {
case TranslatedFrame::kFunction:
- DoComputeJSFrame(frame_index);
+ DoComputeJSFrame(frame_index, deoptimizing_throw_ && i == count - 1);
jsframe_count_++;
break;
case TranslatedFrame::kInterpretedFunction:
- DoComputeInterpretedFrame(frame_index);
+ DoComputeInterpretedFrame(frame_index,
+ deoptimizing_throw_ && i == count - 1);
jsframe_count_++;
break;
case TranslatedFrame::kArgumentsAdaptor:
@@ -809,40 +824,53 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
-
-void Deoptimizer::DoComputeJSFrame(int frame_index) {
+void Deoptimizer::DoComputeJSFrame(int frame_index, bool goto_catch_handler) {
TranslatedFrame* translated_frame =
&(translated_state_.frames()[frame_index]);
+ SharedFunctionInfo* shared = translated_frame->raw_shared_info();
+
TranslatedFrame::iterator value_iterator = translated_frame->begin();
+ bool is_bottommost = (0 == frame_index);
+ bool is_topmost = (output_count_ - 1 == frame_index);
int input_index = 0;
BailoutId node_id = translated_frame->node_id();
unsigned height =
translated_frame->height() - 1; // Do not count the context.
unsigned height_in_bytes = height * kPointerSize;
+ if (goto_catch_handler) {
+ // Take the stack height from the handler table.
+ height = catch_handler_data_;
+ // We also make space for the exception itself.
+ height_in_bytes = (height + 1) * kPointerSize;
+ CHECK(is_topmost);
+ }
+
JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
value_iterator++;
input_index++;
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(), " translating frame ");
-    function->PrintName(trace_scope_->file());
-    PrintF(trace_scope_->file(),
-           " => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
+    base::SmartArrayPointer<char> name = shared->DebugName()->ToCString();
+    PrintF(trace_scope_->file(), "%s", name.get());
+    PrintF(trace_scope_->file(), " => node=%d, height=%d%s\n", node_id.ToInt(),
+           height_in_bytes, goto_catch_handler ? " (throw)" : "");
}
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by JavaScriptFrameConstants.
- unsigned fixed_frame_size = ComputeJavascriptFixedSize(function);
+ unsigned fixed_frame_size = ComputeJavascriptFixedSize(shared);
unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
+ int parameter_count = shared->internal_formal_parameter_count() + 1;
+ FrameDescription* output_frame = new (output_frame_size)
+ FrameDescription(output_frame_size, parameter_count);
output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
- bool is_bottommost = (0 == frame_index);
- bool is_topmost = (output_count_ - 1 == frame_index);
CHECK(frame_index >= 0 && frame_index < output_count_);
CHECK_NULL(output_[frame_index]);
output_[frame_index] = output_frame;
@@ -856,9 +884,8 @@ void Deoptimizer::DoComputeJSFrame(int frame_index) {
if (is_bottommost) {
// Determine whether the input frame contains alignment padding.
has_alignment_padding_ =
- (!compiled_code_->is_turbofanned() && HasAlignmentPadding(function))
- ? 1
- : 0;
+ (!compiled_code_->is_turbofanned() && HasAlignmentPadding(shared)) ? 1
+ : 0;
// 2 = context and function in the frame.
// If the optimized frame had alignment padding, adjust the frame pointer
// to point to the new position of the old frame pointer after padding
@@ -872,8 +899,6 @@ void Deoptimizer::DoComputeJSFrame(int frame_index) {
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
- int parameter_count =
- function->shared()->internal_formal_parameter_count() + 1;
unsigned output_offset = output_frame_size;
unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
@@ -945,8 +970,20 @@ void Deoptimizer::DoComputeJSFrame(int frame_index) {
Register context_reg = JavaScriptFrame::context_register();
output_offset -= kPointerSize;
input_offset -= kPointerSize;
+
+ TranslatedFrame::iterator context_pos = value_iterator;
+ int context_input_index = input_index;
+ // When deoptimizing into a catch block, we need to take the context
+ // from just above the top of the operand stack (we push the context
+ // at the entry of the try block).
+ if (goto_catch_handler) {
+ for (unsigned i = 0; i < height + 1; ++i) {
+ context_pos++;
+ context_input_index++;
+ }
+ }
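Editorial sketch of the layout this skip assumes (a plausible reading of the comment above):

    // Iterator order for the frame: function, params..., context slot, then
    // the expression-stack slots. With goto_catch_handler, height is the
    // handler's stack height, so advancing height + 1 steps from the regular
    // context slot reaches the context pushed at try-block entry, sitting
    // just above the handler's operand stack.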
// Read the context from the translations.
- Object* context = value_iterator->GetRawValue();
+ Object* context = context_pos->GetRawValue();
if (context == isolate_->heap()->undefined_value()) {
// If the context was optimized away, just use the context from
// the activation. This should only apply to Crankshaft code.
@@ -959,13 +996,13 @@ void Deoptimizer::DoComputeJSFrame(int frame_index) {
value = reinterpret_cast<intptr_t>(context);
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
- WriteValueToOutput(context, input_index, frame_index, output_offset,
+ WriteValueToOutput(context, context_input_index, frame_index, output_offset,
"context ");
if (context == isolate_->heap()->arguments_marker()) {
Address output_address =
reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
output_offset;
- values_to_materialize_.push_back({output_address, value_iterator});
+ values_to_materialize_.push_back({output_address, context_pos});
}
value_iterator++;
input_index++;
@@ -985,19 +1022,19 @@ void Deoptimizer::DoComputeJSFrame(int frame_index) {
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
+ if (goto_catch_handler) {
+ // Write out the exception for the catch handler.
+ output_offset -= kPointerSize;
+ Object* exception_obj = reinterpret_cast<Object*>(
+ input_->GetRegister(FullCodeGenerator::result_register().code()));
+ WriteValueToOutput(exception_obj, input_index, frame_index, output_offset,
+ "exception ");
+ input_index++;
+ }
CHECK_EQ(0u, output_offset);
- // Compute this frame's PC, state, and continuation.
- Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- Address start = non_optimized_code->instruction_start();
- unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
- intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
- output_frame->SetPc(pc_value);
-
// Update constant pool.
+ Code* non_optimized_code = shared->code();
if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(non_optimized_code->constant_pool());
@@ -1009,8 +1046,22 @@ void Deoptimizer::DoComputeJSFrame(int frame_index) {
}
}
+ // Compute this frame's PC, state, and continuation.
+ FixedArray* raw_data = non_optimized_code->deoptimization_data();
+ DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+ Address start = non_optimized_code->instruction_start();
+ unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+ unsigned pc_offset = goto_catch_handler
+ ? catch_handler_pc_offset_
+ : FullCodeGenerator::PcField::decode(pc_and_state);
+ intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
+ output_frame->SetPc(pc_value);
+
+ // If we are going to the catch handler, then the exception lives in
+ // the accumulator.
FullCodeGenerator::State state =
- FullCodeGenerator::StateField::decode(pc_and_state);
+ goto_catch_handler ? FullCodeGenerator::TOS_REG
+ : FullCodeGenerator::StateField::decode(pc_and_state);
output_frame->SetState(Smi::FromInt(state));
// Set the continuation for the topmost frame.
@@ -1029,14 +1080,16 @@ void Deoptimizer::DoComputeJSFrame(int frame_index) {
}
}
-
-void Deoptimizer::DoComputeInterpretedFrame(int frame_index) {
+void Deoptimizer::DoComputeInterpretedFrame(int frame_index,
+ bool goto_catch_handler) {
TranslatedFrame* translated_frame =
&(translated_state_.frames()[frame_index]);
+ SharedFunctionInfo* shared = translated_frame->raw_shared_info();
+
TranslatedFrame::iterator value_iterator = translated_frame->begin();
int input_index = 0;
- BailoutId bytecode_offset = translated_frame->node_id();
+ int bytecode_offset = translated_frame->node_id().ToInt();
unsigned height = translated_frame->height();
unsigned height_in_bytes = height * kPointerSize;
JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
@@ -1044,20 +1097,26 @@ void Deoptimizer::DoComputeInterpretedFrame(int frame_index) {
input_index++;
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(), " translating interpreted frame ");
- function->PrintName(trace_scope_->file());
- PrintF(trace_scope_->file(), " => bytecode_offset=%d, height=%d\n",
- bytecode_offset.ToInt(), height_in_bytes);
+ base::SmartArrayPointer<char> name = shared->DebugName()->ToCString();
+ PrintF(trace_scope_->file(), "%s", name.get());
+ PrintF(trace_scope_->file(), " => bytecode_offset=%d, height=%d%s\n",
+ bytecode_offset, height_in_bytes,
+ goto_catch_handler ? " (throw)" : "");
+ }
+ if (goto_catch_handler) {
+ bytecode_offset = catch_handler_pc_offset_;
}
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by InterpreterFrameConstants.
- unsigned fixed_frame_size = ComputeInterpretedFixedSize(function);
+ unsigned fixed_frame_size = ComputeInterpretedFixedSize(shared);
unsigned input_frame_size = input_->GetFrameSize();
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
- FrameDescription* output_frame =
- new (output_frame_size) FrameDescription(output_frame_size, function);
+ int parameter_count = shared->internal_formal_parameter_count() + 1;
+ FrameDescription* output_frame = new (output_frame_size)
+ FrameDescription(output_frame_size, parameter_count);
output_frame->SetFrameType(StackFrame::INTERPRETED);
bool is_bottommost = (0 == frame_index);
@@ -1084,8 +1143,6 @@ void Deoptimizer::DoComputeInterpretedFrame(int frame_index) {
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
- int parameter_count =
- function->shared()->internal_formal_parameter_count() + 1;
unsigned output_offset = output_frame_size;
unsigned input_offset = input_frame_size;
for (int i = 0; i < parameter_count; ++i) {
@@ -1159,14 +1216,27 @@ void Deoptimizer::DoComputeInterpretedFrame(int frame_index) {
Register context_reg = InterpretedFrame::context_register();
output_offset -= kPointerSize;
input_offset -= kPointerSize;
+
+ // When deoptimizing into a catch block, we need to take the context
+ // from a register that was specified in the handler table.
+ TranslatedFrame::iterator context_pos = value_iterator;
+ int context_input_index = input_index;
+ if (goto_catch_handler) {
+ // Skip to the translated value of the register specified
+ // in the handler table.
+ for (int i = 0; i < catch_handler_data_ + 1; ++i) {
+ context_pos++;
+ context_input_index++;
+ }
+ }
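Editorial note: the interpreted analogue of the full-codegen case above; here the handler table's data field names an interpreter register rather than a stack height:

    // context_pos starts at the regular context slot; advancing
    // catch_handler_data_ + 1 steps reaches the translated value of the
    // register recorded in the handler table, which holds the try-block
    // context.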
// Read the context from the translations.
- Object* context = value_iterator->GetRawValue();
+ Object* context = context_pos->GetRawValue();
// The context should not be a placeholder for a materialized object.
CHECK(context != isolate_->heap()->arguments_marker());
value = reinterpret_cast<intptr_t>(context);
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
- WriteValueToOutput(context, input_index, frame_index, output_offset,
+ WriteValueToOutput(context, context_input_index, frame_index, output_offset,
"context ");
value_iterator++;
input_index++;
@@ -1180,45 +1250,64 @@ void Deoptimizer::DoComputeInterpretedFrame(int frame_index) {
DCHECK(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
- // TODO(rmcilroy): Deal with new.target correctly - currently just set it to
+  // The new.target slot is only used during function activation, which is
+  // before the first deopt point, so it should never be needed. Just set it to
// undefined.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
Object* new_target = isolate_->heap()->undefined_value();
WriteValueToOutput(new_target, 0, frame_index, output_offset, "new_target ");
+ // Set the bytecode array pointer.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ Object* bytecode_array = shared->bytecode_array();
+ WriteValueToOutput(bytecode_array, 0, frame_index, output_offset,
+ "bytecode array ");
+
// The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
int raw_bytecode_offset =
- BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset.ToInt();
+ BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset;
Smi* smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
WriteValueToOutput(smi_bytecode_offset, 0, frame_index, output_offset,
"bytecode offset ");
// Translate the rest of the interpreter registers in the frame.
- for (unsigned i = 0; i < height; ++i) {
-  for (unsigned i = 0; i < height; ++i) {
+  for (unsigned i = 0; i < height - 1; ++i) {
output_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
- CHECK_EQ(0u, output_offset);
- // Set the accumulator register.
- output_frame->SetRegister(
- kInterpreterAccumulatorRegister.code(),
- reinterpret_cast<intptr_t>(value_iterator->GetRawValue()));
- value_iterator++;
+ // Put the accumulator on the stack. It will be popped by the
+ // InterpreterNotifyDeopt builtin (possibly after materialization).
+ output_offset -= kPointerSize;
+ if (goto_catch_handler) {
+ // If we are lazy deopting to a catch handler, we set the accumulator to
+ // the exception (which lives in the result register).
+ intptr_t accumulator_value =
+ input_->GetRegister(FullCodeGenerator::result_register().code());
+ WriteValueToOutput(reinterpret_cast<Object*>(accumulator_value), 0,
+ frame_index, output_offset, "accumulator ");
+ value_iterator++;
+ } else {
+ WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
+ output_offset);
+ }
+ CHECK_EQ(0u, output_offset);
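Editorial note on the accumulator protocol change:

    // Previously the accumulator value was restored straight into
    // kInterpreterAccumulatorRegister on the output frame. It now occupies
    // the last stack slot instead, so the deopt notification path can pop it
    // back into the accumulator after any materialized objects have been
    // written, per the comment above.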
Builtins* builtins = isolate_->builtins();
- Code* trampoline = builtins->builtin(Builtins::kInterpreterEntryTrampoline);
- output_frame->SetPc(reinterpret_cast<intptr_t>(trampoline->entry()));
+ Code* dispatch_builtin =
+ builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(dispatch_builtin->entry()));
output_frame->SetState(0);
// Update constant pool.
if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
- reinterpret_cast<intptr_t>(trampoline->constant_pool());
+ reinterpret_cast<intptr_t>(dispatch_builtin->constant_pool());
output_frame->SetConstantPool(constant_pool_value);
if (is_topmost) {
Register constant_pool_reg =
@@ -1266,8 +1355,9 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(int frame_index) {
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
+ int parameter_count = height;
+ FrameDescription* output_frame = new (output_frame_size)
+ FrameDescription(output_frame_size, parameter_count);
output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
// Arguments adaptor can not be topmost or bottommost.
@@ -1282,7 +1372,6 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(int frame_index) {
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
- int parameter_count = height;
unsigned output_offset = output_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
@@ -1362,7 +1451,7 @@ void Deoptimizer::DoComputeConstructStubFrame(int frame_index) {
Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
unsigned height = translated_frame->height();
unsigned height_in_bytes = height * kPointerSize;
- JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
+ // Skip function.
value_iterator++;
input_index++;
if (trace_scope_ != NULL) {
@@ -1375,7 +1464,7 @@ void Deoptimizer::DoComputeConstructStubFrame(int frame_index) {
// Allocate and store the output frame description.
FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
+ new (output_frame_size) FrameDescription(output_frame_size);
output_frame->SetFrameType(StackFrame::CONSTRUCT);
// Construct stub can not be topmost or bottommost.
@@ -1488,7 +1577,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(int frame_index,
TranslatedFrame::iterator value_iterator = translated_frame->begin();
int input_index = 0;
- JSFunction* accessor = JSFunction::cast(value_iterator->GetRawValue());
+ // Skip accessor.
value_iterator++;
input_index++;
// The receiver (and the implicit return value, if any) are expected in
@@ -1515,7 +1604,7 @@ void Deoptimizer::DoComputeAccessorStubFrame(int frame_index,
// Allocate and store the output frame description.
FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, accessor);
+ new (output_frame_size) FrameDescription(output_frame_size);
output_frame->SetFrameType(StackFrame::INTERNAL);
// A frame for an accessor stub can not be the topmost or bottommost one.
@@ -1657,7 +1746,9 @@ void Deoptimizer::DoComputeCompiledStubFrame(int frame_index) {
// object to the stub failure handler.
int param_count = descriptor.GetRegisterParameterCount();
int stack_param_count = descriptor.GetStackParameterCount();
- CHECK_EQ(translated_frame->height(), param_count);
+ // The translated frame contains all of the register parameters
+ // plus the context.
+ CHECK_EQ(translated_frame->height(), param_count + 1);
CHECK_GE(param_count, 0);
int height_in_bytes = kPointerSize * (param_count + stack_param_count) +
@@ -1674,7 +1765,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(int frame_index) {
// The stub failure trampoline is a single frame.
FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, NULL);
+ new (output_frame_size) FrameDescription(output_frame_size);
output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
CHECK_EQ(frame_index, 0);
output_[frame_index] = output_frame;
@@ -1716,15 +1807,10 @@ void Deoptimizer::DoComputeCompiledStubFrame(int frame_index) {
"caller's constant_pool\n");
}
- // The context can be gotten from the input frame.
- Register context_reg = StubFailureTrampolineFrame::context_register();
- input_frame_offset -= kPointerSize;
- value = input_->GetFrameSlot(input_frame_offset);
- output_frame->SetRegister(context_reg.code(), value);
+ // Remember where the context will need to be written back from the deopt
+ // translation.
output_frame_offset -= kPointerSize;
- output_frame->SetFrameSlot(output_frame_offset, value);
- CHECK(reinterpret_cast<Object*>(value)->IsContext());
- DebugPrintOutputSlot(value, frame_index, output_frame_offset, "context\n");
+ unsigned context_frame_offset = output_frame_offset;
// A marker value is used in place of the function.
output_frame_offset -= kPointerSize;
@@ -1782,6 +1868,15 @@ void Deoptimizer::DoComputeCompiledStubFrame(int frame_index) {
}
}
+ Object* maybe_context = value_iterator->GetRawValue();
+ CHECK(maybe_context->IsContext());
+ Register context_reg = StubFailureTrampolineFrame::context_register();
+ value = reinterpret_cast<intptr_t>(maybe_context);
+ output_frame->SetRegister(context_reg.code(), value);
+ output_frame->SetFrameSlot(context_frame_offset, value);
+ DebugPrintOutputSlot(value, frame_index, context_frame_offset, "context\n");
+ ++value_iterator;
+
// Copy constant stack parameters to the failure frame. If the number of stack
// parameters is not known in the descriptor, the arguments object is the way
// to access them.
@@ -1875,55 +1970,6 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
}
-void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
- int frame_index, int parameter_count, int expression_count,
- DeoptimizedFrameInfo* info) {
- CHECK_EQ(DEBUGGER, bailout_type_);
-
- translated_state_.Prepare(false, nullptr);
-
- TranslatedFrame* frame = &(translated_state_.frames()[frame_index]);
- CHECK(frame->kind() == TranslatedFrame::kFunction);
- int frame_arg_count = frame->shared_info()->internal_formal_parameter_count();
-
- // The height is #expressions + 1 for context.
- CHECK_EQ(expression_count + 1, frame->height());
- TranslatedFrame* argument_frame = frame;
- if (frame_index > 0) {
- TranslatedFrame* previous_frame =
- &(translated_state_.frames()[frame_index - 1]);
- if (previous_frame->kind() == TranslatedFrame::kArgumentsAdaptor) {
- argument_frame = previous_frame;
- CHECK_EQ(parameter_count, argument_frame->height() - 1);
- } else {
- CHECK_EQ(frame_arg_count, parameter_count);
- }
- } else {
- CHECK_EQ(frame_arg_count, parameter_count);
- }
-
- TranslatedFrame::iterator arg_iter = argument_frame->begin();
- arg_iter++; // Skip the function.
- arg_iter++; // Skip the receiver.
- for (int i = 0; i < parameter_count; i++, arg_iter++) {
- if (!arg_iter->IsMaterializedObject()) {
- info->SetParameter(i, *(arg_iter->GetValue()));
- }
- }
-
- TranslatedFrame::iterator iter = frame->begin();
- // Skip the function, receiver, context and arguments.
- for (int i = 0; i < frame_arg_count + 3; i++, iter++) {
- }
-
- for (int i = 0; i < expression_count; i++, iter++) {
- if (!iter->IsMaterializedObject()) {
- info->SetExpression(i, *(iter->GetValue()));
- }
- }
-}
-
-
void Deoptimizer::WriteTranslatedValueToOutput(
TranslatedFrame::iterator* iterator, int* input_index, int frame_index,
unsigned output_offset, const char* debug_hint_string,
@@ -1980,7 +2026,12 @@ void Deoptimizer::DebugPrintOutputSlot(intptr_t value, int frame_index,
unsigned Deoptimizer::ComputeInputFrameSize() const {
- unsigned fixed_size = ComputeJavascriptFixedSize(function_);
+ unsigned fixed_size = StandardFrameConstants::kFixedFrameSize;
+ if (!function_->IsSmi()) {
+ fixed_size += ComputeIncomingArgumentSize(function_->shared());
+ } else {
+ CHECK_EQ(Smi::cast(function_), Smi::FromInt(StackFrame::STUB));
+ }
// The fp-to-sp delta already takes the context, constant pool pointer and the
// function into account so we have to avoid double counting them.
unsigned result = fixed_size + fp_to_sp_delta_ -
@@ -1989,39 +2040,33 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
unsigned stack_slots = compiled_code_->stack_slots();
unsigned outgoing_size =
ComputeOutgoingArgumentSize(compiled_code_, bailout_id_);
- CHECK(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
+ CHECK(result ==
+ fixed_size + (stack_slots * kPointerSize) -
+ StandardFrameConstants::kFixedFrameSize + outgoing_size);
}
return result;
}
-
-unsigned Deoptimizer::ComputeJavascriptFixedSize(JSFunction* function) const {
+// static
+unsigned Deoptimizer::ComputeJavascriptFixedSize(SharedFunctionInfo* shared) {
// The fixed part of the frame consists of the return address, frame
// pointer, function, context, and all the incoming arguments.
- return ComputeIncomingArgumentSize(function) +
+ return ComputeIncomingArgumentSize(shared) +
StandardFrameConstants::kFixedFrameSize;
}
-
-unsigned Deoptimizer::ComputeInterpretedFixedSize(JSFunction* function) const {
+// static
+unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo* shared) {
// The fixed part of the frame consists of the return address, frame
// pointer, function, context, new.target, bytecode offset and all the
// incoming arguments.
- return ComputeIncomingArgumentSize(function) +
+ return ComputeIncomingArgumentSize(shared) +
InterpreterFrameConstants::kFixedFrameSize;
}
-
-unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
- // The incoming arguments are the values for the formal parameters and
- // the receiver. Every slot contains a pointer.
- if (function->IsSmi()) {
- CHECK_EQ(Smi::cast(function), Smi::FromInt(StackFrame::STUB));
- return 0;
- }
- unsigned arguments =
- function->shared()->internal_formal_parameter_count() + 1;
- return arguments * kPointerSize;
+// static
+unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo* shared) {
+ return (shared->internal_formal_parameter_count() + 1) * kPointerSize;
}
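
A minimal standalone sketch (not part of the patch) of the argument-size arithmetic above, assuming a 64-bit build where kPointerSize is 8; the + 1 accounts for the receiver slot:

#include <cassert>

int main() {
  const unsigned kPointerSize = 8;  // assumed 64-bit build
  unsigned internal_formal_parameter_count = 2;
  // ComputeIncomingArgumentSize: formal parameters plus the receiver,
  // one pointer-sized slot each.
  unsigned incoming = (internal_formal_parameter_count + 1) * kPointerSize;
  assert(incoming == 24);
  return 0;
}
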
@@ -2079,11 +2124,9 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
data->deopt_entry_code_entries_[type] = entry_count;
}
-
-FrameDescription::FrameDescription(uint32_t frame_size,
- JSFunction* function)
+FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
: frame_size_(frame_size),
- function_(function),
+ parameter_count_(parameter_count),
top_(kZapUint32),
pc_(kZapUint32),
fp_(kZapUint32),
@@ -2107,10 +2150,10 @@ FrameDescription::FrameDescription(uint32_t frame_size,
int FrameDescription::ComputeFixedSize() {
if (type_ == StackFrame::INTERPRETED) {
return InterpreterFrameConstants::kFixedFrameSize +
- (ComputeParametersCount() + 1) * kPointerSize;
+ parameter_count() * kPointerSize;
} else {
return StandardFrameConstants::kFixedFrameSize +
- (ComputeParametersCount() + 1) * kPointerSize;
+ parameter_count() * kPointerSize;
}
}
@@ -2123,54 +2166,13 @@ unsigned FrameDescription::GetOffsetFromSlotIndex(int slot_index) {
return base - ((slot_index + 1) * kPointerSize);
} else {
// Incoming parameter.
- int arg_size = (ComputeParametersCount() + 1) * kPointerSize;
+ int arg_size = parameter_count() * kPointerSize;
unsigned base = GetFrameSize() - arg_size;
return base - ((slot_index + 1) * kPointerSize);
}
}
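
A standalone sketch of the incoming-parameter branch above, with hypothetical numbers (64-bit build, a 112-byte frame, three parameter slots with the receiver already included in parameter_count, as in the new scheme): the parameters occupy the top arg_size bytes of the frame, and more-negative slot indexes map to higher offsets.

#include <cassert>

unsigned OffsetForParameterSlot(unsigned frame_size, int parameter_count,
                                int slot_index) {
  const int kPointerSize = 8;             // assumed 64-bit build
  int arg_size = parameter_count * kPointerSize;
  unsigned base = frame_size - arg_size;  // start of the parameter area
  return base - ((slot_index + 1) * kPointerSize);
}

int main() {
  // 112-byte frame, 3 parameter slots: the parameter area is [88, 112).
  assert(OffsetForParameterSlot(112, 3, -1) == 88);
  assert(OffsetForParameterSlot(112, 3, -3) == 104);
  return 0;
}
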
-int FrameDescription::ComputeParametersCount() {
- switch (type_) {
- case StackFrame::JAVA_SCRIPT:
- return function_->shared()->internal_formal_parameter_count();
- case StackFrame::ARGUMENTS_ADAPTOR: {
- // Last slot contains number of incomming arguments as a smi.
- // Can't use GetExpression(0) because it would cause infinite recursion.
- return reinterpret_cast<Smi*>(*GetFrameSlotPointer(0))->value();
- }
- case StackFrame::STUB:
- return -1; // Minus receiver.
- default:
- FATAL("Unexpected stack frame type");
- return 0;
- }
-}
-
-
-Object* FrameDescription::GetParameter(int index) {
- CHECK_GE(index, 0);
- CHECK_LT(index, ComputeParametersCount());
- // The slot indexes for incoming arguments are negative.
- unsigned offset = GetOffsetFromSlotIndex(index - ComputeParametersCount());
- return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
-}
-
-
-unsigned FrameDescription::GetExpressionCount() {
- CHECK_EQ(StackFrame::JAVA_SCRIPT, type_);
- unsigned size = GetFrameSize() - ComputeFixedSize();
- return size / kPointerSize;
-}
-
-
-Object* FrameDescription::GetExpression(int index) {
- DCHECK_EQ(StackFrame::JAVA_SCRIPT, type_);
- unsigned offset = GetOffsetFromSlotIndex(index);
- return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
-}
-
-
void TranslationBuffer::Add(int32_t value, Zone* zone) {
// This wouldn't handle kMinInt correctly if it ever encountered it.
DCHECK(value != kMinInt);
@@ -2359,14 +2361,13 @@ void Translation::StoreArgumentsObject(bool args_known,
void Translation::StoreJSFrameFunction() {
- buffer_->Add(JS_FRAME_FUNCTION, zone());
+ StoreStackSlot((StandardFrameConstants::kCallerPCOffset -
+ StandardFrameConstants::kMarkerOffset) /
+ kPointerSize);
}
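
The replacement encodes the frame function as a plain stack slot: in a standard JS frame the function occupies the marker slot, so its slot index is its pointer-sized distance from the caller PC. A standalone sketch with the frame-constant values chosen hypothetically for a 64-bit layout (caller PC one slot above fp, marker two slots below it):

#include <cassert>

int main() {
  const int kPointerSize = 8;                    // assumed 64-bit build
  const int kCallerPCOffset = 1 * kPointerSize;  // hypothetical layout value
  const int kMarkerOffset = -2 * kPointerSize;   // hypothetical layout value
  // StoreJSFrameFunction now just records this slot index.
  int slot = (kCallerPCOffset - kMarkerOffset) / kPointerSize;
  assert(slot == 3);
  return 0;
}
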
-
int Translation::NumberOfOperandsFor(Opcode opcode) {
switch (opcode) {
- case JS_FRAME_FUNCTION:
- return 0;
case GETTER_STUB_FRAME:
case SETTER_STUB_FRAME:
case DUPLICATED_OBJECT:
@@ -2493,60 +2494,111 @@ Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) {
return new_array;
}
+namespace {
-DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
- int frame_index,
- bool has_arguments_adaptor,
- bool has_construct_stub) {
- FrameDescription* output_frame = deoptimizer->output_[frame_index];
- function_ = output_frame->GetFunction();
- context_ = reinterpret_cast<Object*>(output_frame->GetContext());
- has_construct_stub_ = has_construct_stub;
- expression_count_ = output_frame->GetExpressionCount();
- expression_stack_ = new Object* [expression_count_];
- // Get the source position using the unoptimized code.
- Address pc = reinterpret_cast<Address>(output_frame->GetPc());
- Code* code = Code::cast(deoptimizer->isolate()->FindCodeObject(pc));
- source_position_ = code->SourcePosition(pc);
-
- for (int i = 0; i < expression_count_; i++) {
- Object* value = output_frame->GetExpression(i);
- // Replace materialization markers with the undefined value.
- if (value == deoptimizer->isolate()->heap()->arguments_marker()) {
- value = deoptimizer->isolate()->heap()->undefined_value();
+Handle<Object> GetValueForDebugger(TranslatedFrame::iterator it,
+ Isolate* isolate) {
+ if (it->GetRawValue() == isolate->heap()->arguments_marker()) {
+ if (!it->IsMaterializableByDebugger()) {
+ return isolate->factory()->undefined_value();
}
- SetExpression(i, value);
}
+ return it->GetValue();
+}
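
A standalone model of the helper above: a value still carrying the arguments-marker sentinel is surfaced to the debugger only if it can be materialized (per IsMaterializableByDebugger, doubles only at the moment); otherwise it is replaced by undefined. Strings stand in for the heap values here.

#include <cassert>
#include <string>

enum Kind { kTagged, kDouble, kCapturedObject };

std::string GetValueForDebuggerModel(bool is_arguments_marker, Kind kind) {
  if (is_arguments_marker) {
    if (kind != kDouble) return "undefined";  // not materializable by debugger
    return "materialized";                    // GetValue() forces materialization
  }
  return "raw value";
}

int main() {
  assert(GetValueForDebuggerModel(true, kCapturedObject) == "undefined");
  assert(GetValueForDebuggerModel(true, kDouble) == "materialized");
  assert(GetValueForDebuggerModel(false, kTagged) == "raw value");
  return 0;
}
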
- if (has_arguments_adaptor) {
- output_frame = deoptimizer->output_[frame_index - 1];
- CHECK_EQ(output_frame->GetFrameType(), StackFrame::ARGUMENTS_ADAPTOR);
+int ComputeSourcePosition(Handle<SharedFunctionInfo> shared,
+ BailoutId node_id) {
+ if (shared->HasBytecodeArray()) {
+ BytecodeArray* bytecodes = shared->bytecode_array();
+ return bytecodes->SourcePosition(node_id.ToInt());
+ } else {
+ Code* non_optimized_code = shared->code();
+ FixedArray* raw_data = non_optimized_code->deoptimization_data();
+ DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+ unsigned pc_and_state = Deoptimizer::GetOutputInfo(data, node_id, *shared);
+ unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+ return non_optimized_code->SourcePosition(pc_offset);
}
+}
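
A standalone sketch of the two lookup paths above: with Ignition bytecode the bailout id is itself a bytecode offset, while full-codegen code first maps the id to a PC offset through the deoptimization output data. Both tables are modeled here as plain maps with illustrative entries.

#include <cassert>
#include <map>

int ComputeSourcePositionModel(bool has_bytecode, int node_id,
                               const std::map<int, int>& id_to_pc_offset,
                               const std::map<int, int>& offset_to_position) {
  int offset = has_bytecode ? node_id : id_to_pc_offset.at(node_id);
  return offset_to_position.at(offset);
}

int main() {
  std::map<int, int> id_to_pc = {{7, 40}};
  std::map<int, int> positions = {{7, 123}, {40, 123}};
  assert(ComputeSourcePositionModel(true, 7, id_to_pc, positions) == 123);
  assert(ComputeSourcePositionModel(false, 7, id_to_pc, positions) == 123);
  return 0;
}
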
- parameters_count_ = output_frame->ComputeParametersCount();
- parameters_ = new Object* [parameters_count_];
- for (int i = 0; i < parameters_count_; i++) {
- Object* value = output_frame->GetParameter(i);
- // Replace materialization markers with the undefined value.
- if (value == deoptimizer->isolate()->heap()->arguments_marker()) {
- value = deoptimizer->isolate()->heap()->undefined_value();
- }
- SetParameter(i, value);
+} // namespace
+
+DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
+ TranslatedState::iterator frame_it,
+ Isolate* isolate) {
+ // If the previous frame is an adaptor frame, we will take the parameters
+ // from there.
+ TranslatedState::iterator parameter_frame = frame_it;
+ if (parameter_frame != state->begin()) {
+ parameter_frame--;
}
-}
+ int parameter_count;
+ if (parameter_frame->kind() == TranslatedFrame::kArgumentsAdaptor) {
+ parameter_count = parameter_frame->height() - 1; // Ignore the receiver.
+ } else {
+ parameter_frame = frame_it;
+ parameter_count =
+ frame_it->shared_info()->internal_formal_parameter_count();
+ }
+ TranslatedFrame::iterator parameter_it = parameter_frame->begin();
+ parameter_it++; // Skip the function.
+ parameter_it++; // Skip the receiver.
+ // Figure out whether there is a construct stub frame on top of
+ // the parameter frame.
+ has_construct_stub_ =
+ parameter_frame != state->begin() &&
+ (parameter_frame - 1)->kind() == TranslatedFrame::kConstructStub;
-DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
- delete[] expression_stack_;
- delete[] parameters_;
-}
+ source_position_ =
+ ComputeSourcePosition(frame_it->shared_info(), frame_it->node_id());
+
+ TranslatedFrame::iterator value_it = frame_it->begin();
+ // Get the function. Note that this might materialize the function.
+ // In case the debugger mutates this value, we should deoptimize
+ // the function and remember the value in the materialized value store.
+ function_ = Handle<JSFunction>::cast(value_it->GetValue());
+
+ parameters_.resize(static_cast<size_t>(parameter_count));
+ for (int i = 0; i < parameter_count; i++) {
+ Handle<Object> parameter = GetValueForDebugger(parameter_it, isolate);
+ SetParameter(i, parameter);
+ parameter_it++;
+ }
+
+ // Skip the function, the receiver and the arguments.
+ int skip_count =
+ frame_it->shared_info()->internal_formal_parameter_count() + 2;
+ TranslatedFrame::iterator stack_it = frame_it->begin();
+ for (int i = 0; i < skip_count; i++) {
+ stack_it++;
+ }
+
+ // Get the context.
+ context_ = GetValueForDebugger(stack_it, isolate);
+ stack_it++;
+ // Get the expression stack.
+ int stack_height = frame_it->height();
+ if (frame_it->kind() == TranslatedFrame::kFunction ||
+ frame_it->kind() == TranslatedFrame::kInterpretedFunction) {
+ // For full-code frames, we should not count the context.
+ // For interpreter frames, we should not count the accumulator.
+ // TODO(jarin): Clean up the indexing in translated frames.
+ stack_height--;
+ }
+ expression_stack_.resize(static_cast<size_t>(stack_height));
+ for (int i = 0; i < stack_height; i++) {
+ Handle<Object> expression = GetValueForDebugger(stack_it, isolate);
+ SetExpression(i, expression);
+ stack_it++;
+ }
-void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
- v->VisitPointer(bit_cast<Object**>(&function_));
- v->VisitPointer(&context_);
- v->VisitPointers(parameters_, parameters_ + parameters_count_);
- v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
+  // For interpreter frames, skip the accumulator.
+ if (frame_it->kind() == TranslatedFrame::kInterpretedFunction) {
+ stack_it++;
+ }
+ CHECK(stack_it == frame_it->end());
}
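
A standalone sketch of the value layout the constructor walks, assuming the order shown above: function, receiver, the formal parameters, the context, then the expression stack, with a trailing accumulator slot for interpreted frames.

#include <cassert>
#include <string>
#include <vector>

int main() {
  bool is_interpreted = true;
  int formal_parameter_count = 2;
  std::vector<std::string> frame_values = {"function", "receiver", "arg0",
                                           "arg1",     "context",  "expr0",
                                           "accumulator"};
  // Skip the function, the receiver and the arguments to reach the context.
  int skip_count = formal_parameter_count + 2;
  assert(frame_values[skip_count] == "context");
  // For interpreted frames the trailing accumulator is not an expression.
  int stack_height = static_cast<int>(frame_values.size()) - skip_count - 1 -
                     (is_interpreted ? 1 : 0);
  assert(stack_height == 1 && frame_values[skip_count + 1] == "expr0");
  return 0;
}
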
@@ -2826,6 +2878,10 @@ bool TranslatedValue::IsMaterializedObject() const {
}
}
+bool TranslatedValue::IsMaterializableByDebugger() const {
+ // At the moment, we only allow materialization of doubles.
+ return (kind() == kDouble);
+}
int TranslatedValue::GetChildrenCount() const {
if (kind() == kCapturedObject || kind() == kArgumentsObject) {
@@ -2906,8 +2962,8 @@ int TranslatedFrame::GetValueCount() {
case kInterpretedFunction: {
int parameter_count =
raw_shared_info_->internal_formal_parameter_count() + 1;
- // + 3 for function, context and accumulator.
- return height_ + parameter_count + 3;
+ // + 2 for function and context.
+ return height_ + parameter_count + 2;
}
case kGetter:
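
A worked example of the interpreted-frame count above (numbers hypothetical): with 2 formal parameters and a frame height of 5, the frame contributes 5 + (2 + 1) + 2 = 10 translated values.

#include <cassert>

int main() {
  int height = 5;                                    // hypothetical height
  int formal_parameter_count = 2;
  int parameter_count = formal_parameter_count + 1;  // + 1 for the receiver
  // + 2 for function and context; the accumulator is no longer counted here.
  assert(height + parameter_count + 2 == 10);
  return 0;
}
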
@@ -3058,7 +3114,6 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
case Translation::BOOL_STACK_SLOT:
case Translation::DOUBLE_STACK_SLOT:
case Translation::LITERAL:
- case Translation::JS_FRAME_FUNCTION:
break;
}
FATAL("We should never get here - unexpected deopt info.");
@@ -3261,16 +3316,6 @@ TranslatedValue TranslatedState::CreateNextTranslatedValue(
return TranslatedValue::NewTagged(this, value);
}
-
- case Translation::JS_FRAME_FUNCTION: {
- int slot_offset = JavaScriptFrameConstants::kFunctionOffset;
- intptr_t value = *(reinterpret_cast<intptr_t*>(fp + slot_offset));
- if (trace_file != nullptr) {
- PrintF(trace_file, "0x%08" V8PRIxPTR " ; (frame function) ", value);
- reinterpret_cast<Object*>(value)->ShortPrint(trace_file);
- }
- return TranslatedValue::NewTagged(this, reinterpret_cast<Object*>(value));
- }
}
FATAL("We should never get here - unexpected deopt info.");
@@ -3385,7 +3430,7 @@ void TranslatedState::Prepare(bool has_adapted_arguments,
Handle<Object> TranslatedState::MaterializeAt(int frame_index,
int* value_index) {
TranslatedFrame* frame = &(frames_[frame_index]);
- DCHECK(static_cast<size_t>(*value_index) < frame->values_.size());
+ CHECK(static_cast<size_t>(*value_index) < frame->values_.size());
TranslatedValue* slot = &(frame->values_[*value_index]);
(*value_index)++;
@@ -3541,16 +3586,16 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
TranslatedState::ObjectPosition pos = object_positions_[object_index];
  // Make sure the duplicate is referring to a previous object.
- DCHECK(pos.frame_index_ < frame_index ||
- (pos.frame_index_ == frame_index &&
- pos.value_index_ < *value_index - 1));
+ CHECK(pos.frame_index_ < frame_index ||
+ (pos.frame_index_ == frame_index &&
+ pos.value_index_ < *value_index - 1));
Handle<Object> object =
frames_[pos.frame_index_].values_[pos.value_index_].GetValue();
// The object should have a (non-sentinel) value.
- DCHECK(!object.is_null() &&
- !object.is_identical_to(isolate_->factory()->arguments_marker()));
+ CHECK(!object.is_null() &&
+ !object.is_identical_to(isolate_->factory()->arguments_marker()));
slot->value_ = object;
return object;
@@ -3583,7 +3628,7 @@ bool TranslatedState::GetAdaptedArguments(Handle<JSObject>* result,
// recursive functions!)
Handle<JSFunction> function =
Handle<JSFunction>::cast(frames_[frame_index].front().GetValue());
- *result = Handle<JSObject>::cast(Accessors::FunctionGetArguments(function));
+ *result = Accessors::FunctionGetArguments(function);
return true;
} else {
TranslatedFrame* previous_frame = &(frames_[frame_index]);
@@ -3615,7 +3660,8 @@ bool TranslatedState::GetAdaptedArguments(Handle<JSObject>* result,
TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
int jsframe_index, int* args_count) {
for (size_t i = 0; i < frames_.size(); i++) {
- if (frames_[i].kind() == TranslatedFrame::kFunction) {
+ if (frames_[i].kind() == TranslatedFrame::kFunction ||
+ frames_[i].kind() == TranslatedFrame::kInterpretedFunction) {
if (jsframe_index > 0) {
jsframe_index--;
} else {
@@ -3654,7 +3700,7 @@ void TranslatedState::StoreMaterializedValuesAndDeopt() {
new_store = true;
}
- DCHECK_EQ(length, previously_materialized_objects->length());
+ CHECK_EQ(length, previously_materialized_objects->length());
bool value_changed = false;
for (int i = 0; i < length; i++) {
@@ -3662,7 +3708,7 @@ void TranslatedState::StoreMaterializedValuesAndDeopt() {
TranslatedValue* value_info =
&(frames_[pos.frame_index_].values_[pos.value_index_]);
- DCHECK(value_info->IsMaterializedObject());
+ CHECK(value_info->IsMaterializedObject());
Handle<Object> value(value_info->GetRawValue(), isolate_);
@@ -3671,14 +3717,15 @@ void TranslatedState::StoreMaterializedValuesAndDeopt() {
previously_materialized_objects->set(i, *value);
value_changed = true;
} else {
- DCHECK(previously_materialized_objects->get(i) == *value);
+ CHECK(previously_materialized_objects->get(i) == *value);
}
}
}
if (new_store && value_changed) {
materialized_store->Set(stack_frame_pointer_,
previously_materialized_objects);
- DCHECK_EQ(TranslatedFrame::kFunction, frames_[0].kind());
+ CHECK(frames_[0].kind() == TranslatedFrame::kFunction ||
+ frames_[0].kind() == TranslatedFrame::kInterpretedFunction);
Object* const function = frames_[0].front().GetRawValue();
Deoptimizer::DeoptimizeFunction(JSFunction::cast(function));
}
@@ -3697,7 +3744,7 @@ void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
Handle<Object> marker = isolate_->factory()->arguments_marker();
int length = static_cast<int>(object_positions_.size());
- DCHECK_EQ(length, previously_materialized_objects->length());
+ CHECK_EQ(length, previously_materialized_objects->length());
for (int i = 0; i < length; i++) {
    // For previously materialized objects, inject their value into the
@@ -3706,7 +3753,7 @@ void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
TranslatedState::ObjectPosition pos = object_positions_[i];
TranslatedValue* value_info =
&(frames_[pos.frame_index_].values_[pos.value_index_]);
- DCHECK(value_info->IsMaterializedObject());
+ CHECK(value_info->IsMaterializedObject());
value_info->value_ =
Handle<Object>(previously_materialized_objects->get(i), isolate_);
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 10685b6193..0259f01ccc 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -27,6 +27,7 @@ class TranslatedValue {
Handle<Object> GetValue();
bool IsMaterializedObject() const;
+ bool IsMaterializableByDebugger() const;
private:
friend class TranslatedState;
@@ -128,6 +129,11 @@ class TranslatedFrame {
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
int height() const { return height_; }
+ SharedFunctionInfo* raw_shared_info() const {
+ CHECK_NOT_NULL(raw_shared_info_);
+ return raw_shared_info_;
+ }
+
class iterator {
public:
iterator& operator++() {
@@ -503,10 +509,6 @@ class Deoptimizer : public Malloced {
void MaterializeHeapObjects(JavaScriptFrameIterator* it);
- void MaterializeHeapNumbersForDebuggerInspectableFrame(
- int frame_index, int parameter_count, int expression_count,
- DeoptimizedFrameInfo* info);
-
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
@@ -592,8 +594,8 @@ class Deoptimizer : public Malloced {
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
- void DoComputeJSFrame(int frame_index);
- void DoComputeInterpretedFrame(int frame_index);
+ void DoComputeJSFrame(int frame_index, bool goto_catch_handler);
+ void DoComputeInterpretedFrame(int frame_index, bool goto_catch_handler);
void DoComputeArgumentsAdaptorFrame(int frame_index);
void DoComputeConstructStubFrame(int frame_index);
void DoComputeAccessorStubFrame(int frame_index, bool is_setter_stub_frame);
@@ -611,10 +613,10 @@ class Deoptimizer : public Malloced {
const char* debug_hint_string);
unsigned ComputeInputFrameSize() const;
- unsigned ComputeJavascriptFixedSize(JSFunction* function) const;
- unsigned ComputeInterpretedFixedSize(JSFunction* function) const;
+ static unsigned ComputeJavascriptFixedSize(SharedFunctionInfo* shared);
+ static unsigned ComputeInterpretedFixedSize(SharedFunctionInfo* shared);
- unsigned ComputeIncomingArgumentSize(JSFunction* function) const;
+ static unsigned ComputeIncomingArgumentSize(SharedFunctionInfo* shared);
static unsigned ComputeOutgoingArgumentSize(Code* code, unsigned bailout_id);
Object* ComputeLiteral(int index) const;
@@ -640,11 +642,6 @@ class Deoptimizer : public Malloced {
// searching all code objects).
Code* FindDeoptimizingCode(Address addr);
-  // Fill the input from a JavaScript frame. This is used when
- // the debugger needs to inspect an optimized frame. For normal
- // deoptimizations the input frame is filled in generated code.
- void FillInputFrame(Address tos, JavaScriptFrame* frame);
-
// Fill the given output frame's registers to contain the failure handler
// address and the number of parameters for a stub failure trampoline.
void SetPlatformCompiledStubRegisters(FrameDescription* output_frame,
@@ -656,7 +653,7 @@ class Deoptimizer : public Malloced {
// Determines whether the input frame contains alignment padding by looking
// at the dynamic alignment state slot inside the frame.
- bool HasAlignmentPadding(JSFunction* function);
+ bool HasAlignmentPadding(SharedFunctionInfo* shared);
Isolate* isolate_;
JSFunction* function_;
@@ -666,6 +663,9 @@ class Deoptimizer : public Malloced {
Address from_;
int fp_to_sp_delta_;
int has_alignment_padding_;
+ bool deoptimizing_throw_;
+ int catch_handler_data_;
+ int catch_handler_pc_offset_;
// Input frame description.
FrameDescription* input_;
@@ -736,8 +736,7 @@ class RegisterValues {
class FrameDescription {
public:
- FrameDescription(uint32_t frame_size,
- JSFunction* function);
+ explicit FrameDescription(uint32_t frame_size, int parameter_count = 0);
void* operator new(size_t size, uint32_t frame_size) {
// Subtracts kPointerSize, as the member frame_content_ already supplies
@@ -758,8 +757,6 @@ class FrameDescription {
return static_cast<uint32_t>(frame_size_);
}
- JSFunction* GetFunction() const { return function_; }
-
unsigned GetOffsetFromSlotIndex(int slot_index);
intptr_t GetFrameSlot(unsigned offset) {
@@ -767,8 +764,7 @@ class FrameDescription {
}
Address GetFramePointerAddress() {
- int fp_offset = GetFrameSize() -
- (ComputeParametersCount() + 1) * kPointerSize -
+ int fp_offset = GetFrameSize() - parameter_count() * kPointerSize -
StandardFrameConstants::kCallerSPOffset;
return reinterpret_cast<Address>(GetFrameSlotPointer(fp_offset));
}
@@ -826,17 +822,8 @@ class FrameDescription {
StackFrame::Type GetFrameType() const { return type_; }
void SetFrameType(StackFrame::Type type) { type_ = type; }
- // Get the incoming arguments count.
- int ComputeParametersCount();
-
- // Get a parameter value for an unoptimized frame.
- Object* GetParameter(int index);
-
-  // Get the expression stack height for an unoptimized frame.
- unsigned GetExpressionCount();
-
- // Get the expression stack value for an unoptimized frame.
- Object* GetExpression(int index);
+ // Argument count, including receiver.
+ int parameter_count() { return parameter_count_; }
static int registers_offset() {
return OFFSET_OF(FrameDescription, register_values_.registers_);
@@ -869,7 +856,7 @@ class FrameDescription {
// keep the variable-size array frame_content_ of type intptr_t at
// the end of the structure aligned.
uintptr_t frame_size_; // Number of bytes.
- JSFunction* function_;
+ int parameter_count_;
RegisterValues register_values_;
intptr_t top_;
intptr_t pc_;
@@ -902,15 +889,11 @@ class DeoptimizerData {
explicit DeoptimizerData(MemoryAllocator* allocator);
~DeoptimizerData();
- void Iterate(ObjectVisitor* v);
-
private:
MemoryAllocator* allocator_;
int deopt_entry_code_entries_[Deoptimizer::kBailoutTypesWithCodeEntry];
MemoryChunk* deopt_entry_code_[Deoptimizer::kBailoutTypesWithCodeEntry];
- DeoptimizedFrameInfo* deoptimized_frame_info_;
-
Deoptimizer* current_;
friend class Deoptimizer;
@@ -953,7 +936,6 @@ class TranslationIterator BASE_EMBEDDED {
int index_;
};
-
#define TRANSLATION_OPCODE_LIST(V) \
V(BEGIN) \
V(JS_FRAME) \
@@ -976,9 +958,7 @@ class TranslationIterator BASE_EMBEDDED {
V(UINT32_STACK_SLOT) \
V(BOOL_STACK_SLOT) \
V(DOUBLE_STACK_SLOT) \
- V(LITERAL) \
- V(JS_FRAME_FUNCTION)
-
+ V(LITERAL)
class Translation BASE_EMBEDDED {
public:
@@ -1071,28 +1051,20 @@ class MaterializedObjectStore {
// formal parameter count.
class DeoptimizedFrameInfo : public Malloced {
public:
- DeoptimizedFrameInfo(Deoptimizer* deoptimizer,
- int frame_index,
- bool has_arguments_adaptor,
- bool has_construct_stub);
- virtual ~DeoptimizedFrameInfo();
-
- // GC support.
- void Iterate(ObjectVisitor* v);
+ DeoptimizedFrameInfo(TranslatedState* state,
+ TranslatedState::iterator frame_it, Isolate* isolate);
// Return the number of incoming arguments.
- int parameters_count() { return parameters_count_; }
+ int parameters_count() { return static_cast<int>(parameters_.size()); }
// Return the height of the expression stack.
- int expression_count() { return expression_count_; }
+ int expression_count() { return static_cast<int>(expression_stack_.size()); }
// Get the frame function.
- JSFunction* GetFunction() {
- return function_;
- }
+ Handle<JSFunction> GetFunction() { return function_; }
// Get the frame context.
- Object* GetContext() { return context_; }
+ Handle<Object> GetContext() { return context_; }
  // Check if this frame is preceded by a construct stub frame. The bottom-most
// inlined frame might still be called by an uninlined construct stub.
@@ -1101,13 +1073,13 @@ class DeoptimizedFrameInfo : public Malloced {
}
// Get an incoming argument.
- Object* GetParameter(int index) {
+ Handle<Object> GetParameter(int index) {
DCHECK(0 <= index && index < parameters_count());
return parameters_[index];
}
// Get an expression from the expression stack.
- Object* GetExpression(int index) {
+ Handle<Object> GetExpression(int index) {
DCHECK(0 <= index && index < expression_count());
return expression_stack_[index];
}
@@ -1118,24 +1090,22 @@ class DeoptimizedFrameInfo : public Malloced {
private:
// Set an incoming argument.
- void SetParameter(int index, Object* obj) {
+ void SetParameter(int index, Handle<Object> obj) {
DCHECK(0 <= index && index < parameters_count());
parameters_[index] = obj;
}
// Set an expression on the expression stack.
- void SetExpression(int index, Object* obj) {
+ void SetExpression(int index, Handle<Object> obj) {
DCHECK(0 <= index && index < expression_count());
expression_stack_[index] = obj;
}
- JSFunction* function_;
- Object* context_;
+ Handle<JSFunction> function_;
+ Handle<Object> context_;
bool has_construct_stub_;
- int parameters_count_;
- int expression_count_;
- Object** parameters_;
- Object** expression_stack_;
+ std::vector<Handle<Object> > parameters_;
+ std::vector<Handle<Object> > expression_stack_;
int source_position_;
friend class Deoptimizer;
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index 0d29c30472..7bb75c4c9c 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -37,7 +37,12 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
return kPointerSizeLog2;
+ case NO_ELEMENTS:
+ UNREACHABLE();
+ return 0;
}
UNREACHABLE();
return 0;
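
For the pointer-sized kinds above, including the two new string-wrapper kinds, the shift is kPointerSizeLog2 (3 on a 64-bit build, assumed here), i.e. 8-byte backing-store slots. A standalone model:

#include <cassert>

enum ElementsKindModel { FAST_STRING_WRAPPER, SLOW_STRING_WRAPPER, UINT8 };

int ShiftSizeModel(ElementsKindModel kind) {
  const int kPointerSizeLog2 = 3;  // assumed 64-bit build
  switch (kind) {
    case FAST_STRING_WRAPPER:
    case SLOW_STRING_WRAPPER:
      return kPointerSizeLog2;  // pointer-sized backing-store slots
    case UINT8:
      return 0;                 // 1-byte elements
  }
  return 0;
}

int main() {
  assert((1 << ShiftSizeModel(FAST_STRING_WRAPPER)) == 8);
  assert((1 << ShiftSizeModel(UINT8)) == 1);
  return 0;
}
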
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index 5f6cd62c46..3ebc9ad287 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -30,10 +30,16 @@ enum ElementsKind {
// The "slow" kind.
DICTIONARY_ELEMENTS,
+ // Elements kind of the "arguments" object (only in sloppy mode).
FAST_SLOPPY_ARGUMENTS_ELEMENTS,
SLOW_SLOPPY_ARGUMENTS_ELEMENTS,
- // Fixed typed arrays
+ // For string wrapper objects ("new String('...')"), the string's characters
+ // are overlaid onto a regular elements backing store.
+ FAST_STRING_WRAPPER_ELEMENTS,
+ SLOW_STRING_WRAPPER_ELEMENTS,
+
+ // Fixed typed arrays.
UINT8_ELEMENTS,
INT8_ELEMENTS,
UINT16_ELEMENTS,
@@ -44,7 +50,10 @@ enum ElementsKind {
FLOAT64_ELEMENTS,
UINT8_CLAMPED_ELEMENTS,
- // Derived constants from ElementsKind
+ // Sentinel ElementsKind for objects with no elements.
+ NO_ELEMENTS,
+
+ // Derived constants from ElementsKind.
FIRST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
LAST_ELEMENTS_KIND = UINT8_CLAMPED_ELEMENTS,
FIRST_FAST_ELEMENTS_KIND = FAST_SMI_ELEMENTS,
@@ -83,6 +92,10 @@ inline bool IsSloppyArgumentsElements(ElementsKind kind) {
kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
}
+inline bool IsStringWrapperElementsKind(ElementsKind kind) {
+ return kind == FAST_STRING_WRAPPER_ELEMENTS ||
+ kind == SLOW_STRING_WRAPPER_ELEMENTS;
+}
inline bool IsFixedTypedArrayElementsKind(ElementsKind kind) {
return kind >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND &&
@@ -104,7 +117,8 @@ inline bool IsFastElementsKind(ElementsKind kind) {
inline bool IsTransitionElementsKind(ElementsKind kind) {
return IsFastElementsKind(kind) || IsFixedTypedArrayElementsKind(kind) ||
- kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
+ kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS ||
+ kind == FAST_STRING_WRAPPER_ELEMENTS;
}
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 277749763a..9fd450a75a 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -40,7 +40,9 @@
// - SloppyArgumentsElementsAccessor
// - FastSloppyArgumentsElementsAccessor
// - SlowSloppyArgumentsElementsAccessor
-
+// - StringWrapperElementsAccessor
+// - FastStringWrapperElementsAccessor
+// - SlowStringWrapperElementsAccessor
namespace v8 {
namespace internal {
@@ -72,6 +74,10 @@ enum Where { AT_START, AT_END };
FixedArray) \
V(SlowSloppyArgumentsElementsAccessor, SLOW_SLOPPY_ARGUMENTS_ELEMENTS, \
FixedArray) \
+ V(FastStringWrapperElementsAccessor, FAST_STRING_WRAPPER_ELEMENTS, \
+ FixedArray) \
+ V(SlowStringWrapperElementsAccessor, SLOW_STRING_WRAPPER_ELEMENTS, \
+ FixedArray) \
V(FixedUint8ElementsAccessor, UINT8_ELEMENTS, FixedUint8Array) \
V(FixedInt8ElementsAccessor, INT8_ELEMENTS, FixedInt8Array) \
V(FixedUint16ElementsAccessor, UINT16_ELEMENTS, FixedUint16Array) \
@@ -83,7 +89,6 @@ enum Where { AT_START, AT_END };
V(FixedUint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, \
FixedUint8ClampedArray)
-
template<ElementsKind Kind> class ElementsKindTraits {
public:
typedef FixedArrayBase BackingStore;
@@ -134,11 +139,14 @@ void CopyObjectToObjectElements(FixedArrayBase* from_base,
if (copy_size == 0) return;
FixedArray* from = FixedArray::cast(from_base);
FixedArray* to = FixedArray::cast(to_base);
- DCHECK(IsFastSmiOrObjectElementsKind(from_kind));
+ DCHECK(IsFastSmiOrObjectElementsKind(from_kind) ||
+ from_kind == FAST_STRING_WRAPPER_ELEMENTS);
DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
WriteBarrierMode write_barrier_mode =
- (IsFastObjectElementsKind(from_kind) && IsFastObjectElementsKind(to_kind))
+ ((IsFastObjectElementsKind(from_kind) &&
+ IsFastObjectElementsKind(to_kind)) ||
+ from_kind == FAST_STRING_WRAPPER_ELEMENTS)
? UPDATE_WRITE_BARRIER
: SKIP_WRITE_BARRIER;
for (int i = 0; i < copy_size; i++) {
@@ -230,15 +238,16 @@ static void CopyDoubleToObjectElements(FixedArrayBase* from_base,
Handle<FixedDoubleArray> from(FixedDoubleArray::cast(from_base), isolate);
Handle<FixedArray> to(FixedArray::cast(to_base), isolate);
- // create an outer loop to not waste too much time on creating HandleScopes
- // on the other hand we might overflow a single handle scope depending on
- // the copy_size
+ // Use an outer loop to not waste too much time on creating HandleScopes.
+ // On the other hand we might overflow a single handle scope depending on
+ // the copy_size.
int offset = 0;
while (offset < copy_size) {
HandleScope scope(isolate);
offset += 100;
for (int i = offset - 100; i < offset && i < copy_size; ++i) {
- Handle<Object> value = FixedDoubleArray::get(from, i + from_start);
+ Handle<Object> value =
+ FixedDoubleArray::get(*from, i + from_start, isolate);
to->set(i + to_start, *value, UPDATE_WRITE_BARRIER);
}
}
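
A standalone sketch of the batching pattern above: the copy is processed in chunks of 100 so that each chunk gets its own scope (modeled here as a per-chunk vector), bounding the number of live handles regardless of copy_size.

#include <cassert>
#include <vector>

int main() {
  int copy_size = 250;
  int chunks = 0, copied = 0;
  int offset = 0;
  while (offset < copy_size) {
    std::vector<int> scope;  // stand-in for a per-chunk HandleScope
    offset += 100;
    for (int i = offset - 100; i < offset && i < copy_size; ++i) {
      scope.push_back(i);    // stand-in for creating one handle per element
      ++copied;
    }
    ++chunks;
    assert(scope.size() <= 100);  // peak handle count stays bounded
  }
  assert(chunks == 3 && copied == 250);
  return 0;
}
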
@@ -555,30 +564,22 @@ class ElementsAccessorBase : public ElementsAccessor {
return false;
}
- Handle<Object> Get(Handle<FixedArrayBase> backing_store,
- uint32_t entry) final {
- return ElementsAccessorSubclass::GetImpl(backing_store, entry);
- }
-
- static Handle<Object> GetImpl(Handle<FixedArrayBase> backing_store,
- uint32_t entry) {
- uint32_t index = GetIndexForEntryImpl(*backing_store, entry);
- return BackingStore::get(Handle<BackingStore>::cast(backing_store), index);
+ Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) final {
+ return ElementsAccessorSubclass::GetImpl(holder, entry);
}
- void Set(FixedArrayBase* backing_store, uint32_t entry, Object* value) final {
- ElementsAccessorSubclass::SetImpl(backing_store, entry, value);
+ static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
+ return ElementsAccessorSubclass::GetImpl(holder->elements(), entry);
}
- static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
- Object* value) {
- UNREACHABLE();
+ static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
+ Isolate* isolate = backing_store->GetIsolate();
+ uint32_t index = GetIndexForEntryImpl(backing_store, entry);
+ return handle(BackingStore::cast(backing_store)->get(index), isolate);
}
-
- static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
- Object* value, WriteBarrierMode mode) {
- UNREACHABLE();
+ void Set(Handle<JSObject> holder, uint32_t entry, Object* value) final {
+ ElementsAccessorSubclass::SetImpl(holder, entry, value);
}
void Reconfigure(Handle<JSObject> object, Handle<FixedArrayBase> store,
@@ -790,6 +791,7 @@ class ElementsAccessorBase : public ElementsAccessor {
DCHECK(IsFastDoubleElementsKind(from_kind) !=
IsFastDoubleElementsKind(kind()) ||
IsDictionaryElementsKind(from_kind) ||
+ from_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
static_cast<uint32_t>(old_elements->length()) < capacity);
Handle<FixedArrayBase> elements =
ConvertElementsWithCapacity(object, old_elements, from_kind, capacity);
@@ -824,21 +826,6 @@ class ElementsAccessorBase : public ElementsAccessor {
UNREACHABLE();
}
- void CopyElements(Handle<FixedArrayBase> from, uint32_t from_start,
- ElementsKind from_kind, Handle<FixedArrayBase> to,
- uint32_t to_start, int copy_size) final {
- DCHECK(!from.is_null());
- // NOTE: the ElementsAccessorSubclass::CopyElementsImpl() methods
- // violate the handlified function signature convention:
- // raw pointer parameters in the function that allocates. This is done
- // intentionally to avoid ArrayConcat() builtin performance degradation.
- // See the comment in another ElementsAccessorBase::CopyElements() for
- // details.
- ElementsAccessorSubclass::CopyElementsImpl(*from, from_start, *to,
- from_kind, to_start,
- kPackedSizeNotKnown, copy_size);
- }
-
void CopyElements(JSObject* from_holder, uint32_t from_start,
ElementsKind from_kind, Handle<FixedArrayBase> to,
uint32_t to_start, int copy_size) final {
@@ -871,6 +858,7 @@ class ElementsAccessorBase : public ElementsAccessor {
KeyAccumulator* keys, uint32_t range,
PropertyFilter filter,
uint32_t offset) {
+ DCHECK_NE(DICTIONARY_ELEMENTS, kind());
if (filter & ONLY_ALL_CAN_READ) {
// Non-dictionary elements can't have all-can-read accessors.
return;
@@ -885,8 +873,9 @@ class ElementsAccessorBase : public ElementsAccessor {
if (range < length) length = range;
for (uint32_t i = offset; i < length; i++) {
if (!ElementsAccessorSubclass::HasElementImpl(object, i, backing_store,
- filter))
+ filter)) {
continue;
+ }
keys->AddKey(i);
}
}
@@ -902,19 +891,8 @@ class ElementsAccessorBase : public ElementsAccessor {
void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
AddKeyConversion convert) final {
- Handle<FixedArrayBase> from(receiver->elements());
- uint32_t add_length =
- ElementsAccessorSubclass::GetCapacityImpl(*receiver, *from);
- if (add_length == 0) return;
-
- for (uint32_t i = 0; i < add_length; i++) {
- if (!ElementsAccessorSubclass::HasEntryImpl(*from, i)) continue;
- Handle<Object> value = ElementsAccessorSubclass::GetImpl(from, i);
- DCHECK(!value->IsTheHole());
- DCHECK(!value->IsAccessorPair());
- DCHECK(!value->IsExecutableAccessorInfo());
- accumulator->AddKey(value, convert);
- }
+ ElementsAccessorSubclass::AddElementsToKeyAccumulatorImpl(
+ receiver, accumulator, convert);
}
static uint32_t GetCapacityImpl(JSObject* holder,
@@ -926,10 +904,6 @@ class ElementsAccessorBase : public ElementsAccessor {
return ElementsAccessorSubclass::GetCapacityImpl(holder, backing_store);
}
- static bool HasEntryImpl(FixedArrayBase* backing_store, uint32_t entry) {
- return true;
- }
-
static uint32_t GetIndexForEntryImpl(FixedArrayBase* backing_store,
uint32_t entry) {
return entry;
@@ -966,9 +940,12 @@ class ElementsAccessorBase : public ElementsAccessor {
return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
}
- PropertyDetails GetDetails(FixedArrayBase* backing_store,
- uint32_t entry) final {
- return ElementsAccessorSubclass::GetDetailsImpl(backing_store, entry);
+ static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
+ return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
+ }
+
+ PropertyDetails GetDetails(JSObject* holder, uint32_t entry) final {
+ return ElementsAccessorSubclass::GetDetailsImpl(holder, entry);
}
private:
@@ -1078,15 +1055,22 @@ class DictionaryElementsAccessor
return backing_store->ValueAt(entry);
}
- static Handle<Object> GetImpl(Handle<FixedArrayBase> store, uint32_t entry) {
- Isolate* isolate = store->GetIsolate();
- return handle(GetRaw(*store, entry), isolate);
+ static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
+ return GetImpl(holder->elements(), entry);
}
- static inline void SetImpl(FixedArrayBase* store, uint32_t entry,
+ static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
+ return handle(GetRaw(backing_store, entry), backing_store->GetIsolate());
+ }
+
+ static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
Object* value) {
- SeededNumberDictionary* dictionary = SeededNumberDictionary::cast(store);
- dictionary->ValueAtPut(entry, value);
+ SetImpl(holder->elements(), entry, value);
+ }
+
+ static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
+ Object* value) {
+ SeededNumberDictionary::cast(backing_store)->ValueAtPut(entry, value);
}
static void ReconfigureImpl(Handle<JSObject> object,
@@ -1107,7 +1091,7 @@ class DictionaryElementsAccessor
uint32_t new_capacity) {
PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
Handle<SeededNumberDictionary> dictionary =
- object->HasFastElements()
+ object->HasFastElements() || object->HasFastStringWrapperElements()
? JSObject::NormalizeElements(object)
: handle(SeededNumberDictionary::cast(object->elements()));
Handle<SeededNumberDictionary> new_dictionary =
@@ -1148,6 +1132,10 @@ class DictionaryElementsAccessor
return static_cast<uint32_t>(entry);
}
+ static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
+ return GetDetailsImpl(holder->elements(), entry);
+ }
+
static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
uint32_t entry) {
return SeededNumberDictionary::cast(backing_store)->DetailsAt(entry);
@@ -1184,6 +1172,24 @@ class DictionaryElementsAccessor
keys->SortCurrentElementsList();
}
+
+ static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ AddKeyConversion convert) {
+ SeededNumberDictionary* dictionary =
+ SeededNumberDictionary::cast(receiver->elements());
+ int capacity = dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* k = dictionary->KeyAt(i);
+ if (!dictionary->IsKey(k)) continue;
+ if (dictionary->IsDeleted(i)) continue;
+ Object* value = dictionary->ValueAt(i);
+ DCHECK(!value->IsTheHole());
+ DCHECK(!value->IsAccessorPair());
+ DCHECK(!value->IsAccessorInfo());
+ accumulator->AddKey(value, convert);
+ }
+ }
};
@@ -1222,9 +1228,9 @@ class FastElementsAccessor
static void DeleteCommon(Handle<JSObject> obj, uint32_t entry,
Handle<FixedArrayBase> store) {
- DCHECK(obj->HasFastSmiOrObjectElements() ||
- obj->HasFastDoubleElements() ||
- obj->HasFastArgumentsElements());
+ DCHECK(obj->HasFastSmiOrObjectElements() || obj->HasFastDoubleElements() ||
+ obj->HasFastArgumentsElements() ||
+ obj->HasFastStringWrapperElements());
Handle<BackingStore> backing_store = Handle<BackingStore>::cast(store);
if (!obj->IsJSArray() &&
entry == static_cast<uint32_t>(store->length()) - 1) {
@@ -1302,7 +1308,7 @@ class FastElementsAccessor
FastElementsAccessorSubclass::GrowCapacityAndConvertImpl(object,
new_capacity);
} else {
- if (from_kind != to_kind) {
+ if (IsFastElementsKind(from_kind) && from_kind != to_kind) {
JSObject::TransitionElementsKind(object, to_kind);
}
if (IsFastSmiOrObjectElementsKind(from_kind)) {
@@ -1310,7 +1316,7 @@ class FastElementsAccessor
JSObject::EnsureWritableFastElements(object);
}
}
- FastElementsAccessorSubclass::SetImpl(object->elements(), index, *value);
+ FastElementsAccessorSubclass::SetImpl(object, index, *value);
}
static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
@@ -1328,6 +1334,27 @@ class FastElementsAccessor
return !BackingStore::cast(backing_store)->is_the_hole(entry);
}
+ static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ AddKeyConversion convert) {
+ uint32_t length = 0;
+ Handle<FixedArrayBase> elements(receiver->elements(),
+ receiver->GetIsolate());
+ if (receiver->IsJSArray()) {
+ length = Smi::cast(JSArray::cast(*receiver)->length())->value();
+ } else {
+ length =
+ FastElementsAccessorSubclass::GetCapacityImpl(*receiver, *elements);
+ }
+ for (uint32_t i = 0; i < length; i++) {
+ if (IsFastPackedElementsKind(KindTraits::Kind) ||
+ HasEntryImpl(*elements, i)) {
+ accumulator->AddKey(FastElementsAccessorSubclass::GetImpl(*elements, i),
+ convert);
+ }
+ }
+ }
+
static void ValidateContents(Handle<JSObject> holder, int length) {
#if DEBUG
Isolate* isolate = holder->GetIsolate();
@@ -1345,7 +1372,7 @@ class FastElementsAccessor
Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements);
if (IsFastSmiElementsKind(KindTraits::Kind)) {
for (int i = 0; i < length; i++) {
- DCHECK(BackingStore::get(backing_store, i)->IsSmi() ||
+ DCHECK(BackingStore::get(*backing_store, i, isolate)->IsSmi() ||
(IsFastHoleyElementsKind(KindTraits::Kind) &&
backing_store->is_the_hole(i)));
}
@@ -1505,7 +1532,7 @@ class FastElementsAccessor
int new_length = length - 1;
int remove_index = remove_position == AT_START ? 0 : new_length;
Handle<Object> result =
- FastElementsAccessorSubclass::GetImpl(backing_store, remove_index);
+ FastElementsAccessorSubclass::GetImpl(*backing_store, remove_index);
if (remove_position == AT_START) {
FastElementsAccessorSubclass::MoveElements(
isolate, receiver, backing_store, 0, 1, new_length, 0, 0);
@@ -1582,6 +1609,11 @@ class FastSmiOrObjectElementsAccessor
: FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits>(name) {}
+ static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+ Object* value) {
+ SetImpl(holder->elements(), entry, value);
+ }
+
static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
Object* value) {
FixedArray::cast(backing_store)->set(entry, value);
@@ -1638,6 +1670,7 @@ class FastSmiOrObjectElementsAccessor
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
CopyObjectToObjectElements(from, from_kind, from_start, to, to_kind,
to_start, copy_size);
break;
@@ -1649,17 +1682,21 @@ class FastSmiOrObjectElementsAccessor
break;
}
case DICTIONARY_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
CopyDictionaryToObjectElements(from, from_start, to, to_kind, to_start,
copy_size);
break;
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
- UNREACHABLE();
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
+ // This function is currently only used for JSArrays with non-zero
+ // length.
+ UNREACHABLE();
+ break;
+ case NO_ELEMENTS:
+ break; // Nothing to do.
}
}
};
@@ -1722,6 +1759,21 @@ class FastDoubleElementsAccessor
: FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits>(name) {}
+ static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
+ return GetImpl(holder->elements(), entry);
+ }
+
+ static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
+ Isolate* isolate = backing_store->GetIsolate();
+ return FixedDoubleArray::get(FixedDoubleArray::cast(backing_store), entry,
+ isolate);
+ }
+
+ static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+ Object* value) {
+ SetImpl(holder->elements(), entry, value);
+ }
+
static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
Object* value) {
FixedDoubleArray::cast(backing_store)->set(entry, value->Number());
@@ -1784,13 +1836,16 @@ class FastDoubleElementsAccessor
break;
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
-
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
- UNREACHABLE();
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ case NO_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
+ // This function is currently only used for JSArrays with non-zero
+ // length.
+ UNREACHABLE();
+ break;
}
}
};
@@ -1833,14 +1888,14 @@ class TypedElementsAccessor
typedef typename ElementsKindTraits<Kind>::BackingStore BackingStore;
typedef TypedElementsAccessor<Kind> AccessorClass;
- static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
+ static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
Object* value) {
- BackingStore::cast(backing_store)->SetValue(entry, value);
+ SetImpl(holder->elements(), entry, value);
}
- static bool HasAccessorsImpl(JSObject* holder,
- FixedArrayBase* backing_store) {
- return false;
+ static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
+ Object* value) {
+ BackingStore::cast(backing_store)->SetValue(entry, value);
}
static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
@@ -1848,10 +1903,16 @@ class TypedElementsAccessor
BackingStore::cast(backing_store)->SetValue(entry, value);
}
- static Handle<Object> GetImpl(Handle<FixedArrayBase> backing_store,
- uint32_t entry) {
- uint32_t index = GetIndexForEntryImpl(*backing_store, entry);
- return BackingStore::get(Handle<BackingStore>::cast(backing_store), index);
+ static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
+ return GetImpl(holder->elements(), entry);
+ }
+
+ static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
+ return BackingStore::get(BackingStore::cast(backing_store), entry);
+ }
+
+ static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
+ return PropertyDetails(DONT_DELETE, DATA, 0, PropertyCellType::kNoCell);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
@@ -1859,6 +1920,17 @@ class TypedElementsAccessor
return PropertyDetails(DONT_DELETE, DATA, 0, PropertyCellType::kNoCell);
}
+ static bool HasElementImpl(Handle<JSObject> holder, uint32_t index,
+ Handle<FixedArrayBase> backing_store,
+ PropertyFilter filter) {
+ return index < AccessorClass::GetCapacityImpl(*holder, *backing_store);
+ }
+
+ static bool HasAccessorsImpl(JSObject* holder,
+ FixedArrayBase* backing_store) {
+ return false;
+ }
+
static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
uint32_t length,
Handle<FixedArrayBase> backing_store) {
@@ -1889,6 +1961,18 @@ class TypedElementsAccessor
if (view->WasNeutered()) return 0;
return backing_store->length();
}
+
+ static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ AddKeyConversion convert) {
+ Handle<FixedArrayBase> elements(receiver->elements(),
+ receiver->GetIsolate());
+ uint32_t length = AccessorClass::GetCapacityImpl(*receiver, *elements);
+ for (uint32_t i = 0; i < length; i++) {
+ Handle<Object> value = AccessorClass::GetImpl(*elements, i);
+ accumulator->AddKey(value, convert);
+ }
+ }
};
@@ -1913,10 +1997,13 @@ class SloppyArgumentsElementsAccessor
USE(KindTraits::Kind);
}
- static Handle<Object> GetImpl(Handle<FixedArrayBase> parameters,
- uint32_t entry) {
+ static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
+ return GetImpl(holder->elements(), entry);
+ }
+
+ static Handle<Object> GetImpl(FixedArrayBase* parameters, uint32_t entry) {
Isolate* isolate = parameters->GetIsolate();
- Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters);
+ Handle<FixedArray> parameter_map(FixedArray::cast(parameters), isolate);
uint32_t length = parameter_map->length() - 2;
if (entry < length) {
DisallowHeapAllocation no_gc;
@@ -1927,10 +2014,8 @@ class SloppyArgumentsElementsAccessor
return handle(context->get(context_entry), isolate);
} else {
      // Object is not mapped; defer to the arguments.
- Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)),
- isolate);
- Handle<Object> result =
- ArgumentsAccessor::GetImpl(arguments, entry - length);
+ Handle<Object> result = ArgumentsAccessor::GetImpl(
+ FixedArray::cast(parameter_map->get(1)), entry - length);
// Elements of the arguments object in slow mode might be slow aliases.
if (result->IsAliasedArgumentsEntry()) {
DisallowHeapAllocation no_gc;
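
A standalone model of the mapped/unmapped split in GetImpl above (the hole probing and alias resolution are simplified away): entries below parameter_map->length() - 2 resolve through a context slot; anything at or above it defers to the real arguments store, offset by that length.

#include <cassert>
#include <vector>

int GetModel(const std::vector<int>& context_slot_for_entry,
             const std::vector<int>& context,
             const std::vector<int>& arguments, unsigned entry) {
  unsigned length = context_slot_for_entry.size();
  if (entry < length) {
    return context[context_slot_for_entry[entry]];  // mapped: context slot
  }
  return arguments[entry - length];  // not mapped; defer to the arguments
}

int main() {
  std::vector<int> map = {1, 0};  // two mapped parameters
  std::vector<int> context = {10, 20};
  std::vector<int> arguments = {30};
  assert(GetModel(map, context, arguments, 0) == 20);  // via context slot 1
  assert(GetModel(map, context, arguments, 2) == 30);  // deferred lookup
  return 0;
}
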
@@ -1949,6 +2034,11 @@ class SloppyArgumentsElementsAccessor
UNREACHABLE();
}
+ static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
+ Object* value) {
+ SetImpl(holder->elements(), entry, value);
+ }
+
static inline void SetImpl(FixedArrayBase* store, uint32_t entry,
Object* value) {
FixedArray* parameter_map = FixedArray::cast(store);
@@ -1989,6 +2079,18 @@ class SloppyArgumentsElementsAccessor
ArgumentsAccessor::GetCapacityImpl(holder, arguments);
}
+ static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ AddKeyConversion convert) {
+ FixedArrayBase* elements = receiver->elements();
+ uint32_t length = GetCapacityImpl(*receiver, elements);
+ for (uint32_t entry = 0; entry < length; entry++) {
+ if (!HasEntryImpl(elements, entry)) continue;
+ Handle<Object> value = GetImpl(elements, entry);
+ accumulator->AddKey(value, convert);
+ }
+ }
+
static bool HasEntryImpl(FixedArrayBase* parameters, uint32_t entry) {
FixedArray* parameter_map = FixedArray::cast(parameters);
uint32_t length = parameter_map->length() - 2;
@@ -2031,9 +2133,8 @@ class SloppyArgumentsElementsAccessor
return (parameter_map->length() - 2) + entry;
}
- static PropertyDetails GetDetailsImpl(FixedArrayBase* parameters,
- uint32_t entry) {
- FixedArray* parameter_map = FixedArray::cast(parameters);
+ static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
+ FixedArray* parameter_map = FixedArray::cast(holder->elements());
uint32_t length = parameter_map->length() - 2;
if (entry < length) {
return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
@@ -2238,6 +2339,172 @@ class FastSloppyArgumentsElementsAccessor
}
};
+template <typename StringWrapperElementsAccessorSubclass,
+ typename BackingStoreAccessor, typename KindTraits>
+class StringWrapperElementsAccessor
+ : public ElementsAccessorBase<StringWrapperElementsAccessorSubclass,
+ KindTraits> {
+ public:
+ explicit StringWrapperElementsAccessor(const char* name)
+ : ElementsAccessorBase<StringWrapperElementsAccessorSubclass, KindTraits>(
+ name) {
+ USE(KindTraits::Kind);
+ }
+
+ static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
+ Isolate* isolate = holder->GetIsolate();
+ Handle<String> string(GetString(*holder), isolate);
+ uint32_t length = static_cast<uint32_t>(string->length());
+ if (entry < length) {
+ return isolate->factory()->LookupSingleCharacterStringFromCode(
+ String::Flatten(string)->Get(entry));
+ }
+ return BackingStoreAccessor::GetImpl(holder, entry - length);
+ }
+
+ static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
+ uint32_t length = static_cast<uint32_t>(GetString(holder)->length());
+ if (entry < length) {
+ PropertyAttributes attributes =
+ static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+ return PropertyDetails(attributes, v8::internal::DATA, 0,
+ PropertyCellType::kNoCell);
+ }
+ return BackingStoreAccessor::GetDetailsImpl(holder, entry - length);
+ }
+
+ static uint32_t GetEntryForIndexImpl(JSObject* holder,
+ FixedArrayBase* backing_store,
+ uint32_t index, PropertyFilter filter) {
+ uint32_t length = static_cast<uint32_t>(GetString(holder)->length());
+ if (index < length) return index;
+ uint32_t backing_store_entry = BackingStoreAccessor::GetEntryForIndexImpl(
+ holder, backing_store, index, filter);
+ if (backing_store_entry == kMaxUInt32) return kMaxUInt32;
+ DCHECK(backing_store_entry < kMaxUInt32 - length);
+ return backing_store_entry + length;
+ }
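
A standalone sketch of the index-to-entry mapping above: indexes below the wrapped string's length are their own entries; anything else is looked up in the backing store and, if found there, offset by the string length (kMaxUInt32 modeling "not found").

#include <cassert>
#include <cstdint>

const uint32_t kMaxUInt32Model = 0xFFFFFFFFu;

uint32_t GetEntryForIndexModel(uint32_t string_length, uint32_t index,
                               uint32_t backing_store_entry) {
  if (index < string_length) return index;  // a read-only character entry
  if (backing_store_entry == kMaxUInt32Model) return kMaxUInt32Model;
  return backing_store_entry + string_length;
}

int main() {
  // new String("ab"): length 2; one extra element found at backing entry 0.
  assert(GetEntryForIndexModel(2, 1, kMaxUInt32Model) == 1);
  assert(GetEntryForIndexModel(2, 5, 0) == 2);  // entry shifted past "ab"
  assert(GetEntryForIndexModel(2, 9, kMaxUInt32Model) == kMaxUInt32Model);
  return 0;
}
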
+
+ static void DeleteImpl(Handle<JSObject> holder, uint32_t entry) {
+ uint32_t length = static_cast<uint32_t>(GetString(*holder)->length());
+ if (entry < length) {
+ return; // String contents can't be deleted.
+ }
+ BackingStoreAccessor::DeleteImpl(holder, entry - length);
+ }
+
+ static void SetImpl(Handle<JSObject> holder, uint32_t entry, Object* value) {
+ uint32_t length = static_cast<uint32_t>(GetString(*holder)->length());
+ if (entry < length) {
+ return; // String contents are read-only.
+ }
+ BackingStoreAccessor::SetImpl(holder->elements(), entry - length, value);
+ }
+
+ static void AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value, PropertyAttributes attributes,
+ uint32_t new_capacity) {
+ DCHECK(index >= static_cast<uint32_t>(GetString(*object)->length()));
+ // Explicitly grow fast backing stores if needed. Dictionaries know how to
+ // extend their capacity themselves.
+ if (KindTraits::Kind == FAST_STRING_WRAPPER_ELEMENTS &&
+ (object->GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS ||
+ BackingStoreAccessor::GetCapacityImpl(*object, object->elements()) !=
+ new_capacity)) {
+ StringWrapperElementsAccessorSubclass::GrowCapacityAndConvertImpl(
+ object, new_capacity);
+ }
+ BackingStoreAccessor::AddImpl(object, index, value, attributes,
+ new_capacity);
+ }
+
+ static void ReconfigureImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> store, uint32_t entry,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
+ uint32_t length = static_cast<uint32_t>(GetString(*object)->length());
+ if (entry < length) {
+ return; // String contents can't be reconfigured.
+ }
+ BackingStoreAccessor::ReconfigureImpl(object, store, entry - length, value,
+ attributes);
+ }
+
+ static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ AddKeyConversion convert) {
+ Isolate* isolate = receiver->GetIsolate();
+ Handle<String> string(GetString(*receiver), isolate);
+ string = String::Flatten(string);
+ uint32_t length = static_cast<uint32_t>(string->length());
+ for (uint32_t i = 0; i < length; i++) {
+ accumulator->AddKey(
+ isolate->factory()->LookupSingleCharacterStringFromCode(
+ string->Get(i)),
+ convert);
+ }
+ BackingStoreAccessor::AddElementsToKeyAccumulatorImpl(receiver, accumulator,
+ convert);
+ }
+
+ static void CollectElementIndicesImpl(Handle<JSObject> object,
+ Handle<FixedArrayBase> backing_store,
+ KeyAccumulator* keys, uint32_t range,
+ PropertyFilter filter,
+ uint32_t offset) {
+ if ((filter & ONLY_ALL_CAN_READ) == 0) {
+ uint32_t length = GetString(*object)->length();
+ for (uint32_t i = 0; i < length; i++) {
+ keys->AddKey(i);
+ }
+ }
+ BackingStoreAccessor::CollectElementIndicesImpl(object, backing_store, keys,
+ range, filter, offset);
+ }
+
+ static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
+ FixedArrayBase* to, ElementsKind from_kind,
+ uint32_t to_start, int packed_size,
+ int copy_size) {
+ BackingStoreAccessor::CopyElementsImpl(from, from_start, to, from_kind,
+ to_start, packed_size, copy_size);
+ }
+
+ private:
+ static String* GetString(JSObject* holder) {
+ DCHECK(holder->IsJSValue());
+ JSValue* js_value = JSValue::cast(holder);
+ DCHECK(js_value->value()->IsString());
+ return String::cast(js_value->value());
+ }
+};
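
A standalone model of the write semantics above: stores and deletes below the string length are silently ignored (string contents are read-only and undeletable), while everything else is routed to the overlaid elements backing store.

#include <cassert>
#include <cstdint>
#include <map>
#include <string>

struct StringWrapperModel {
  std::string value;
  std::map<uint32_t, int> backing;  // the overlaid elements store

  void Set(uint32_t entry, int v) {
    if (entry < value.size()) return;  // string contents are read-only
    backing[entry - value.size()] = v;
  }
  void Delete(uint32_t entry) {
    if (entry < value.size()) return;  // string contents can't be deleted
    backing.erase(entry - value.size());
  }
};

int main() {
  StringWrapperModel s{"ab", {}};
  s.Set(0, 42);   // ignored: would overwrite the character 'a'
  s.Set(5, 42);   // lands in the backing store at entry 5 - 2 = 3
  assert(s.backing.at(3) == 42);
  s.Delete(5);
  assert(s.backing.empty());
  return 0;
}
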
+
+class FastStringWrapperElementsAccessor
+ : public StringWrapperElementsAccessor<
+ FastStringWrapperElementsAccessor, FastHoleyObjectElementsAccessor,
+ ElementsKindTraits<FAST_STRING_WRAPPER_ELEMENTS>> {
+ public:
+ explicit FastStringWrapperElementsAccessor(const char* name)
+ : StringWrapperElementsAccessor<
+ FastStringWrapperElementsAccessor, FastHoleyObjectElementsAccessor,
+ ElementsKindTraits<FAST_STRING_WRAPPER_ELEMENTS>>(name) {}
+};
+
+class SlowStringWrapperElementsAccessor
+ : public StringWrapperElementsAccessor<
+ SlowStringWrapperElementsAccessor, DictionaryElementsAccessor,
+ ElementsKindTraits<SLOW_STRING_WRAPPER_ELEMENTS>> {
+ public:
+ explicit SlowStringWrapperElementsAccessor(const char* name)
+ : StringWrapperElementsAccessor<
+ SlowStringWrapperElementsAccessor, DictionaryElementsAccessor,
+ ElementsKindTraits<SLOW_STRING_WRAPPER_ELEMENTS>>(name) {}
+
+ static bool HasAccessorsImpl(JSObject* holder,
+ FixedArrayBase* backing_store) {
+ return DictionaryElementsAccessor::HasAccessorsImpl(holder, backing_store);
+ }
+};
} // namespace
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 86ada229e3..483d753bc1 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -29,8 +29,6 @@ class ElementsAccessor {
return elements_accessors_[elements_kind];
}
- static ElementsAccessor* ForArray(Handle<FixedArrayBase> array);
-
// Checks the elements of an object for consistency, asserting when a problem
// is found.
virtual void Validate(Handle<JSObject> obj) = 0;
@@ -54,15 +52,15 @@ class ElementsAccessor {
return HasElement(holder, index, handle(holder->elements()), filter);
}
- virtual bool HasAccessors(JSObject* holder) = 0;
-
// Returns true if the backing store is compact in the given range
virtual bool IsPacked(Handle<JSObject> holder,
Handle<FixedArrayBase> backing_store, uint32_t start,
uint32_t end) = 0;
- virtual Handle<Object> Get(Handle<FixedArrayBase> backing_store,
- uint32_t entry) = 0;
+ virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) = 0;
+
+ virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
+ virtual bool HasAccessors(JSObject* holder) = 0;
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
@@ -83,38 +81,6 @@ class ElementsAccessor {
// destination array with the hole.
static const int kCopyToEndAndInitializeToHole = -2;
- // Copy elements from one backing store to another. Typically, callers specify
- // the source JSObject or JSArray in source_holder. If the holder's backing
- // store is available, it can be passed in source and source_holder is
- // ignored.
- virtual void CopyElements(
- Handle<FixedArrayBase> source,
- uint32_t source_start,
- ElementsKind source_kind,
- Handle<FixedArrayBase> destination,
- uint32_t destination_start,
- int copy_size) = 0;
-
- // NOTE: this method violates the handlified function signature convention:
- // raw pointer parameter |source_holder| in the function that allocates.
- // This is done intentionally to avoid ArrayConcat() builtin performance
- // degradation.
- virtual void CopyElements(
- JSObject* source_holder,
- uint32_t source_start,
- ElementsKind source_kind,
- Handle<FixedArrayBase> destination,
- uint32_t destination_start,
- int copy_size) = 0;
-
- inline void CopyElements(
- Handle<JSObject> from_holder,
- Handle<FixedArrayBase> to,
- ElementsKind from_kind) {
- CopyElements(
- *from_holder, 0, from_kind, to, 0, kCopyToEndAndInitializeToHole);
- }
-
// Copy all indices that have elements from |object| into the given
// KeyAccumulator. For Dictionary-based element-kinds we filter out elements
// whose PropertyAttribute match |filter|.
@@ -144,8 +110,7 @@ class ElementsAccessor {
static void InitializeOncePerProcess();
static void TearDown();
- virtual void Set(FixedArrayBase* backing_store, uint32_t entry,
- Object* value) = 0;
+ virtual void Set(Handle<JSObject> holder, uint32_t entry, Object* value) = 0;
virtual void Reconfigure(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store, uint32_t entry,
@@ -185,9 +150,6 @@ class ElementsAccessor {
protected:
friend class LookupIterator;
- static ElementsAccessor* ForArray(FixedArrayBase* array);
-
-
// Element handlers distinguish between entries and indices when they
// manipulate elements. Entries refer to elements in terms of their location
// in the underlying storage's backing store representation, and are between 0
@@ -199,8 +161,15 @@ class ElementsAccessor {
virtual uint32_t GetEntryForIndex(JSObject* holder,
FixedArrayBase* backing_store,
uint32_t index) = 0;
- virtual PropertyDetails GetDetails(FixedArrayBase* backing_store,
- uint32_t entry) = 0;
+
+ // NOTE: this method violates the handlified function signature convention:
+ // raw pointer parameter |source_holder| in the function that allocates.
+ // This is done intentionally to avoid ArrayConcat() builtin performance
+ // degradation.
+ virtual void CopyElements(JSObject* source_holder, uint32_t source_start,
+ ElementsKind source_kind,
+ Handle<FixedArrayBase> destination,
+ uint32_t destination_start, int copy_size) = 0;
private:
virtual uint32_t GetCapacity(JSObject* holder,
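The interface change in this hunk is that Get, Set and GetDetails now receive the holder instead of only a raw backing store: for the new string-wrapper element kinds, part of the elements lives on the wrapped String, so a backing store alone no longer identifies every element. A compilable sketch of the idea, with illustrative types:

    #include <string>

    struct HolderSketch {
      std::string wrapped;  // stands in for the String inside a JSValue
      const int* store;     // plus an ordinary element backing store
    };

    // Old shape: only the store is visible, so string characters are
    // unreachable from here.
    int GetOld(const int* store, int entry) { return store[entry]; }

    // New shape: with the holder available, the accessor can serve entries
    // from the wrapped string first and fall back to the store.
    int GetNew(const HolderSketch& holder, int entry) {
      int length = static_cast<int>(holder.wrapped.size());
      if (entry < length) return holder.wrapped[entry];
      return holder.store[entry - length];
    }

    int main() {
      const int store[] = {7, 8};
      HolderSketch h{"hi", store};
      return (GetNew(h, 0) == 'h' && GetNew(h, 2) == 7) ? 0 : 1;
    }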
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index d4efb7653d..e6a569f33d 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -144,8 +144,8 @@ MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
if (receiver->IsUndefined() || receiver->IsNull()) {
receiver = handle(function->global_proxy(), isolate);
} else {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, receiver, Execution::ToObject(isolate, receiver), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, receiver), Object);
}
}
DCHECK(function->context()->global_object()->IsJSGlobalObject());
@@ -421,18 +421,6 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
// --- C a l l s t o n a t i v e s ---
-MaybeHandle<JSReceiver> Execution::ToObject(Isolate* isolate,
- Handle<Object> obj) {
- Handle<JSReceiver> receiver;
- if (JSReceiver::ToObject(isolate, obj).ToHandle(&receiver)) {
- return receiver;
- }
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kUndefinedOrNullToObject),
- JSReceiver);
-}
-
-
Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
Handle<JSFunction> fun,
Handle<Object> pos,
@@ -492,7 +480,7 @@ Object* StackGuard::HandleInterrupts() {
isolate_->counters()->stack_interrupts()->Increment();
isolate_->counters()->runtime_profiler_ticks()->Increment();
- isolate_->runtime_profiler()->OptimizeNow();
+ isolate_->runtime_profiler()->MarkCandidatesForOptimization();
return isolate_->heap()->undefined_value();
}
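With Execution::ToObject gone, callers go through Object::ToObject, which implements the same ES2015 ToObject contract (section 7.1.13): undefined and null throw a TypeError, objects pass through, and primitives are boxed. A minimal sketch of that contract, with illustrative tags rather than V8 types:

    #include <stdexcept>
    #include <string>

    enum class TagSketch { kUndefined, kNull, kObject, kPrimitive };

    std::string ToObjectSketch(TagSketch t) {
      if (t == TagSketch::kUndefined || t == TagSketch::kNull)
        throw std::runtime_error(
            "TypeError: Cannot convert undefined or null to object");
      if (t == TagSketch::kObject) return "same object";
      return "wrapper object";  // primitives are boxed
    }

    int main() {
      return ToObjectSketch(TagSketch::kPrimitive) == "wrapper object" ? 0 : 1;
    }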
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 81b71b631e..52c76280eb 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -49,10 +49,6 @@ class Execution final : public AllStatic {
Handle<Object> argv[],
MaybeHandle<Object>* exception_out = NULL);
- // ECMA-262 9.9
- MUST_USE_RESULT static MaybeHandle<JSReceiver> ToObject(Isolate* isolate,
- Handle<Object> obj);
-
static Handle<String> GetStackTraceLine(Handle<Object> recv,
Handle<JSFunction> fun,
Handle<Object> pos,
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 9241e9f4de..2ed3ad27e5 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -36,10 +36,10 @@ typedef SimpleStringResource<char, v8::String::ExternalOneByteStringResource>
typedef SimpleStringResource<uc16, v8::String::ExternalStringResource>
SimpleTwoByteStringResource;
-
const char* const ExternalizeStringExtension::kSource =
"native function externalizeString();"
- "native function isOneByteString();";
+ "native function isOneByteString();"
+ "function x() { return 1; }";
v8::Local<v8::FunctionTemplate>
ExternalizeStringExtension::GetNativeFunctionTemplate(
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index f03e6b2e61..15ddb5ff43 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -211,13 +211,6 @@ Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
}
-// Internalized strings are created in the old generation (data space).
-Handle<String> Factory::InternalizeString(Handle<String> string) {
- if (string->IsInternalizedString()) return string;
- return StringTable::LookupString(isolate(), string);
-}
-
-
Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
OneByteStringKey key(string, isolate()->heap()->HashSeed());
return InternalizeStringWithKey(&key);
@@ -243,12 +236,6 @@ Handle<String> Factory::InternalizeStringWithKey(StringTableKey* key) {
}
-Handle<Name> Factory::InternalizeName(Handle<Name> name) {
- if (name->IsUniqueName()) return name;
- return InternalizeString(Handle<String>::cast(name));
-}
-
-
MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
PretenureFlag pretenure) {
int length = string.length();
@@ -868,10 +855,9 @@ Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
}
-Handle<ExecutableAccessorInfo> Factory::NewExecutableAccessorInfo() {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(
- NewStruct(EXECUTABLE_ACCESSOR_INFO_TYPE));
+Handle<AccessorInfo> Factory::NewAccessorInfo() {
+ Handle<AccessorInfo> info =
+ Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE));
info->set_flag(0); // Must clear the flag, it was initialized as undefined.
return info;
}
@@ -1042,6 +1028,13 @@ Handle<FixedArray> Factory::CopyFixedArrayAndGrow(Handle<FixedArray> array,
FixedArray);
}
+Handle<FixedArray> Factory::CopyFixedArrayUpTo(Handle<FixedArray> array,
+ int new_len,
+ PretenureFlag pretenure) {
+ CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->CopyFixedArrayUpTo(
+ *array, new_len, pretenure),
+ FixedArray);
+}
Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
CALL_HEAP_FUNCTION(isolate(),
@@ -1488,6 +1481,12 @@ Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
Code);
}
+Handle<BytecodeArray> Factory::CopyBytecodeArray(
+ Handle<BytecodeArray> bytecode_array) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->CopyBytecodeArray(*bytecode_array),
+ BytecodeArray);
+}
Handle<JSObject> Factory::NewJSObject(Handle<JSFunction> constructor,
PretenureFlag pretenure) {
@@ -1738,16 +1737,6 @@ Handle<JSSetIterator> Factory::NewJSSetIterator() {
}
-Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
- Handle<Object> done) {
- Handle<JSIteratorResult> result = Handle<JSIteratorResult>::cast(
- NewJSObjectFromMap(isolate()->iterator_result_map()));
- result->set_value(*value);
- result->set_done(*done);
- return result;
-}
-
-
namespace {
ElementsKind GetExternalArrayElementsKind(ExternalArrayType type) {
@@ -1967,9 +1956,9 @@ MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
// Determine the prototype of the {target_function}.
Handle<Object> prototype;
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), prototype,
- Object::GetPrototype(isolate(), target_function),
- JSBoundFunction);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), prototype,
+ JSReceiver::GetPrototype(isolate(), target_function), JSBoundFunction);
// Create the [[BoundArguments]] for the result.
Handle<FixedArray> bound_arguments;
@@ -2001,7 +1990,6 @@ MaybeHandle<JSBoundFunction> Factory::NewJSBoundFunction(
result->set_bound_target_function(*target_function);
result->set_bound_this(*bound_this);
result->set_bound_arguments(*bound_arguments);
- result->set_creation_context(*isolate()->native_context());
result->set_length(Smi::FromInt(0));
result->set_name(*undefined_value(), SKIP_WRITE_BARRIER);
return result;
@@ -2124,6 +2112,10 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name, MaybeHandle<Code> maybe_code, bool is_constructor) {
+ // Function names are assumed to be flat elsewhere. Flatten the name before
+ // allocating the SharedFunctionInfo, so that a GC triggered by the
+ // flattening allocation can never observe a half-initialized SFI.
+ name = String::Flatten(name, TENURED);
+
Handle<Map> map = shared_function_info_map();
Handle<SharedFunctionInfo> share = New<SharedFunctionInfo>(map, OLD_SPACE);
@@ -2143,7 +2135,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_instance_class_name(*Object_string());
share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
- share->set_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
+ share->set_debug_info(DebugInfo::uninitialized(), SKIP_WRITE_BARRIER);
share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER);
StaticFeedbackVectorSpec empty_spec;
Handle<TypeFeedbackMetadata> feedback_metadata =
@@ -2261,7 +2253,14 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
Handle<DebugInfo> debug_info =
Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE));
debug_info->set_shared(*shared);
- debug_info->set_code(shared->code());
+ if (shared->HasBytecodeArray()) {
+ // Create a copy for debugging.
+ Handle<BytecodeArray> original(shared->bytecode_array(), isolate());
+ Handle<BytecodeArray> copy = CopyBytecodeArray(original);
+ debug_info->set_abstract_code(AbstractCode::cast(*copy));
+ } else {
+ debug_info->set_abstract_code(AbstractCode::cast(shared->code()));
+ }
debug_info->set_break_points(*break_points);
// Link debug info to function.
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 01a2f7eecf..dd107d144b 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -70,7 +70,7 @@ class Factory final {
Handle<String> InternalizeUtf8String(const char* str) {
return InternalizeUtf8String(CStrVector(str));
}
- Handle<String> InternalizeString(Handle<String> str);
+
Handle<String> InternalizeOneByteString(Vector<const uint8_t> str);
Handle<String> InternalizeOneByteString(
Handle<SeqOneByteString>, int from, int length);
@@ -80,8 +80,16 @@ class Factory final {
template<class StringTableKey>
Handle<String> InternalizeStringWithKey(StringTableKey* key);
- Handle<Name> InternalizeName(Handle<Name> name);
+ // Internalized strings are created in the old generation (data space).
+ Handle<String> InternalizeString(Handle<String> string) {
+ if (string->IsInternalizedString()) return string;
+ return StringTable::LookupString(isolate(), string);
+ }
+ Handle<Name> InternalizeName(Handle<Name> name) {
+ if (name->IsUniqueName()) return name;
+ return StringTable::LookupString(isolate(), Handle<String>::cast(name));
+ }
// String creation functions. Most of the string creation functions take
// a Heap::PretenureFlag argument to optionally request that they be
@@ -262,7 +270,7 @@ class Factory final {
Handle<AliasedArgumentsEntry> NewAliasedArgumentsEntry(
int aliased_context_slot);
- Handle<ExecutableAccessorInfo> NewExecutableAccessorInfo();
+ Handle<AccessorInfo> NewAccessorInfo();
Handle<Script> NewScript(Handle<String> source);
@@ -323,6 +331,9 @@ class Factory final {
Handle<FixedArray> array, int grow_by,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<FixedArray> CopyFixedArrayUpTo(Handle<FixedArray> array, int new_len,
+ PretenureFlag pretenure = NOT_TENURED);
+
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
// This method expects a COW array in new space, and creates a copy
@@ -395,11 +406,6 @@ class Factory final {
// JS arrays are pretenured when allocated by the parser.
- // Create a JSArray with no elements.
- Handle<JSArray> NewJSArray(ElementsKind elements_kind,
- Strength strength = Strength::WEAK,
- PretenureFlag pretenure = NOT_TENURED);
-
// Create a JSArray with a specified length and elements initialized
// according to the specified mode.
Handle<JSArray> NewJSArray(
@@ -474,11 +480,6 @@ class Factory final {
Handle<JSMapIterator> NewJSMapIterator();
Handle<JSSetIterator> NewJSSetIterator();
- // Creates a new JSIteratorResult object with the arguments {value} and
- // {done}. Implemented according to ES6 section 7.4.7 CreateIterResultObject.
- Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value,
- Handle<Object> done);
-
// Allocates a bound function.
MaybeHandle<JSBoundFunction> NewJSBoundFunction(
Handle<JSReceiver> target_function, Handle<Object> bound_this,
@@ -548,6 +549,8 @@ class Factory final {
Handle<Code> CopyCode(Handle<Code> code, Vector<byte> reloc_info);
+ Handle<BytecodeArray> CopyBytecodeArray(Handle<BytecodeArray>);
+
// Interface for creating error objects.
Handle<Object> NewError(Handle<JSFunction> constructor,
Handle<String> message);
@@ -704,6 +707,11 @@ class Factory final {
Handle<SharedFunctionInfo> info,
Handle<Context> context,
PretenureFlag pretenure = TENURED);
+
+ // Create a JSArray with no elements and no length.
+ Handle<JSArray> NewJSArray(ElementsKind elements_kind,
+ Strength strength = Strength::WEAK,
+ PretenureFlag pretenure = NOT_TENURED);
};
} // namespace internal
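Note the direction of the InternalizeString/InternalizeName move in this file: the bodies left factory.cc and became inline in the header, so the common fast path (the string is already internalized, or the name is already unique) costs no out-of-line call. A sketch of that split, with illustrative names:

    // Fast path inlined in the header; only the slow path is out of line.
    struct StrSketch {
      bool interned;
    };

    StrSketch* LookupSlowSketch(StrSketch* s) {  // stands in for the table lookup
      s->interned = true;
      return s;
    }

    inline StrSketch* InternalizeSketch(StrSketch* s) {
      if (s->interned) return s;  // no call in the common case
      return LookupSlowSketch(s);
    }

    int main() {
      StrSketch s{false};
      return InternalizeSketch(&s)->interned ? 0 : 1;
    }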
diff --git a/deps/v8/src/field-type.cc b/deps/v8/src/field-type.cc
new file mode 100644
index 0000000000..76d694c132
--- /dev/null
+++ b/deps/v8/src/field-type.cc
@@ -0,0 +1,91 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/field-type.h"
+
+#include "src/handles-inl.h"
+#include "src/ostreams.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+FieldType* FieldType::None() {
+ return reinterpret_cast<FieldType*>(Smi::FromInt(0));
+}
+
+// static
+FieldType* FieldType::Any() {
+ return reinterpret_cast<FieldType*>(Smi::FromInt(1));
+}
+
+// static
+Handle<FieldType> FieldType::None(Isolate* isolate) {
+ return handle(None(), isolate);
+}
+
+// static
+Handle<FieldType> FieldType::Any(Isolate* isolate) {
+ return handle(Any(), isolate);
+}
+
+// static
+FieldType* FieldType::Class(i::Map* map) { return FieldType::cast(map); }
+
+// static
+Handle<FieldType> FieldType::Class(i::Handle<i::Map> map, Isolate* isolate) {
+ return handle(Class(*map), isolate);
+}
+
+// static
+FieldType* FieldType::cast(Object* object) {
+ DCHECK(object == None() || object == Any() || object->IsMap());
+ return reinterpret_cast<FieldType*>(object);
+}
+
+bool FieldType::IsClass() { return this->IsMap(); }
+
+Handle<i::Map> FieldType::AsClass() {
+ DCHECK(IsClass());
+ i::Map* map = Map::cast(this);
+ return handle(map, map->GetIsolate());
+}
+
+bool FieldType::NowStable() {
+ return !this->IsClass() || this->AsClass()->is_stable();
+}
+
+bool FieldType::NowIs(FieldType* other) {
+ if (other->IsAny()) return true;
+ if (IsNone()) return true;
+ if (other->IsNone()) return false;
+ if (IsAny()) return false;
+ DCHECK(IsClass());
+ DCHECK(other->IsClass());
+ return this == other;
+}
+
+bool FieldType::NowIs(Handle<FieldType> other) { return NowIs(*other); }
+
+Type* FieldType::Convert(Zone* zone) {
+ if (IsAny()) return Type::Any();
+ if (IsNone()) return Type::None();
+ DCHECK(IsClass());
+ return Type::Class(AsClass(), zone);
+}
+
+void FieldType::PrintTo(std::ostream& os) {
+ if (IsAny()) {
+ os << "Any";
+ } else if (IsNone()) {
+ os << "None";
+ } else {
+ DCHECK(IsClass());
+ os << "Class(" << static_cast<void*>(*AsClass()) << ")";
+ }
+}
+
+} // namespace internal
+} // namespace v8
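None() and Any() above are manufactured from the Smi values 0 and 1, so a FieldType* is either one of two tagged immediates or a genuine Map*, and no heap allocation is ever needed for the two sentinels. A sketch of the same tagged-immediate trick; the tag scheme here is illustrative and is not V8's actual Smi layout:

    #include <cassert>
    #include <cstdint>

    struct MapSketch { int id; };

    using FieldTypeBits = std::uintptr_t;

    // Low bit set marks an immediate; real maps are aligned pointers.
    const FieldTypeBits kNoneSketch = (0u << 1) | 1u;
    const FieldTypeBits kAnySketch = (1u << 1) | 1u;

    FieldTypeBits FromMap(MapSketch* m) {
      return reinterpret_cast<FieldTypeBits>(m);
    }
    bool IsClassSketch(FieldTypeBits t) { return (t & 1u) == 0; }

    int main() {
      MapSketch m{42};
      assert(!IsClassSketch(kNoneSketch) && !IsClassSketch(kAnySketch));
      assert(IsClassSketch(FromMap(&m)));
    }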
diff --git a/deps/v8/src/field-type.h b/deps/v8/src/field-type.h
new file mode 100644
index 0000000000..eb7ffcab47
--- /dev/null
+++ b/deps/v8/src/field-type.h
@@ -0,0 +1,49 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FIELD_TYPE_H_
+#define V8_FIELD_TYPE_H_
+
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+class FieldType : public Object {
+ public:
+ static FieldType* None();
+ static FieldType* Any();
+ static Handle<FieldType> None(Isolate* isolate);
+ static Handle<FieldType> Any(Isolate* isolate);
+ static FieldType* Class(i::Map* map);
+ static Handle<FieldType> Class(i::Handle<i::Map> map, Isolate* isolate);
+ static FieldType* cast(Object* object);
+
+ bool NowContains(Object* value) {
+ if (this == Any()) return true;
+ if (this == None()) return false;
+ if (!value->IsHeapObject()) return false;
+ return HeapObject::cast(value)->map() == Map::cast(this);
+ }
+
+ bool NowContains(Handle<Object> value) { return NowContains(*value); }
+
+ bool IsClass();
+ Handle<i::Map> AsClass();
+ bool IsNone() { return this == None(); }
+ bool IsAny() { return this == Any(); }
+ bool NowStable();
+ bool NowIs(FieldType* other);
+ bool NowIs(Handle<FieldType> other);
+ Type* Convert(Zone* zone);
+
+ void PrintTo(std::ostream& os);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_FIELD_TYPE_H_
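NowIs() gives FieldType a three-level lattice: None at the bottom, Any at the top, and each Class(map) in between, with two distinct classes never related. A compact sketch of that ordering, mirroring the branch order of FieldType::NowIs with an enum standing in for the tagged representation:

    #include <cassert>

    enum KindSketch { kNoneK, kClassA, kClassB, kAnyK };

    bool NowIsSketch(KindSketch self, KindSketch other) {
      if (other == kAnyK) return true;   // everything is <= Any
      if (self == kNoneK) return true;   // None is <= everything
      if (other == kNoneK) return false;
      if (self == kAnyK) return false;
      return self == other;              // classes relate only to themselves
    }

    int main() {
      assert(NowIsSketch(kNoneK, kClassA));
      assert(NowIsSketch(kClassA, kAnyK));
      assert(!NowIsSketch(kClassA, kClassB));
      assert(!NowIsSketch(kAnyK, kClassA));
    }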
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 9966a70382..ac430ab503 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -144,6 +144,11 @@ struct MaybeBoolFlag {
#else
# define ENABLE_NEON_DEFAULT false
#endif
+#ifdef V8_OS_WIN
+# define ENABLE_LOG_COLOUR false
+#else
+# define ENABLE_LOG_COLOUR true
+#endif
#define DEFINE_BOOL(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
#define DEFINE_BOOL_READONLY(nam, def, cmt) \
@@ -196,39 +201,44 @@ DEFINE_NEG_IMPLICATION(harmony, promise_extra)
// Activate on ClusterFuzz.
DEFINE_IMPLICATION(es_staging, harmony_regexp_lookbehind)
DEFINE_IMPLICATION(es_staging, move_object_start)
+DEFINE_IMPLICATION(es_staging, harmony_tailcalls)
// Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS(V) \
- V(harmony_modules, "harmony modules") \
- V(harmony_unicode_regexps, "harmony unicode regexps") \
- V(harmony_function_name, "harmony Function name inference") \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_simd, "harmony simd") \
- V(harmony_do_expressions, "harmony do-expressions") \
- V(harmony_regexp_subclass, "harmony regexp subclassing") \
- V(harmony_species, "harmony Symbol.species")
+#define HARMONY_INPROGRESS(V) \
+ V(harmony_object_observe, "harmony Object.observe") \
+ V(harmony_modules, "harmony modules") \
+ V(harmony_function_sent, "harmony function.sent") \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_simd, "harmony simd") \
+ V(harmony_do_expressions, "harmony do-expressions") \
+ V(harmony_iterator_close, "harmony iterator finalization") \
+ V(harmony_tailcalls, "harmony tail calls") \
+ V(harmony_object_values_entries, "harmony Object.values / Object.entries") \
+ V(harmony_object_own_property_descriptors, \
+ "harmony Object.getOwnPropertyDescriptors()") \
+ V(harmony_regexp_property, "harmony unicode regexp property classes")
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V) \
- V(harmony_regexp_lookbehind, "harmony regexp lookbehind")
+#define HARMONY_STAGED(V) \
+ V(harmony_function_name, "harmony Function name inference") \
+ V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
+ V(harmony_species, "harmony Symbol.species") \
+ V(harmony_instanceof, "harmony instanceof support")
// Features that are shipping (turned on by default, but internal flag remains).
#define HARMONY_SHIPPING(V) \
V(harmony_default_parameters, "harmony default parameters") \
V(harmony_destructuring_assignment, "harmony destructuring assignment") \
V(harmony_destructuring_bind, "harmony destructuring bind") \
- V(harmony_concat_spreadable, "harmony isConcatSpreadable") \
- V(harmony_object_observe, "harmony Object.observe") \
- V(harmony_tolength, "harmony ToLength") \
V(harmony_tostring, "harmony toString") \
- V(harmony_completion, "harmony completion value semantics") \
V(harmony_regexps, "harmony regular expression extensions") \
+ V(harmony_unicode_regexps, "harmony unicode regexps") \
V(harmony_sloppy, "harmony features in sloppy mode") \
V(harmony_sloppy_let, "harmony let in sloppy mode") \
V(harmony_sloppy_function, "harmony sloppy function block scoping") \
V(harmony_proxies, "harmony proxies") \
- V(harmony_reflect, "harmony Reflect API")
-
+ V(harmony_reflect, "harmony Reflect API") \
+ V(harmony_regexp_subclass, "harmony regexp subclassing")
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -297,13 +307,10 @@ DEFINE_BOOL(string_slices, true, "use string slices")
// Flags for Ignition.
DEFINE_BOOL(ignition, false, "use ignition interpreter")
DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
-DEFINE_BOOL(ignition_fake_try_catch, false,
- "enable fake try-catch-finally blocks in ignition for testing")
-DEFINE_BOOL(ignition_fallback_on_eval_and_catch, false,
- "fallback to full-codegen for functions which contain eval, catch"
- "and es6 blocks")
DEFINE_BOOL(print_bytecode, false,
"print bytecode generated by ignition interpreter")
+DEFINE_BOOL(trace_ignition, false,
+ "trace the bytecodes executed by the ignition interpreter")
DEFINE_BOOL(trace_ignition_codegen, false,
"trace the codegen of ignition interpreter bytecode handlers")
@@ -328,7 +335,7 @@ DEFINE_INT(max_inlined_nodes_cumulative, 400,
"maximum cumulative number of AST nodes considered for inlining")
DEFINE_BOOL(loop_invariant_code_motion, true, "loop invariant code motion")
DEFINE_BOOL(fast_math, true, "faster (but maybe less accurate) math functions")
-DEFINE_BOOL(collect_megamorphic_maps_from_stub_cache, true,
+DEFINE_BOOL(collect_megamorphic_maps_from_stub_cache, false,
"crankshaft harvests type feedback from stub cache")
DEFINE_BOOL(hydrogen_stats, false, "print statistics for hydrogen")
DEFINE_BOOL(trace_check_elimination, false, "trace check elimination phase")
@@ -395,8 +402,6 @@ DEFINE_BOOL(inline_accessors, true, "inline JavaScript accessors")
DEFINE_INT(escape_analysis_iterations, 2,
"maximum number of escape analysis fix-point iterations")
-DEFINE_BOOL(optimize_for_in, true, "optimize functions containing for-in loops")
-
DEFINE_BOOL(concurrent_recompilation, true,
"optimizing hot functions asynchronously on a separate thread")
DEFINE_BOOL(trace_concurrent_recompilation, false,
@@ -417,7 +422,6 @@ DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
// Flags for TurboFan.
DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
-DEFINE_IMPLICATION(turbo, turbo_inlining)
DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
DEFINE_BOOL(turbo_greedy_regalloc, false, "use the greedy register allocator")
DEFINE_BOOL(turbo_sp_frame_access, false,
@@ -450,7 +454,7 @@ DEFINE_BOOL(function_context_specialization, false,
"enable function context specialization in TurboFan")
DEFINE_BOOL(native_context_specialization, true,
"enable native context specialization in TurboFan")
-DEFINE_BOOL(turbo_inlining, false, "enable inlining in TurboFan")
+DEFINE_BOOL(turbo_inlining, true, "enable inlining in TurboFan")
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
DEFINE_BOOL(loop_assignment_analysis, true, "perform loop assignment analysis")
DEFINE_BOOL(turbo_profiling, false, "enable profiling in TurboFan")
@@ -459,7 +463,6 @@ DEFINE_BOOL(turbo_verify_allocation, DEBUG_BOOL,
DEFINE_BOOL(turbo_move_optimization, true, "optimize gap moves in TurboFan")
DEFINE_BOOL(turbo_jt, true, "enable jump threading in TurboFan")
DEFINE_BOOL(turbo_osr, true, "enable OSR in TurboFan")
-DEFINE_BOOL(turbo_try_finally, false, "enable try-finally support in TurboFan")
DEFINE_BOOL(turbo_stress_loop_peeling, false,
"stress loop peeling optimization")
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
@@ -467,18 +470,26 @@ DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
DEFINE_BOOL(turbo_cache_shared_code, true, "cache context-independent code")
DEFINE_BOOL(turbo_preserve_shared_code, false, "keep context-independent code")
DEFINE_BOOL(turbo_escape, false, "enable escape analysis")
-DEFINE_BOOL(trace_turbo_escape, false, "enable tracing in escape analysis")
DEFINE_BOOL(turbo_instruction_scheduling, false,
"enable instruction scheduling in TurboFan")
+DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
+ "randomly schedule instructions to stress dependency tracking")
// Flags for native WebAssembly.
DEFINE_BOOL(expose_wasm, false, "expose WASM interface to JavaScript")
DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
+DEFINE_BOOL(trace_wasm_ast, false, "dump AST after WASM decode")
DEFINE_BOOL(wasm_break_on_decoder_error, false,
"debug break when wasm decoder encounters an error")
+DEFINE_BOOL(enable_simd_asmjs, false, "enable SIMD.js in asm.js stdlib")
+
+DEFINE_BOOL(dump_asmjs_wasm, false, "dump Asm.js to WASM module bytes")
+DEFINE_STRING(asmjs_wasm_dumpfile, "asmjs.wasm",
+ "file to dump asm wasm conversion result to")
+
DEFINE_INT(typed_array_max_size_in_heap, 64,
"threshold for in-heap typed array")
@@ -626,13 +637,14 @@ DEFINE_INT(max_stack_trace_source_length, 300,
// full-codegen.cc
DEFINE_BOOL(always_inline_smi_code, false,
"always inline smi code in non-opt code")
+DEFINE_BOOL(verify_operand_stack_depth, false,
+ "emit debug code that verifies the static tracking of the operand "
+ "stack depth")
// heap.cc
DEFINE_INT(min_semi_space_size, 0,
"min size of a semi-space (in MBytes), the new space consists of two"
"semi-spaces")
-DEFINE_INT(target_semi_space_size, 0,
- "target size of a semi-space (in MBytes) before triggering a GC")
DEFINE_INT(max_semi_space_size, 0,
"max size of a semi-space (in MBytes), the new space consists of two"
"semi-spaces")
@@ -721,6 +733,11 @@ DEFINE_BOOL(heap_profiler_trace_objects, false,
"Dump heap object allocations/movements/size_updates")
+// sampling-heap-profiler.cc
+DEFINE_BOOL(sampling_heap_profiler_suppress_randomness, false,
+ "Use constant sample intervals to eliminate test flakiness")
+
+
// v8.cc
DEFINE_BOOL(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
@@ -787,7 +804,8 @@ DEFINE_INT(sim_stack_size, 2 * MB / KB,
"in kBytes (default is 2 MB)")
DEFINE_BOOL(log_regs_modified, true,
"When logging register values, only print modified registers.")
-DEFINE_BOOL(log_colour, true, "When logging, try to use coloured output.")
+DEFINE_BOOL(log_colour, ENABLE_LOG_COLOUR,
+ "When logging, try to use coloured output.")
DEFINE_BOOL(ignore_asm_unimplemented_break, false,
"Don't break for ASM_UNIMPLEMENTED_BREAK macros.")
DEFINE_BOOL(trace_sim_messages, false,
@@ -805,6 +823,9 @@ DEFINE_INT(hash_seed, 0,
"Fixed seed to use to hash property keys (0 means random)"
"(with snapshots this option cannot override the baked-in seed)")
+// runtime.cc
+DEFINE_BOOL(runtime_call_stats, false, "report runtime call counts and times")
+
// snapshot-common.cc
DEFINE_BOOL(profile_deserialization, false,
"Print the time it takes to deserialize the snapshot.")
@@ -864,6 +885,10 @@ DEFINE_INT(external_allocation_limit_incremental_time, 1,
"Time spent in incremental marking steps (in ms) once the external "
"allocation limit is reached")
+DEFINE_BOOL(disable_old_api_accessors, false,
+ "Disable old-style API accessors whose setters trigger through the "
+ "prototype chain")
+
//
// Dev shell flags
//
@@ -919,7 +944,6 @@ DEFINE_BOOL(print_builtin_source, false,
"pretty print source code for builtins")
DEFINE_BOOL(print_ast, false, "print source AST")
DEFINE_BOOL(print_builtin_ast, false, "print source AST for builtins")
-DEFINE_STRING(stop_at, "", "function name where to insert a breakpoint")
DEFINE_BOOL(trap_on_abort, false, "replace aborts by breakpoints")
// compiler.cc
@@ -941,6 +965,7 @@ DEFINE_BOOL(print_global_handles, false, "report global handles after GC")
// TurboFan debug-only flags.
DEFINE_BOOL(print_turbo_replay, false,
"print C++ code to recreate TurboFan graphs")
+DEFINE_BOOL(trace_turbo_escape, false, "enable tracing in escape analysis")
// objects.cc
DEFINE_BOOL(trace_normalization, false,
@@ -953,6 +978,8 @@ DEFINE_BOOL(trace_lazy, false, "trace lazy compilation")
DEFINE_BOOL(collect_heap_spill_statistics, false,
"report heap spill statistics along with heap_stats "
"(requires heap_stats)")
+DEFINE_BOOL(trace_live_bytes, false,
+ "trace incrementing and resetting of live bytes")
DEFINE_BOOL(trace_isolates, false, "trace isolate state changes")
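All of these DEFINE_* lines are X-macro entries: the same source line produces a flag variable, its default, or its --help text depending on which FLAG mode macro is live when the header is re-included. A self-contained sketch of the expansion in definition mode; the mode shown is illustrative, since the surrounding machinery is not part of this hunk:

    #include <cstdio>

    #define DEFINE_BOOL(nam, def, cmt) bool FLAG_##nam = def;  // definition mode
    DEFINE_BOOL(trace_isolates_sketch, false, "trace isolate state changes")
    #undef DEFINE_BOOL

    int main() {
      std::printf("trace_isolates_sketch=%d\n", FLAG_trace_isolates_sketch);
    }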
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index d60ab29c4e..50a2e21a05 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -121,15 +121,15 @@ StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type,
StackFrame* StackFrameIteratorBase::SingletonFor(StackFrame::Type type) {
#define FRAME_TYPE_CASE(type, field) \
- case StackFrame::type: result = &field##_; break;
+ case StackFrame::type: \
+ return &field##_;
- StackFrame* result = NULL;
switch (type) {
case StackFrame::NONE: return NULL;
STACK_FRAME_TYPE_LIST(FRAME_TYPE_CASE)
default: break;
}
- return result;
+ return NULL;
#undef FRAME_TYPE_CASE
}
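The SingletonFor() rewrite replaces the assign-then-break local with a direct return from each X-macro-generated case. A standalone sketch of the dispatch pattern, with an illustrative frame list:

    #include <cstdio>

    #define FRAME_LIST_SKETCH(V) V(ENTRY, entry) V(EXIT, exit) V(INTERNAL, internal)

    enum TypeSketch { NONE_S, ENTRY, EXIT, INTERNAL };
    struct FrameSketch { const char* name; };

    #define DECL(type, field) FrameSketch field##_{#type};
    FRAME_LIST_SKETCH(DECL)
    #undef DECL

    FrameSketch* SingletonForSketch(TypeSketch type) {
    #define FRAME_TYPE_CASE(type, field) \
      case type:                         \
        return &field##_;
      switch (type) {
        case NONE_S: return nullptr;
        FRAME_LIST_SKETCH(FRAME_TYPE_CASE)
        default: return nullptr;
      }
    #undef FRAME_TYPE_CASE
    }

    int main() { std::printf("%s\n", SingletonForSketch(EXIT)->name); }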
@@ -234,17 +234,8 @@ SafeStackFrameIterator::SafeStackFrameIterator(
}
if (SingletonFor(type) == NULL) return;
frame_ = SingletonFor(type, &state);
- if (frame_ == NULL) return;
-
+ DCHECK(frame_);
Advance();
-
- if (frame_ != NULL && !frame_->is_exit() &&
- external_callback_scope_ != NULL &&
- external_callback_scope_->scope_address() < frame_->fp()) {
- // Skip top ExternalCallbackScope if we already advanced to a JS frame
- // under it. Sampler will anyways take this top external callback.
- external_callback_scope_ = external_callback_scope_->previous();
- }
}
@@ -272,8 +263,12 @@ void SafeStackFrameIterator::AdvanceOneFrame() {
// Advance to the previous frame.
StackFrame::State state;
StackFrame::Type type = frame_->GetCallerState(&state);
+ if (SingletonFor(type) == NULL) {
+ frame_ = NULL;
+ return;
+ }
frame_ = SingletonFor(type, &state);
- if (frame_ == NULL) return;
+ DCHECK(frame_);
// Check that we have actually moved to the previous frame in the stack.
if (frame_->sp() < last_sp || frame_->fp() < last_fp) {
@@ -325,22 +320,30 @@ bool SafeStackFrameIterator::IsValidExitFrame(Address fp) const {
void SafeStackFrameIterator::Advance() {
while (true) {
AdvanceOneFrame();
- if (done()) return;
- if (frame_->is_java_script()) return;
- if (frame_->is_exit() && external_callback_scope_) {
+ if (done()) break;
+ ExternalCallbackScope* last_callback_scope = NULL;
+ while (external_callback_scope_ != NULL &&
+ external_callback_scope_->scope_address() < frame_->fp()) {
+ // Frame setup is not atomic, so we may observe a window in which an
+ // ExternalCallbackScope has already been created but its frame has not
+ // yet been entered; in that case we are really looking at the previous
+ // frame. Skip every ExternalCallbackScope below the current fp.
+ last_callback_scope = external_callback_scope_;
+ external_callback_scope_ = external_callback_scope_->previous();
+ }
+ if (frame_->is_java_script()) break;
+ if (frame_->is_exit()) {
// Some of the EXIT frames may have ExternalCallbackScope allocated on
// top of them. In that case the scope corresponds to the first EXIT
// frame beneath it. There may be other EXIT frames on top of the
// ExternalCallbackScope, just skip them as we cannot collect any useful
// information about them.
- if (external_callback_scope_->scope_address() < frame_->fp()) {
+ if (last_callback_scope) {
frame_->state_.pc_address =
- external_callback_scope_->callback_entrypoint_address();
- external_callback_scope_ = external_callback_scope_->previous();
- DCHECK(external_callback_scope_ == NULL ||
- external_callback_scope_->scope_address() > frame_->fp());
- return;
+ last_callback_scope->callback_entrypoint_address();
}
+ break;
}
}
}
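The rewritten Advance() hoists the callback-scope walk out of the exit-frame branch: on every iteration it drops all ExternalCallbackScopes whose address is below the current fp (the stack grows downward, so a lower address means a newer, possibly half-built frame) and remembers the last one dropped for the exit-frame pc fixup. A sketch of that walk, with illustrative addresses:

    #include <cassert>
    #include <cstdint>

    struct ScopeSketch {
      std::uintptr_t address;
      ScopeSketch* previous;
    };

    // Drops every scope below fp and returns the last scope dropped.
    ScopeSketch* SkipBelowSketch(ScopeSketch*& current, std::uintptr_t fp) {
      ScopeSketch* last_skipped = nullptr;
      while (current != nullptr && current->address < fp) {
        last_skipped = current;
        current = current->previous;
      }
      return last_skipped;
    }

    int main() {
      ScopeSketch older{0x5000, nullptr};
      ScopeSketch newer{0x3000, &older};
      ScopeSketch* head = &newer;
      ScopeSketch* last = SkipBelowSketch(head, 0x4000);  // fp between the two
      assert(last == &newer && head == &older);
    }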
@@ -411,6 +414,12 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
// the VM with a signal at any arbitrary instruction, with essentially
// anything on the stack. So basically none of these checks are 100%
// reliable.
+#if defined(USE_SIMULATOR)
+ MSAN_MEMORY_IS_INITIALIZED(
+ state->fp + StandardFrameConstants::kContextOffset, kPointerSize);
+ MSAN_MEMORY_IS_INITIALIZED(
+ state->fp + StandardFrameConstants::kMarkerOffset, kPointerSize);
+#endif
if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
// An adapter frame has a special SMI constant for the context and
// is not distinguished through the marker.
@@ -446,7 +455,8 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
return ARGUMENTS_ADAPTOR;
} else {
// The interpreter entry trampoline has a non-SMI marker.
- DCHECK(code_obj->is_interpreter_entry_trampoline());
+ DCHECK(code_obj->is_interpreter_entry_trampoline() ||
+ code_obj->is_interpreter_enter_bytecode_dispatch());
return INTERPRETED;
}
}
@@ -598,23 +608,14 @@ Address StandardFrame::GetExpressionAddress(int n) const {
return fp() + offset - n * kPointerSize;
}
-
-Object* StandardFrame::GetExpression(Address fp, int index) {
- return Memory::Object_at(GetExpressionAddress(fp, index));
-}
-
-
-Address StandardFrame::GetExpressionAddress(Address fp, int n) {
- const int offset = StandardFrameConstants::kExpressionsOffset;
- return fp + offset - n * kPointerSize;
+Address InterpretedFrame::GetExpressionAddress(int n) const {
+ const int offset = InterpreterFrameConstants::kExpressionsOffset;
+ return fp() + offset - n * kPointerSize;
}
-
int StandardFrame::ComputeExpressionsCount() const {
- const int offset =
- StandardFrameConstants::kExpressionsOffset + kPointerSize;
- Address base = fp() + offset;
- Address limit = sp();
+ Address base = GetExpressionAddress(0);
+ Address limit = sp() - kPointerSize;
DCHECK(base >= limit); // stack grows downwards
// Include register-allocated locals in number of expressions.
return static_cast<int>((base - limit) / kPointerSize);
@@ -647,7 +648,8 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
SafepointEntry safepoint_entry;
Code* code = StackFrame::GetSafepointData(
isolate(), pc(), &safepoint_entry, &stack_slots);
- unsigned slot_space = stack_slots * kPointerSize;
+ unsigned slot_space =
+ stack_slots * kPointerSize - StandardFrameConstants::kFixedFrameSize;
// Visit the outgoing parameters.
Object** parameters_base = &Memory::Object_at(sp());
@@ -761,9 +763,7 @@ bool JavaScriptFrame::HasInlinedFrames() const {
int JavaScriptFrame::GetArgumentsLength() const {
// If there is an arguments adaptor frame get the arguments length from it.
if (has_adapted_arguments()) {
- STATIC_ASSERT(ArgumentsAdaptorFrameConstants::kLengthOffset ==
- StandardFrameConstants::kExpressionsOffset);
- return Smi::cast(GetExpression(caller_fp(), 0))->value();
+ return ArgumentsAdaptorFrame::GetLength(caller_fp());
} else {
return GetNumberOfIncomingArguments();
}
@@ -796,24 +796,21 @@ void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) const {
void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
DCHECK(functions->length() == 0);
- Code* code_pointer = LookupCode();
- int offset = static_cast<int>(pc() - code_pointer->address());
- FrameSummary summary(receiver(),
- function(),
- code_pointer,
- offset,
+ Code* code = LookupCode();
+ int offset = static_cast<int>(pc() - code->instruction_start());
+ AbstractCode* abstract_code = AbstractCode::cast(code);
+ FrameSummary summary(receiver(), function(), abstract_code, offset,
IsConstructor());
functions->Add(summary);
}
-
int JavaScriptFrame::LookupExceptionHandlerInTable(
- int* stack_slots, HandlerTable::CatchPrediction* prediction) {
+ int* stack_depth, HandlerTable::CatchPrediction* prediction) {
Code* code = LookupCode();
DCHECK(!code->is_optimized_code());
HandlerTable* table = HandlerTable::cast(code->handler_table());
int pc_offset = static_cast<int>(pc() - code->entry());
- return table->LookupRange(pc_offset, stack_slots, prediction);
+ return table->LookupRange(pc_offset, stack_depth, prediction);
}
@@ -826,7 +823,7 @@ void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function, Code* code,
PrintF(file, "+%d", code_offset);
if (print_line_number) {
SharedFunctionInfo* shared = function->shared();
- int source_pos = code->SourcePosition(pc);
+ int source_pos = code->SourcePosition(code_offset);
Object* maybe_script = shared->script();
if (maybe_script->IsScript()) {
Script* script = Script::cast(maybe_script);
@@ -896,26 +893,30 @@ void JavaScriptFrame::RestoreOperandStack(FixedArray* store) {
}
}
-
-FrameSummary::FrameSummary(Object* receiver, JSFunction* function, Code* code,
- int offset, bool is_constructor)
+FrameSummary::FrameSummary(Object* receiver, JSFunction* function,
+ AbstractCode* abstract_code, int code_offset,
+ bool is_constructor)
: receiver_(receiver, function->GetIsolate()),
function_(function),
- code_(code),
- offset_(offset),
+ abstract_code_(abstract_code),
+ code_offset_(code_offset),
is_constructor_(is_constructor) {}
-
void FrameSummary::Print() {
PrintF("receiver: ");
receiver_->ShortPrint();
PrintF("\nfunction: ");
function_->shared()->DebugName()->ShortPrint();
PrintF("\ncode: ");
- code_->ShortPrint();
- if (code_->kind() == Code::FUNCTION) PrintF(" NON-OPT");
- if (code_->kind() == Code::OPTIMIZED_FUNCTION) PrintF(" OPT");
- PrintF("\npc: %d\n", offset_);
+ abstract_code_->ShortPrint();
+ if (abstract_code_->IsCode()) {
+ Code* code = abstract_code_->GetCode();
+ if (code->kind() == Code::FUNCTION) PrintF(" UNOPT ");
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) PrintF(" OPT ");
+ } else {
+ PrintF(" BYTECODE ");
+ }
+ PrintF("\npc: %d\n", code_offset_);
}
@@ -964,11 +965,9 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
JSFunction* function;
if (opcode == Translation::LITERAL) {
function = JSFunction::cast(literal_array->get(it.Next()));
- } else if (opcode == Translation::STACK_SLOT) {
- function = JSFunction::cast(StackSlotAt(it.Next()));
} else {
- CHECK_EQ(Translation::JS_FRAME_FUNCTION, opcode);
- function = this->function();
+ CHECK_EQ(opcode, Translation::STACK_SLOT);
+ function = JSFunction::cast(StackSlotAt(it.Next()));
}
DCHECK_EQ(shared_info, function->shared());
@@ -982,8 +981,6 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
receiver = literal_array->get(it.Next());
} else if (opcode == Translation::STACK_SLOT) {
receiver = StackSlotAt(it.Next());
- } else if (opcode == Translation::JS_FRAME_FUNCTION) {
- receiver = this->function();
} else {
// The receiver is not in a stack slot nor in a literal. We give up.
it.Skip(Translation::NumberOfOperandsFor(opcode));
@@ -994,24 +991,26 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
receiver = isolate()->heap()->undefined_value();
}
- Code* const code = shared_info->code();
+ AbstractCode* abstract_code;
- unsigned pc_offset;
+ unsigned code_offset;
if (frame_opcode == Translation::JS_FRAME) {
+ Code* code = shared_info->code();
DeoptimizationOutputData* const output_data =
DeoptimizationOutputData::cast(code->deoptimization_data());
unsigned const entry =
Deoptimizer::GetOutputInfo(output_data, ast_id, shared_info);
- pc_offset =
- FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
- DCHECK_NE(0U, pc_offset);
+ code_offset = FullCodeGenerator::PcField::decode(entry);
+ abstract_code = AbstractCode::cast(code);
} else {
// TODO(rmcilroy): Modify FrameSummary to enable us to summarize
// based on the BytecodeArray and bytecode offset.
DCHECK_EQ(frame_opcode, Translation::INTERPRETED_FRAME);
- pc_offset = 0;
+ code_offset = 0;
+ abstract_code = AbstractCode::cast(shared_info->bytecode_array());
}
- FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
+ FrameSummary summary(receiver, function, abstract_code, code_offset,
+ is_constructor);
frames->Add(summary);
is_constructor = false;
} else if (frame_opcode == Translation::CONSTRUCT_STUB_FRAME) {
@@ -1034,7 +1033,7 @@ int OptimizedFrame::LookupExceptionHandlerInTable(
DCHECK(code->is_optimized_code());
HandlerTable* table = HandlerTable::cast(code->handler_table());
int pc_offset = static_cast<int>(pc() - code->entry());
- *stack_slots = code->stack_slots();
+ if (stack_slots) *stack_slots = code->stack_slots();
return table->LookupReturn(pc_offset, prediction);
}
@@ -1105,11 +1104,9 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) const {
Object* function;
if (opcode == Translation::LITERAL) {
function = literal_array->get(it.Next());
- } else if (opcode == Translation::STACK_SLOT) {
- function = StackSlotAt(it.Next());
} else {
- CHECK_EQ(Translation::JS_FRAME_FUNCTION, opcode);
- function = this->function();
+ CHECK_EQ(Translation::STACK_SLOT, opcode);
+ function = StackSlotAt(it.Next());
}
functions->Add(JSFunction::cast(function));
}
@@ -1127,6 +1124,64 @@ Object* OptimizedFrame::StackSlotAt(int index) const {
return Memory::Object_at(fp() + StackSlotOffsetRelativeToFp(index));
}
+int InterpretedFrame::LookupExceptionHandlerInTable(
+ int* context_register, HandlerTable::CatchPrediction* prediction) {
+ BytecodeArray* bytecode = function()->shared()->bytecode_array();
+ HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
+ int pc_offset = GetBytecodeOffset() + 1; // Point after current bytecode.
+ return table->LookupRange(pc_offset, context_register, prediction);
+}
+
+int InterpretedFrame::GetBytecodeOffset() const {
+ const int index = InterpreterFrameConstants::kBytecodeOffsetExpressionIndex;
+ DCHECK_EQ(
+ InterpreterFrameConstants::kBytecodeOffsetFromFp,
+ InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+ int raw_offset = Smi::cast(GetExpression(index))->value();
+ return raw_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
+}
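GetBytecodeOffset() and PatchBytecodeOffset() translate between two views of the same position: the frame slot stores an offset relative to the tagged BytecodeArray pointer, while callers want an offset into the bytecode stream that starts after the array header. A worked round-trip with illustrative constants (the real header size depends on the build configuration):

    #include <cassert>

    const int kHeaderSizeSketch = 24;  // stands in for BytecodeArray::kHeaderSize
    const int kHeapObjectTagSketch = 1;

    int ToStreamOffset(int raw) {
      return raw - kHeaderSizeSketch + kHeapObjectTagSketch;
    }
    int ToRawOffset(int stream) {
      return stream + kHeaderSizeSketch - kHeapObjectTagSketch;
    }

    int main() {
      // Patch then read back: the two conversions are exact inverses.
      assert(ToStreamOffset(ToRawOffset(7)) == 7);
    }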
+
+void InterpretedFrame::PatchBytecodeOffset(int new_offset) {
+ const int index = InterpreterFrameConstants::kBytecodeOffsetExpressionIndex;
+ DCHECK_EQ(
+ InterpreterFrameConstants::kBytecodeOffsetFromFp,
+ InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+ int raw_offset = new_offset + BytecodeArray::kHeaderSize - kHeapObjectTag;
+ SetExpression(index, Smi::FromInt(raw_offset));
+}
+
+Object* InterpretedFrame::GetBytecodeArray() const {
+ const int index = InterpreterFrameConstants::kBytecodeArrayExpressionIndex;
+ DCHECK_EQ(
+ InterpreterFrameConstants::kBytecodeArrayFromFp,
+ InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+ return GetExpression(index);
+}
+
+void InterpretedFrame::PatchBytecodeArray(Object* bytecode_array) {
+ const int index = InterpreterFrameConstants::kBytecodeArrayExpressionIndex;
+ DCHECK_EQ(
+ InterpreterFrameConstants::kBytecodeArrayFromFp,
+ InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+ SetExpression(index, bytecode_array);
+}
+
+Object* InterpretedFrame::GetInterpreterRegister(int register_index) const {
+ const int index = InterpreterFrameConstants::kRegisterFileExpressionIndex;
+ DCHECK_EQ(
+ InterpreterFrameConstants::kRegisterFilePointerFromFp,
+ InterpreterFrameConstants::kExpressionsOffset - index * kPointerSize);
+ return GetExpression(index + register_index);
+}
+
+void InterpretedFrame::Summarize(List<FrameSummary>* functions) {
+ DCHECK(functions->length() == 0);
+ AbstractCode* abstract_code =
+ AbstractCode::cast(function()->shared()->bytecode_array());
+ FrameSummary summary(receiver(), function(), abstract_code,
+ GetBytecodeOffset(), IsConstructor());
+ functions->Add(summary);
+}
int ArgumentsAdaptorFrame::GetNumberOfIncomingArguments() const {
return Smi::cast(GetExpression(0))->value();
@@ -1137,19 +1192,21 @@ Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
return fp() + StandardFrameConstants::kCallerSPOffset;
}
-
-Address InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
+int ArgumentsAdaptorFrame::GetLength(Address fp) {
+ const int offset = ArgumentsAdaptorFrameConstants::kLengthOffset;
+ return Smi::cast(Memory::Object_at(fp + offset))->value();
}
-
Code* ArgumentsAdaptorFrame::unchecked_code() const {
return isolate()->builtins()->builtin(
Builtins::kArgumentsAdaptorTrampoline);
}
+Address InternalFrame::GetCallerStackPointer() const {
+ // Internal frames have no arguments. The stack pointer of the
+ // caller is at a fixed offset from the frame pointer.
+ return fp() + StandardFrameConstants::kCallerSPOffset;
+}
Code* InternalFrame::unchecked_code() const {
const int offset = InternalFrameConstants::kCodeOffset;
@@ -1212,7 +1269,8 @@ void JavaScriptFrame::Print(StringStream* accumulator,
Address pc = this->pc();
if (code != NULL && code->kind() == Code::FUNCTION &&
pc >= code->instruction_start() && pc < code->instruction_end()) {
- int source_pos = code->SourcePosition(pc);
+ int offset = static_cast<int>(pc - code->instruction_start());
+ int source_pos = code->SourcePosition(offset);
int line = script->GetLineNumber(source_pos) + 1;
accumulator->Add(":%d", line);
} else {
@@ -1369,7 +1427,6 @@ void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
-
void InternalFrame::Iterate(ObjectVisitor* v) const {
// Internal frames only have object pointers on the expression stack
// as they never have any arguments.
@@ -1467,10 +1524,6 @@ Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object,
Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
Address inner_pointer) {
Heap* heap = isolate_->heap();
- if (!heap->code_space()->Contains(inner_pointer) &&
- !heap->lo_space()->Contains(inner_pointer)) {
- return nullptr;
- }
// Check if the inner pointer points into a large object chunk.
LargePage* large_page = heap->lo_space()->FindPage(inner_pointer);
@@ -1478,6 +1531,10 @@ Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
return GcSafeCastToCode(large_page->GetObject(), inner_pointer);
}
+ if (!heap->code_space()->Contains(inner_pointer)) {
+ return nullptr;
+ }
+
// Iterate through the page until we reach the end or find an object starting
// after the inner pointer.
Page* page = Page::FromAddress(inner_pointer);
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 674d7daeca..f33eb16741 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -111,6 +111,43 @@ class StackHandler BASE_EMBEDDED {
V(CONSTRUCT, ConstructFrame) \
V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
+// Every pointer in a frame has a slot id. On 32-bit platforms, doubles consume
+// two slots.
+//
+// Stack slot indices >= 0 access the callee stack with slot 0 corresponding to
+// the callee's saved return address and 1 corresponding to the saved frame
+// pointer. Some frames have additional information stored in the fixed header,
+// for example JSFunctions store the function context and marker in the fixed
+// header, with slot index 2 corresponding to the current function context and 3
+// corresponding to the frame marker/JSFunction.
+//
+// slot JS frame
+// +-----------------+--------------------------------
+// -n-1 | parameter 0 | ^
+// |- - - - - - - - -| |
+// -n | | Caller
+// ... | ... | frame slots
+// -2 | parameter n-1 | (slot < 0)
+// |- - - - - - - - -| |
+// -1 | parameter n | v
+// -----+-----------------+--------------------------------
+// 0 | return addr | ^ ^
+// |- - - - - - - - -| | |
+// 1 | saved frame ptr | Fixed |
+// |- - - - - - - - -| Header <-- frame ptr |
+// 2 | [Constant Pool] | | |
+// |- - - - - - - - -| | |
+// 2+cp | Context | | if a constant pool |
+// |- - - - - - - - -| | is used, cp = 1, |
+// 3+cp |JSFunction/Marker| v otherwise, cp = 0 |
+// +-----------------+---- |
+// 4+cp | | ^ Callee
+// |- - - - - - - - -| | frame slots
+// ... | | Frame slots (slot >= 0)
+// |- - - - - - - - -| | |
+// | | v |
+// -----+-----------------+----- <-- stack ptr -------------
+//
class StandardFrameConstants : public AllStatic {
public:
@@ -178,24 +215,47 @@ class InterpreterFrameConstants : public AllStatic {
public:
// Fixed frame includes new.target and bytecode offset.
static const int kFixedFrameSize =
- StandardFrameConstants::kFixedFrameSize + 2 * kPointerSize;
+ StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
static const int kFixedFrameSizeFromFp =
- StandardFrameConstants::kFixedFrameSizeFromFp + 2 * kPointerSize;
+ StandardFrameConstants::kFixedFrameSizeFromFp + 3 * kPointerSize;
// FP-relative.
- static const int kRegisterFilePointerFromFp =
+ static const int kNewTargetFromFp =
+ -StandardFrameConstants::kFixedFrameSizeFromFp - 1 * kPointerSize;
+ static const int kBytecodeArrayFromFp =
+ -StandardFrameConstants::kFixedFrameSizeFromFp - 2 * kPointerSize;
+ static const int kBytecodeOffsetFromFp =
-StandardFrameConstants::kFixedFrameSizeFromFp - 3 * kPointerSize;
+ static const int kRegisterFilePointerFromFp =
+ -StandardFrameConstants::kFixedFrameSizeFromFp - 4 * kPointerSize;
+
+ static const int kExpressionsOffset = kRegisterFilePointerFromFp;
+
+ // Expression index for {StandardFrame::GetExpressionAddress}.
+ static const int kBytecodeArrayExpressionIndex = -2;
+ static const int kBytecodeOffsetExpressionIndex = -1;
+ static const int kRegisterFileExpressionIndex = 0;
// Register file pointer relative.
static const int kLastParamFromRegisterPointer =
- StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize;
+ StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
static const int kBytecodeOffsetFromRegisterPointer = 1 * kPointerSize;
- static const int kNewTargetFromRegisterPointer = 2 * kPointerSize;
- static const int kFunctionFromRegisterPointer = 3 * kPointerSize;
- static const int kContextFromRegisterPointer = 4 * kPointerSize;
+ static const int kBytecodeArrayFromRegisterPointer = 2 * kPointerSize;
+ static const int kNewTargetFromRegisterPointer = 3 * kPointerSize;
+ static const int kFunctionFromRegisterPointer = 4 * kPointerSize;
+ static const int kContextFromRegisterPointer = 5 * kPointerSize;
};
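The new k*ExpressionIndex constants only work if they address the same slots as the FP-relative constants; that is exactly what the DCHECK_EQs added to frames.cc verify. The relationship can be checked at compile time, sketched here with illustrative stand-in values:

    // Each expression index must land on the matching FP-relative slot:
    // offset_from_fp == kExpressionsOffset - index * kPointerSize.
    const int kPointerSizeSketch = 8;
    const int kExpressionsOffsetSketch = -40;  // register file pointer
    const int kBytecodeOffsetFromFpSketch = -32;
    const int kBytecodeOffsetExpressionIndexSketch = -1;

    static_assert(
        kBytecodeOffsetFromFpSketch ==
            kExpressionsOffsetSketch -
                kBytecodeOffsetExpressionIndexSketch * kPointerSizeSketch,
        "expression index must line up with the FP-relative slot");

    int main() { return 0; }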
+inline static int FPOffsetToFrameSlot(int frame_offset) {
+ return StandardFrameConstants::kFixedSlotCountAboveFp - 1 -
+ frame_offset / kPointerSize;
+}
+
+inline static int FrameSlotToFPOffset(int slot) {
+ return (StandardFrameConstants::kFixedSlotCountAboveFp - 1 - slot) *
+ kPointerSize;
+}
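FPOffsetToFrameSlot and FrameSlotToFPOffset are exact inverses over the slot numbering described in the diagram above. A worked example, assuming kFixedSlotCountAboveFp == 2 (return address plus saved frame pointer; the actual constant may differ per configuration) and 8-byte pointers:

    #include <cassert>

    const int kFixedSlotCountAboveFpSketch = 2;
    const int kPointerSizeSketch = 8;

    int FPOffsetToFrameSlotSketch(int frame_offset) {
      return kFixedSlotCountAboveFpSketch - 1 - frame_offset / kPointerSizeSketch;
    }

    int FrameSlotToFPOffsetSketch(int slot) {
      return (kFixedSlotCountAboveFpSketch - 1 - slot) * kPointerSizeSketch;
    }

    int main() {
      // Slot 0 (the saved return address) sits one pointer above fp:
      assert(FrameSlotToFPOffsetSketch(0) == 8);
      assert(FPOffsetToFrameSlotSketch(8) == 0);
    }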
// Abstract base class for all stack frames.
class StackFrame BASE_EMBEDDED {
@@ -249,6 +309,7 @@ class StackFrame BASE_EMBEDDED {
bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; }
bool is_exit() const { return type() == EXIT; }
bool is_optimized() const { return type() == OPTIMIZED; }
+ bool is_interpreted() const { return type() == INTERPRETED; }
bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
bool is_internal() const { return type() == INTERNAL; }
bool is_stub_failure_trampoline() const {
@@ -485,7 +546,6 @@ class StandardFrame: public StackFrame {
inline Object* GetExpression(int index) const;
inline void SetExpression(int index, Object* value);
int ComputeExpressionsCount() const;
- static Object* GetExpression(Address fp, int index);
void SetCallerFp(Address caller_fp) override;
@@ -516,8 +576,7 @@ class StandardFrame: public StackFrame {
void IterateExpressions(ObjectVisitor* v) const;
// Returns the address of the n'th expression stack element.
- Address GetExpressionAddress(int n) const;
- static Address GetExpressionAddress(Address fp, int n);
+ virtual Address GetExpressionAddress(int n) const;
// Determines if the standard frame for the given frame pointer is
// an arguments adaptor frame.
@@ -538,14 +597,14 @@ class StandardFrame: public StackFrame {
class FrameSummary BASE_EMBEDDED {
public:
- FrameSummary(Object* receiver, JSFunction* function, Code* code, int offset,
+ FrameSummary(Object* receiver, JSFunction* function,
+ AbstractCode* abstract_code, int code_offset,
bool is_constructor);
Handle<Object> receiver() { return receiver_; }
Handle<JSFunction> function() { return function_; }
- Handle<Code> code() { return code_; }
- Address pc() { return code_->address() + offset_; }
- int offset() { return offset_; }
+ Handle<AbstractCode> abstract_code() { return abstract_code_; }
+ int code_offset() { return code_offset_; }
bool is_constructor() { return is_constructor_; }
void Print();
@@ -553,8 +612,8 @@ class FrameSummary BASE_EMBEDDED {
private:
Handle<Object> receiver_;
Handle<JSFunction> function_;
- Handle<Code> code_;
- int offset_;
+ Handle<AbstractCode> abstract_code_;
+ int code_offset_;
bool is_constructor_;
};
@@ -617,9 +676,12 @@ class JavaScriptFrame: public StandardFrame {
virtual void Summarize(List<FrameSummary>* frames);
// Lookup exception handler for current {pc}, returns -1 if none found. Also
- // returns the expected number of stack slots at the handler site.
+ // returns data associated with the handler site specific to the frame type:
+ // - JavaScriptFrame : Data is the stack depth on entry to the try-block.
+ // - OptimizedFrame : Data is the stack slot count of the entire frame.
+ // - InterpretedFrame: Data is the register index holding the context.
virtual int LookupExceptionHandlerInTable(
- int* stack_slots, HandlerTable::CatchPrediction* prediction);
+ int* data, HandlerTable::CatchPrediction* prediction);
// Architecture-specific register description.
static Register fp_register();
@@ -691,10 +753,9 @@ class OptimizedFrame : public JavaScriptFrame {
void Summarize(List<FrameSummary>* frames) override;
- // Lookup exception handler for current {pc}, returns -1 if none found. Also
- // returns the expected number of stack slots at the handler site.
+ // Lookup exception handler for current {pc}, returns -1 if none found.
int LookupExceptionHandlerInTable(
- int* stack_slots, HandlerTable::CatchPrediction* prediction) override;
+ int* data, HandlerTable::CatchPrediction* prediction) override;
DeoptimizationInputData* GetDeoptimizationData(int* deopt_index) const;
@@ -711,11 +772,38 @@ class OptimizedFrame : public JavaScriptFrame {
class InterpretedFrame : public JavaScriptFrame {
+ public:
Type type() const override { return INTERPRETED; }
+ // Lookup exception handler for current {pc}, returns -1 if none found.
+ int LookupExceptionHandlerInTable(
+ int* data, HandlerTable::CatchPrediction* prediction) override;
+
+ // Returns the current offset into the bytecode stream.
+ int GetBytecodeOffset() const;
+
+ // Updates the current offset into the bytecode stream, mainly used for stack
+ // unwinding to continue execution at a different bytecode offset.
+ void PatchBytecodeOffset(int new_offset);
+
+ // Returns the frame's current bytecode array.
+ Object* GetBytecodeArray() const;
+
+ // Updates the frame's BytecodeArray with |bytecode_array|. Used by the
+ // debugger to swap execution onto a BytecodeArray patched with breakpoints.
+ void PatchBytecodeArray(Object* bytecode_array);
+
+ // Access to the interpreter register file for this frame.
+ Object* GetInterpreterRegister(int register_index) const;
+
+ // Build a list with summaries for this frame including all inlined frames.
+ void Summarize(List<FrameSummary>* frames) override;
+
protected:
inline explicit InterpretedFrame(StackFrameIteratorBase* iterator);
+ Address GetExpressionAddress(int n) const override;
+
private:
friend class StackFrameIteratorBase;
};
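
The new accessors amount to reading and patching a bytecode offset stored in the frame, which is how the unwinder redirects execution to a handler. A rough model under an assumed frame layout (not the V8 types):

    #include <cassert>
    #include <utility>
    #include <vector>

    class MiniInterpretedFrame {
     public:
      explicit MiniInterpretedFrame(std::vector<unsigned char> bytecodes)
          : bytecodes_(std::move(bytecodes)), offset_(0) {}

      int GetBytecodeOffset() const { return offset_; }

      // Redirect execution, e.g. to the first bytecode of a catch handler.
      void PatchBytecodeOffset(int new_offset) {
        assert(new_offset >= 0 &&
               new_offset < static_cast<int>(bytecodes_.size()));
        offset_ = new_offset;
      }

     private:
      std::vector<unsigned char> bytecodes_;
      int offset_;  // stands in for the frame slot holding the offset
    };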
@@ -740,6 +828,8 @@ class ArgumentsAdaptorFrame: public JavaScriptFrame {
void Print(StringStream* accumulator, PrintMode mode,
int index) const override;
+ static int GetLength(Address fp);
+
protected:
inline explicit ArgumentsAdaptorFrame(StackFrameIteratorBase* iterator);
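
GetLength lets callers read the actual argument count straight out of an adaptor frame given only its frame pointer. A sketch under an assumed slot layout; the real offset is defined by ArgumentsAdaptorFrameConstants, and kLengthSlot here is purely illustrative:

    #include <cstdint>

    int AdaptorFrameLength(const intptr_t* fp) {
      const int kLengthSlot = 2;      // assumed index, not V8's actual offset
      // The count is stored as a smi; the shift assumes 32-bit smi tagging.
      return static_cast<int>(fp[kLengthSlot] >> 1);
    }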
diff --git a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
index 25be8a6636..6e6a65511a 100644
--- a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
+++ b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
@@ -19,8 +19,7 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
-
+#define __ ACCESS_MASM(masm())
// A patch site is a location in the code that can be patched. This
// class has a number of methods to emit the code which is patchable and the
@@ -77,6 +76,7 @@ class JumpPatchSite BASE_EMBEDDED {
}
private:
+ MacroAssembler* masm() { return masm_; }
MacroAssembler* masm_;
Label patch_site_;
#ifdef DEBUG
@@ -110,13 +110,6 @@ void FullCodeGenerator::Generate() {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop-at");
- }
-#endif
-
if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
@@ -137,6 +130,7 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
+ OperandStackDepthIncrement(locals_count);
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
@@ -264,21 +258,12 @@ void FullCodeGenerator::Generate() {
Variable* rest_param = scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ mov(RestParamAccessDescriptor::parameter_count(),
- Operand(Smi::FromInt(num_parameters)));
- __ add(RestParamAccessDescriptor::parameter_pointer(), fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(RestParamAccessDescriptor::rest_parameter_index(),
- Operand(Smi::FromInt(rest_index)));
- function_in_register_r1 = false;
-
- RestParamAccessStub stub(isolate());
+ if (!function_in_register_r1) {
+ __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ FastNewRestParameterStub stub(isolate());
__ CallStub(&stub);
-
+ function_in_register_r1 = false;
SetVar(rest_param, r0, r1, r2);
}
@@ -286,28 +271,20 @@ void FullCodeGenerator::Generate() {
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
- DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
if (!function_in_register_r1) {
// Load this again, if it's used by the local context below.
__ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ mov(ArgumentsAccessNewDescriptor::parameter_count(),
- Operand(Smi::FromInt(num_parameters)));
- __ add(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
-
- // Arguments to ArgumentsAccessStub:
- // function, parameter pointer, parameter count.
- // The stub will rewrite parameter pointer and parameter count if the
- // previous stack frame was an arguments adapter frame.
- bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
- ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
- is_unmapped, literal()->has_duplicate_parameters());
- ArgumentsAccessStub stub(isolate(), type);
- __ CallStub(&stub);
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
+ FastNewStrictArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ } else if (literal()->has_duplicate_parameters()) {
+ __ Push(r1);
+ __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+ } else {
+ FastNewSloppyArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ }
SetVar(arguments, r0, r1, r2);
}
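
The replacement code picks one of three allocation paths instead of funneling everything through ArgumentsAccessStub. The selection logic mirrors the branches above; a minimal restatement:

    enum class ArgumentsPath { kFastStrict, kGenericSloppy, kFastSloppy };

    // Strict functions and functions without simple parameters get unmapped
    // (strict) arguments; sloppy functions with duplicate parameters must use
    // the generic runtime path; everything else takes the fast sloppy stub.
    ArgumentsPath ChooseArgumentsPath(bool is_strict, bool simple_parameters,
                                      bool has_duplicate_parameters) {
      if (is_strict || !simple_parameters) return ArgumentsPath::kFastStrict;
      if (has_duplicate_parameters) return ArgumentsPath::kGenericSloppy;
      return ArgumentsPath::kFastSloppy;
    }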
@@ -439,6 +416,30 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
}
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+ bool is_tail_call) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ b(pl, &ok);
+ // Don't need to save result register if we are going to do a tail call.
+ if (!is_tail_call) {
+ __ push(r0);
+ }
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+ if (!is_tail_call) {
+ __ pop(r0);
+ }
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+}
void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
@@ -452,24 +453,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(r0);
__ CallRuntime(Runtime::kTraceExit);
}
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ b(pl, &ok);
- __ push(r0);
- __ Call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- __ pop(r0);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ EmitProfilingCounterHandlingForReturnSequence(false);
// Make sure that the constant pool is not emitted inside of the return
// sequence.
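
The factored-out helper treats the function exit as a back edge whose weight grows with the distance (in code bytes) from the function entry. A worked model of the computation; the constants are placeholders standing in for the FLAG defaults and per-architecture multiplier, not authoritative values:

    #include <algorithm>

    int BackEdgeWeight(bool should_self_optimize, int pc_offset) {
      const int kInterruptBudget = 6144;    // assumed FLAG_interrupt_budget
      const int kSelfOptCount = 130;        // assumed FLAG_self_opt_count
      const int kMaxBackEdgeWeight = 127;   // assumed cap
      const int kCodeSizeMultiplier = 149;  // assumed ARM multiplier
      if (should_self_optimize) return kInterruptBudget / kSelfOptCount;
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, pc_offset / kCodeSizeMultiplier));
    }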
@@ -492,7 +476,7 @@ void FullCodeGenerator::EmitReturnSequence() {
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
- __ push(result_register());
+ codegen()->PushOperand(result_register());
}
@@ -509,7 +493,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Heap::RootListIndex index) const {
__ LoadRoot(result_register(), index);
- __ push(result_register());
+ codegen()->PushOperand(result_register());
}
@@ -544,7 +528,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
// Immediates cannot be pushed directly.
__ mov(result_register(), Operand(lit));
- __ push(result_register());
+ codegen()->PushOperand(result_register());
}
@@ -553,7 +537,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ b(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -578,41 +562,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
}
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
DCHECK(count > 0);
- if (count > 1) __ Drop(count - 1);
+ if (count > 1) codegen()->DropOperands(count - 1);
__ str(reg, MemOperand(sp, 0));
}
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
DCHECK(materialize_true == materialize_false);
@@ -643,7 +600,7 @@ void FullCodeGenerator::StackValueContext::Plug(
__ bind(materialize_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ bind(&done);
- __ push(ip);
+ codegen()->PushOperand(ip);
}
@@ -665,7 +622,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
__ LoadRoot(ip, value_root_index);
- __ push(ip);
+ codegen()->PushOperand(ip);
}
@@ -789,7 +746,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
+ if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
__ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ CompareRoot(r1, Heap::kWithContextMapRootIndex);
@@ -908,11 +865,11 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ mov(r2, Operand(variable->name()));
- __ Push(r2);
+ PushOperand(r2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -986,8 +943,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetExpressionPosition(clause);
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
- strength(language_mode())).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1010,7 +967,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Discard the test value and jump to the default if present, otherwise to
// the end of the statement.
__ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
+ DropOperands(1); // Switch value is no longer needed.
if (default_clause == NULL) {
__ b(nested_statement.break_label());
} else {
@@ -1041,25 +998,21 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. If the object is null or undefined, skip
- // over the loop. See ECMA-262 version 5, section 12.6.4.
+ // Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, &exit);
- Register null_value = r5;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmp(r0, null_value);
- __ b(eq, &exit);
+ OperandStackDepthIncrement(ForIn::kElementCount);
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
- // Convert the object to a JS object.
+ // If the object is null or undefined, skip over the loop, otherwise convert
+ // it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
Label convert, done_convert;
__ JumpIfSmi(r0, &convert);
__ CompareObjectType(r0, r1, r1, FIRST_JS_RECEIVER_TYPE);
__ b(ge, &done_convert);
+ __ CompareRoot(r0, Heap::kNullValueRootIndex);
+ __ b(eq, &exit);
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ b(eq, &exit);
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
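
The reordered prologue implements the ES5 12.6.4 rule: null and undefined skip the loop entirely, and any other non-receiver value is boxed with ToObject before enumeration. The control flow, restated as a small sketch:

    enum class ValueKind { kNull, kUndefined, kReceiver, kOtherPrimitive };

    // Returns false if the for-in body should be skipped entirely;
    // otherwise boxes primitives so that |v| is a JS receiver on return.
    bool PrepareForInEnumerable(ValueKind* v) {
      if (*v == ValueKind::kNull || *v == ValueKind::kUndefined) return false;
      if (*v != ValueKind::kReceiver) *v = ValueKind::kReceiver;  // ToObject
      return true;
    }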
@@ -1067,16 +1020,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(r0);
- // Check for proxies.
- Label call_runtime;
- __ CompareObjectType(r0, r1, r1, JS_PROXY_TYPE);
- __ b(eq, &call_runtime);
-
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- __ CheckEnumCache(null_value, &call_runtime);
+ // Note: Proxies never have an enum cache, so will always take the
+ // slow path.
+ Label call_runtime;
+ __ CheckEnumCache(&call_runtime);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
@@ -1087,7 +1038,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(r0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast);
+ __ CallRuntime(Runtime::kForInEnumerate);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1125,15 +1076,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register r0. Iterate through that.
__ bind(&fixed_array);
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(r1);
__ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- int vector_index = SmiFromSlot(slot)->value();
__ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(vector_index)));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(r1, r0); // Smi and array
__ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
+ __ Push(r1); // Fixed array length (as smi).
+ PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ mov(r0, Operand(Smi::FromInt(0)));
- __ Push(r1, r0); // Fixed array length (as smi) and initial index.
+ __ Push(r0); // Initial index.
// Generate code for doing the condition check.
__ bind(&loop);
@@ -1161,6 +1114,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(r4, Operand(r2));
__ b(eq, &update_each);
+  // We might get here from TurboFan or Crankshaft when something in the
+  // for-in loop body deopts, and only now notice in fullcodegen that we can
+  // no longer use the enum cache, i.e. we have left fast mode. Record that
+  // fact here, in case we later OSR back into this loop or reoptimize the
+  // whole function without rerunning the loop with the slow-mode object in
+  // fullcodegen (which would result in a deopt loop).
+ __ EmitLoadTypeFeedbackVector(r0);
+ __ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ __ str(r2, FieldMemOperand(r0, FixedArray::OffsetOfElementAt(vector_index)));
+
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
@@ -1200,7 +1163,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ Drop(5);
+ DropOperands(5);
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
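
The mid-loop feedback update above pins the for-in slot to the megamorphic sentinel once the enum cache has been abandoned, so a later OSR or reoptimization starts from slow mode instead of deopt-looping. A minimal model of that bookkeeping, with an illustrative sentinel value:

    #include <vector>

    struct MiniFeedbackVector {
      static constexpr int kMegamorphicSentinel = -1;  // illustrative value
      std::vector<int> slots;

      void MarkMegamorphic(int index) { slots[index] = kMegamorphicSentinel; }
      bool IsMegamorphic(int index) const {
        return slots[index] == kMegamorphicSentinel;
      }
    };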
@@ -1444,12 +1407,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// by eval-introduced variables.
EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ bind(&slow);
- __ mov(r1, Operand(var->name()));
- __ Push(cp, r1); // Context and name.
+ __ Push(var->name());
Runtime::FunctionId function_id =
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotNoReferenceError;
+ : Runtime::kLoadLookupSlotInsideTypeof;
__ CallRuntime(function_id);
__ bind(&done);
context()->Plug(r0);
@@ -1474,7 +1436,7 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
__ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ push(r1);
+ PushOperand(r1);
} else {
VisitForStackValue(expression);
if (NeedsHomeObject(expression)) {
@@ -1519,7 +1481,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
- __ push(r0); // Save result on stack
+ PushOperand(r0); // Save result on stack
result_saved = true;
}
switch (property->kind()) {
@@ -1551,7 +1513,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
// Duplicate receiver on stack.
__ ldr(r0, MemOperand(sp));
- __ push(r0);
+ PushOperand(r0);
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
@@ -1559,19 +1521,19 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
__ mov(r0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes
- __ push(r0);
- __ CallRuntime(Runtime::kSetProperty);
+ PushOperand(r0);
+ CallRuntimeWithOperands(Runtime::kSetProperty);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
case ObjectLiteral::Property::PROTOTYPE:
// Duplicate receiver on stack.
__ ldr(r0, MemOperand(sp));
- __ push(r0);
+ PushOperand(r0);
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
break;
@@ -1595,13 +1557,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
it != accessor_table.end();
++it) {
__ ldr(r0, MemOperand(sp)); // Duplicate receiver.
- __ push(r0);
+ PushOperand(r0);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
__ mov(r0, Operand(Smi::FromInt(NONE)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+ PushOperand(r0);
+ CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1618,18 +1580,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Expression* value = property->value();
if (!result_saved) {
- __ push(r0); // Save result on the stack
+ PushOperand(r0); // Save result on the stack
result_saved = true;
}
__ ldr(r0, MemOperand(sp)); // Duplicate receiver.
- __ push(r0);
+ PushOperand(r0);
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
} else {
@@ -1644,11 +1606,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
case ObjectLiteral::Property::COMPUTED:
if (property->emit_store()) {
- __ mov(r0, Operand(Smi::FromInt(NONE)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
@@ -1657,15 +1619,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
- __ mov(r0, Operand(Smi::FromInt(NONE)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ mov(r0, Operand(Smi::FromInt(NONE)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1724,14 +1684,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
int array_index = 0;
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- if (subexpr->IsSpread()) break;
+ DCHECK(!subexpr->IsSpread());
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(r0);
+ PushOperand(r0);
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
@@ -1752,21 +1712,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   // (inclusive) and these elements get appended to the array. Note that the
   // number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Pop(r0);
+ PopOperand(r0);
result_saved = false;
}
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- __ Push(r0);
- if (subexpr->IsSpread()) {
- VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement);
- }
+ PushOperand(r0);
+ DCHECK(!subexpr->IsSpread());
+ VisitForStackValue(subexpr);
+ CallRuntimeWithOperands(Runtime::kAppendElement);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1807,12 +1762,12 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
property->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
property->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
const Register scratch = r1;
__ ldr(scratch, MemOperand(sp, kPointerSize));
- __ Push(scratch);
- __ Push(result_register());
+ PushOperand(scratch);
+ PushOperand(result_register());
}
break;
case KEYED_SUPER_PROPERTY:
@@ -1821,14 +1776,14 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(
property->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(property->key());
- __ Push(result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
const Register scratch = r1;
__ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch);
+ PushOperand(scratch);
__ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch);
- __ Push(result_register());
+ PushOperand(scratch);
+ PushOperand(result_register());
}
break;
case KEYED_PROPERTY:
@@ -1874,7 +1829,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- __ push(r0); // Left operand goes on the stack.
+ PushOperand(r0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
AccumulatorValueContext context(this);
@@ -1940,8 +1895,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ jmp(&suspend);
__ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
__ RecordGeneratorContinuation();
- __ jmp(&resume);
+ __ pop(r1);
+ __ cmp(r1, Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
+ __ b(ne, &resume);
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
__ bind(&suspend);
VisitForAccumulatorValue(expr->generator_object());
@@ -1959,7 +1922,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
- __ pop(result_register());
+ PopOperand(result_register());
EmitReturnSequence();
__ bind(&resume);
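
The new continuation protocol: the resumer pushes the resume mode before jumping back in, and the continuation pops it to decide whether to keep executing (next) or to box the input into a done iterator result and unwind (return). A compact model of that decision:

    enum class ResumeMode { kNext, kReturn };

    struct IterResult {
      int value;  // stands in for the boxed input value
      bool done;
    };

    // Consumes the popped resume mode; kReturn short-circuits the generator
    // body, mirroring EmitCreateIteratorResult(true) + EmitUnwindAndReturn().
    IterResult OnContinuation(ResumeMode mode, int input, bool* keep_running) {
      if (mode == ResumeMode::kReturn) {
        *keep_running = false;
        return {input, true};
      }
      *keep_running = true;
      return {input, false};  // body resumes; this result is unused
    }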
@@ -1968,123 +1931,15 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
}
case Yield::kFinal: {
- VisitForAccumulatorValue(expr->generator_object());
- __ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
- __ str(r1, FieldMemOperand(result_register(),
- JSGeneratorObject::kContinuationOffset));
// Pop value from top-of-stack slot, box result into result register.
+ OperandStackDepthDecrement(1);
EmitCreateIteratorResult(true);
- EmitUnwindBeforeReturn();
- EmitReturnSequence();
+ EmitUnwindAndReturn();
break;
}
- case Yield::kDelegating: {
- VisitForStackValue(expr->generator_object());
-
- // Initial stack layout is as follows:
- // [sp + 1 * kPointerSize] iter
- // [sp + 0 * kPointerSize] g
-
- Label l_catch, l_try, l_suspend, l_continuation, l_resume;
- Label l_next, l_call, l_loop;
- Register load_receiver = LoadDescriptor::ReceiverRegister();
- Register load_name = LoadDescriptor::NameRegister();
-
- // Initial send value is undefined.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(&l_next);
-
- // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
- __ bind(&l_catch);
- __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
- __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(load_name, r3, r0); // "throw", iter, except
- __ jmp(&l_call);
-
- // try { received = %yield result }
- // Shuffle the received result above a try handler and yield it without
- // re-boxing.
- __ bind(&l_try);
- __ pop(r0); // result
- int handler_index = NewHandlerTableEntry();
- EnterTryBlock(handler_index, &l_catch);
- const int try_block_size = TryCatch::kElementCount * kPointerSize;
- __ push(r0); // result
-
- __ jmp(&l_suspend);
- __ bind(&l_continuation);
- __ RecordGeneratorContinuation();
- __ jmp(&l_resume);
-
- __ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + try_block_size;
- __ ldr(r0, MemOperand(sp, generator_object_depth));
- __ push(r0); // g
- __ Push(Smi::FromInt(handler_index)); // handler-index
- DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
- __ mov(r1, Operand(Smi::FromInt(l_continuation.pos())));
- __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
- __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
- __ mov(r1, cp);
- __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ pop(r0); // result
- EmitReturnSequence();
- __ bind(&l_resume); // received in r0
- ExitTryBlock(handler_index);
-
- // receiver = iter; f = 'next'; arg = received;
- __ bind(&l_next);
-
- __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next"
- __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(load_name, r3, r0); // "next", iter, received
-
- // result = receiver[f](arg);
- __ bind(&l_call);
- __ ldr(load_receiver, MemOperand(sp, kPointerSize));
- __ ldr(load_name, MemOperand(sp, 2 * kPointerSize));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
- CallIC(ic, TypeFeedbackId::None());
- __ mov(r1, r0);
- __ str(r1, MemOperand(sp, 2 * kPointerSize));
- SetCallPosition(expr);
- __ mov(r0, Operand(1));
- __ Call(
- isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
-
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The function is still on the stack; drop it.
-
- // if (!result.done) goto l_try;
- __ bind(&l_loop);
- __ Move(load_receiver, r0);
-
- __ push(load_receiver); // save result
- __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF); // r0=result.done
- Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(bool_ic);
- __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
- __ b(ne, &l_try);
-
- // result.value
- __ pop(load_receiver); // result
- __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF); // r0=result.value
- context()->DropAndPlug(2, r0); // drop iter and g
- break;
- }
+ case Yield::kDelegating:
+ UNREACHABLE();
}
}
@@ -2098,7 +1953,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// r1 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- __ pop(r1);
+ PopOperand(r1);
+
+ // Store input value into generator object.
+ __ str(result_register(),
+ FieldMemOperand(r1, JSGeneratorObject::kInputOffset));
+ __ mov(r2, result_register());
+ __ RecordWriteField(r1, JSGeneratorObject::kInputOffset, r2, r3,
+ kLRHasBeenSaved, kDontSaveFPRegs);
// Load suspended function and context.
__ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
@@ -2160,6 +2022,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ add(r3, r3, r2);
__ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
__ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
__ Jump(r3);
}
__ bind(&slow_resume);
@@ -2174,6 +2037,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(r2);
__ b(&push_operand_holes);
__ bind(&call_resume);
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
DCHECK(!result_register().is(r1));
__ Push(r1, result_register());
__ Push(Smi::FromInt(resume_mode));
@@ -2185,6 +2049,25 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
context()->Plug(result_register());
}
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
+ OperandStackDepthIncrement(2);
+ __ Push(reg1, reg2);
+}
+
+void FullCodeGenerator::PopOperands(Register reg1, Register reg2) {
+ OperandStackDepthDecrement(2);
+ __ Pop(reg1, reg2);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+ if (FLAG_debug_code) {
+ int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+ operand_stack_depth_ * kPointerSize;
+ __ sub(r0, fp, sp);
+ __ cmp(r0, Operand(expected_diff));
+ __ Assert(eq, kUnexpectedStackDepth);
+ }
+}
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
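
The PushOperands/PopOperands/EmitOperandStackDepthCheck helpers added in this hunk keep a compile-time count of values on the operand stack in lockstep with the emitted pushes and pops; the debug check then asserts that fp - sp matches the expected depth. The invariant, modeled directly:

    #include <cassert>

    class OperandStackTracker {
     public:
      void PushOperand() { ++depth_; }
      void PopOperand() { assert(depth_ > 0); --depth_; }
      // Mirrors EmitOperandStackDepthCheck: frame size is the fixed part plus
      // depth * pointer size (both values are whatever the target defines).
      void Check(long fp_minus_sp, long fixed_frame_size, long pointer_size) {
        assert(fp_minus_sp == fixed_frame_size + depth_ * pointer_size);
      }
     private:
      long depth_ = 0;
    };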
@@ -2218,37 +2101,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
__ mov(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object.
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- DCHECK(prop->IsSuperAccess());
-
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object, key.
- SetExpressionPosition(prop);
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -2264,7 +2117,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// Get the arguments.
Register left = r1;
Register right = r0;
- __ pop(left);
+ PopOperand(left);
// Perform combined smi check on both operands.
__ orr(scratch1, left, Operand(right));
@@ -2273,8 +2126,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2347,27 +2199,17 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- // Constructor is in r0.
- DCHECK(lit != NULL);
- __ push(r0);
-
- // No access check is needed here since the constructor is created by the
- // class literal.
- Register scratch = r1;
- __ ldr(scratch,
- FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
- __ push(scratch);
-
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
+ Register scratch = r1;
if (property->is_static()) {
__ ldr(scratch, MemOperand(sp, kPointerSize)); // constructor
} else {
__ ldr(scratch, MemOperand(sp, 0)); // prototype
}
- __ push(scratch);
+ PushOperand(scratch);
EmitPropertyKey(property, lit->GetIdForProperty(i));
// The static prototype property is read only. We handle the non computed
@@ -2390,36 +2232,31 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
case ObjectLiteral::Property::GETTER:
- __ mov(r0, Operand(Smi::FromInt(DONT_ENUM)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ mov(r0, Operand(Smi::FromInt(DONT_ENUM)));
- __ push(r0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
UNREACHABLE();
}
}
-
- // Set both the prototype and constructor to have fast properties, and also
- // freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
- __ pop(r1);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ PopOperand(r1);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2442,10 +2279,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_PROPERTY: {
- __ push(r0); // Preserve value.
+ PushOperand(r0); // Preserve value.
VisitForAccumulatorValue(prop->obj());
__ Move(StoreDescriptor::ReceiverRegister(), r0);
- __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
EmitLoadStoreICSlot(slot);
@@ -2453,7 +2290,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_SUPER_PROPERTY: {
- __ Push(r0);
+ PushOperand(r0);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2470,7 +2307,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_SUPER_PROPERTY: {
- __ Push(r0);
+ PushOperand(r0);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2490,12 +2327,12 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_PROPERTY: {
- __ push(r0); // Preserve value.
+ PushOperand(r0); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ Move(StoreDescriptor::NameRegister(), r0);
- __ Pop(StoreDescriptor::ValueRegister(),
- StoreDescriptor::ReceiverRegister());
+ PopOperands(StoreDescriptor::ValueRegister(),
+ StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2578,17 +2415,17 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
(var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r1, r0); // Context, name, language mode.
- __ CallRuntime(Runtime::kStoreLookupSlot);
+ __ Push(var->name());
+ __ Push(r0);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreLookupSlot_Strict
+ : Runtime::kStoreLookupSlot_Sloppy);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, r1);
- if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ ldr(r2, location);
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
@@ -2634,7 +2471,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
@@ -2651,10 +2488,11 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
- __ Push(key->value());
- __ Push(r0);
- __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy));
+ PushOperand(key->value());
+ PushOperand(r0);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy);
}
@@ -2664,16 +2502,17 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// stack : receiver ('this'), home_object, key
DCHECK(prop != NULL);
- __ Push(r0);
- __ CallRuntime((is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy));
+ PushOperand(r0);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy);
}
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+ PopOperands(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(r0));
Handle<Code> ic =
@@ -2708,7 +2547,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ Move(LoadDescriptor::NameRegister(), r0);
- __ pop(LoadDescriptor::ReceiverRegister());
+ PopOperand(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
@@ -2747,7 +2586,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ push(ip);
+ PushOperand(ip);
convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
@@ -2758,7 +2597,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
__ ldr(ip, MemOperand(sp, 0));
- __ push(ip);
+ PushOperand(ip);
__ str(r0, MemOperand(sp, kPointerSize));
convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
@@ -2781,12 +2620,11 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(r0);
- __ Push(r0);
+ PushOperand(r0);
+ PushOperand(r0);
__ ldr(scratch, MemOperand(sp, kPointerSize * 2));
- __ Push(scratch);
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
+ PushOperand(scratch);
+ PushOperand(key->value());
// Stack here:
// - home_object
@@ -2794,8 +2632,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ str(r0, MemOperand(sp, kPointerSize));
@@ -2824,7 +2661,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Push the target function under the receiver.
__ ldr(ip, MemOperand(sp, 0));
- __ push(ip);
+ PushOperand(ip);
__ str(r0, MemOperand(sp, kPointerSize));
EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -2843,12 +2680,11 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(r0);
- __ Push(r0);
+ PushOperand(r0);
+ PushOperand(r0);
__ ldr(scratch, MemOperand(sp, kPointerSize * 2));
- __ Push(scratch);
+ PushOperand(scratch);
VisitForStackValue(prop->key());
- __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2856,8 +2692,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ str(r0, MemOperand(sp, kPointerSize));
@@ -2879,12 +2714,23 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
SetCallPosition(expr);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+ if (expr->tail_call_mode() == TailCallMode::kAllow) {
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceTailCall);
+ }
+ // Update profiling counters before the tail call since we will
+ // not return to this function.
+ EmitProfilingCounterHandlingForReturnSequence(true);
+ }
+ Handle<Code> ic =
+ CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+ .code();
__ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
CallIC(ic);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
// Restore context register.
@@ -2929,11 +2775,9 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in r0)
   // and the object holding it (returned in r1).
- DCHECK(!context_register().is(r2));
- __ mov(r2, Operand(callee->name()));
- __ Push(context_register(), r2);
- __ CallRuntime(Runtime::kLoadLookupSlot);
- __ Push(r0, r1); // Function, receiver.
+ __ Push(callee->name());
+ __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+ PushOperands(r0, r1); // Function, receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the
@@ -2955,7 +2799,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
VisitForStackValue(callee);
// refEnv.WithBaseObject()
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ push(r2); // Reserved receiver slot.
+ PushOperand(r2); // Reserved receiver slot.
}
}
@@ -2989,7 +2833,10 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
SetCallPosition(expr);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ mov(r0, Operand(arg_count));
- __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ expr->tail_call_mode()),
+ RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3030,6 +2877,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3050,7 +2898,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
FieldMemOperand(result_register(), HeapObject::kMapOffset));
__ ldr(result_register(),
FieldMemOperand(result_register(), Map::kPrototypeOffset));
- __ Push(result_register());
+ PushOperand(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3072,6 +2920,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
__ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -3124,76 +2973,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, SIMD128_VALUE_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r2, FIRST_FUNCTION_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(hs, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ cmp(r2, Operand(0x80000000));
- __ cmp(r1, Operand(0x00000000), eq);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3282,64 +3061,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(r1);
- __ cmp(r0, r1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in r0.
- VisitForAccumulatorValue(args->at(0));
- __ mov(r1, r0);
- __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- // Get the number of formal parameters.
- __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset), eq);
-
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3408,28 +3129,6 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = nullptr;
- Label* if_false = nullptr;
- Label* fall_through = nullptr;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_DATE_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3441,7 +3140,7 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(index, value);
+ PopOperands(index, value);
if (FLAG_debug_code) {
__ SmiTst(value);
@@ -3474,7 +3173,7 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(index, value);
+ PopOperands(index, value);
if (FLAG_debug_code) {
__ SmiTst(value);
@@ -3497,34 +3196,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(r1); // r0 = value. r1 = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(r1, &done);
-
- // If the object is not a value type, return the value.
- __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
- __ b(ne, &done);
-
- // Store the value.
- __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ mov(r2, r0);
- __ RecordWriteField(
- r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3542,26 +3213,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into r0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- Label convert, done_convert;
- __ JumpIfSmi(r0, &convert);
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ CompareObjectType(r0, r1, r1, LAST_NAME_TYPE);
- __ b(ls, &done_convert);
- __ bind(&convert);
- __ Push(r0);
- __ CallRuntime(Runtime::kToName);
- __ bind(&done_convert);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3590,7 +3241,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Register index = r0;
Register result = r3;
- __ pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3636,7 +3287,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
Register scratch = r3;
Register result = r0;
- __ pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3686,6 +3337,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
// Call the target.
__ mov(r0, Operand(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(argc + 1);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -3738,243 +3390,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator, non_trivial_array,
- not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
- one_char_separator_loop_entry, long_separator_loop;
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(0));
-
- // All aliases of the same register have disjoint lifetimes.
- Register array = r0;
- Register elements = no_reg; // Will be r0.
- Register result = no_reg; // Will be r0.
- Register separator = r1;
- Register array_length = r2;
- Register result_pos = no_reg; // Will be r2
- Register string_length = r3;
- Register string = r4;
- Register element = r5;
- Register elements_end = r6;
- Register scratch = r9;
-
- // Separator operand is on the stack.
- __ pop(separator);
-
- // Check that the array is a JSArray.
- __ JumpIfSmi(array, &bailout);
- __ CompareObjectType(array, scratch, array_length, JS_ARRAY_TYPE);
- __ b(ne, &bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch, array_length, &bailout);
-
- // If the array has length zero, return the empty string.
- __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length, SetCC);
- __ b(ne, &non_trivial_array);
- __ LoadRoot(r0, Heap::kempty_stringRootIndex);
- __ b(&done);
-
- __ bind(&non_trivial_array);
-
- // Get the FixedArray containing array's elements.
- elements = array;
- __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
- array = no_reg; // End of array's live range.
-
- // Check that all array elements are sequential one-byte strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ mov(string_length, Operand::Zero());
- __ add(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- // Loop condition: while (element < elements_end).
- // Live values in registers:
- // elements: Fixed array of strings.
- // array_length: Length of the fixed array of strings (not smi)
- // separator: Separator string
- // string_length: Accumulated sum of string lengths (smi).
- // element: Current array element.
- // elements_end: Array end.
- if (generate_debug_code_) {
- __ cmp(array_length, Operand::Zero());
- __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
- }
- __ bind(&loop);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ JumpIfSmi(string, &bailout);
- __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
- __ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ add(string_length, string_length, Operand(scratch), SetCC);
- __ b(vs, &bailout);
- __ cmp(element, elements_end);
- __ b(lt, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ cmp(array_length, Operand(1));
- __ b(ne, &not_size_one_array);
- __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ b(&done);
-
- __ bind(&not_size_one_array);
-
- // Live values in registers:
- // separator: Separator string
- // array_length: Length of the array.
- // string_length: Sum of string lengths (smi).
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat one-byte string.
- __ JumpIfSmi(separator, &bailout);
- __ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
-
- // Add (separator length times array_length) - separator length to the
- // string_length to get the length of the result string. array_length is not
- // smi but the other values are, so the result is a smi
- __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ sub(string_length, string_length, Operand(scratch));
- __ smull(scratch, ip, array_length, scratch);
- // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
- // zero.
- __ cmp(ip, Operand::Zero());
- __ b(ne, &bailout);
- __ tst(scratch, Operand(0x80000000));
- __ b(ne, &bailout);
- __ add(string_length, string_length, Operand(scratch), SetCC);
- __ b(vs, &bailout);
- __ SmiUntag(string_length);
-
- // Bailout for large object allocations.
- __ cmp(string_length, Operand(Page::kMaxRegularHeapObjectSize));
- __ b(gt, &bailout);
-
- // Get first element in the array to free up the elements register to be used
- // for the result.
- __ add(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- result = elements; // End of live range for elements.
- elements = no_reg;
- // Live values in registers:
- // element: First array element
- // separator: Separator string
- // string_length: Length of result string (not smi)
- // array_length: Length of the array.
- __ AllocateOneByteString(result, string_length, scratch,
- string, // used as scratch
- elements_end, // used as scratch
- &bailout);
- // Prepare for looping. Set up elements_end to end of the array. Set
- // result_pos to the position of the result where to write the first
- // character.
- __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- result_pos = array_length; // End of live range for array_length.
- array_length = no_reg;
- __ add(result_pos,
- result,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // Check the length of the separator.
- __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ cmp(scratch, Operand(Smi::FromInt(1)));
- __ b(eq, &one_char_separator);
- __ b(gt, &long_separator);
-
- // Empty separator case
- __ bind(&empty_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
-
- // Copy next array element to the result.
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ cmp(element, elements_end);
- __ b(lt, &empty_separator_loop); // End while (element < elements_end).
- DCHECK(result.is(r0));
- __ b(&done);
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Replace separator with its one-byte character value.
- __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&one_char_separator_loop_entry);
-
- __ bind(&one_char_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Single separator one-byte char (in lower byte).
-
- // Copy the separator character to the result.
- __ strb(separator, MemOperand(result_pos, 1, PostIndex));
-
- // Copy next array element to the result.
- __ bind(&one_char_separator_loop_entry);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ cmp(element, elements_end);
- __ b(lt, &one_char_separator_loop); // End while (element < elements_end).
- DCHECK(result.is(r0));
- __ b(&done);
-
- // Long separator case (separator is more than one character). Entry is at the
- // label long_separator below.
- __ bind(&long_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Separator string.
-
- // Copy the separator to the result.
- __ ldr(string_length, FieldMemOperand(separator, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- separator,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch);
-
- __ bind(&long_separator);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ cmp(element, elements_end);
- __ b(lt, &long_separator_loop); // End while (element < elements_end).
- DCHECK(result.is(r0));
- __ b(&done);
-
- __ bind(&bailout);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- context()->Plug(r0);
-}
-
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
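The long stub deleted above hand-codes the fast path of Array.prototype.join: validate a fast-elements JSArray, sum the sequential one-byte element lengths with overflow checks, allocate the result string once, then copy with three specialized loops for the empty, single-character, and longer separator cases. A compact C++ rendering of the same control flow, where returning nullopt plays the role of the bailout label (the caller falls back to the generic path):

    #include <optional>
    #include <string>
    #include <vector>

    std::optional<std::string> FastOneByteJoin(
        const std::vector<std::string>& elements,
        const std::string& separator, size_t max_result_size) {
      if (elements.empty()) return std::string();    // zero length => ""
      if (elements.size() == 1) return elements[0];  // length 1 => elements[0]
      // string_length accumulation; the stub additionally bails on
      // non-one-byte elements and on smi overflow, which std::string
      // sidesteps here.
      size_t total = separator.size() * (elements.size() - 1);
      for (const std::string& s : elements) total += s.size();
      if (total > max_result_size) return std::nullopt;  // large-object bail
      std::string result;
      result.reserve(total);  // one allocation, like AllocateOneByteString
      for (size_t i = 0; i < elements.size(); ++i) {
        if (i != 0) result += separator;  // the three separator loops
        result += elements[i];            // collapse into one branch here
      }
      return result;
    }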
@@ -4008,7 +3423,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ b(&done);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject);
+ CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(r0);
@@ -4018,7 +3433,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push undefined as the receiver.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ push(r0);
+ PushOperand(r0);
__ LoadNativeContextSlot(expr->context_index(), r0);
}
@@ -4033,6 +3448,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ mov(r0, Operand(arg_count));
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
}
@@ -4046,7 +3462,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Push the target function under the receiver.
__ ldr(ip, MemOperand(sp, 0));
- __ push(ip);
+ PushOperand(ip);
__ str(r0, MemOperand(sp, kPointerSize));
// Push the arguments ("left-to-right").
@@ -4082,6 +3498,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the C runtime function.
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
+ OperandStackDepthDecrement(arg_count);
context()->Plug(r0);
}
}
@@ -4099,9 +3516,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(r0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4122,9 +3539,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- DCHECK(!context_register().is(r2));
- __ mov(r2, Operand(var->name()));
- __ Push(context_register(), r2);
+ __ Push(var->name());
__ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(r0);
}
@@ -4169,6 +3584,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_false,
&materialize_true,
&materialize_true);
+ if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
__ LoadRoot(r0, Heap::kTrueValueRootIndex);
@@ -4219,7 +3635,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
__ mov(ip, Operand(Smi::FromInt(0)));
- __ push(ip);
+ PushOperand(ip);
}
switch (assign_type) {
case NAMED_PROPERTY: {
@@ -4234,11 +3650,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
+ PushOperand(result_register());
const Register scratch = r1;
__ ldr(scratch, MemOperand(sp, kPointerSize));
- __ Push(scratch);
- __ Push(result_register());
+ PushOperand(scratch);
+ PushOperand(result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
@@ -4248,13 +3664,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
- __ Push(result_register());
+ PushOperand(result_register());
const Register scratch = r1;
__ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch);
+ PushOperand(scratch);
__ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch);
- __ Push(result_register());
+ PushOperand(scratch);
+ PushOperand(result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
@@ -4338,7 +3754,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(r0);
+ PushOperand(r0);
break;
case NAMED_PROPERTY:
__ str(r0, MemOperand(sp, kPointerSize));
@@ -4363,8 +3779,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetExpressionPosition(expr);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD,
- strength(language_mode())).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4397,7 +3812,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4433,8 +3848,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ Pop(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister());
+ PopOperands(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
@@ -4489,8 +3904,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ CompareRoot(r0, Heap::kFalseValueRootIndex);
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->undefined_string())) {
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(eq, if_true);
+ __ CompareRoot(r0, Heap::kNullValueRootIndex);
+ __ b(eq, if_false);
__ JumpIfSmi(r0, if_false);
// Check for undetectable objects => true.
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
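The changed typeof check above now rejects null explicitly before testing the map's undetectable bit. The assumption behind the new branch order (consistent with this hunk, though not visible in it) is that the null oddball also carries the undetectable bit, so without the first branch typeof null would wrongly report "undefined". The branch order as plain C++, with toy types:

    struct ToyMap { bool is_undetectable; };
    struct ToyObject { const ToyMap* map; };
    struct ToyValue {
      bool is_smi;
      bool is_null;
      const ToyObject* object;  // meaningful only when !is_smi
    };

    // typeof v == "undefined": false for null and smis, true for
    // undetectable heap objects (undefined itself, and document.all-style
    // objects).
    bool TypeofIsUndefined(const ToyValue& v) {
      if (v.is_null) return false;  // first: null is assumed undetectable too
      if (v.is_smi) return false;
      return v.object->map->is_undetectable;
    }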
@@ -4556,7 +3971,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty);
+ CallRuntimeWithOperands(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -4564,7 +3979,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
- __ pop(r1);
+ PopOperand(r1);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4576,7 +3991,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cond = CompareIC::ComputeCondition(op);
- __ pop(r1);
+ PopOperand(r1);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
@@ -4589,8 +4004,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&slow_case);
}
- Handle<Code> ic = CodeFactory::CompareIC(
- isolate(), op, strength(language_mode())).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4678,7 +4092,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
DCHECK(closure_scope->is_function_scope());
__ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
- __ push(ip);
+ PushOperand(ip);
}
@@ -4687,21 +4101,12 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
void FullCodeGenerator::EnterFinallyBlock() {
DCHECK(!result_register().is(r1));
- // Store result register while executing finally block.
- __ push(result_register());
- // Cook return address in link register to stack (smi encoded Code* delta)
- __ sub(r1, lr, Operand(masm_->CodeObject()));
- __ SmiTag(r1);
-
- // Store result register while executing finally block.
- __ push(r1);
-
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(ip, Operand(pending_message_obj));
__ ldr(r1, MemOperand(ip));
- __ push(r1);
+ PushOperand(r1);
ClearPendingMessage();
}
@@ -4710,19 +4115,11 @@ void FullCodeGenerator::EnterFinallyBlock() {
void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(r1));
// Restore pending message from stack.
- __ pop(r1);
+ PopOperand(r1);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(ip, Operand(pending_message_obj));
__ str(r1, MemOperand(ip));
-
- // Restore result register from stack.
- __ pop(r1);
-
- // Uncook return address and return.
- __ pop(result_register());
- __ SmiUntag(r1);
- __ add(pc, r1, Operand(masm_->CodeObject()));
}
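After this change, EnterFinallyBlock and ExitFinallyBlock only save and restore the pending message; the old return-address "cooking" disappears because control transfers out of a finally now go through the token scheme of DeferredCommands::EmitCommands, added in the next hunk. The surviving save/restore protocol, modelled with toy globals in place of the isolate's external reference:

    #include <vector>

    namespace toy {
    void* pending_message = nullptr;   // address_of_pending_message_obj
    std::vector<void*> operand_stack;

    void EnterFinallyBlock() {
      operand_stack.push_back(pending_message);  // save across finally body
      pending_message = nullptr;                 // ClearPendingMessage()
    }

    void ExitFinallyBlock() {
      pending_message = operand_stack.back();    // restore on the way out
      operand_stack.pop_back();
    }
    }  // namespace toy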
@@ -4742,6 +4139,32 @@ void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
Operand(SmiFromSlot(slot)));
}
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+ DCHECK(!result_register().is(r1));
+ __ Pop(result_register()); // Restore the accumulator.
+ __ Pop(r1); // Get the token.
+ for (DeferredCommand cmd : commands_) {
+ Label skip;
+ __ cmp(r1, Operand(Smi::FromInt(cmd.token)));
+ __ b(ne, &skip);
+ switch (cmd.command) {
+ case kReturn:
+ codegen_->EmitUnwindAndReturn();
+ break;
+ case kThrow:
+ __ Push(result_register());
+ __ CallRuntime(Runtime::kReThrow);
+ break;
+ case kContinue:
+ codegen_->EmitContinue(cmd.target);
+ break;
+ case kBreak:
+ codegen_->EmitBreak(cmd.target);
+ break;
+ }
+ __ bind(&skip);
+ }
+}
#undef __
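EmitCommands above is the dispatch half of the new finally protocol: code entering a finally pushes the accumulator and a per-transfer token, and on exit each recorded command compares its token against the live one and performs the matching transfer. A minimal model in C++, with the emitted compare-and-branch pairs folded into an ordinary loop:

    #include <cstdio>
    #include <vector>

    enum Command { kReturn, kThrow, kContinue, kBreak };

    struct DeferredCommand {
      Command command;
      int token;           // unique token recorded when the transfer
                           // was deferred
      const char* target;  // break/continue label; unused otherwise
    };

    // Mirrors the cmp / b(ne, &skip) chain: only the command whose token
    // matches the live token performs its transfer; all others are skipped.
    void DispatchDeferred(const std::vector<DeferredCommand>& commands,
                          int live_token, int accumulator) {
      for (const DeferredCommand& cmd : commands) {
        if (cmd.token != live_token) continue;  // __ b(ne, &skip)
        switch (cmd.command) {
          case kReturn:   std::printf("return %d\n", accumulator); return;
          case kThrow:    std::printf("rethrow %d\n", accumulator); return;
          case kContinue: std::printf("continue -> %s\n", cmd.target); return;
          case kBreak:    std::printf("break -> %s\n", cmd.target); return;
        }
      }
    }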
diff --git a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
index e4141bb65f..d0278e7421 100644
--- a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -20,7 +20,7 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
class JumpPatchSite BASE_EMBEDDED {
public:
@@ -76,6 +76,7 @@ class JumpPatchSite BASE_EMBEDDED {
}
private:
+ MacroAssembler* masm() { return masm_; }
MacroAssembler* masm_;
Label patch_site_;
Register reg_;
@@ -109,13 +110,6 @@ void FullCodeGenerator::Generate() {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ Debug("stop-at", __LINE__, BREAK);
- }
-#endif
-
if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
@@ -141,7 +135,7 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
-
+ OperandStackDepthIncrement(locals_count);
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
@@ -267,21 +261,12 @@ void FullCodeGenerator::Generate() {
Variable* rest_param = scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ Mov(RestParamAccessDescriptor::parameter_count(),
- Smi::FromInt(num_parameters));
- __ Add(RestParamAccessDescriptor::parameter_pointer(), fp,
- StandardFrameConstants::kCallerSPOffset + offset);
- __ Mov(RestParamAccessDescriptor::rest_parameter_index(),
- Smi::FromInt(rest_index));
-
- function_in_register_x1 = false;
-
- RestParamAccessStub stub(isolate());
+ if (!function_in_register_x1) {
+ __ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ FastNewRestParameterStub stub(isolate());
__ CallStub(&stub);
-
+ function_in_register_x1 = false;
SetVar(rest_param, x0, x1, x2);
}
@@ -289,28 +274,20 @@ void FullCodeGenerator::Generate() {
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
- DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
if (!function_in_register_x1) {
// Load this again, if it's used by the local context below.
__ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ Mov(ArgumentsAccessNewDescriptor::parameter_count(),
- Smi::FromInt(num_parameters));
- __ Add(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
- StandardFrameConstants::kCallerSPOffset + offset);
-
- // Arguments to ArgumentsAccessStub:
- // function, parameter pointer, parameter count.
- // The stub will rewrite parameter pointer and parameter count if the
- // previous stack frame was an arguments adapter frame.
- bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
- ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
- is_unmapped, literal()->has_duplicate_parameters());
- ArgumentsAccessStub stub(isolate(), type);
- __ CallStub(&stub);
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
+ FastNewStrictArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ } else if (literal()->has_duplicate_parameters()) {
+ __ Push(x1);
+ __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+ } else {
+ FastNewSloppyArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ }
SetVar(arguments, x0, x1, x2);
}
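The rewritten prologue above replaces the single ArgumentsAccessStub with a three-way split. The selection logic reduced to plain C++, with the enum values standing in for the stub and runtime calls:

    enum class ArgumentsPath {
      kFastNewStrictArgumentsStub,  // strict mode or non-simple parameters
      kNewSloppyArgumentsGeneric,   // sloppy with duplicates (runtime call)
      kFastNewSloppyArgumentsStub   // common sloppy fast case
    };

    ArgumentsPath ChooseArgumentsPath(bool strict_mode,
                                      bool simple_parameters,
                                      bool has_duplicate_parameters) {
      if (strict_mode || !simple_parameters)
        return ArgumentsPath::kFastNewStrictArgumentsStub;
      if (has_duplicate_parameters)
        return ArgumentsPath::kNewSloppyArgumentsGeneric;
      return ArgumentsPath::kFastNewSloppyArgumentsStub;
    }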
@@ -429,6 +406,30 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
}
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+ bool is_tail_call) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
+ weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ B(pl, &ok);
+  // No need to save the result register if we are going to do a tail call.

+ if (!is_tail_call) {
+ __ Push(x0);
+ }
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+ if (!is_tail_call) {
+ __ Pop(x0);
+ }
+ EmitProfilingCounterReset();
+ __ Bind(&ok);
+}
void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
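The new helper above centralizes the interrupt-budget bookkeeping shared by normal returns and, later in this diff, tail calls: the weight is either the self-optimization quotient or a distance-scaled value clamped to the back-edge maximum. The arithmetic as a standalone function, with the flag values passed in as parameters since they are configuration rather than constants:

    #include <algorithm>

    int ReturnSequenceWeight(bool should_self_optimize, int pc_offset,
                             int code_size_multiplier, int interrupt_budget,
                             int self_opt_count, int max_back_edge_weight) {
      if (should_self_optimize) {
        return interrupt_budget / self_opt_count;  // FLAG-derived quotient
      }
      // Pretend the exit is a backwards jump to the entry: the weight
      // scales with the code generated so far, clamped to
      // [1, max_back_edge_weight].
      int distance = pc_offset + code_size_multiplier / 2;
      return std::min(max_back_edge_weight,
                      std::max(1, distance / code_size_multiplier));
    }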
@@ -445,24 +446,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ CallRuntime(Runtime::kTraceExit);
DCHECK(x0.Is(result_register()));
}
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else {
- int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ B(pl, &ok);
- __ Push(x0);
- __ Call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- __ Pop(x0);
- EmitProfilingCounterReset();
- __ Bind(&ok);
+ EmitProfilingCounterHandlingForReturnSequence(false);
SetReturnPosition(literal());
const Register& current_sp = __ StackPointer();
@@ -486,7 +470,7 @@ void FullCodeGenerator::EmitReturnSequence() {
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
- __ Push(result_register());
+ codegen()->PushOperand(result_register());
}
@@ -504,7 +488,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Heap::RootListIndex index) const {
__ LoadRoot(result_register(), index);
- __ Push(result_register());
+ codegen()->PushOperand(result_register());
}
@@ -537,7 +521,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
// Immediates cannot be pushed directly.
__ Mov(result_register(), Operand(lit));
- __ Push(result_register());
+ codegen()->PushOperand(result_register());
}
@@ -546,7 +530,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ B(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -571,41 +555,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
}
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
DCHECK(count > 0);
- if (count > 1) __ Drop(count - 1);
+ if (count > 1) codegen()->DropOperands(count - 1);
__ Poke(reg, 0);
}
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Mov(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
DCHECK(materialize_true == materialize_false);
@@ -636,7 +593,7 @@ void FullCodeGenerator::StackValueContext::Plug(
__ Bind(materialize_false);
__ LoadRoot(x10, Heap::kFalseValueRootIndex);
__ Bind(&done);
- __ Push(x10);
+ codegen()->PushOperand(x10);
}
@@ -658,7 +615,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
__ LoadRoot(x10, value_root_index);
- __ Push(x10);
+ codegen()->PushOperand(x10);
}
@@ -787,7 +744,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
+ if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
__ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ CompareRoot(x1, Heap::kWithContextMapRootIndex);
@@ -908,11 +865,11 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ Function Declaration");
__ Mov(x2, Operand(variable->name()));
- __ Push(x2);
+ PushOperand(x2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -989,8 +946,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetExpressionPosition(clause);
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
- strength(language_mode())).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1010,7 +967,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Discard the test value and jump to the default if present, otherwise to
// the end of the statement.
__ Bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
+ DropOperands(1); // Switch value is no longer needed.
if (default_clause == NULL) {
__ B(nested_statement.break_label());
} else {
@@ -1044,22 +1001,18 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. If the object is null or undefined, skip
- // over the loop. See ECMA-262 version 5, section 12.6.4.
+ // Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
- __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
- Register null_value = x15;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ Cmp(x0, null_value);
- __ B(eq, &exit);
-
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+ OperandStackDepthIncrement(ForIn::kElementCount);
- // Convert the object to a JS object.
+  // If the object is null or undefined, skip over the loop; otherwise
+  // convert it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
Label convert, done_convert;
__ JumpIfSmi(x0, &convert);
__ JumpIfObjectType(x0, x10, x11, FIRST_JS_RECEIVER_TYPE, &done_convert, ge);
+ __ JumpIfRoot(x0, Heap::kNullValueRootIndex, &exit);
+ __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
__ Bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
@@ -1067,15 +1020,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ Push(x0);
- // Check for proxies.
- Label call_runtime;
- __ JumpIfObjectType(x0, x10, x11, JS_PROXY_TYPE, &call_runtime, eq);
-
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- __ CheckEnumCache(x0, null_value, x10, x11, x12, x13, &call_runtime);
+  // Note: Proxies never have an enum cache, so they will always take the
+  // slow path.
+ Label call_runtime;
+ __ CheckEnumCache(x0, x15, x10, x11, x12, x13, &call_runtime);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
@@ -1086,7 +1038,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ Bind(&call_runtime);
__ Push(x0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast);
+ __ CallRuntime(Runtime::kForInEnumerate);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
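The reordered checks above fold the old explicit proxy test into the enum-cache check, on the grounds that a proxy never has a valid enum cache. The resulting fast/slow decision as a toy predicate (not V8's CheckEnumCache):

    struct ForInReceiver {
      bool is_proxy;
      bool enum_cache_valid;  // JSObject::IsSimpleEnum-style validity
    };

    // Fast path: iterate via the map's cached enumeration; anything else
    // (proxies included, since they never carry a cache) calls the runtime.
    bool UseFastForIn(const ForInReceiver& r) {
      return !r.is_proxy && r.enum_cache_valid;
    }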
@@ -1120,14 +1072,15 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register x0. Iterate through that.
__ Bind(&fixed_array);
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(x1);
__ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- int vector_index = SmiFromSlot(slot)->value();
__ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(vector_index)));
__ Mov(x1, Smi::FromInt(1)); // Smi(1) indicates slow check.
__ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
- // Smi and array, fixed array length (as smi) and initial index.
- __ Push(x1, x0, x2, xzr);
+ __ Push(x1, x0, x2); // Smi and array, fixed array length (as smi).
+ PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
+ __ Push(xzr); // Initial index.
// Generate code for doing the condition check.
__ Bind(&loop);
@@ -1155,6 +1108,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Cmp(x11, x2);
__ B(eq, &update_each);
+  // We might get here from TurboFan or Crankshaft when something in the
+  // for-in loop body deopts, and we only now notice in fullcodegen that
+  // we can no longer use the enum cache, i.e. we have left fast mode.
+  // Record this information here, in case we later OSR back into this
+  // loop or reoptimize the whole function without rerunning the loop with
+  // the slow-mode object in fullcodegen (which would result in a deopt
+  // loop).
+ __ EmitLoadTypeFeedbackVector(x0);
+ __ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ __ Str(x10, FieldMemOperand(x0, FixedArray::OffsetOfElementAt(vector_index)));
+
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
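The added store above pins the for-in feedback slot to the megamorphic sentinel the moment an iteration fails the fast map check, so a later OSR or reoptimization will not speculate on the enum cache and immediately deopt again. The slot update as a plain vector write, with a toy feedback enum:

    #include <vector>

    enum Feedback { kUninitialized, kFastEnumCache, kMegamorphic };

    // Once any iteration observes a receiver map that no longer matches
    // the cached map, the slot is pinned to megamorphic -- a one-way
    // transition that breaks the deopt loop described above.
    void RecordSlowForIn(std::vector<Feedback>& feedback_vector,
                         int slot_index) {
      feedback_vector[slot_index] = kMegamorphic;
    }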
@@ -1193,7 +1156,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ Bind(loop_statement.break_label());
- __ Drop(5);
+ DropOperands(5);
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1432,12 +1395,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ Bind(&slow);
Comment cmnt(masm_, "Lookup variable");
- __ Mov(x1, Operand(var->name()));
- __ Push(cp, x1); // Context and name.
+ __ Push(var->name());
Runtime::FunctionId function_id =
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotNoReferenceError;
+ : Runtime::kLoadLookupSlotInsideTypeof;
__ CallRuntime(function_id);
__ Bind(&done);
context()->Plug(x0);
@@ -1463,7 +1425,7 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
__ LoadRoot(x10, Heap::kNullValueRootIndex);
- __ Push(x10);
+ PushOperand(x10);
} else {
VisitForStackValue(expression);
if (NeedsHomeObject(expression)) {
@@ -1508,7 +1470,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
- __ Push(x0); // Save result on stack
+ PushOperand(x0); // Save result on stack
result_saved = true;
}
switch (property->kind()) {
@@ -1539,7 +1501,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
}
__ Peek(x0, 0);
- __ Push(x0);
+ PushOperand(x0);
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
@@ -1547,19 +1509,19 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
__ Mov(x0, Smi::FromInt(SLOPPY)); // Language mode
- __ Push(x0);
- __ CallRuntime(Runtime::kSetProperty);
+ PushOperand(x0);
+ CallRuntimeWithOperands(Runtime::kSetProperty);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
case ObjectLiteral::Property::PROTOTYPE:
DCHECK(property->emit_store());
// Duplicate receiver on stack.
__ Peek(x0, 0);
- __ Push(x0);
+ PushOperand(x0);
VisitForStackValue(value);
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
break;
@@ -1582,13 +1544,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
it != accessor_table.end();
++it) {
__ Peek(x10, 0); // Duplicate receiver.
- __ Push(x10);
+ PushOperand(x10);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
__ Mov(x10, Smi::FromInt(NONE));
- __ Push(x10);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+ PushOperand(x10);
+ CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1605,18 +1567,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Expression* value = property->value();
if (!result_saved) {
- __ Push(x0); // Save result on stack
+ PushOperand(x0); // Save result on stack
result_saved = true;
}
__ Peek(x10, 0); // Duplicate receiver.
- __ Push(x10);
+ PushOperand(x10);
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
} else {
@@ -1631,11 +1593,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
case ObjectLiteral::Property::COMPUTED:
if (property->emit_store()) {
- __ Mov(x0, Smi::FromInt(NONE));
- __ Push(x0);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
@@ -1644,15 +1606,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
- __ Mov(x0, Smi::FromInt(NONE));
- __ Push(x0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ Mov(x0, Smi::FromInt(NONE));
- __ Push(x0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1709,14 +1669,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
int array_index = 0;
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- if (subexpr->IsSpread()) break;
+ DCHECK(!subexpr->IsSpread());
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ Push(x0);
+ PushOperand(x0);
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
@@ -1737,21 +1697,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   // (inclusive) and these elements get appended to the array. Note that the
   // number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Pop(x0);
+ PopOperand(x0);
result_saved = false;
}
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- __ Push(x0);
- if (subexpr->IsSpread()) {
- VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement);
- }
+ PushOperand(x0);
+ DCHECK(!subexpr->IsSpread());
+ VisitForStackValue(subexpr);
+ CallRuntimeWithOperands(Runtime::kAppendElement);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1792,11 +1747,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
property->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
property->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
const Register scratch = x10;
__ Peek(scratch, kPointerSize);
- __ Push(scratch, result_register());
+ PushOperands(scratch, result_register());
}
break;
case KEYED_SUPER_PROPERTY:
@@ -1805,13 +1760,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(
property->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(property->key());
- __ Push(result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
const Register scratch1 = x10;
const Register scratch2 = x11;
__ Peek(scratch1, 2 * kPointerSize);
__ Peek(scratch2, kPointerSize);
- __ Push(scratch1, scratch2, result_register());
+ PushOperands(scratch1, scratch2, result_register());
}
break;
case KEYED_PROPERTY:
@@ -1856,7 +1811,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- __ Push(x0); // Left operand goes on the stack.
+ PushOperand(x0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
AccumulatorValueContext context(this);
@@ -1911,38 +1866,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ Mov(LoadDescriptor::NameRegister(), Operand(key->value()));
__ Mov(LoadDescriptor::SlotRegister(),
SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object.
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- DCHECK(prop->IsSuperAccess());
-
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- // Call keyed load IC. It has arguments key and receiver in x0 and x1.
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object, key.
- SetExpressionPosition(prop);
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -1956,7 +1880,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Register left = x1;
Register right = x0;
Register result = x0;
- __ Pop(left);
+ PopOperand(left);
// Perform combined smi check on both operands.
__ Orr(x10, left, right);
@@ -1965,8 +1889,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ Bind(&stub_call);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
{
Assembler::BlockPoolsScope scope(masm_);
CallIC(code, expr->BinaryOperationFeedbackId());
@@ -2047,9 +1970,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
- __ Pop(x1);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ PopOperand(x1);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
{
Assembler::BlockPoolsScope scope(masm_);
@@ -2061,27 +1983,17 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- // Constructor is in x0.
- DCHECK(lit != NULL);
- __ push(x0);
-
- // No access check is needed here since the constructor is created by the
- // class literal.
- Register scratch = x1;
- __ Ldr(scratch,
- FieldMemOperand(x0, JSFunction::kPrototypeOrInitialMapOffset));
- __ Push(scratch);
-
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
+ Register scratch = x1;
if (property->is_static()) {
__ Peek(scratch, kPointerSize); // constructor
} else {
__ Peek(scratch, 0); // prototype
}
- __ Push(scratch);
+ PushOperand(scratch);
EmitPropertyKey(property, lit->GetIdForProperty(i));
// The static prototype property is read only. We handle the non computed
@@ -2104,29 +2016,25 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
case ObjectLiteral::Property::GETTER:
- __ Mov(x0, Smi::FromInt(DONT_ENUM));
- __ Push(x0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ Mov(x0, Smi::FromInt(DONT_ENUM));
- __ Push(x0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
UNREACHABLE();
}
}
-
- // Set both the prototype and constructor to have fast properties, and also
- // freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
@@ -2145,12 +2053,12 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_PROPERTY: {
- __ Push(x0); // Preserve value.
+ PushOperand(x0); // Preserve value.
VisitForAccumulatorValue(prop->obj());
// TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
// this copy.
__ Mov(StoreDescriptor::ReceiverRegister(), x0);
- __ Pop(StoreDescriptor::ValueRegister()); // Restore value.
+ PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
__ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
EmitLoadStoreICSlot(slot);
@@ -2158,7 +2066,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_SUPER_PROPERTY: {
- __ Push(x0);
+ PushOperand(x0);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2175,7 +2083,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_SUPER_PROPERTY: {
- __ Push(x0);
+ PushOperand(x0);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2195,12 +2103,12 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_PROPERTY: {
- __ Push(x0); // Preserve value.
+ PushOperand(x0); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ Mov(StoreDescriptor::NameRegister(), x0);
- __ Pop(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::ValueRegister());
+ PopOperands(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::ValueRegister());
EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2281,14 +2189,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
(var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
- __ Mov(x11, Operand(var->name()));
- __ Mov(x10, Smi::FromInt(language_mode()));
- // jssp[0] : mode.
- // jssp[8] : name.
- // jssp[16] : context.
- // jssp[24] : value.
- __ Push(x0, cp, x11, x10);
- __ CallRuntime(Runtime::kStoreLookupSlot);
+ __ Push(var->name());
+ __ Push(x0);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreLookupSlot_Strict
+ : Runtime::kStoreLookupSlot_Sloppy);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
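The assignment path above shrinks the runtime call from four stacked arguments (value, context, name, mode) to two (name, value): the language mode moves into the choice of runtime function and the context becomes implicit. The selection as a small helper, with placeholder ids:

    enum class RuntimeId {
      kStoreLookupSlot_Strict,
      kStoreLookupSlot_Sloppy
    };

    // The mode argument that used to ride on the stack is now encoded in
    // which runtime entry gets called.
    RuntimeId StoreLookupSlotFor(bool is_strict_mode) {
      return is_strict_mode ? RuntimeId::kStoreLookupSlot_Strict
                            : RuntimeId::kStoreLookupSlot_Sloppy;
    }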
@@ -2338,7 +2243,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ Pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
@@ -2355,10 +2260,11 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
- __ Push(key->value());
- __ Push(x0);
- __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy));
+ PushOperand(key->value());
+ PushOperand(x0);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy);
}
@@ -2368,10 +2274,10 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// stack : receiver ('this'), home_object, key
DCHECK(prop != NULL);
- __ Push(x0);
- __ CallRuntime((is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy));
+ PushOperand(x0);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy);
}
@@ -2380,7 +2286,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// TODO(all): Could we pass this in registers rather than on the stack?
- __ Pop(StoreDescriptor::NameRegister(), StoreDescriptor::ReceiverRegister());
+ PopOperands(StoreDescriptor::NameRegister(),
+ StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(x0));
Handle<Code> ic =
@@ -2414,7 +2321,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ Move(LoadDescriptor::NameRegister(), x0);
- __ Pop(LoadDescriptor::ReceiverRegister());
+ PopOperand(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
@@ -2456,7 +2363,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
UseScratchRegisterScope temps(masm_);
Register temp = temps.AcquireX();
__ LoadRoot(temp, Heap::kUndefinedValueRootIndex);
- __ Push(temp);
+ PushOperand(temp);
}
convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
@@ -2467,8 +2374,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ Pop(x10);
- __ Push(x0, x10);
+ PopOperand(x10);
+ PushOperands(x0, x10);
convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
@@ -2493,19 +2400,18 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
callee->AsProperty()->obj()->AsSuperPropertyReference();
VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(x0);
+ PushOperand(x0);
__ Peek(scratch, kPointerSize);
- __ Push(x0, scratch);
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
+ PushOperands(x0, scratch);
+ PushOperand(key->value());
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
- // - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper);
+ // - key
+ CallRuntimeWithOperands(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ Poke(x0, kPointerSize);
@@ -2534,8 +2440,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ Pop(x10);
- __ Push(x0, x10);
+ PopOperand(x10);
+ PushOperands(x0, x10);
EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -2555,11 +2461,10 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
callee->AsProperty()->obj()->AsSuperPropertyReference();
VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(x0);
+ PushOperand(x0);
__ Peek(scratch, kPointerSize);
- __ Push(x0, scratch);
+ PushOperands(x0, scratch);
VisitForStackValue(prop->key());
- __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2567,8 +2472,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ Poke(x0, kPointerSize);
@@ -2591,13 +2495,23 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
SetCallPosition(expr);
-
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+ if (expr->tail_call_mode() == TailCallMode::kAllow) {
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceTailCall);
+ }
+ // Update profiling counters before the tail call since we will
+ // not return to this function.
+ EmitProfilingCounterHandlingForReturnSequence(true);
+ }
+ Handle<Code> ic =
+ CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+ .code();
__ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
__ Peek(x1, (arg_count + 1) * kXRegSize);
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
CallIC(ic);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
// Restore context register.
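The call sequence above gains tail-call awareness: when the call site allows a tail call, tracing and the profiling-counter update must run before the transfer, because control never returns to run them afterwards. A sketch of the ordering, with the emitted pieces reduced to callbacks:

    #include <functional>

    struct CallSiteInfo {
      int arg_count;
      bool allow_tail_call;
    };

    // Order matters: counter bookkeeping precedes a tail call, and the
    // virtual operand stack shrinks by receiver + arguments either way.
    void EmitCallSequence(const CallSiteInfo& site, bool trace_enabled,
                          const std::function<void()>& trace_tail_call,
                          const std::function<void(bool)>& settle_counters,
                          const std::function<void(int)>& call_ic,
                          const std::function<void(int)>& depth_decrement) {
      if (site.allow_tail_call) {
        if (trace_enabled) trace_tail_call();   // Runtime::kTraceTailCall
        settle_counters(/*is_tail_call=*/true); // no result to save
      }
      call_ic(site.arg_count);
      depth_decrement(site.arg_count + 1);      // receiver + args consumed
    }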
@@ -2644,10 +2558,9 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ Bind(&slow);
// Call the runtime to find the function to call (returned in x0)
// and the object holding it (returned in x1).
- __ Mov(x10, Operand(callee->name()));
- __ Push(context_register(), x10);
- __ CallRuntime(Runtime::kLoadLookupSlot);
- __ Push(x0, x1); // Receiver, function.
+ __ Push(callee->name());
+ __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+ PushOperands(x0, x1); // Receiver, function.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the
@@ -2668,7 +2581,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
VisitForStackValue(callee);
// refEnv.WithBaseObject()
__ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
- __ Push(x10); // Reserved receiver slot.
+ PushOperand(x10); // Reserved receiver slot.
}
}
@@ -2705,7 +2618,10 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// Call the evaluated function.
__ Peek(x1, (arg_count + 1) * kXRegSize);
__ Mov(x0, arg_count);
- __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ expr->tail_call_mode()),
+ RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
// Restore context register.
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2746,6 +2662,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2767,7 +2684,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
FieldMemOperand(result_register(), HeapObject::kMapOffset));
__ Ldr(result_register(),
FieldMemOperand(result_register(), Map::kPrototypeOffset));
- __ Push(result_register());
+ PushOperand(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -2789,6 +2706,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ Peek(x1, arg_count * kXRegSize);
__ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -2840,77 +2758,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, SIMD128_VALUE_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, FIRST_FUNCTION_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(hs, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- // Only a HeapNumber can be -0.0, so return false if we have something else.
- __ JumpIfNotHeapNumber(x0, if_false, DO_SMI_CHECK);
-
- // Test the bit pattern.
- __ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset));
- __ Cmp(x10, 1); // Set V on 0x8000000000000000.
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(vs, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2999,65 +2846,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ Pop(x1);
- __ Cmp(x0, x1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in x1.
- VisitForAccumulatorValue(args->at(0));
- __ Mov(x1, x0);
- __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
- ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(x0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
- Label exit;
- // Get the number of formal parameters.
- __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ Ldr(x12, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x13, MemOperand(x12, StandardFrameConstants::kContextOffset));
- __ Cmp(x13, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ Ldr(x0, MemOperand(x12, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ Bind(&exit);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitClassOf");
ZoneList<Expression*>* args = expr->arguments();
@@ -3130,28 +2918,6 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = nullptr;
- Label* if_false = nullptr;
- Label* fall_through = nullptr;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, JS_DATE_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3164,7 +2930,7 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(value, index);
+ PopOperands(value, index);
if (FLAG_debug_code) {
__ AssertSmi(value, kNonSmiValue);
@@ -3194,7 +2960,7 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(value, index);
+ PopOperands(value, index);
if (FLAG_debug_code) {
__ AssertSmi(value, kNonSmiValue);
@@ -3212,35 +2978,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ Pop(x1);
- // x0 = value.
- // x1 = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(x1, &done);
-
- // If the object is not a value type, return the value.
- __ JumpIfNotObjectType(x1, x10, x11, JS_VALUE_TYPE, &done);
-
- // Store the value.
- __ Str(x0, FieldMemOperand(x1, JSValue::kValueOffset));
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ Mov(x10, x0);
- __ RecordWriteField(
- x1, JSValue::kValueOffset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
-
- __ Bind(&done);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3258,25 +2995,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into x0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- Label convert, done_convert;
- __ JumpIfSmi(x0, &convert);
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ JumpIfObjectType(x0, x1, x1, LAST_NAME_TYPE, &done_convert, ls);
- __ Bind(&convert);
- __ Push(x0);
- __ CallRuntime(Runtime::kToName);
- __ Bind(&done_convert);
- context()->Plug(x0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3310,7 +3028,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Register index = x0;
Register result = x3;
- __ Pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3355,7 +3073,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
Register index = x0;
Register result = x0;
- __ Pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3405,6 +3123,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
// Call the target.
__ Mov(x0, argc);
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(argc + 1);
// Restore context register.
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -3457,226 +3176,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitFastOneByteArrayJoin");
-
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(0));
-
- Register array = x0;
- Register result = x0;
- Register elements = x1;
- Register element = x2;
- Register separator = x3;
- Register array_length = x4;
- Register result_pos = x5;
- Register map = x6;
- Register string_length = x10;
- Register elements_end = x11;
- Register string = x12;
- Register scratch1 = x13;
- Register scratch2 = x14;
- Register scratch3 = x7;
- Register separator_length = x15;
-
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- empty_separator_loop, one_char_separator_loop,
- one_char_separator_loop_entry, long_separator_loop;
-
- // The separator operand is on the stack.
- __ Pop(separator);
-
- // Check that the array is a JSArray.
- __ JumpIfSmi(array, &bailout);
- __ JumpIfNotObjectType(array, map, scratch1, JS_ARRAY_TYPE, &bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(map, scratch1, &bailout);
-
- // If the array has length zero, return the empty string.
- // Load and untag the length of the array.
- // It is an unsigned value, so we can skip sign extension.
- // We assume little endianness.
- __ Ldrsw(array_length,
- UntagSmiFieldMemOperand(array, JSArray::kLengthOffset));
- __ Cbnz(array_length, &non_trivial_array);
- __ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ B(&done);
-
- __ Bind(&non_trivial_array);
- // Get the FixedArray containing array's elements.
- __ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
-
- // Check that all array elements are sequential one-byte strings, and
- // accumulate the sum of their lengths.
- __ Mov(string_length, 0);
- __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- // Loop condition: while (element < elements_end).
- // Live values in registers:
- // elements: Fixed array of strings.
- // array_length: Length of the fixed array of strings (not smi)
- // separator: Separator string
- // string_length: Accumulated sum of string lengths (not smi).
- // element: Current array element.
- // elements_end: Array end.
- if (FLAG_debug_code) {
- __ Cmp(array_length, 0);
- __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
- }
- __ Bind(&loop);
- __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ JumpIfSmi(string, &bailout);
- __ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
- __ Ldrsw(scratch1,
- UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ Adds(string_length, string_length, scratch1);
- __ B(vs, &bailout);
- __ Cmp(element, elements_end);
- __ B(lt, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ Cmp(array_length, 1);
- __ B(ne, &not_size_one_array);
- __ Ldr(result, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ B(&done);
-
- __ Bind(&not_size_one_array);
-
- // Live values in registers:
- // separator: Separator string
- // array_length: Length of the array (not smi).
- // string_length: Sum of string lengths (not smi).
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat one-byte string.
- __ JumpIfSmi(separator, &bailout);
- __ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
-
- // Add (separator length times array_length) - separator length to the
- // string_length to get the length of the result string.
- // Load the separator length as untagged.
- // We assume little endianness, and that the length is positive.
- __ Ldrsw(separator_length,
- UntagSmiFieldMemOperand(separator,
- SeqOneByteString::kLengthOffset));
- __ Sub(string_length, string_length, separator_length);
- __ Umaddl(string_length, array_length.W(), separator_length.W(),
- string_length);
-
- // Bailout for large object allocations.
- __ Cmp(string_length, Page::kMaxRegularHeapObjectSize);
- __ B(gt, &bailout);
-
- // Get first element in the array.
- __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- // Live values in registers:
- // element: First array element
- // separator: Separator string
- // string_length: Length of result string (not smi)
- // array_length: Length of the array (not smi).
- __ AllocateOneByteString(result, string_length, scratch1, scratch2, scratch3,
- &bailout);
-
- // Prepare for looping. Set up elements_end to end of the array. Set
- // result_pos to the position of the result where to write the first
- // character.
- // TODO(all): useless unless AllocateOneByteString trashes the register.
- __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
- __ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
- // Check the length of the separator.
- __ Cmp(separator_length, 1);
- __ B(eq, &one_char_separator);
- __ B(gt, &long_separator);
-
- // Empty separator case
- __ Bind(&empty_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
-
- // Copy next array element to the result.
- __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ Ldrsw(string_length,
- UntagSmiFieldMemOperand(string, String::kLengthOffset));
- __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(result_pos, string, string_length, scratch1);
- __ Cmp(element, elements_end);
- __ B(lt, &empty_separator_loop); // End while (element < elements_end).
- __ B(&done);
-
- // One-character separator case
- __ Bind(&one_char_separator);
- // Replace separator with its one-byte character value.
- __ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ B(&one_char_separator_loop_entry);
-
- __ Bind(&one_char_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Single separator one-byte char (in lower byte).
-
- // Copy the separator character to the result.
- __ Strb(separator, MemOperand(result_pos, 1, PostIndex));
-
- // Copy next array element to the result.
- __ Bind(&one_char_separator_loop_entry);
- __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ Ldrsw(string_length,
- UntagSmiFieldMemOperand(string, String::kLengthOffset));
- __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(result_pos, string, string_length, scratch1);
- __ Cmp(element, elements_end);
- __ B(lt, &one_char_separator_loop); // End while (element < elements_end).
- __ B(&done);
-
- // Long separator case (separator is more than one character). Entry is at the
- // label long_separator below.
- __ Bind(&long_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Separator string.
-
- // Copy the separator to the result.
- // TODO(all): hoist next two instructions.
- __ Ldrsw(string_length,
- UntagSmiFieldMemOperand(separator, String::kLengthOffset));
- __ Add(string, separator, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(result_pos, string, string_length, scratch1);
-
- __ Bind(&long_separator);
- __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ Ldrsw(string_length,
- UntagSmiFieldMemOperand(string, String::kLengthOffset));
- __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(result_pos, string, string_length, scratch1);
- __ Cmp(element, elements_end);
- __ B(lt, &long_separator_loop); // End while (element < elements_end).
- __ B(&done);
-
- __ Bind(&bailout);
- // Returning undefined will force slower code to handle it.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ Bind(&done);
- context()->Plug(result);
-}
-
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -3721,7 +3220,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ B(&done);
__ Bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject);
+ CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
__ Bind(&done);
context()->Plug(x0);
@@ -3731,7 +3230,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push undefined as the receiver.
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- __ Push(x0);
+ PushOperand(x0);
__ LoadNativeContextSlot(expr->context_index(), x0);
}
@@ -3746,6 +3245,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Mov(x0, arg_count);
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
}
@@ -3758,8 +3258,8 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
EmitLoadJSRuntimeFunction(expr);
// Push the target function under the receiver.
- __ Pop(x10);
- __ Push(x0, x10);
+ PopOperand(x10);
+ PushOperands(x0, x10);
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
@@ -3793,6 +3293,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the C runtime function.
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
+ OperandStackDepthDecrement(arg_count);
context()->Plug(x0);
}
}
@@ -3810,9 +3311,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(x0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -3833,8 +3334,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- __ Mov(x2, Operand(var->name()));
- __ Push(context_register(), x2);
+ __ Push(var->name());
__ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(x0);
}
@@ -3876,6 +3376,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_false,
&materialize_true,
&materialize_true);
+ if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ Bind(&materialize_true);
PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
@@ -3928,7 +3429,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
- __ Push(xzr);
+ PushOperand(xzr);
}
switch (assign_type) {
case NAMED_PROPERTY: {
@@ -3943,10 +3444,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
+ PushOperand(result_register());
const Register scratch = x10;
__ Peek(scratch, kPointerSize);
- __ Push(scratch, result_register());
+ PushOperands(scratch, result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
@@ -3956,12 +3457,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
- __ Push(result_register());
+ PushOperand(result_register());
const Register scratch1 = x10;
const Register scratch2 = x11;
__ Peek(scratch1, 2 * kPointerSize);
__ Peek(scratch2, kPointerSize);
- __ Push(scratch1, scratch2, result_register());
+ PushOperands(scratch1, scratch2, result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
@@ -4044,7 +3545,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ Push(x0);
+ PushOperand(x0);
break;
case NAMED_PROPERTY:
__ Poke(x0, kXRegSize);
@@ -4070,9 +3571,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{
Assembler::BlockPoolsScope scope(masm_);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), Token::ADD,
- strength(language_mode())).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
}
@@ -4106,7 +3605,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ Pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4142,8 +3641,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ Pop(StoreDescriptor::NameRegister());
- __ Pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::NameRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
@@ -4204,7 +3703,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (String::Equals(check, factory->undefined_string())) {
ASM_LOCATION(
"FullCodeGenerator::EmitLiteralCompareTypeof undefined_string");
- __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, if_true);
+ __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_false);
__ JumpIfSmi(x0, if_false);
// Check for undetectable objects => true.
__ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
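This hunk changes the typeof "undefined" test to match JavaScript semantics directly: null answers false (its typeof is "object"), while undetectable objects answer true, and the explicit comparison against the undefined root becomes unnecessary because undefined's map has the undetectable bit set. A hedged sketch of the decision order in plain C++ (the object model is invented for illustration, not V8's):

#include <cassert>

struct Value {
  bool is_null;
  bool is_smi;
  bool is_undetectable;  // map bit: set for undefined and e.g. document.all
};

// Mirrors the emitted sequence: null -> false, smi -> false, otherwise
// the map's undetectable bit decides.
bool TypeofIsUndefined(const Value& v) {
  if (v.is_null) return false;  // typeof null == "object"
  if (v.is_smi) return false;   // numbers are never "undefined"
  return v.is_undetectable;
}

int main() {
  assert(!TypeofIsUndefined({true, false, false}));  // null
  assert(!TypeofIsUndefined({false, true, false}));  // smi
  assert(TypeofIsUndefined({false, false, true}));   // undefined, document.all
}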
@@ -4274,7 +3773,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty);
+ CallRuntimeWithOperands(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(x0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -4282,7 +3781,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
- __ Pop(x1);
+ PopOperand(x1);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4296,7 +3795,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Condition cond = CompareIC::ComputeCondition(op);
// Pop the stack value.
- __ Pop(x1);
+ PopOperand(x1);
JumpPatchSite patch_site(masm_);
if (ShouldInlineSmiCase(op)) {
@@ -4307,8 +3806,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ Bind(&slow_case);
}
- Handle<Code> ic = CodeFactory::CompareIC(
- isolate(), op, strength(language_mode())).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4385,8 +3883,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// looks at its pos(). Is it possible to do something more efficient here,
// perhaps using Adr?
__ Bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
__ RecordGeneratorContinuation();
- __ B(&resume);
+ __ Pop(x1);
+ __ Cmp(x1, Smi::FromInt(JSGeneratorObject::RETURN));
+ __ B(ne, &resume);
+ __ Push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
__ Bind(&suspend);
VisitForAccumulatorValue(expr->generator_object());
@@ -4404,7 +3910,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Bind(&post_runtime);
- __ Pop(result_register());
+ PopOperand(result_register());
EmitReturnSequence();
__ Bind(&resume);
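The generator continuation now begins with a small dispatch: every resume path pushes the resume mode as a smi, and the continuation pops it to decide whether this is an ordinary resume or a return-resume that must box the input value into an iterator result and unwind. A minimal restatement of that decision (enum values are assumptions standing in for JSGeneratorObject's resume modes):

#include <cassert>

enum ResumeMode { kNext = 0, kReturn = 1, kThrow = 2 };

// Mirrors the code at &continuation: pop the mode smi; only a
// return-resume creates {value, done: true} and returns, everything
// else falls through to the &resume label.
bool ContinuationBoxesResult(ResumeMode popped_mode) {
  return popped_mode == kReturn;
}

int main() {
  assert(!ContinuationBoxesResult(kNext));   // .next(v): resume the body
  assert(ContinuationBoxesResult(kReturn));  // .return(v): finish with v
  assert(!ContinuationBoxesResult(kThrow));  // .throw(e): resume, then rethrow
}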
@@ -4413,127 +3919,15 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
}
case Yield::kFinal: {
- VisitForAccumulatorValue(expr->generator_object());
- __ Mov(x1, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
- __ Str(x1, FieldMemOperand(result_register(),
- JSGeneratorObject::kContinuationOffset));
// Pop value from top-of-stack slot, box result into result register.
+ OperandStackDepthDecrement(1);
EmitCreateIteratorResult(true);
- EmitUnwindBeforeReturn();
- EmitReturnSequence();
+ EmitUnwindAndReturn();
break;
}
- case Yield::kDelegating: {
- VisitForStackValue(expr->generator_object());
-
- // Initial stack layout is as follows:
- // [sp + 1 * kPointerSize] iter
- // [sp + 0 * kPointerSize] g
-
- Label l_catch, l_try, l_suspend, l_continuation, l_resume;
- Label l_next, l_call, l_loop;
- Register load_receiver = LoadDescriptor::ReceiverRegister();
- Register load_name = LoadDescriptor::NameRegister();
-
- // Initial send value is undefined.
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- __ B(&l_next);
-
- // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
- __ Bind(&l_catch);
- __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
- __ Peek(x3, 1 * kPointerSize); // iter
- __ Push(load_name, x3, x0); // "throw", iter, except
- __ B(&l_call);
-
- // try { received = %yield result }
- // Shuffle the received result above a try handler and yield it without
- // re-boxing.
- __ Bind(&l_try);
- __ Pop(x0); // result
- int handler_index = NewHandlerTableEntry();
- EnterTryBlock(handler_index, &l_catch);
- const int try_block_size = TryCatch::kElementCount * kPointerSize;
- __ Push(x0); // result
-
- __ B(&l_suspend);
- // TODO(jbramley): This label is bound here because the following code
- // looks at its pos(). Is it possible to do something more efficient here,
- // perhaps using Adr?
- __ Bind(&l_continuation);
- __ RecordGeneratorContinuation();
- __ B(&l_resume);
-
- __ Bind(&l_suspend);
- const int generator_object_depth = kPointerSize + try_block_size;
- __ Peek(x0, generator_object_depth);
- __ Push(x0); // g
- __ Push(Smi::FromInt(handler_index)); // handler-index
- DCHECK((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
- __ Mov(x1, Smi::FromInt(l_continuation.pos()));
- __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
- __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
- __ Mov(x1, cp);
- __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Pop(x0); // result
- EmitReturnSequence();
- __ Bind(&l_resume); // received in x0
- ExitTryBlock(handler_index);
-
- // receiver = iter; f = 'next'; arg = received;
- __ Bind(&l_next);
-
- __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next"
- __ Peek(x3, 1 * kPointerSize); // iter
- __ Push(load_name, x3, x0); // "next", iter, received
-
- // result = receiver[f](arg);
- __ Bind(&l_call);
- __ Peek(load_receiver, 1 * kPointerSize);
- __ Peek(load_name, 2 * kPointerSize);
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(expr->KeyedLoadFeedbackSlot()));
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
- CallIC(ic, TypeFeedbackId::None());
- __ Mov(x1, x0);
- __ Poke(x1, 2 * kPointerSize);
- SetCallPosition(expr);
- __ Mov(x0, 1);
- __ Call(
- isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
-
- __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The function is still on the stack; drop it.
-
- // if (!result.done) goto l_try;
- __ Bind(&l_loop);
- __ Move(load_receiver, x0);
-
- __ Push(load_receiver); // save result
- __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(expr->DoneFeedbackSlot()));
- CallLoadIC(NOT_INSIDE_TYPEOF); // x0=result.done
- // The ToBooleanStub argument (result.done) is in x0.
- Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(bool_ic);
- __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
- __ B(ne, &l_try);
-
- // result.value
- __ Pop(load_receiver); // result
- __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(expr->ValueFeedbackSlot()));
- CallLoadIC(NOT_INSIDE_TYPEOF); // x0=result.value
- context()->DropAndPlug(2, x0); // drop iter and g
- break;
- }
+ case Yield::kDelegating:
+ UNREACHABLE();
}
}
@@ -4553,7 +3947,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- __ Pop(generator_object);
+ PopOperand(generator_object);
+
+ // Store input value into generator object.
+ __ Str(result_register(),
+ FieldMemOperand(x1, JSGeneratorObject::kInputOffset));
+ __ Mov(x2, result_register());
+ __ RecordWriteField(x1, JSGeneratorObject::kInputOffset, x2, x3,
+ kLRHasBeenSaved, kDontSaveFPRegs);
// Load suspended function and context.
__ Ldr(cp, FieldMemOperand(generator_object,
@@ -4610,6 +4011,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Mov(x12, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
__ Str(x12, FieldMemOperand(generator_object,
JSGeneratorObject::kContinuationOffset));
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
__ Br(x10);
__ Bind(&slow_resume);
@@ -4620,6 +4022,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ PushMultipleTimes(the_hole, operand_stack_size);
__ Mov(x10, Smi::FromInt(resume_mode));
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
__ Push(generator_object, result_register(), x10);
__ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
@@ -4629,6 +4032,31 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
context()->Plug(result_register());
}
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
+ OperandStackDepthIncrement(2);
+ __ Push(reg1, reg2);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+ Register reg3) {
+ OperandStackDepthIncrement(3);
+ __ Push(reg1, reg2, reg3);
+}
+
+void FullCodeGenerator::PopOperands(Register reg1, Register reg2) {
+ OperandStackDepthDecrement(2);
+ __ Pop(reg1, reg2);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+ if (FLAG_debug_code) {
+ int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+ operand_stack_depth_ * kPointerSize;
+ __ Sub(x0, fp, jssp);
+ __ Cmp(x0, Operand(expected_diff));
+ __ Assert(eq, kUnexpectedStackDepth);
+ }
+}
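EmitOperandStackDepthCheck turns the static counter into a verifiable invariant: in debug code it asserts that fp - jssp equals the fixed frame size plus one pointer per tracked operand. A worked example of the arithmetic under assumed constants (8-byte pointers, a two-slot fixed frame holding the context and function):

#include <cassert>
#include <cstdint>

constexpr int64_t kPointerSize = 8;                          // assumption
constexpr int64_t kFixedFrameSizeFromFp = 2 * kPointerSize;  // assumption

// The expected distance from the JS stack pointer up to the frame
// pointer: fixed frame plus the statically tracked operand slots.
int64_t ExpectedFpMinusSp(int operand_stack_depth) {
  return kFixedFrameSizeFromFp + operand_stack_depth * kPointerSize;
}

int main() {
  // With three tracked operands the check expects fp - jssp == 16 + 24.
  assert(ExpectedFpMinusSp(3) == 40);
  assert(ExpectedFpMinusSp(0) == kFixedFrameSizeFromFp);
}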
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
@@ -4716,25 +4144,19 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
DCHECK(closure_scope->is_function_scope());
__ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
- __ Push(x10);
+ PushOperand(x10);
}
void FullCodeGenerator::EnterFinallyBlock() {
ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock");
DCHECK(!result_register().is(x10));
- // Preserve the result register while executing finally block.
- // Also cook the return address in lr to the stack (smi encoded Code* delta).
- __ Sub(x10, lr, Operand(masm_->CodeObject()));
- __ SmiTag(x10);
- __ Push(result_register(), x10);
-
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Mov(x10, pending_message_obj);
__ Ldr(x10, MemOperand(x10));
- __ Push(x10);
+ PushOperand(x10);
ClearPendingMessage();
}
@@ -4745,19 +4167,11 @@ void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(x10));
// Restore pending message from stack.
- __ Pop(x10);
+ PopOperand(x10);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Mov(x13, pending_message_obj);
__ Str(x10, MemOperand(x13));
-
- // Restore result register and cooked return address from the stack.
- __ Pop(x10, result_register());
-
- // Uncook the return address (see EnterFinallyBlock).
- __ SmiUntag(x10);
- __ Add(x11, x10, Operand(masm_->CodeObject()));
- __ Br(x11);
}
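The deleted halves of EnterFinallyBlock/ExitFinallyBlock implemented the old control-flow protocol: the finally block was entered via a call, so the return address in lr had to be preserved across the block, and since a raw code address is not GC-safe it was "cooked" into a smi-tagged offset from the code object and "uncooked" on exit. With token dispatch there is no return address to protect. A sketch of the retired cook/uncook arithmetic (constants are assumptions; V8's smi tagging differs by platform):

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;  // low bit reserved for the smi tag

uint64_t Cook(uint64_t lr, uint64_t code_start) {
  return (lr - code_start) << kSmiTagSize;  // SmiTag(lr - CodeObject)
}

uint64_t Uncook(uint64_t cooked, uint64_t code_start_after_gc) {
  return code_start_after_gc + (cooked >> kSmiTagSize);
}

int main() {
  uint64_t code = 0x10000, lr = 0x10420;
  uint64_t cooked = Cook(lr, code);
  // The offset survives even if the GC moves the code object.
  assert(Uncook(cooked, 0x20000) == 0x20420);
}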
@@ -4776,6 +4190,30 @@ void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
__ Mov(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
}
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+ __ Pop(result_register(), x1); // Restore the accumulator and get the token.
+ for (DeferredCommand cmd : commands_) {
+ Label skip;
+ __ Cmp(x1, Operand(Smi::FromInt(cmd.token)));
+ __ B(ne, &skip);
+ switch (cmd.command) {
+ case kReturn:
+ codegen_->EmitUnwindAndReturn();
+ break;
+ case kThrow:
+ __ Push(result_register());
+ __ CallRuntime(Runtime::kReThrow);
+ break;
+ case kContinue:
+ codegen_->EmitContinue(cmd.target);
+ break;
+ case kBreak:
+ codegen_->EmitBreak(cmd.target);
+ break;
+ }
+ __ bind(&skip);
+ }
+}
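EmitCommands is the back half of the token protocol: the finally block's epilogue pops the saved accumulator and the token, then compares the token against each deferred command recorded while compiling the try block; the matching command replays the original break, continue, return, or rethrow, and an unmatched (fall-through) token continues normally. A standalone model of the dispatch (types are illustrative):

#include <cassert>
#include <vector>

enum Command { kReturnCmd, kThrowCmd, kContinueCmd, kBreakCmd };

struct DeferredCommand { Command command; int token; };

// Returns the index of the command whose token matches, or -1 for the
// fall-through token, which no recorded command claims.
int Dispatch(const std::vector<DeferredCommand>& commands, int token) {
  for (size_t i = 0; i < commands.size(); ++i) {
    if (commands[i].token == token) return static_cast<int>(i);
  }
  return -1;  // resume execution after the try-finally statement
}

int main() {
  std::vector<DeferredCommand> cmds = {{kReturnCmd, 0}, {kBreakCmd, 2}};
  assert(Dispatch(cmds, 2) == 1);    // replay the deferred break
  assert(Dispatch(cmds, 17) == -1);  // fall-through: nothing deferred
}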
#undef __
diff --git a/deps/v8/src/full-codegen/full-codegen.cc b/deps/v8/src/full-codegen/full-codegen.cc
index 416a69c708..8255089f7e 100644
--- a/deps/v8/src/full-codegen/full-codegen.cc
+++ b/deps/v8/src/full-codegen/full-codegen.cc
@@ -17,6 +17,7 @@
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
#include "src/snapshot/snapshot.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -27,6 +28,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
Isolate* isolate = info->isolate();
TimerEventScope<TimerEventCompileFullCode> timer(info->isolate());
+ TRACE_EVENT0("v8", "V8.CompileFullCode");
// Ensure that the feedback vector is large enough.
info->EnsureFeedbackVector();
@@ -127,7 +129,7 @@ void FullCodeGenerator::PopulateHandlerTable(Handle<Code> code) {
table->SetRangeStart(i, handler_table_[i].range_start);
table->SetRangeEnd(i, handler_table_[i].range_end);
table->SetRangeHandler(i, handler_table_[i].handler_offset, prediction);
- table->SetRangeDepth(i, handler_table_[i].stack_depth);
+ table->SetRangeData(i, handler_table_[i].stack_depth);
}
code->set_handler_table(*table);
}
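With SetRangeData the handler table's per-range payload becomes the operand stack depth recorded at EnterTryBlock, so the unwinder knows exactly how many operand slots to keep before jumping to the handler. A sketch of how such a table might be consulted (field names and the innermost-wins rule are illustrative, not V8's implementation):

#include <cassert>
#include <vector>

struct HandlerEntry {
  int range_start, range_end, handler_offset, stack_depth;
};

// Pick the innermost range covering pc; entries for inner try blocks are
// appended later, so the last covering entry wins in this model.
const HandlerEntry* Lookup(const std::vector<HandlerEntry>& table, int pc) {
  const HandlerEntry* best = nullptr;
  for (const HandlerEntry& e : table) {
    if (e.range_start <= pc && pc < e.range_end) best = &e;
  }
  return best;
}

int main() {
  std::vector<HandlerEntry> table = {{0, 100, 200, 0}, {10, 50, 300, 2}};
  assert(Lookup(table, 20)->stack_depth == 2);       // inner try's depth
  assert(Lookup(table, 60)->handler_offset == 200);  // only outer covers
}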
@@ -143,13 +145,11 @@ int FullCodeGenerator::NewHandlerTableEntry() {
bool FullCodeGenerator::MustCreateObjectLiteralWithRuntime(
ObjectLiteral* expr) const {
- int literal_flags = expr->ComputeFlags();
// FastCloneShallowObjectStub doesn't copy elements, and object literals don't
// support copy-on-write (COW) elements for now.
// TODO(mvstanton): make object literals support COW elements.
- return masm()->serializer_enabled() ||
- literal_flags != ObjectLiteral::kShallowProperties ||
- literal_flags != ObjectLiteral::kFastElements ||
+ return masm()->serializer_enabled() || !expr->fast_elements() ||
+ !expr->has_shallow_properties() ||
expr->properties_count() >
FastCloneShallowObjectStub::kMaximumClonedProperties;
}
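The removed condition compared the combined flags word against two different constants in a row; since the constants differ, literal_flags != kShallowProperties || literal_flags != kFastElements holds for every value, so the fast clone stub was effectively never selected. The rewrite queries the two properties independently, which appears to be the intended logic. A condensed restatement of the corrected predicate (types invented for illustration; the limit mirrors the stub's constant):

#include <cassert>

struct LiteralInfo {
  bool fast_elements;
  bool shallow_properties;
  int properties_count;
};
constexpr int kMaximumClonedProperties = 6;  // assumed stub limit

// Runtime creation is required unless the literal is shallow, fast and
// small enough for FastCloneShallowObjectStub.
bool MustUseRuntime(bool serializer_enabled, const LiteralInfo& lit) {
  return serializer_enabled || !lit.fast_elements ||
         !lit.shallow_properties ||
         lit.properties_count > kMaximumClonedProperties;
}

int main() {
  assert(!MustUseRuntime(false, {true, true, 3}));   // stub path now reachable
  assert(MustUseRuntime(false, {false, true, 3}));   // slow elements
  assert(MustUseRuntime(false, {true, true, 100}));  // too many properties
}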
@@ -165,14 +165,7 @@ bool FullCodeGenerator::MustCreateArrayLiteralWithRuntime(
void FullCodeGenerator::Initialize() {
InitializeAstVisitor(info_->isolate());
- // The generation of debug code must match between the snapshot code and the
- // code that is generated later. This is assumed by the debugger when it is
- // calculating PC offsets after generating a debug version of code. Therefore
- // we disable the production of debug code in the full compiler if we are
- // either generating a snapshot or we booted from a snapshot.
- generate_debug_code_ = FLAG_debug_code && !masm_->serializer_enabled() &&
- !info_->isolate()->snapshot_available();
- masm_->set_emit_debug_code(generate_debug_code_);
+ masm_->set_emit_debug_code(FLAG_debug_code);
masm_->set_predictable_code_size(true);
}
@@ -183,10 +176,8 @@ void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
void FullCodeGenerator::CallLoadIC(TypeofMode typeof_mode,
- LanguageMode language_mode,
TypeFeedbackId id) {
- Handle<Code> ic =
- CodeFactory::LoadIC(isolate(), typeof_mode, language_mode).code();
+ Handle<Code> ic = CodeFactory::LoadIC(isolate(), typeof_mode).code();
CallIC(ic, id);
}
@@ -281,7 +272,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(Register reg) const {
void FullCodeGenerator::StackValueContext::Plug(Register reg) const {
- __ Push(reg);
+ codegen()->PushOperand(reg);
}
@@ -295,14 +286,36 @@ void FullCodeGenerator::TestContext::Plug(Register reg) const {
void FullCodeGenerator::EffectContext::Plug(bool flag) const {}
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
+ DCHECK(count > 0);
+ codegen()->DropOperands(count);
+}
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count, Register reg) const {
+ DCHECK(count > 0);
+ codegen()->DropOperands(count);
+ __ Move(result_register(), reg);
+}
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ DCHECK(count > 0);
+ // For simplicity we always test the accumulator register.
+ codegen()->DropOperands(count);
+ __ Move(result_register(), reg);
+ codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+ codegen()->DoTest(this);
+}
void FullCodeGenerator::EffectContext::PlugTOS() const {
- __ Drop(1);
+ codegen()->DropOperands(1);
}
void FullCodeGenerator::AccumulatorValueContext::PlugTOS() const {
- __ Pop(result_register());
+ codegen()->PopOperand(result_register());
}
@@ -312,7 +325,7 @@ void FullCodeGenerator::StackValueContext::PlugTOS() const {
void FullCodeGenerator::TestContext::PlugTOS() const {
// For simplicity we always test the accumulator register.
- __ Pop(result_register());
+ codegen()->PopOperand(result_register());
codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
@@ -442,6 +455,47 @@ int FullCodeGenerator::DeclareGlobalsFlags() {
DeclareGlobalsLanguageMode::encode(language_mode());
}
+void FullCodeGenerator::PushOperand(Handle<Object> handle) {
+ OperandStackDepthIncrement(1);
+ __ Push(handle);
+}
+
+void FullCodeGenerator::PushOperand(Smi* smi) {
+ OperandStackDepthIncrement(1);
+ __ Push(smi);
+}
+
+void FullCodeGenerator::PushOperand(Register reg) {
+ OperandStackDepthIncrement(1);
+ __ Push(reg);
+}
+
+void FullCodeGenerator::PopOperand(Register reg) {
+ OperandStackDepthDecrement(1);
+ __ Pop(reg);
+}
+
+void FullCodeGenerator::DropOperands(int count) {
+ OperandStackDepthDecrement(count);
+ __ Drop(count);
+}
+
+void FullCodeGenerator::CallRuntimeWithOperands(Runtime::FunctionId id) {
+ OperandStackDepthDecrement(Runtime::FunctionForId(id)->nargs);
+ __ CallRuntime(id);
+}
+
+void FullCodeGenerator::OperandStackDepthIncrement(int count) {
+ DCHECK_GE(count, 0);
+ DCHECK_GE(operand_stack_depth_, 0);
+ operand_stack_depth_ += count;
+}
+
+void FullCodeGenerator::OperandStackDepthDecrement(int count) {
+ DCHECK_GE(count, 0);
+ DCHECK_GE(operand_stack_depth_, count);
+ operand_stack_depth_ -= count;
+}
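These helpers are the heart of the patch: every push, pop, drop, and operand-consuming runtime call funnels through one place, so operand_stack_depth_ is a compile-time mirror of the dynamic operand stack (validated by EmitOperandStackDepthCheck and consumed by the handler table). A condensed standalone version with the same invariants:

#include <cassert>

class OperandStackTracker {
 public:
  void Increment(int count) {
    assert(count >= 0 && depth_ >= 0);  // DCHECK_GE equivalents
    depth_ += count;
  }
  void Decrement(int count) {
    assert(count >= 0 && depth_ >= count);  // never pop below zero
    depth_ -= count;
  }
  int depth() const { return depth_; }

 private:
  int depth_ = 0;
};

int main() {
  OperandStackTracker tracker;
  tracker.Increment(3);  // e.g. PushOperands(reg1, reg2, reg3)
  tracker.Decrement(2);  // e.g. CallRuntimeWithOperands with nargs == 2
  assert(tracker.depth() == 1);
}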
void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
// Load the arguments on the stack and call the stub.
@@ -452,6 +506,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallStub(&stub);
+ OperandStackDepthDecrement(3);
context()->Plug(result_register());
}
@@ -466,19 +521,20 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
VisitForStackValue(args->at(2));
VisitForStackValue(args->at(3));
__ CallStub(&stub);
+ OperandStackDepthDecrement(4);
context()->Plug(result_register());
}
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
+ MathPowStub stub(isolate(), MathPowStub::ON_STACK);
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
-
- MathPowStub stub(isolate(), MathPowStub::ON_STACK);
__ CallStub(&stub);
+ OperandStackDepthDecrement(2);
context()->Plug(result_register());
}
@@ -502,7 +558,7 @@ void FullCodeGenerator::EmitIntrinsicAsStubCall(CallRuntime* expr,
__ Move(callable.descriptor().GetRegisterParameter(last),
result_register());
for (int i = last; i-- > 0;) {
- __ Pop(callable.descriptor().GetRegisterParameter(i));
+ PopOperand(callable.descriptor().GetRegisterParameter(i));
}
}
__ Call(callable.code(), RelocInfo::CODE_TARGET);
@@ -520,6 +576,11 @@ void FullCodeGenerator::EmitToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToName(CallRuntime* expr) {
+ EmitIntrinsicAsStubCall(expr, CodeFactory::ToName(isolate()));
+}
+
+
void FullCodeGenerator::EmitToLength(CallRuntime* expr) {
EmitIntrinsicAsStubCall(expr, CodeFactory::ToLength(isolate()));
}
@@ -630,6 +691,13 @@ void FullCodeGenerator::EmitGeneratorNext(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitGeneratorReturn(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK(args->length() == 2);
+ EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::RETURN);
+}
+
+
void FullCodeGenerator::EmitGeneratorThrow(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -843,10 +911,7 @@ void FullCodeGenerator::VisitIfStatement(IfStatement* stmt) {
PrepareForBailoutForId(stmt->IfId(), NO_REGISTERS);
}
-
-void FullCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
- Comment cmnt(masm_, "[ ContinueStatement");
- SetStatementPosition(stmt);
+void FullCodeGenerator::EmitContinue(Statement* target) {
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
int context_length = 0;
@@ -855,7 +920,15 @@ void FullCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
// try...finally on our way out, we will unconditionally preserve the
// accumulator on the stack.
ClearAccumulator();
- while (!current->IsContinueTarget(stmt->target())) {
+ while (!current->IsContinueTarget(target)) {
+ if (current->IsTryFinally()) {
+ Comment cmnt(masm(), "[ Deferred continue through finally");
+ current->Exit(&stack_depth, &context_length);
+ DCHECK_EQ(0, stack_depth);
+ DCHECK_EQ(0, context_length);
+ current->AsTryFinally()->deferred_commands()->RecordContinue(target);
+ return;
+ }
current = current->Exit(&stack_depth, &context_length);
}
__ Drop(stack_depth);
@@ -871,10 +944,13 @@ void FullCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
__ jmp(current->AsIteration()->continue_label());
}
-
-void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
- Comment cmnt(masm_, "[ BreakStatement");
+void FullCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
+ Comment cmnt(masm_, "[ ContinueStatement");
SetStatementPosition(stmt);
+ EmitContinue(stmt->target());
+}
+
+void FullCodeGenerator::EmitBreak(Statement* target) {
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
int context_length = 0;
@@ -883,7 +959,15 @@ void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
// try...finally on our way out, we will unconditionally preserve the
// accumulator on the stack.
ClearAccumulator();
- while (!current->IsBreakTarget(stmt->target())) {
+ while (!current->IsBreakTarget(target)) {
+ if (current->IsTryFinally()) {
+ Comment cmnt(masm(), "[ Deferred break through finally");
+ current->Exit(&stack_depth, &context_length);
+ DCHECK_EQ(0, stack_depth);
+ DCHECK_EQ(0, context_length);
+ current->AsTryFinally()->deferred_commands()->RecordBreak(target);
+ return;
+ }
current = current->Exit(&stack_depth, &context_length);
}
__ Drop(stack_depth);
@@ -899,24 +983,62 @@ void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
__ jmp(current->AsBreakable()->break_label());
}
+void FullCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
+ Comment cmnt(masm_, "[ BreakStatement");
+ SetStatementPosition(stmt);
+ EmitBreak(stmt->target());
+}
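A break or continue that crosses a try-finally can no longer jump straight to its target: the walk up the nesting stack now stops at the innermost TryFinally, records the pending transfer as a deferred command, and routes control through the finally block instead. A sketch of that unwinding decision (data layout is invented for illustration):

#include <cassert>
#include <vector>

// Innermost-first nesting stack: each level is either a plain statement,
// a try-finally, or the break/continue target itself.
struct Nesting { bool is_try_finally; bool is_target; };

// Returns the index of the try-finally that must defer the transfer, or
// -1 when the target is reached without crossing a finally block.
int FindDeferringFinally(const std::vector<Nesting>& stack) {
  for (size_t i = 0; i < stack.size(); ++i) {
    if (stack[i].is_target) return -1;  // direct jump is safe
    if (stack[i].is_try_finally) return static_cast<int>(i);
  }
  return -1;
}

int main() {
  // for (...) { try { break; } finally { ... } }
  std::vector<Nesting> stack = {{false, false}, {true, false}, {false, true}};
  assert(FindDeferringFinally(stack) == 1);  // the finally intercepts
  // for (...) { if (...) break; }
  std::vector<Nesting> plain = {{false, false}, {false, true}};
  assert(FindDeferringFinally(plain) == -1);  // ordinary jump
}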
-void FullCodeGenerator::EmitUnwindBeforeReturn() {
+void FullCodeGenerator::EmitUnwindAndReturn() {
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
int context_length = 0;
while (current != NULL) {
+ if (current->IsTryFinally()) {
+ Comment cmnt(masm(), "[ Deferred return through finally");
+ current->Exit(&stack_depth, &context_length);
+ DCHECK_EQ(0, stack_depth);
+ DCHECK_EQ(0, context_length);
+ current->AsTryFinally()->deferred_commands()->RecordReturn();
+ return;
+ }
current = current->Exit(&stack_depth, &context_length);
}
__ Drop(stack_depth);
+ EmitReturnSequence();
}
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object
+ SetExpressionPosition(prop);
+ Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
+ DCHECK(prop->IsSuperAccess());
+
+ PushOperand(key->value());
+ CallRuntimeWithOperands(Runtime::kLoadFromSuper);
+}
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+ SetExpressionPosition(prop);
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ __ Move(LoadDescriptor::SlotRegister(),
+ SmiFromSlot(prop->PropertyFeedbackSlot()));
+ CallIC(ic);
+}
+
+void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
+ // Stack: receiver, home_object, key.
+ SetExpressionPosition(prop);
+ CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
+}
void FullCodeGenerator::EmitPropertyKey(ObjectLiteralProperty* property,
BailoutId bailout_id) {
VisitForStackValue(property->key());
- __ CallRuntime(Runtime::kToName);
+ CallRuntimeWithOperands(Runtime::kToName);
PrepareForBailoutForId(bailout_id, NO_REGISTERS);
- __ Push(result_register());
+ PushOperand(result_register());
}
@@ -925,8 +1047,7 @@ void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
SetStatementPosition(stmt);
Expression* expr = stmt->expression();
VisitForAccumulatorValue(expr);
- EmitUnwindBeforeReturn();
- EmitReturnSequence();
+ EmitUnwindAndReturn();
}
@@ -939,9 +1060,9 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
__ Move(callable.descriptor().GetRegisterParameter(0), result_register());
__ Call(callable.code(), RelocInfo::CODE_TARGET);
PrepareForBailoutForId(stmt->ToObjectId(), NO_REGISTERS);
- __ Push(result_register());
+ PushOperand(result_register());
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushWithContext);
+ CallRuntimeWithOperands(Runtime::kPushWithContext);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -1129,16 +1250,15 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
Label try_entry, handler_entry, exit;
__ jmp(&try_entry);
__ bind(&handler_entry);
- PrepareForBailoutForId(stmt->HandlerId(), NO_REGISTERS);
ClearPendingMessage();
// Exception handler code, the exception is in the result register.
// Extend the context before executing the catch block.
{ Comment cmnt(masm_, "[ Extend catch context");
- __ Push(stmt->variable()->name());
- __ Push(result_register());
+ PushOperand(stmt->variable()->name());
+ PushOperand(result_register());
PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushCatchContext);
+ CallRuntimeWithOperands(Runtime::kPushCatchContext);
StoreToFrameField(StandardFrameConstants::kContextOffset,
context_register());
}
@@ -1178,51 +1298,40 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// executing the try body, and removing it again afterwards.
//
// The try-finally construct can enter the finally block in three ways:
- // 1. By exiting the try-block normally. This removes the try-handler and
- // calls the finally block code before continuing.
+ // 1. By exiting the try-block normally. This exits the try block,
+ // pushes the continuation token and falls through to the finally
+ // block.
// 2. By exiting the try-block with a function-local control flow transfer
- // (break/continue/return). The site of the, e.g., break removes the
- // try handler and calls the finally block code before continuing
- // its outward control transfer.
- // 3. By exiting the try-block with a thrown exception.
- // This can happen in nested function calls. It traverses the try-handler
- // chain and consumes the try-handler entry before jumping to the
- // handler code. The handler code then calls the finally-block before
- // rethrowing the exception.
- //
- // The finally block must assume a return address on top of the stack
- // (or in the link register on ARM chips) and a value (return value or
- // exception) in the result register (rax/eax/r0), both of which must
- // be preserved. The return address isn't GC-safe, so it should be
- // cooked before GC.
+ // (break/continue/return). The site of the, e.g., break exits the
+ // try block, pushes the continuation token and jumps to the
+ // finally block. After the finally block executes, the execution
+ // continues based on the continuation token to a block that
+ // continues with the control flow transfer.
+ // 3. By exiting the try-block with a thrown exception. In the handler,
+ // we push the exception and continuation token and jump to the
+ // finally block (which will again dispatch based on the token once
+ // it is finished).
+
Label try_entry, handler_entry, finally_entry;
+ DeferredCommands deferred(this, &finally_entry);
// Jump to try-handler setup and try-block code.
__ jmp(&try_entry);
__ bind(&handler_entry);
- PrepareForBailoutForId(stmt->HandlerId(), NO_REGISTERS);
// Exception handler code. This code is only executed when an exception
- // is thrown. The exception is in the result register, and must be
- // preserved by the finally block. Call the finally block and then
- // rethrow the exception if it returns.
- __ Call(&finally_entry);
- __ Push(result_register());
- __ CallRuntime(Runtime::kReThrow);
-
- // Finally block implementation.
- __ bind(&finally_entry);
- EnterFinallyBlock();
- { Finally finally_body(this);
- Visit(stmt->finally_block());
+ // is thrown. Record the continuation and jump to the finally block.
+ {
+ Comment cmt_handler(masm(), "[ Finally handler");
+ deferred.RecordThrow();
}
- ExitFinallyBlock(); // Return to the calling code.
// Set up try handler.
__ bind(&try_entry);
int handler_index = NewHandlerTableEntry();
EnterTryBlock(handler_index, &handler_entry);
- { TryFinally try_body(this, &finally_entry);
+ {
+ TryFinally try_body(this, &deferred);
Visit(stmt->try_block());
}
ExitTryBlock(handler_index);
@@ -1231,7 +1340,25 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// finally block will unconditionally preserve the result register on the
// stack.
ClearAccumulator();
- __ Call(&finally_entry);
+ deferred.EmitFallThrough();
+ // Fall through to the finally block.
+
+ // Finally block implementation.
+ __ bind(&finally_entry);
+ Comment cmnt_finally(masm(), "[ Finally block");
+ OperandStackDepthIncrement(2); // Token and accumulator are on stack.
+ EnterFinallyBlock();
+ {
+ Finally finally_body(this);
+ Visit(stmt->finally_block());
+ }
+ ExitFinallyBlock();
+ OperandStackDepthDecrement(2); // Token and accumulator were on stack.
+
+ {
+ Comment cmnt_deferred(masm(), "[ Post-finally dispatch");
+ deferred.EmitCommands(); // Return to the calling code.
+ }
}
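Taken together, the rewritten statement gives every path into the finally block the same shape: push a token (fall-through, return, throw, break, or continue) and the current result, run the finally body exactly once, then dispatch on the token. A compact end-to-end simulation of "try { maybe return } finally { side effect }" (token values are assumptions standing in for TokenDispenserForFinally's constants):

#include <cassert>

enum Token { kFallThroughToken = 0, kReturnToken = 1, kThrowToken = 2 };

struct Outcome { bool returned; bool rethrown; int result; };

Outcome RunTryFinally(bool try_returns, int* finally_runs) {
  int token = kFallThroughToken, result = 0;
  if (try_returns) { token = kReturnToken; result = 7; }  // deferred return
  *finally_runs += 1;  // the finally body executes exactly once
  switch (token) {     // post-finally dispatch on the saved token
    case kReturnToken: return {true, false, result};
    case kThrowToken:  return {false, true, result};
    default:           return {false, false, result};  // fall through
  }
}

int main() {
  int finally_runs = 0;
  Outcome o = RunTryFinally(true, &finally_runs);
  assert(o.returned && o.result == 7 && finally_runs == 1);
  o = RunTryFinally(false, &finally_runs);
  assert(!o.returned && finally_runs == 2);
}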
@@ -1256,6 +1383,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
Label true_case, false_case, done;
VisitForControl(expr->condition(), &true_case, &false_case, &true_case);
+ int original_stack_depth = operand_stack_depth_;
PrepareForBailoutForId(expr->ThenId(), NO_REGISTERS);
__ bind(&true_case);
SetExpressionPosition(expr->then_expression());
@@ -1270,6 +1398,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) {
__ jmp(&done);
}
+ operand_stack_depth_ = original_stack_depth;
PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS);
__ bind(&false_case);
SetExpressionPosition(expr->else_expression());
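The save/restore of operand_stack_depth_ around the arms reflects that the compiler visits the then- and else-expressions sequentially, while at run time only one of them executes; each arm must therefore be compiled against the depth that held before the branch. A minimal illustration (sketch, not V8 code):

#include <cassert>

// Suppose each conditional arm pushes exactly one operand.
int CompileArm(int depth_at_entry) { return depth_at_entry + 1; }

int main() {
  int original_stack_depth = 5;
  int depth_after_then = CompileArm(original_stack_depth);
  // Without the restore, the else-arm would start from the then-arm's
  // exit depth and the tracker would overstate the real stack by one.
  int depth_after_else = CompileArm(original_stack_depth);
  assert(depth_after_then == depth_after_else);
}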
@@ -1308,28 +1437,36 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
EnterBlockScopeIfNeeded block_scope_state(
this, lit->scope(), lit->EntryId(), lit->DeclsId(), lit->ExitId());
- if (lit->raw_name() != NULL) {
- __ Push(lit->name());
- } else {
- __ Push(isolate()->factory()->undefined_value());
- }
-
if (lit->extends() != NULL) {
VisitForStackValue(lit->extends());
} else {
- __ Push(isolate()->factory()->the_hole_value());
+ PushOperand(isolate()->factory()->the_hole_value());
}
VisitForStackValue(lit->constructor());
- __ Push(Smi::FromInt(lit->start_position()));
- __ Push(Smi::FromInt(lit->end_position()));
+ PushOperand(Smi::FromInt(lit->start_position()));
+ PushOperand(Smi::FromInt(lit->end_position()));
- __ CallRuntime(Runtime::kDefineClass);
+ CallRuntimeWithOperands(Runtime::kDefineClass);
PrepareForBailoutForId(lit->CreateLiteralId(), TOS_REG);
+ PushOperand(result_register());
+
+ // Load the "prototype" from the constructor.
+ __ Move(LoadDescriptor::ReceiverRegister(), result_register());
+ __ LoadRoot(LoadDescriptor::NameRegister(),
+ Heap::kprototype_stringRootIndex);
+ __ Move(LoadDescriptor::SlotRegister(), SmiFromSlot(lit->PrototypeSlot()));
+ CallLoadIC(NOT_INSIDE_TYPEOF);
+ PrepareForBailoutForId(lit->PrototypeId(), TOS_REG);
+ PushOperand(result_register());
EmitClassDefineProperties(lit);
+ // Set both the prototype and constructor to have fast properties, and also
+ // freeze them in strong mode.
+ CallRuntimeWithOperands(Runtime::kFinalizeClassDefinition);
+
if (lit->class_variable_proxy() != nullptr) {
EmitVariableAssignment(lit->class_variable_proxy()->var(), Token::INIT,
lit->ProxySlot());
@@ -1343,35 +1480,8 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
void FullCodeGenerator::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
Comment cmnt(masm_, "[ NativeFunctionLiteral");
-
- v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate());
-
- // Compute the function template for the native function.
- Handle<String> name = expr->name();
- v8::Local<v8::FunctionTemplate> fun_template =
- expr->extension()->GetNativeFunctionTemplate(v8_isolate,
- v8::Utils::ToLocal(name));
- DCHECK(!fun_template.IsEmpty());
-
- // Instantiate the function and create a shared function info from it.
- Handle<JSFunction> fun = Handle<JSFunction>::cast(Utils::OpenHandle(
- *fun_template->GetFunction(v8_isolate->GetCurrentContext())
- .ToLocalChecked()));
- const int literals = fun->NumberOfLiterals();
- Handle<Code> code = Handle<Code>(fun->shared()->code());
- Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
Handle<SharedFunctionInfo> shared =
- isolate()->factory()->NewSharedFunctionInfo(
- name, literals, FunctionKind::kNormalFunction, code,
- Handle<ScopeInfo>(fun->shared()->scope_info()),
- Handle<TypeFeedbackVector>(fun->shared()->feedback_vector()));
- shared->set_construct_stub(*construct_stub);
-
- // Copy the function data to the shared function info.
- shared->set_function_data(fun->shared()->function_data());
- int parameters = fun->shared()->internal_formal_parameter_count();
- shared->set_internal_formal_parameter_count(parameters);
-
+ Compiler::GetSharedFunctionInfoForNative(expr->extension(), expr->name());
EmitNewClosure(shared, false);
}
@@ -1380,8 +1490,12 @@ void FullCodeGenerator::VisitThrow(Throw* expr) {
Comment cmnt(masm_, "[ Throw");
VisitForStackValue(expr->exception());
SetExpressionPosition(expr);
- __ CallRuntime(Runtime::kThrow);
+ CallRuntimeWithOperands(Runtime::kThrow);
// Never returns here.
+
+ // Even though this expression doesn't produce a value, we need to simulate
+ // plugging of the value context to ensure stack depth tracking is in sync.
+ if (context()->IsStackValue()) OperandStackDepthIncrement(1);
}
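
The comment above is the crux of the static tracking scheme: the counter models what the verifier will see on paths the generated code can actually reach, even after a call that never returns. A tiny sketch, with hypothetical names:

#include <cassert>

int operand_stack_depth = 0;  // stand-in for operand_stack_depth_

void VisitThrowSketch(bool context_is_stack_value) {
  operand_stack_depth += 1;  // VisitForStackValue(exception)
  operand_stack_depth -= 1;  // CallRuntimeWithOperands(kThrow) consumes it
  // Control never returns, but code after the throw is still checked
  // against the counter, so simulate the value the context would plug.
  if (context_is_stack_value) operand_stack_depth += 1;
}

int main() {
  VisitThrowSketch(true);
  assert(operand_stack_depth == 1);
  return 0;
}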
@@ -1390,17 +1504,14 @@ void FullCodeGenerator::EnterTryBlock(int handler_index, Label* handler) {
entry->range_start = masm()->pc_offset();
entry->handler_offset = handler->pos();
entry->try_catch_depth = try_catch_depth_;
+ entry->stack_depth = operand_stack_depth_;
- // Determine expression stack depth of try statement.
- int stack_depth = info_->scope()->num_stack_slots(); // Include stack locals.
- for (NestedStatement* current = nesting_stack_; current != NULL; /*nop*/) {
- current = current->AccumulateDepth(&stack_depth);
- }
- entry->stack_depth = stack_depth;
+  // We are using the operand stack depth; check it for accuracy.
+ EmitOperandStackDepthCheck();
// Push context onto operand stack.
STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
- __ Push(context_register());
+ PushOperand(context_register());
}
@@ -1409,7 +1520,7 @@ void FullCodeGenerator::ExitTryBlock(int handler_index) {
entry->range_end = masm()->pc_offset();
// Drop context from operand stack.
- __ Drop(TryBlockConstant::kElementCount);
+ DropOperands(TryBlockConstant::kElementCount);
}
@@ -1420,7 +1531,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
expr->return_is_recorded_ = false;
#endif
- Comment cmnt(masm_, "[ Call");
+ Comment cmnt(masm_, (expr->tail_call_mode() == TailCallMode::kAllow)
+ ? "[ TailCall"
+ : "[ Call");
Expression* callee = expr->expression();
Call::CallType call_type = expr->GetCallType(isolate());
@@ -1460,6 +1573,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
case Call::OTHER_CALL:
// Call to an arbitrary expression not handled specially above.
VisitForStackValue(callee);
+ OperandStackDepthIncrement(1);
__ PushRoot(Heap::kUndefinedValueRootIndex);
// Emit function call.
EmitCall(expr);
@@ -1481,8 +1595,7 @@ void FullCodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
}
-void FullCodeGenerator::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* expr) {
+void FullCodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
Visit(expr->expression());
}
@@ -1506,13 +1619,49 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
// Down to the handler block and also drop context.
__ Drop(*stack_depth + kElementCount);
}
- __ Call(finally_entry_);
-
*stack_depth = 0;
*context_length = 0;
return previous_;
}
+void FullCodeGenerator::DeferredCommands::RecordBreak(Statement* target) {
+ TokenId token = dispenser_.GetBreakContinueToken();
+ commands_.push_back({kBreak, token, target});
+ EmitJumpToFinally(token);
+}
+
+void FullCodeGenerator::DeferredCommands::RecordContinue(Statement* target) {
+ TokenId token = dispenser_.GetBreakContinueToken();
+ commands_.push_back({kContinue, token, target});
+ EmitJumpToFinally(token);
+}
+
+void FullCodeGenerator::DeferredCommands::RecordReturn() {
+ if (return_token_ == TokenDispenserForFinally::kInvalidToken) {
+ return_token_ = TokenDispenserForFinally::kReturnToken;
+ commands_.push_back({kReturn, return_token_, nullptr});
+ }
+ EmitJumpToFinally(return_token_);
+}
+
+void FullCodeGenerator::DeferredCommands::RecordThrow() {
+ if (throw_token_ == TokenDispenserForFinally::kInvalidToken) {
+ throw_token_ = TokenDispenserForFinally::kThrowToken;
+ commands_.push_back({kThrow, throw_token_, nullptr});
+ }
+ EmitJumpToFinally(throw_token_);
+}
+
+void FullCodeGenerator::DeferredCommands::EmitFallThrough() {
+ __ Push(Smi::FromInt(TokenDispenserForFinally::kFallThroughToken));
+ __ Push(result_register());
+}
+
+void FullCodeGenerator::DeferredCommands::EmitJumpToFinally(TokenId token) {
+ __ Push(Smi::FromInt(token));
+ __ Push(result_register());
+ __ jmp(finally_entry_);
+}
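
These helpers implement the token protocol that replaces the old call-based finally entry: each deferred break/continue/return/throw is assigned a token, the token travels through the finally body on the stack, and EmitCommands re-dispatches on it afterwards. A standalone sketch of the pattern (token values assumed; only the dispatch idea matters):

#include <cassert>
#include <vector>

enum Command { kReturn, kThrow, kBreak, kContinue };
struct DeferredCommand { Command command; int token; };

int main() {
  std::vector<DeferredCommand> commands;
  int next_token = 3;  // tokens 0..2 reserved for fall-through/return/throw

  // A `break` inside try {} records itself and "jumps" to finally with its
  // token (the real code pushes Smi(token) and the result register).
  commands.push_back({kBreak, next_token++});
  int token_on_stack = commands.back().token;

  // ... finally body runs here ...

  // EmitCommands() then re-dispatches on the saved token so the deferred
  // break resumes at its original target.
  for (const DeferredCommand& cmd : commands) {
    if (cmd.token == token_on_stack) {
      assert(cmd.command == kBreak);
    }
  }
  return 0;
}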
bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
Expression* sub_expr;
@@ -1640,9 +1789,9 @@ FullCodeGenerator::EnterBlockScopeIfNeeded::EnterBlockScopeIfNeeded(
{
if (needs_block_context_) {
Comment cmnt(masm(), "[ Extend block context");
- __ Push(scope->GetScopeInfo(codegen->isolate()));
+ codegen_->PushOperand(scope->GetScopeInfo(codegen->isolate()));
codegen_->PushFunctionArgumentForContextAllocation();
- __ CallRuntime(Runtime::kPushBlockContext);
+ codegen_->CallRuntimeWithOperands(Runtime::kPushBlockContext);
// Replace the context stored in the frame.
codegen_->StoreToFrameField(StandardFrameConstants::kContextOffset,
diff --git a/deps/v8/src/full-codegen/full-codegen.h b/deps/v8/src/full-codegen/full-codegen.h
index 52eddafa1a..6ab02313bb 100644
--- a/deps/v8/src/full-codegen/full-codegen.h
+++ b/deps/v8/src/full-codegen/full-codegen.h
@@ -42,6 +42,7 @@ class FullCodeGenerator: public AstVisitor {
nesting_stack_(NULL),
loop_depth_(0),
try_catch_depth_(0),
+ operand_stack_depth_(0),
globals_(NULL),
context_(NULL),
bailout_entries_(info->HasDeoptimizationSupport()
@@ -96,9 +97,12 @@ class FullCodeGenerator: public AstVisitor {
#error Unsupported target architecture.
#endif
+ static Register result_register();
+
private:
class Breakable;
class Iteration;
+ class TryFinally;
class TestContext;
@@ -115,11 +119,13 @@ class FullCodeGenerator: public AstVisitor {
codegen_->nesting_stack_ = previous_;
}
- virtual Breakable* AsBreakable() { return NULL; }
- virtual Iteration* AsIteration() { return NULL; }
+ virtual Breakable* AsBreakable() { return nullptr; }
+ virtual Iteration* AsIteration() { return nullptr; }
+ virtual TryFinally* AsTryFinally() { return nullptr; }
virtual bool IsContinueTarget(Statement* target) { return false; }
virtual bool IsBreakTarget(Statement* target) { return false; }
+ virtual bool IsTryFinally() { return false; }
// Notify the statement that we are exiting it via break, continue, or
// return and give it a chance to generate cleanup code. Return the
@@ -131,11 +137,6 @@ class FullCodeGenerator: public AstVisitor {
return previous_;
}
- // Like the Exit() method above, but limited to accumulating stack depth.
- virtual NestedStatement* AccumulateDepth(int* stack_depth) {
- return previous_;
- }
-
protected:
MacroAssembler* masm() { return codegen_->masm(); }
@@ -211,10 +212,43 @@ class FullCodeGenerator: public AstVisitor {
*stack_depth += kElementCount;
return previous_;
}
- NestedStatement* AccumulateDepth(int* stack_depth) override {
- *stack_depth += kElementCount;
- return previous_;
- }
+ };
+
+ class DeferredCommands {
+ public:
+ enum Command { kReturn, kThrow, kBreak, kContinue };
+ typedef int TokenId;
+ struct DeferredCommand {
+ Command command;
+ TokenId token;
+ Statement* target;
+ };
+
+ DeferredCommands(FullCodeGenerator* codegen, Label* finally_entry)
+ : codegen_(codegen),
+ commands_(codegen->zone()),
+ return_token_(TokenDispenserForFinally::kInvalidToken),
+ throw_token_(TokenDispenserForFinally::kInvalidToken),
+ finally_entry_(finally_entry) {}
+
+ void EmitCommands();
+
+ void RecordBreak(Statement* target);
+ void RecordContinue(Statement* target);
+ void RecordReturn();
+ void RecordThrow();
+ void EmitFallThrough();
+
+ private:
+ MacroAssembler* masm() { return codegen_->masm(); }
+ void EmitJumpToFinally(TokenId token);
+
+ FullCodeGenerator* codegen_;
+ ZoneVector<DeferredCommand> commands_;
+ TokenDispenserForFinally dispenser_;
+ TokenId return_token_;
+ TokenId throw_token_;
+ Label* finally_entry_;
};
// The try block of a try/finally statement.
@@ -222,18 +256,18 @@ class FullCodeGenerator: public AstVisitor {
public:
static const int kElementCount = TryBlockConstant::kElementCount;
- TryFinally(FullCodeGenerator* codegen, Label* finally_entry)
- : NestedStatement(codegen), finally_entry_(finally_entry) {
- }
+ TryFinally(FullCodeGenerator* codegen, DeferredCommands* commands)
+ : NestedStatement(codegen), deferred_commands_(commands) {}
NestedStatement* Exit(int* stack_depth, int* context_length) override;
- NestedStatement* AccumulateDepth(int* stack_depth) override {
- *stack_depth += kElementCount;
- return previous_;
- }
+
+ bool IsTryFinally() override { return true; }
+ TryFinally* AsTryFinally() override { return this; }
+
+ DeferredCommands* deferred_commands() { return deferred_commands_; }
private:
- Label* finally_entry_;
+ DeferredCommands* deferred_commands_;
};
// The finally block of a try/finally statement.
@@ -247,10 +281,6 @@ class FullCodeGenerator: public AstVisitor {
*stack_depth += kElementCount;
return previous_;
}
- NestedStatement* AccumulateDepth(int* stack_depth) override {
- *stack_depth += kElementCount;
- return previous_;
- }
};
// The body of a for/in loop.
@@ -266,10 +296,6 @@ class FullCodeGenerator: public AstVisitor {
*stack_depth += kElementCount;
return previous_;
}
- NestedStatement* AccumulateDepth(int* stack_depth) override {
- *stack_depth += kElementCount;
- return previous_;
- }
};
@@ -354,18 +380,21 @@ class FullCodeGenerator: public AstVisitor {
MemOperand VarOperand(Variable* var, Register scratch);
void VisitForEffect(Expression* expr) {
+ if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
EffectContext context(this);
Visit(expr);
PrepareForBailout(expr, NO_REGISTERS);
}
void VisitForAccumulatorValue(Expression* expr) {
+ if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
AccumulatorValueContext context(this);
Visit(expr);
PrepareForBailout(expr, TOS_REG);
}
void VisitForStackValue(Expression* expr) {
+ if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
StackValueContext context(this);
Visit(expr);
PrepareForBailout(expr, NO_REGISTERS);
@@ -375,6 +404,7 @@ class FullCodeGenerator: public AstVisitor {
Label* if_true,
Label* if_false,
Label* fall_through) {
+ if (FLAG_verify_operand_stack_depth) EmitOperandStackDepthCheck();
TestContext context(this, expr, if_true, if_false, fall_through);
Visit(expr);
// For test contexts, we prepare for bailout before branching, not at
@@ -389,6 +419,34 @@ class FullCodeGenerator: public AstVisitor {
void DeclareGlobals(Handle<FixedArray> pairs);
int DeclareGlobalsFlags();
+ // Push, pop or drop values onto/from the operand stack.
+ void PushOperand(Register reg);
+ void PopOperand(Register reg);
+ void DropOperands(int count);
+
+ // Convenience helpers for pushing onto the operand stack.
+ void PushOperand(MemOperand operand);
+ void PushOperand(Handle<Object> handle);
+ void PushOperand(Smi* smi);
+
+ // Convenience helpers for pushing/popping multiple operands.
+ void PushOperands(Register reg1, Register reg2);
+ void PushOperands(Register reg1, Register reg2, Register reg3);
+ void PushOperands(Register reg1, Register reg2, Register reg3, Register reg4);
+ void PopOperands(Register reg1, Register reg2);
+
+ // Convenience helper for calling a runtime function that consumes arguments
+ // from the operand stack (only usable for functions with known arity).
+ void CallRuntimeWithOperands(Runtime::FunctionId function_id);
+
+ // Static tracking of the operand stack depth.
+ void OperandStackDepthDecrement(int count);
+ void OperandStackDepthIncrement(int count);
+
+ // Generate debug code that verifies that our static tracking of the operand
+ // stack depth is in sync with the actual operand stack during runtime.
+ void EmitOperandStackDepthCheck();
+
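
Every push and pop in the rewritten code is funneled through these wrappers so that the static counter and the machine stack cannot drift apart. A self-contained model of the discipline (names are stand-ins, not V8 identifiers):

#include <cassert>
#include <vector>

class OperandStack {
 public:
  void PushOperand(int value) {
    depth_++;  // OperandStackDepthIncrement(1)
    stack_.push_back(value);
  }
  int PopOperand() {
    depth_--;  // OperandStackDepthDecrement(1)
    int v = stack_.back();
    stack_.pop_back();
    return v;
  }
  // EmitOperandStackDepthCheck analogue: counter must match reality.
  void Check() const { assert(depth_ == static_cast<int>(stack_.size())); }

 private:
  std::vector<int> stack_;
  int depth_ = 0;
};

int main() {
  OperandStack s;
  s.PushOperand(1);
  s.PushOperand(2);
  s.Check();
  s.PopOperand();
  s.Check();
  return 0;
}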
// Generate code to create an iterator result object. The "value" property is
// set to a value popped from the stack, and "done" is set according to the
// argument. The result object is left in the result register.
@@ -455,11 +513,13 @@ class FullCodeGenerator: public AstVisitor {
// Emit code to pop values from the stack associated with nested statements
// like try/catch, try/finally, etc., running the finallies and unwinding the
- // handlers as needed.
- void EmitUnwindBeforeReturn();
+ // handlers as needed. Also emits the return sequence if necessary (i.e.,
+ // if the return is not delayed by a finally block).
+ void EmitUnwindAndReturn();
// Platform-specific return sequence
void EmitReturnSequence();
+ void EmitProfilingCounterHandlingForReturnSequence(bool is_tail_call);
// Platform-specific code sequences for calls
void EmitCall(Call* expr, ConvertReceiverMode = ConvertReceiverMode::kAny);
@@ -477,26 +537,18 @@ class FullCodeGenerator: public AstVisitor {
F(IsRegExp) \
F(IsJSProxy) \
F(Call) \
- F(ArgumentsLength) \
- F(Arguments) \
F(ValueOf) \
- F(SetValueOf) \
- F(IsDate) \
F(StringCharFromCode) \
F(StringCharAt) \
F(OneByteSeqStringSetChar) \
F(TwoByteSeqStringSetChar) \
- F(ObjectEquals) \
- F(IsFunction) \
F(IsJSReceiver) \
- F(IsSimdValue) \
F(MathPow) \
- F(IsMinusZero) \
F(HasCachedArrayIndex) \
F(GetCachedArrayIndex) \
F(GetSuperConstructor) \
- F(FastOneByteArrayJoin) \
F(GeneratorNext) \
+ F(GeneratorReturn) \
F(GeneratorThrow) \
F(DebugBreakInOptimizedCode) \
F(ClassOf) \
@@ -633,7 +685,7 @@ class FullCodeGenerator: public AstVisitor {
TypeFeedbackId id = TypeFeedbackId::None());
// Inside typeof reference errors are never thrown.
- void CallLoadIC(TypeofMode typeof_mode, LanguageMode language_mode = SLOPPY,
+ void CallLoadIC(TypeofMode typeof_mode,
TypeFeedbackId id = TypeFeedbackId::None());
void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
@@ -669,6 +721,9 @@ class FullCodeGenerator: public AstVisitor {
void ExitFinallyBlock();
void ClearPendingMessage();
+ void EmitContinue(Statement* target);
+ void EmitBreak(Statement* target);
+
// Loop nesting counter.
int loop_depth() { return loop_depth_; }
void increment_loop_depth() { loop_depth_++; }
@@ -693,7 +748,6 @@ class FullCodeGenerator: public AstVisitor {
FunctionLiteral* literal() const { return info_->literal(); }
Scope* scope() { return scope_; }
- static Register result_register();
static Register context_register();
// Set fields in the stack frame. Offsets are the frame pointer relative
@@ -945,6 +999,7 @@ class FullCodeGenerator: public AstVisitor {
NestedStatement* nesting_stack_;
int loop_depth_;
int try_catch_depth_;
+ int operand_stack_depth_;
ZoneList<Handle<Object> >* globals_;
Handle<FixedArray> modules_;
int module_index_;
@@ -954,7 +1009,6 @@ class FullCodeGenerator: public AstVisitor {
ZoneVector<HandlerTableEntry> handler_table_;
int ic_total_count_;
Handle<Cell> profiling_counter_;
- bool generate_debug_code_;
friend class NestedStatement;
diff --git a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
index 4ef3a0984f..fadcd7cb5d 100644
--- a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -17,8 +17,7 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
-
+#define __ ACCESS_MASM(masm())
class JumpPatchSite BASE_EMBEDDED {
public:
@@ -68,6 +67,7 @@ class JumpPatchSite BASE_EMBEDDED {
__ j(cc, target, distance);
}
+ MacroAssembler* masm() { return masm_; }
MacroAssembler* masm_;
Label patch_site_;
#ifdef DEBUG
@@ -99,13 +99,6 @@ void FullCodeGenerator::Generate() {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ mov(ecx, Operand(esp, receiver_offset));
@@ -126,6 +119,7 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
DCHECK(!IsGeneratorFunction(literal()->kind()) || locals_count == 0);
+ OperandStackDepthIncrement(locals_count);
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -259,48 +253,33 @@ void FullCodeGenerator::Generate() {
Variable* rest_param = scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ mov(RestParamAccessDescriptor::parameter_count(),
- Immediate(Smi::FromInt(num_parameters)));
- __ lea(RestParamAccessDescriptor::parameter_pointer(),
- Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(RestParamAccessDescriptor::rest_parameter_index(),
- Immediate(Smi::FromInt(rest_index)));
- function_in_register = false;
-
- RestParamAccessStub stub(isolate());
+ if (!function_in_register) {
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ FastNewRestParameterStub stub(isolate());
__ CallStub(&stub);
+ function_in_register = false;
SetVar(rest_param, eax, ebx, edx);
}
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
- // Function uses arguments object.
+ // Arguments object must be allocated after the context object, in
+ // case the "arguments" or ".arguments" variables are in the context.
Comment cmnt(masm_, "[ Allocate arguments object");
- DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
if (!function_in_register) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ mov(ArgumentsAccessNewDescriptor::parameter_count(),
- Immediate(Smi::FromInt(num_parameters)));
- __ lea(ArgumentsAccessNewDescriptor::parameter_pointer(),
- Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
-
- // Arguments to ArgumentsAccessStub:
- // function, parameter pointer, parameter count.
- // The stub will rewrite parameter pointer and parameter count if the
- // previous stack frame was an arguments adapter frame.
- bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
- ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
- is_unmapped, literal()->has_duplicate_parameters());
- ArgumentsAccessStub stub(isolate(), type);
- __ CallStub(&stub);
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
+ FastNewStrictArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ } else if (literal()->has_duplicate_parameters()) {
+ __ Push(edi);
+ __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+ } else {
+ FastNewSloppyArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ }
SetVar(arguments, eax, ebx, edx);
}
@@ -401,6 +380,30 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
}
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+ bool is_tail_call) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ j(positive, &ok, Label::kNear);
+ // Don't need to save result register if we are going to do a tail call.
+ if (!is_tail_call) {
+ __ push(eax);
+ }
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+ if (!is_tail_call) {
+ __ pop(eax);
+ }
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+}
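
The weight fed to EmitProfilingCounterDecrement scales with code size and is clamped. A worked example with illustrative constants (the real kMaxBackEdgeWeight and per-platform kCodeSizeMultiplier are defined in full-codegen.h and may differ):

#include <algorithm>
#include <cassert>

const int kMaxBackEdgeWeight = 127;   // assumed value
const int kCodeSizeMultiplier = 100;  // assumed value

int ReturnWeight(int pc_offset) {
  return std::min(kMaxBackEdgeWeight,
                  std::max(1, pc_offset / kCodeSizeMultiplier));
}

int main() {
  assert(ReturnWeight(50) == 1);        // small function: minimum weight
  assert(ReturnWeight(1000) == 10);     // weight grows with code size
  assert(ReturnWeight(1000000) == 127); // clamped at kMaxBackEdgeWeight
  return 0;
}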
void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
@@ -413,24 +416,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(eax);
__ CallRuntime(Runtime::kTraceExit);
}
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ j(positive, &ok, Label::kNear);
- __ push(eax);
- __ call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- __ pop(eax);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ EmitProfilingCounterHandlingForReturnSequence(false);
SetReturnPosition(literal());
__ leave();
@@ -446,7 +432,7 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand operand = codegen()->VarOperand(var, result_register());
// Memory operands can be pushed directly.
- __ push(operand);
+ codegen()->PushOperand(operand);
}
@@ -487,6 +473,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ codegen()->OperandStackDepthIncrement(1);
if (lit->IsSmi()) {
__ SafePush(Immediate(lit));
} else {
@@ -500,7 +487,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ jmp(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -525,41 +512,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
}
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
DCHECK(count > 0);
- if (count > 1) __ Drop(count - 1);
+ if (count > 1) codegen()->DropOperands(count - 1);
__ mov(Operand(esp, 0), reg);
}
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
DCHECK(materialize_true == materialize_false);
@@ -583,6 +543,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
+ codegen()->OperandStackDepthIncrement(1);
Label done;
__ bind(materialize_true);
__ push(Immediate(isolate()->factory()->true_value()));
@@ -609,6 +570,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ codegen()->OperandStackDepthIncrement(1);
Handle<Object> value = flag
? isolate()->factory()->true_value()
: isolate()->factory()->false_value();
@@ -731,7 +693,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current context.
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
+ if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
__ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
__ cmp(ebx, isolate()->factory()->with_context_map());
@@ -846,11 +808,10 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
- __ push(Immediate(variable->name()));
+ PushOperand(variable->name());
VisitForStackValue(declaration->fun());
- __ push(
- Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -923,8 +884,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
}
SetExpressionPosition(clause);
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
- strength(language_mode())).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -946,7 +907,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Discard the test value and jump to the default if present, otherwise to
// the end of the statement.
__ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
+ DropOperands(1); // Switch value is no longer needed.
if (default_clause == NULL) {
__ jmp(nested_statement.break_label());
} else {
@@ -977,22 +938,21 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. If the object is null or undefined, skip
- // over the loop. See ECMA-262 version 5, section 12.6.4.
+ // Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
- __ cmp(eax, isolate()->factory()->undefined_value());
- __ j(equal, &exit);
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, &exit);
-
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+ OperandStackDepthIncrement(ForIn::kElementCount);
- // Convert the object to a JS object.
+  // If the object is null or undefined, skip over the loop; otherwise convert
+ // it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
Label convert, done_convert;
__ JumpIfSmi(eax, &convert, Label::kNear);
__ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(above_equal, &done_convert, Label::kNear);
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ __ j(equal, &exit);
+ __ cmp(eax, isolate()->factory()->null_value());
+ __ j(equal, &exit);
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
@@ -1000,15 +960,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(eax);
- // Check for proxies.
- Label call_runtime, use_cache, fixed_array;
- __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
- __ j(equal, &call_runtime);
-
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
+  // Note: Proxies never have an enum cache, so they will always take the
+  // slow path.
+ Label call_runtime, use_cache, fixed_array;
__ CheckEnumCache(&call_runtime);
__ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
@@ -1017,7 +975,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(eax);
- __ CallRuntime(Runtime::kGetPropertyNamesFast);
+ __ CallRuntime(Runtime::kForInEnumerate);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
@@ -1051,14 +1009,15 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&fixed_array);
// No need for a write barrier; we are storing a Smi in the feedback vector.
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(ebx);
- int vector_index = SmiFromSlot(slot)->value();
__ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ push(Immediate(Smi::FromInt(1))); // Smi(1) indicates slow check
__ push(eax); // Array
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi).
+ PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ push(Immediate(Smi::FromInt(0))); // Initial index.
// Generate code for doing the condition check.
@@ -1084,6 +1043,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
+  // We might get here from TurboFan or Crankshaft when something in the
+  // for-in loop body deopts and we only now notice in fullcodegen that we
+  // can no longer use the enum cache, i.e. we have left fast mode. Record
+  // this information here, in case we later OSR back into this loop or
+  // reoptimize the whole function without rerunning the loop with the
+  // slow-mode object in fullcodegen (which would result in a deopt loop).
+ __ EmitLoadTypeFeedbackVector(edx);
+ __ mov(FieldOperand(edx, FixedArray::OffsetOfElementAt(vector_index)),
+ Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
@@ -1121,6 +1090,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
__ add(esp, Immediate(5 * kPointerSize));
+ OperandStackDepthDecrement(ForIn::kElementCount);
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
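
The five words cleaned up here match ForIn::kElementCount and the values pushed on the two paths above. A sketch of the layout (slot names are descriptive, not V8 identifiers; the fast path stores the receiver map where the slow path stores Smi(1)):

#include <cstddef>

struct ForInLoopSlots {
  void* receiver;    // enumerable after ToObject
  void* cache_type;  // map for the fast path, Smi(1) for the slow path
  void* cache;       // enum cache bridge or fixed array of names
  void* length;      // number of names, as a Smi
  void* index;       // current index, as a Smi
};
static_assert(sizeof(ForInLoopSlots) == 5 * sizeof(void*),
              "matches the `add esp, 5 * kPointerSize` cleanup");

int main() { return 0; }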
@@ -1363,12 +1333,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// by eval-introduced variables.
EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ bind(&slow);
- __ push(esi); // Context.
__ push(Immediate(var->name()));
Runtime::FunctionId function_id =
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotNoReferenceError;
+ : Runtime::kLoadLookupSlotInsideTypeof;
__ CallRuntime(function_id);
__ bind(&done);
context()->Plug(eax);
@@ -1393,7 +1362,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
- __ push(Immediate(isolate()->factory()->null_value()));
+ PushOperand(isolate()->factory()->null_value());
} else {
VisitForStackValue(expression);
if (NeedsHomeObject(expression)) {
@@ -1443,7 +1412,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
- __ push(eax); // Save result on the stack
+ PushOperand(eax); // Save result on the stack
result_saved = true;
}
switch (property->kind()) {
@@ -1472,24 +1441,24 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
}
- __ push(Operand(esp, 0)); // Duplicate receiver.
+ PushOperand(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
- __ push(Immediate(Smi::FromInt(SLOPPY))); // Language mode
- __ CallRuntime(Runtime::kSetProperty);
+ PushOperand(Smi::FromInt(SLOPPY)); // Language mode
+ CallRuntimeWithOperands(Runtime::kSetProperty);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
case ObjectLiteral::Property::PROTOTYPE:
- __ push(Operand(esp, 0)); // Duplicate receiver.
+ PushOperand(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
break;
@@ -1511,14 +1480,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end();
++it) {
- __ push(Operand(esp, 0)); // Duplicate receiver.
+ PushOperand(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
- __ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1535,17 +1504,17 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Expression* value = property->value();
if (!result_saved) {
- __ push(eax); // Save result on the stack
+ PushOperand(eax); // Save result on the stack
result_saved = true;
}
- __ push(Operand(esp, 0)); // Duplicate receiver.
+ PushOperand(Operand(esp, 0)); // Duplicate receiver.
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
} else {
@@ -1560,10 +1529,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
case ObjectLiteral::Property::COMPUTED:
if (property->emit_store()) {
- __ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
@@ -1572,13 +1542,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
- __ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1636,14 +1606,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
int array_index = 0;
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- if (subexpr->IsSpread()) break;
+ DCHECK(!subexpr->IsSpread());
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(eax); // array literal.
+ PushOperand(eax); // array literal.
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
@@ -1664,21 +1634,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements get appended to the array. Note that the
// number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Pop(eax);
+ PopOperand(eax);
result_saved = false;
}
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- __ Push(eax);
- if (subexpr->IsSpread()) {
- VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement);
- }
+ PushOperand(eax);
+ DCHECK(!subexpr->IsSpread());
+ VisitForStackValue(subexpr);
+ CallRuntimeWithOperands(Runtime::kAppendElement);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1710,10 +1675,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
property->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
property->obj()->AsSuperPropertyReference()->home_object());
- __ push(result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
- __ push(MemOperand(esp, kPointerSize));
- __ push(result_register());
+ PushOperand(MemOperand(esp, kPointerSize));
+ PushOperand(result_register());
}
break;
case NAMED_PROPERTY:
@@ -1731,11 +1696,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(
property->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(property->key());
- __ Push(result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
- __ push(MemOperand(esp, 2 * kPointerSize));
- __ push(MemOperand(esp, 2 * kPointerSize));
- __ push(result_register());
+ PushOperand(MemOperand(esp, 2 * kPointerSize));
+ PushOperand(MemOperand(esp, 2 * kPointerSize));
+ PushOperand(result_register());
}
break;
case KEYED_PROPERTY: {
@@ -1782,7 +1747,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- __ push(eax); // Left operand goes on the stack.
+ PushOperand(eax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
if (ShouldInlineSmiCase(op)) {
@@ -1847,8 +1812,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ jmp(&suspend);
__ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
__ RecordGeneratorContinuation();
- __ jmp(&resume);
+ __ pop(ebx);
+ __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
+ __ j(not_equal, &resume);
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
__ bind(&suspend);
VisitForAccumulatorValue(expr->generator_object());
@@ -1867,7 +1840,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
- __ pop(result_register());
+ PopOperand(result_register());
EmitReturnSequence();
__ bind(&resume);
@@ -1876,126 +1849,15 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
}
case Yield::kFinal: {
- VisitForAccumulatorValue(expr->generator_object());
- __ mov(FieldOperand(result_register(),
- JSGeneratorObject::kContinuationOffset),
- Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
// Pop value from top-of-stack slot, box result into result register.
+ OperandStackDepthDecrement(1);
EmitCreateIteratorResult(true);
- EmitUnwindBeforeReturn();
- EmitReturnSequence();
+ EmitUnwindAndReturn();
break;
}
- case Yield::kDelegating: {
- VisitForStackValue(expr->generator_object());
-
- // Initial stack layout is as follows:
- // [sp + 1 * kPointerSize] iter
- // [sp + 0 * kPointerSize] g
-
- Label l_catch, l_try, l_suspend, l_continuation, l_resume;
- Label l_next, l_call, l_loop;
- Register load_receiver = LoadDescriptor::ReceiverRegister();
- Register load_name = LoadDescriptor::NameRegister();
-
- // Initial send value is undefined.
- __ mov(eax, isolate()->factory()->undefined_value());
- __ jmp(&l_next);
-
- // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
- __ bind(&l_catch);
- __ mov(load_name, isolate()->factory()->throw_string()); // "throw"
- __ push(load_name); // "throw"
- __ push(Operand(esp, 2 * kPointerSize)); // iter
- __ push(eax); // exception
- __ jmp(&l_call);
-
- // try { received = %yield result }
- // Shuffle the received result above a try handler and yield it without
- // re-boxing.
- __ bind(&l_try);
- __ pop(eax); // result
- int handler_index = NewHandlerTableEntry();
- EnterTryBlock(handler_index, &l_catch);
- const int try_block_size = TryCatch::kElementCount * kPointerSize;
- __ push(eax); // result
-
- __ jmp(&l_suspend);
- __ bind(&l_continuation);
- __ RecordGeneratorContinuation();
- __ jmp(&l_resume);
-
- __ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + try_block_size;
- __ mov(eax, Operand(esp, generator_object_depth));
- __ push(eax); // g
- __ push(Immediate(Smi::FromInt(handler_index))); // handler-index
- DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
- __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
- Immediate(Smi::FromInt(l_continuation.pos())));
- __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
- __ mov(ecx, esi);
- __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
- kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
- __ mov(context_register(),
- Operand(ebp, StandardFrameConstants::kContextOffset));
- __ pop(eax); // result
- EmitReturnSequence();
- __ bind(&l_resume); // received in eax
- ExitTryBlock(handler_index);
-
- // receiver = iter; f = iter.next; arg = received;
- __ bind(&l_next);
-
- __ mov(load_name, isolate()->factory()->next_string());
- __ push(load_name); // "next"
- __ push(Operand(esp, 2 * kPointerSize)); // iter
- __ push(eax); // received
-
- // result = receiver[f](arg);
- __ bind(&l_call);
- __ mov(load_receiver, Operand(esp, kPointerSize));
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
- CallIC(ic, TypeFeedbackId::None());
- __ mov(edi, eax);
- __ mov(Operand(esp, 2 * kPointerSize), edi);
- SetCallPosition(expr);
- __ Set(eax, 1);
- __ Call(
- isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The function is still on the stack; drop it.
-
- // if (!result.done) goto l_try;
- __ bind(&l_loop);
- __ push(eax); // save result
- __ Move(load_receiver, eax); // result
- __ mov(load_name,
- isolate()->factory()->done_string()); // "done"
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->DoneFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF); // result.done in eax
- Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(bool_ic);
- __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
- __ j(not_equal, &l_try);
-
- // result.value
- __ pop(load_receiver); // result
- __ mov(load_name,
- isolate()->factory()->value_string()); // "value"
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->ValueFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF); // result.value in eax
- context()->DropAndPlug(2, eax); // drop iter and g
- break;
- }
+ case Yield::kDelegating:
+ UNREACHABLE();
}
}
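
The resume-mode handshake introduced here pairs with EmitGeneratorResume below: the resumer pushes the mode as a Smi, and the continuation pops it to decide whether to box the input and return. A simplified standalone model (only the JSGeneratorObject::RETURN name comes from the hunk; the rest is assumed):

#include <cassert>

enum ResumeMode { NEXT, RETURN, THROW };

struct Outcome { bool returned; int value; };

Outcome Continue(ResumeMode mode, int input) {
  if (mode == RETURN) {
    // push input, EmitCreateIteratorResult(true), EmitUnwindAndReturn()
    return {true, input};
  }
  return {false, input};  // fall through to the normal resume path
}

int main() {
  assert(Continue(NEXT, 42).returned == false);
  assert(Continue(RETURN, 42).returned == true);
  return 0;
}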
@@ -2009,7 +1871,13 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// ebx will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- __ pop(ebx);
+ PopOperand(ebx);
+
+ // Store input value into generator object.
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOffset), result_register());
+ __ mov(ecx, result_register());
+ __ RecordWriteField(ebx, JSGeneratorObject::kInputOffset, ecx, edx,
+ kDontSaveFPRegs);
// Load suspended function and context.
__ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
@@ -2059,6 +1927,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ add(edx, ecx);
__ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
__ jmp(edx);
__ bind(&slow_resume);
}
@@ -2072,6 +1941,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(ecx);
__ jmp(&push_operand_holes);
__ bind(&call_resume);
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
__ push(ebx);
__ push(result_register());
__ Push(Smi::FromInt(resume_mode));
@@ -2083,6 +1953,21 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
context()->Plug(result_register());
}
+void FullCodeGenerator::PushOperand(MemOperand operand) {
+ OperandStackDepthIncrement(1);
+ __ Push(operand);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+ if (FLAG_debug_code) {
+ int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+ operand_stack_depth_ * kPointerSize;
+ __ mov(eax, ebp);
+ __ sub(eax, esp);
+ __ cmp(eax, Immediate(expected_diff));
+ __ Assert(equal, kUnexpectedStackDepth);
+ }
+}
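
The assert compares the actual frame extent against what the counter predicts. A worked example of the arithmetic under assumed ia32 values (kPointerSize = 4; kFixedFrameSizeFromFp taken to be two words, for the context and function slots below the frame pointer):

#include <cassert>

const int kPointerSize = 4;                          // ia32, assumed
const int kFixedFrameSizeFromFp = 2 * kPointerSize;  // assumed

int ExpectedDiff(int operand_stack_depth) {
  return kFixedFrameSizeFromFp + operand_stack_depth * kPointerSize;
}

int main() {
  // With three values tracked, ebp - esp must be 8 + 3 * 4 = 20 bytes;
  // anything else trips kUnexpectedStackDepth.
  assert(ExpectedDiff(3) == 20);
  return 0;
}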
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
@@ -2118,37 +2003,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
__ mov(LoadDescriptor::SlotRegister(),
Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object.
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- DCHECK(prop->IsSuperAccess());
-
- __ push(Immediate(key->value()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object, key.
- SetExpressionPosition(prop);
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -2159,7 +2014,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// Do combined smi check of the operands. Left operand is on the
// stack. Right operand is in eax.
Label smi_case, done, stub_call;
- __ pop(edx);
+ PopOperand(edx);
__ mov(ecx, eax);
__ or_(eax, edx);
JumpPatchSite patch_site(masm_);
@@ -2167,8 +2022,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2248,24 +2102,14 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- // Constructor is in eax.
- DCHECK(lit != NULL);
- __ push(eax);
-
- // No access check is needed here since the constructor is created by the
- // class literal.
- Register scratch = ebx;
- __ mov(scratch, FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset));
- __ Push(scratch);
-
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
if (property->is_static()) {
- __ push(Operand(esp, kPointerSize)); // constructor
+ PushOperand(Operand(esp, kPointerSize)); // constructor
} else {
- __ push(Operand(esp, 0)); // prototype
+ PushOperand(Operand(esp, 0)); // prototype
}
EmitPropertyKey(property, lit->GetIdForProperty(i));
@@ -2289,31 +2133,28 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
case ObjectLiteral::Property::GETTER:
- __ push(Immediate(Smi::FromInt(DONT_ENUM)));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ push(Immediate(Smi::FromInt(DONT_ENUM)));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
-
- // Set both the prototype and constructor to have fast properties, and also
- // freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
- __ pop(edx);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ PopOperand(edx);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2336,10 +2177,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_PROPERTY: {
- __ push(eax); // Preserve value.
+ PushOperand(eax); // Preserve value.
VisitForAccumulatorValue(prop->obj());
__ Move(StoreDescriptor::ReceiverRegister(), eax);
- __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
EmitLoadStoreICSlot(slot);
@@ -2347,7 +2188,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_SUPER_PROPERTY: {
- __ push(eax);
+ PushOperand(eax);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2364,7 +2205,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_SUPER_PROPERTY: {
- __ push(eax);
+ PushOperand(eax);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2384,12 +2225,12 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_PROPERTY: {
- __ push(eax); // Preserve value.
+ PushOperand(eax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ Move(StoreDescriptor::NameRegister(), eax);
- __ pop(StoreDescriptor::ReceiverRegister()); // Receiver.
- __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ PopOperand(StoreDescriptor::ReceiverRegister()); // Receiver.
+ PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2469,17 +2310,17 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
(var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreLookupSlot);
+ __ Push(Immediate(var->name()));
+ __ Push(eax);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreLookupSlot_Strict
+ : Runtime::kStoreLookupSlot_Sloppy);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, ecx);
- if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ mov(edx, location);
__ cmp(edx, isolate()->factory()->the_hole_value());
@@ -2526,7 +2367,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop->key()->IsLiteral());
__ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2542,10 +2383,11 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
- __ push(Immediate(key->value()));
- __ push(eax);
- __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy));
+ PushOperand(key->value());
+ PushOperand(eax);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy);
}
@@ -2554,10 +2396,10 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// eax : value
// stack : receiver ('this'), home_object, key
- __ push(eax);
- __ CallRuntime((is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy));
+ PushOperand(eax);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy);
}
@@ -2567,8 +2409,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// esp[0] : key
// esp[kPointerSize] : receiver
- __ pop(StoreDescriptor::NameRegister()); // Key.
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::NameRegister()); // Key.
+ PopOperand(StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(eax));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2600,7 +2442,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ pop(LoadDescriptor::ReceiverRegister()); // Object.
+ PopOperand(LoadDescriptor::ReceiverRegister()); // Object.
__ Move(LoadDescriptor::NameRegister(), result_register()); // Key.
EmitKeyedPropertyLoad(expr);
} else {
@@ -2636,7 +2478,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
- __ push(Immediate(isolate()->factory()->undefined_value()));
+ PushOperand(isolate()->factory()->undefined_value());
convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
@@ -2646,7 +2488,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ push(Operand(esp, 0));
+ PushOperand(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
@@ -2668,19 +2510,17 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
- __ push(eax);
- __ push(eax);
- __ push(Operand(esp, kPointerSize * 2));
- __ push(Immediate(key->value()));
- __ push(Immediate(Smi::FromInt(language_mode())));
+ PushOperand(eax);
+ PushOperand(eax);
+ PushOperand(Operand(esp, kPointerSize * 2));
+ PushOperand(key->value());
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2708,7 +2548,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ push(Operand(esp, 0));
+ PushOperand(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -2726,19 +2566,17 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
- __ push(eax);
- __ push(eax);
- __ push(Operand(esp, kPointerSize * 2));
+ PushOperand(eax);
+ PushOperand(eax);
+ PushOperand(Operand(esp, kPointerSize * 2));
VisitForStackValue(prop->key());
- __ push(Immediate(Smi::FromInt(language_mode())));
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2760,12 +2598,23 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
SetCallPosition(expr);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+ if (expr->tail_call_mode() == TailCallMode::kAllow) {
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceTailCall);
+ }
+ // Update profiling counters before the tail call since we will
+ // not return to this function.
+ EmitProfilingCounterHandlingForReturnSequence(true);
+ }
+ Handle<Code> ic =
+ CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+ .code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
CallIC(ic);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -2811,11 +2660,10 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in eax) and
// the object holding it (returned in edx).
- __ push(context_register());
- __ push(Immediate(callee->name()));
- __ CallRuntime(Runtime::kLoadLookupSlot);
- __ push(eax); // Function.
- __ push(edx); // Receiver.
+ __ Push(callee->name());
+ __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+ PushOperand(eax); // Function.
+ PushOperand(edx); // Receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the function
@@ -2834,7 +2682,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
} else {
VisitForStackValue(callee);
// refEnv.WithBaseObject()
- __ push(Immediate(isolate()->factory()->undefined_value()));
+ PushOperand(isolate()->factory()->undefined_value());
}
}
@@ -2866,7 +2714,10 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
SetCallPosition(expr);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ Set(eax, arg_count);
- __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ expr->tail_call_mode()),
+ RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2907,6 +2758,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2925,7 +2777,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ AssertFunction(result_register());
__ mov(result_register(),
FieldOperand(result_register(), HeapObject::kMapOffset));
- __ Push(FieldOperand(result_register(), Map::kPrototypeOffset));
+ PushOperand(FieldOperand(result_register(), Map::kPrototypeOffset));
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -2947,6 +2799,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ mov(edi, Operand(esp, arg_count * kPointerSize));
__ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -2999,77 +2852,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, SIMD128_VALUE_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_FUNCTION_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(above_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
- __ CheckMap(eax, map, if_false, DO_SMI_CHECK);
- // Check if the exponent half is 0x80000000. Comparing against 1 and
- // checking for overflow is the shortest possible encoding.
- __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x1));
- __ j(no_overflow, if_false);
- __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x0));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3158,68 +2940,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(ebx);
- __ cmp(eax, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in eax.
- VisitForAccumulatorValue(args->at(0));
- __ mov(edx, eax);
- __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label exit;
- // Get the number of formal parameters.
- __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- __ AssertSmi(eax);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3288,28 +3008,6 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = nullptr;
- Label* if_false = nullptr;
- Label* fall_through = nullptr;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3322,8 +3020,8 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ pop(value);
- __ pop(index);
+ PopOperand(value);
+ PopOperand(index);
if (FLAG_debug_code) {
__ test(value, Immediate(kSmiTagMask));
@@ -3357,8 +3055,8 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ pop(value);
- __ pop(index);
+ PopOperand(value);
+ PopOperand(index);
if (FLAG_debug_code) {
__ test(value, Immediate(kSmiTagMask));
@@ -3379,35 +3077,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(ebx); // eax = value. ebx = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(ebx, &done, Label::kNear);
-
- // If the object is not a value type, return the value.
- __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
- __ j(not_equal, &done, Label::kNear);
-
- // Store the value.
- __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
-
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ mov(edx, eax);
- __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3425,27 +3094,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into eax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- // Convert the object to a name.
- Label convert, done_convert;
- __ JumpIfSmi(eax, &convert, Label::kNear);
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ CmpObjectType(eax, LAST_NAME_TYPE, ecx);
- __ j(below_equal, &done_convert, Label::kNear);
- __ bind(&convert);
- __ Push(eax);
- __ CallRuntime(Runtime::kToName);
- __ bind(&done_convert);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3476,7 +3124,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Register index = eax;
Register result = edx;
- __ pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3523,7 +3171,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
Register scratch = edx;
Register result = eax;
- __ pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3573,6 +3221,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
// Call the target.
__ mov(eax, Immediate(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(argc + 1);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -3629,275 +3278,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- // We will leave the separator on the stack until the end of the function.
- VisitForStackValue(args->at(1));
- // Load this to eax (= array)
- VisitForAccumulatorValue(args->at(0));
- // All aliases of the same register have disjoint lifetimes.
- Register array = eax;
- Register elements = no_reg; // Will be eax.
-
- Register index = edx;
-
- Register string_length = ecx;
-
- Register string = esi;
-
- Register scratch = ebx;
-
- Register array_length = edi;
- Register result_pos = no_reg; // Will be edi.
-
- // Separator operand is already pushed.
- Operand separator_operand = Operand(esp, 2 * kPointerSize);
- Operand result_operand = Operand(esp, 1 * kPointerSize);
- Operand array_length_operand = Operand(esp, 0);
- __ sub(esp, Immediate(2 * kPointerSize));
- __ cld();
- // Check that the array is a JSArray
- __ JumpIfSmi(array, &bailout);
- __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch, &bailout);
-
- // If the array has length zero, return the empty string.
- __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length);
- __ j(not_zero, &non_trivial_array);
- __ mov(result_operand, isolate()->factory()->empty_string());
- __ jmp(&done);
-
- // Save the array length.
- __ bind(&non_trivial_array);
- __ mov(array_length_operand, array_length);
-
- // Save the FixedArray containing array's elements.
- // End of array's live range.
- elements = array;
- __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
- array = no_reg;
-
-
- // Check that all array elements are sequential one-byte strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ Move(index, Immediate(0));
- __ Move(string_length, Immediate(0));
- // Loop condition: while (index < length).
- // Live loop registers: index, array_length, string,
- // scratch, string_length, elements.
- if (generate_debug_code_) {
- __ cmp(index, array_length);
- __ Assert(less, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
- }
- __ bind(&loop);
- __ mov(string, FieldOperand(elements,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(string, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
- __ add(string_length,
- FieldOperand(string, SeqOneByteString::kLengthOffset));
- __ j(overflow, &bailout);
- __ add(index, Immediate(1));
- __ cmp(index, array_length);
- __ j(less, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ cmp(array_length, 1);
- __ j(not_equal, &not_size_one_array);
- __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
- __ mov(result_operand, scratch);
- __ jmp(&done);
-
- __ bind(&not_size_one_array);
-
- // End of array_length live range.
- result_pos = array_length;
- array_length = no_reg;
-
- // Live registers:
- // string_length: Sum of string lengths, as a smi.
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat one-byte string.
- __ mov(string, separator_operand);
- __ JumpIfSmi(string, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
-
- // Add (separator length times array_length) - separator length
- // to string_length.
- __ mov(scratch, separator_operand);
- __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset));
- __ sub(string_length, scratch); // May be negative, temporarily.
- __ imul(scratch, array_length_operand);
- __ j(overflow, &bailout);
- __ add(string_length, scratch);
- __ j(overflow, &bailout);
-
- __ shr(string_length, 1);
-
- // Bailout for large object allocations.
- __ cmp(string_length, Page::kMaxRegularHeapObjectSize);
- __ j(greater, &bailout);
-
- // Live registers and stack values:
- // string_length
- // elements
- __ AllocateOneByteString(result_pos, string_length, scratch, index, string,
- &bailout);
- __ mov(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
-
-
- __ mov(string, separator_operand);
- __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ j(equal, &one_char_separator);
- __ j(greater, &long_separator);
-
-
- // Empty separator case
- __ mov(index, Immediate(0));
- __ jmp(&loop_1_condition);
- // Loop condition: while (index < length).
- __ bind(&loop_1);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // elements: the FixedArray of strings we are joining.
-
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
- __ bind(&loop_1_condition);
- __ cmp(index, array_length_operand);
- __ j(less, &loop_1); // End while (index < length).
- __ jmp(&done);
-
-
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Replace separator with its one-byte character value.
- __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ mov_b(separator_operand, scratch);
-
- __ Move(index, Immediate(0));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_2_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_2);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator character to the result.
- __ mov_b(scratch, separator_operand);
- __ mov_b(Operand(result_pos, 0), scratch);
- __ inc(result_pos);
-
- __ bind(&loop_2_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_2); // End while (index < length).
- __ jmp(&done);
-
-
- // Long separator case (separator is more than one character).
- __ bind(&long_separator);
-
- __ Move(index, Immediate(0));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_3_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_3);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator to the result.
- __ mov(string, separator_operand);
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
-
- __ bind(&loop_3_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_3); // End while (index < length).
- __ jmp(&done);
-
-
- __ bind(&bailout);
- __ mov(result_operand, isolate()->factory()->undefined_value());
- __ bind(&done);
- __ mov(eax, result_operand);
- // Drop temp values from the stack, and restore context register.
- __ add(esp, Immediate(3 * kPointerSize));
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -3930,7 +3310,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done, Label::kNear);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject);
+ CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(eax);
@@ -3939,7 +3319,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push undefined as receiver.
- __ push(Immediate(isolate()->factory()->undefined_value()));
+ PushOperand(isolate()->factory()->undefined_value());
__ LoadGlobalFunction(expr->context_index(), eax);
}
@@ -3954,6 +3334,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Set(eax, arg_count);
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
}
@@ -3966,7 +3347,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
EmitLoadJSRuntimeFunction(expr);
// Push the target function under the receiver.
- __ push(Operand(esp, 0));
+ PushOperand(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
// Push the arguments ("left-to-right").
@@ -4001,6 +3382,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the C runtime function.
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
+ OperandStackDepthDecrement(arg_count);
context()->Plug(eax);
}
}
@@ -4018,9 +3400,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4042,8 +3424,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- __ push(context_register());
- __ push(Immediate(var->name()));
+ __ Push(var->name());
__ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(eax);
}
@@ -4088,6 +3469,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_false,
&materialize_true,
&materialize_true);
+ if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
if (context()->IsAccumulatorValue()) {
@@ -4143,7 +3525,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
- __ push(Immediate(Smi::FromInt(0)));
+ PushOperand(Smi::FromInt(0));
}
switch (assign_type) {
case NAMED_PROPERTY: {
@@ -4158,9 +3540,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- __ push(result_register());
- __ push(MemOperand(esp, kPointerSize));
- __ push(result_register());
+ PushOperand(result_register());
+ PushOperand(MemOperand(esp, kPointerSize));
+ PushOperand(result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
@@ -4170,10 +3552,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
- __ push(result_register());
- __ push(MemOperand(esp, 2 * kPointerSize));
- __ push(MemOperand(esp, 2 * kPointerSize));
- __ push(result_register());
+ PushOperand(result_register());
+ PushOperand(MemOperand(esp, 2 * kPointerSize));
+ PushOperand(MemOperand(esp, 2 * kPointerSize));
+ PushOperand(result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
@@ -4263,7 +3645,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(eax);
+ PushOperand(eax);
break;
case NAMED_PROPERTY:
__ mov(Operand(esp, kPointerSize), eax);
@@ -4287,8 +3669,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
- strength(language_mode())).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), expr->binary_op()).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4323,7 +3705,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4359,8 +3741,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ pop(StoreDescriptor::NameRegister());
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::NameRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
@@ -4415,8 +3797,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ cmp(eax, isolate()->factory()->false_value());
Split(equal, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->undefined_string())) {
- __ cmp(eax, isolate()->factory()->undefined_value());
- __ j(equal, if_true);
+ __ cmp(eax, isolate()->factory()->null_value());
+ __ j(equal, if_false);
__ JumpIfSmi(eax, if_false);
// Check for undetectable objects => true.
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -4481,7 +3863,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty);
+ CallRuntimeWithOperands(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
@@ -4489,7 +3871,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
- __ Pop(edx);
+ PopOperand(edx);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4501,7 +3883,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = CompareIC::ComputeCondition(op);
- __ pop(edx);
+ PopOperand(edx);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
@@ -4515,8 +3897,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&slow_case);
}
- Handle<Code> ic = CodeFactory::CompareIC(
- isolate(), op, strength(language_mode())).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -4596,15 +3977,15 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// as their closure, not the anonymous closure containing the global
// code.
__ mov(eax, NativeContextOperand());
- __ push(ContextOperand(eax, Context::CLOSURE_INDEX));
+ PushOperand(ContextOperand(eax, Context::CLOSURE_INDEX));
} else if (closure_scope->is_eval_scope()) {
// Contexts nested inside eval code have the same closure as the context
// calling eval, not the anonymous closure containing the eval code.
// Fetch it from the context.
- __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
+ PushOperand(ContextOperand(esi, Context::CLOSURE_INDEX));
} else {
DCHECK(closure_scope->is_function_scope());
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ PushOperand(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
}
@@ -4613,23 +3994,11 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// Non-local control flow support.
void FullCodeGenerator::EnterFinallyBlock() {
- // Cook return address on top of stack (smi encoded Code* delta)
- DCHECK(!result_register().is(edx));
- __ pop(edx);
- __ sub(edx, Immediate(masm_->CodeObject()));
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiTag(edx);
- __ push(edx);
-
- // Store result register while executing finally block.
- __ push(result_register());
-
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(edx, Operand::StaticVariable(pending_message_obj));
- __ push(edx);
+ PushOperand(edx);
ClearPendingMessage();
}
@@ -4638,19 +4007,10 @@ void FullCodeGenerator::EnterFinallyBlock() {
void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(edx));
// Restore pending message from stack.
- __ pop(edx);
+ PopOperand(edx);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(Operand::StaticVariable(pending_message_obj), edx);
-
- // Restore result register from stack.
- __ pop(result_register());
-
- // Uncook return address.
- __ pop(edx);
- __ SmiUntag(edx);
- __ add(edx, Immediate(masm_->CodeObject()));
- __ jmp(edx);
}
@@ -4669,6 +4029,32 @@ void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
Immediate(SmiFromSlot(slot)));
}
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+ DCHECK(!result_register().is(edx));
+ __ Pop(result_register()); // Restore the accumulator.
+ __ Pop(edx); // Get the token.
+ for (DeferredCommand cmd : commands_) {
+ Label skip;
+ __ cmp(edx, Immediate(Smi::FromInt(cmd.token)));
+ __ j(not_equal, &skip);
+ switch (cmd.command) {
+ case kReturn:
+ codegen_->EmitUnwindAndReturn();
+ break;
+ case kThrow:
+ __ Push(result_register());
+ __ CallRuntime(Runtime::kReThrow);
+ break;
+ case kContinue:
+ codegen_->EmitContinue(cmd.target);
+ break;
+ case kBreak:
+ codegen_->EmitBreak(cmd.target);
+ break;
+ }
+ __ bind(&skip);
+ }
+}
#undef __
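
The recurring edit in this file (and in the per-architecture files that follow) replaces raw __ push/__ pop emissions with the PushOperand/PopOperand wrappers and inserts OperandStackDepthDecrement(arg_count + 1) after calls, so that full-codegen can track its operand stack depth while emitting code. Below is a minimal standalone sketch of that bookkeeping idea; the OperandStackTracker class and its method names are invented for illustration and are not the actual FullCodeGenerator interface.

// Illustrative sketch only: a compile-time operand stack depth tracker,
// analogous in spirit to PushOperand/PopOperand in this patch.
#include <cassert>
#include <cstdio>
#include <vector>

class OperandStackTracker {
 public:
  // Counterpart of PushOperand: emitting a push also records one slot.
  void Push(int value) {
    stack_.push_back(value);
    ++depth_;
  }

  // Counterpart of PopOperand: emitting a pop releases one slot.
  int Pop() {
    assert(depth_ > 0 && "operand stack underflow");
    int value = stack_.back();
    stack_.pop_back();
    --depth_;
    return value;
  }

  // Counterpart of OperandStackDepthDecrement(arg_count + 1): a call
  // consumes its receiver and arguments without explicit pops.
  void Decrement(int count) {
    assert(depth_ >= count && "call consumed more slots than were pushed");
    stack_.resize(stack_.size() - count);
    depth_ -= count;
  }

  int depth() const { return depth_; }

 private:
  std::vector<int> stack_;
  int depth_ = 0;
};

int main() {
  OperandStackTracker operands;
  operands.Push(0);       // receiver
  operands.Push(42);      // one argument
  operands.Decrement(2);  // the call popped arg_count + 1 slots
  assert(operands.depth() == 0);  // depths must agree at merge points
  std::printf("balanced, depth = %d\n", operands.depth());
  return 0;
}

Tracking the depth this way turns a mismatched push/pop pair into an assertion failure at code-generation time rather than silent stack corruption at run time, which appears to be the point of threading OperandStackDepthIncrement/Decrement through this patch.
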
diff --git a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
index 07e9fdfc94..c8ce204590 100644
--- a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
+++ b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
@@ -27,8 +27,7 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
-
+#define __ ACCESS_MASM(masm())
// A patch site is a location in the code which it is possible to patch. This
// class has a number of methods to emit the code which is patchable and the
@@ -86,6 +85,7 @@ class JumpPatchSite BASE_EMBEDDED {
}
private:
+ MacroAssembler* masm() { return masm_; }
MacroAssembler* masm_;
Label patch_site_;
#ifdef DEBUG
@@ -118,13 +118,6 @@ void FullCodeGenerator::Generate() {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop-at");
- }
-#endif
-
if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ lw(a2, MemOperand(sp, receiver_offset));
@@ -146,6 +139,7 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
+ OperandStackDepthIncrement(locals_count);
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
@@ -274,22 +268,12 @@ void FullCodeGenerator::Generate() {
Variable* rest_param = scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ li(RestParamAccessDescriptor::parameter_count(),
- Operand(Smi::FromInt(num_parameters)));
- __ Addu(RestParamAccessDescriptor::parameter_pointer(), fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ li(RestParamAccessDescriptor::rest_parameter_index(),
- Operand(Smi::FromInt(rest_index)));
- DCHECK(a1.is(RestParamAccessDescriptor::rest_parameter_index()));
- function_in_register_a1 = false;
-
- RestParamAccessStub stub(isolate());
+ if (!function_in_register_a1) {
+ __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ FastNewRestParameterStub stub(isolate());
__ CallStub(&stub);
-
+ function_in_register_a1 = false;
SetVar(rest_param, v0, a1, a2);
}
@@ -297,28 +281,20 @@ void FullCodeGenerator::Generate() {
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
- DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
if (!function_in_register_a1) {
// Load this again, if it's used by the local context below.
__ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ li(ArgumentsAccessNewDescriptor::parameter_count(),
- Operand(Smi::FromInt(num_parameters)));
- __ Addu(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
-
- // Arguments to ArgumentsAccessStub:
- // function, parameter pointer, parameter count.
- // The stub will rewrite parameter pointer and parameter count if the
- // previous stack frame was an arguments adapter frame.
- bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
- ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
- is_unmapped, literal()->has_duplicate_parameters());
- ArgumentsAccessStub stub(isolate(), type);
- __ CallStub(&stub);
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
+ FastNewStrictArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ } else if (literal()->has_duplicate_parameters()) {
+ __ Push(a1);
+ __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+ } else {
+ FastNewSloppyArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ }
SetVar(arguments, v0, a1, a2);
}
@@ -431,6 +407,30 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
}
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+ bool is_tail_call) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ Branch(&ok, ge, a3, Operand(zero_reg));
+ // Don't need to save result register if we are going to do a tail call.
+ if (!is_tail_call) {
+ __ push(v0);
+ }
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+ if (!is_tail_call) {
+ __ pop(v0);
+ }
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+}
void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
@@ -444,24 +444,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(v0);
__ CallRuntime(Runtime::kTraceExit);
}
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ Branch(&ok, ge, a3, Operand(zero_reg));
- __ push(v0);
- __ Call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- __ pop(v0);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ EmitProfilingCounterHandlingForReturnSequence(false);
// Make sure that the constant pool is not emitted inside of the return
// sequence.
@@ -483,7 +466,7 @@ void FullCodeGenerator::EmitReturnSequence() {
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
- __ push(result_register());
+ codegen()->PushOperand(result_register());
}
@@ -500,7 +483,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Heap::RootListIndex index) const {
__ LoadRoot(result_register(), index);
- __ push(result_register());
+ codegen()->PushOperand(result_register());
}
@@ -535,7 +518,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
// Immediates cannot be pushed directly.
__ li(result_register(), Operand(lit));
- __ push(result_register());
+ codegen()->PushOperand(result_register());
}
@@ -544,7 +527,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ Branch(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -569,41 +552,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
}
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
DCHECK(count > 0);
- if (count > 1) __ Drop(count - 1);
+ if (count > 1) codegen()->DropOperands(count - 1);
__ sw(reg, MemOperand(sp, 0));
}
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
DCHECK(materialize_true == materialize_false);
@@ -627,6 +583,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
+ codegen()->OperandStackDepthIncrement(1);
Label done;
__ bind(materialize_true);
__ LoadRoot(at, Heap::kTrueValueRootIndex);
@@ -658,7 +615,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
__ LoadRoot(at, value_root_index);
- __ push(at);
+ codegen()->PushOperand(at);
}
@@ -783,7 +740,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
+ if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
__ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ LoadRoot(t0, Heap::kWithContextMapRootIndex);
@@ -905,11 +862,11 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ li(a2, Operand(variable->name()));
- __ Push(a2);
+ PushOperand(a2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -984,8 +941,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetExpressionPosition(clause);
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
- strength(language_mode())).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1006,7 +963,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Discard the test value and jump to the default if present, otherwise to
// the end of the statement.
__ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
+ DropOperands(1); // Switch value is no longer needed.
if (default_clause == NULL) {
__ Branch(nested_statement.break_label());
} else {
@@ -1037,23 +994,23 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. If the object is null or undefined, skip
- // over the loop. See ECMA-262 version 5, section 12.6.4.
+ // Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
- __ mov(a0, result_register()); // Result as param to InvokeBuiltin below.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&exit, eq, a0, Operand(at));
- Register null_value = t1;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ Branch(&exit, eq, a0, Operand(null_value));
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
- __ mov(a0, v0);
- // Convert the object to a JS object.
+ __ mov(a0, result_register());
+ OperandStackDepthIncrement(ForIn::kElementCount);
+
+ // If the object is null or undefined, skip over the loop; otherwise convert
+ // it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
Label convert, done_convert;
__ JumpIfSmi(a0, &convert);
__ GetObjectType(a0, a1, a1);
- __ Branch(&done_convert, ge, a1, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ Branch(USE_DELAY_SLOT, &done_convert, ge, a1,
+ Operand(FIRST_JS_RECEIVER_TYPE));
+ __ LoadRoot(at, Heap::kNullValueRootIndex); // In delay slot.
+ __ Branch(USE_DELAY_SLOT, &exit, eq, a0, Operand(at));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex); // In delay slot.
+ __ Branch(&exit, eq, a0, Operand(at));
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
@@ -1062,16 +1019,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(a0);
- // Check for proxies.
- Label call_runtime;
- __ GetObjectType(a0, a1, a1);
- __ Branch(&call_runtime, eq, a1, Operand(JS_PROXY_TYPE));
-
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- __ CheckEnumCache(null_value, &call_runtime);
+ // Note: Proxies never have an enum cache, so will always take the
+ // slow path.
+ Label call_runtime;
+ __ CheckEnumCache(&call_runtime);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
@@ -1082,7 +1037,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(a0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast);
+ __ CallRuntime(Runtime::kForInEnumerate);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1117,16 +1072,18 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register v0. Iterate through that.
__ bind(&fixed_array);
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(a1);
__ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- int vector_index = SmiFromSlot(slot)->value();
__ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(vector_index)));
__ li(a1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(a1, v0); // Smi and array
__ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ Push(a1); // Fixed array length (as smi).
+ PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ li(a0, Operand(Smi::FromInt(0)));
- __ Push(a1, a0); // Fixed array length (as smi) and initial index.
+ __ Push(a0); // Initial index.
// Generate code for doing the condition check.
__ bind(&loop);
@@ -1140,8 +1097,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the current entry of the array into register a3.
__ lw(a2, MemOperand(sp, 2 * kPointerSize));
__ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ addu(t0, a2, t0); // Array base + scaled (smi) index.
+ __ Lsa(t0, a2, a0, kPointerSizeLog2 - kSmiTagSize);
__ lw(a3, MemOperand(t0)); // Current entry.
// Get the expected map from the stack or a smi in the
@@ -1155,6 +1111,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Branch(&update_each, eq, t0, Operand(a2));
+ // We might get here from TurboFan or Crankshaft when something in the
+ // for-in loop body deopts and we only now notice in fullcodegen that we
+ // can no longer use the enum cache, i.e. we left fast mode. So better
+ // record this information here, in case we later OSR back into this loop
+ // or reoptimize the whole function w/o rerunning the loop with the
+ // slow-mode object in fullcodegen (which would result in a deopt loop).
+ __ EmitLoadTypeFeedbackVector(a0);
+ __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ __ sw(a2, FieldMemOperand(a0, FixedArray::OffsetOfElementAt(vector_index)));
+
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
@@ -1192,7 +1158,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ Drop(5);
+ DropOperands(5);
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1439,12 +1405,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// by eval-introduced variables.
EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ bind(&slow);
- __ li(a1, Operand(var->name()));
- __ Push(cp, a1); // Context and name.
+ __ Push(var->name());
Runtime::FunctionId function_id =
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotNoReferenceError;
+ : Runtime::kLoadLookupSlotInsideTypeof;
__ CallRuntime(function_id);
__ bind(&done);
context()->Plug(v0);
@@ -1469,7 +1434,7 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
__ LoadRoot(a1, Heap::kNullValueRootIndex);
- __ push(a1);
+ PushOperand(a1);
} else {
VisitForStackValue(expression);
if (NeedsHomeObject(expression)) {
@@ -1513,7 +1478,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
- __ push(v0); // Save result on stack.
+ PushOperand(v0); // Save result on stack.
result_saved = true;
}
switch (property->kind()) {
@@ -1546,7 +1511,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
// Duplicate receiver on stack.
__ lw(a0, MemOperand(sp));
- __ push(a0);
+ PushOperand(a0);
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
@@ -1554,19 +1519,19 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
__ li(a0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes.
- __ push(a0);
- __ CallRuntime(Runtime::kSetProperty);
+ PushOperand(a0);
+ CallRuntimeWithOperands(Runtime::kSetProperty);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
case ObjectLiteral::Property::PROTOTYPE:
// Duplicate receiver on stack.
__ lw(a0, MemOperand(sp));
- __ push(a0);
+ PushOperand(a0);
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
break;
@@ -1589,13 +1554,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
it != accessor_table.end();
++it) {
__ lw(a0, MemOperand(sp)); // Duplicate receiver.
- __ push(a0);
+ PushOperand(a0);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
__ li(a0, Operand(Smi::FromInt(NONE)));
- __ push(a0);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+ PushOperand(a0);
+ CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1612,18 +1577,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Expression* value = property->value();
if (!result_saved) {
- __ push(v0); // Save result on the stack
+ PushOperand(v0); // Save result on the stack
result_saved = true;
}
__ lw(a0, MemOperand(sp)); // Duplicate receiver.
- __ push(a0);
+ PushOperand(a0);
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
} else {
@@ -1638,11 +1603,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
case ObjectLiteral::Property::COMPUTED:
if (property->emit_store()) {
- __ li(a0, Operand(Smi::FromInt(NONE)));
- __ push(a0);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
@@ -1651,15 +1616,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
- __ li(a0, Operand(Smi::FromInt(NONE)));
- __ push(a0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ li(a0, Operand(Smi::FromInt(NONE)));
- __ push(a0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1717,14 +1680,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
int array_index = 0;
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- if (subexpr->IsSpread()) break;
+ DCHECK(!subexpr->IsSpread());
// If the subexpression is a literal or a simple materialized literal, it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(v0); // array literal
+ PushOperand(v0); // array literal
result_saved = true;
}
@@ -1747,21 +1710,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements get appended to the array. Note that the
// number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Pop(v0);
+ PopOperand(v0);
result_saved = false;
}
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- __ Push(v0);
- if (subexpr->IsSpread()) {
- VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement);
- }
+ PushOperand(v0);
+ DCHECK(!subexpr->IsSpread());
+ VisitForStackValue(subexpr);
+ CallRuntimeWithOperands(Runtime::kAppendElement);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1802,11 +1760,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
property->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
property->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
const Register scratch = a1;
__ lw(scratch, MemOperand(sp, kPointerSize));
- __ Push(scratch, result_register());
+ PushOperands(scratch, result_register());
}
break;
case KEYED_SUPER_PROPERTY: {
@@ -1817,11 +1775,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
property->obj()->AsSuperPropertyReference()->home_object());
__ Move(scratch, result_register());
VisitForAccumulatorValue(property->key());
- __ Push(scratch, result_register());
+ PushOperands(scratch, result_register());
if (expr->is_compound()) {
const Register scratch1 = t0;
__ lw(scratch1, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch1, scratch, result_register());
+ PushOperands(scratch1, scratch, result_register());
}
break;
}
@@ -1869,7 +1827,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- __ push(v0); // Left operand goes on the stack.
+ PushOperand(v0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
AccumulatorValueContext context(this);
@@ -1935,8 +1893,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ jmp(&suspend);
__ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
__ RecordGeneratorContinuation();
- __ jmp(&resume);
+ __ pop(a1);
+ __ Branch(&resume, ne, a1,
+ Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
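+ // EmitGeneratorResume (below) pushes the resume mode before jumping back
+ // to this continuation, which is why a plain jump to &resume no longer
+ // suffices: a RETURN resume must box the input and unwind instead.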
__ bind(&suspend);
VisitForAccumulatorValue(expr->generator_object());
@@ -1953,7 +1919,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
- __ pop(result_register());
+ PopOperand(result_register());
EmitReturnSequence();
__ bind(&resume);
@@ -1962,127 +1928,15 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
}
case Yield::kFinal: {
- VisitForAccumulatorValue(expr->generator_object());
- __ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
- __ sw(a1, FieldMemOperand(result_register(),
- JSGeneratorObject::kContinuationOffset));
// Pop value from top-of-stack slot, box result into result register.
+ OperandStackDepthDecrement(1);
EmitCreateIteratorResult(true);
- EmitUnwindBeforeReturn();
- EmitReturnSequence();
+ EmitUnwindAndReturn();
break;
}
- case Yield::kDelegating: {
- VisitForStackValue(expr->generator_object());
-
- // Initial stack layout is as follows:
- // [sp + 1 * kPointerSize] iter
- // [sp + 0 * kPointerSize] g
-
- Label l_catch, l_try, l_suspend, l_continuation, l_resume;
- Label l_next, l_call;
- Register load_receiver = LoadDescriptor::ReceiverRegister();
- Register load_name = LoadDescriptor::NameRegister();
-
- // Initial send value is undefined.
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ Branch(&l_next);
-
- // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
- __ bind(&l_catch);
- __ mov(a0, v0);
- __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
- __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(load_name, a3, a0); // "throw", iter, except
- __ jmp(&l_call);
-
- // try { received = %yield result }
- // Shuffle the received result above a try handler and yield it without
- // re-boxing.
- __ bind(&l_try);
- __ pop(a0); // result
- int handler_index = NewHandlerTableEntry();
- EnterTryBlock(handler_index, &l_catch);
- const int try_block_size = TryCatch::kElementCount * kPointerSize;
- __ push(a0); // result
-
- __ jmp(&l_suspend);
- __ bind(&l_continuation);
- __ RecordGeneratorContinuation();
- __ mov(a0, v0);
- __ jmp(&l_resume);
-
- __ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + try_block_size;
- __ lw(a0, MemOperand(sp, generator_object_depth));
- __ push(a0); // g
- __ Push(Smi::FromInt(handler_index)); // handler-index
- DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
- __ li(a1, Operand(Smi::FromInt(l_continuation.pos())));
- __ sw(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset));
- __ sw(cp, FieldMemOperand(a0, JSGeneratorObject::kContextOffset));
- __ mov(a1, cp);
- __ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
- kRAHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ pop(v0); // result
- EmitReturnSequence();
- __ mov(a0, v0);
- __ bind(&l_resume); // received in a0
- ExitTryBlock(handler_index);
-
- // receiver = iter; f = 'next'; arg = received;
- __ bind(&l_next);
-
- __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next"
- __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(load_name, a3, a0); // "next", iter, received
-
- // result = receiver[f](arg);
- __ bind(&l_call);
- __ lw(load_receiver, MemOperand(sp, kPointerSize));
- __ lw(load_name, MemOperand(sp, 2 * kPointerSize));
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
- CallIC(ic, TypeFeedbackId::None());
- __ mov(a0, v0);
- __ mov(a1, a0);
- __ sw(a1, MemOperand(sp, 2 * kPointerSize));
- SetCallPosition(expr);
- __ li(a0, Operand(1));
- __ Call(
- isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
-
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The function is still on the stack; drop it.
-
- // if (!result.done) goto l_try;
- __ Move(load_receiver, v0);
-
- __ push(load_receiver); // save result
- __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF); // v0=result.done
- __ mov(a0, v0);
- Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(bool_ic);
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(&l_try, ne, result_register(), Operand(at));
-
- // result.value
- __ pop(load_receiver); // result
- __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF); // v0=result.value
- context()->DropAndPlug(2, v0); // drop iter and g
- break;
- }
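+ // Delegating yields (yield*) are presumably desugared away before full
+ // codegen runs in this version, so this case can no longer be reached.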
+ case Yield::kDelegating:
+ UNREACHABLE();
}
}
@@ -2096,7 +1950,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// a1 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- __ pop(a1);
+ PopOperand(a1);
+
+ // Store input value into generator object.
+ __ sw(result_register(),
+ FieldMemOperand(a1, JSGeneratorObject::kInputOffset));
+ __ mov(a2, result_register());
+ __ RecordWriteField(a1, JSGeneratorObject::kInputOffset, a2, a3,
+ kRAHasBeenSaved, kDontSaveFPRegs);
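+ // The store above may write a heap pointer into the generator object, so
+ // the write barrier has to be notified; RecordWriteField clobbers the
+ // value register handed to it, hence the copy into a2 to keep v0 intact.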
// Load suspended function and context.
__ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
@@ -2149,6 +2010,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Addu(a3, a3, Operand(a2));
__ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
__ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
__ Jump(a3);
__ bind(&slow_resume);
}
@@ -2162,6 +2024,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(a2);
__ Branch(&push_operand_holes);
__ bind(&call_resume);
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
DCHECK(!result_register().is(a1));
__ Push(a1, result_register());
__ Push(Smi::FromInt(resume_mode));
@@ -2173,6 +2036,36 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
context()->Plug(result_register());
}
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
+ OperandStackDepthIncrement(2);
+ __ Push(reg1, reg2);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+ Register reg3) {
+ OperandStackDepthIncrement(3);
+ __ Push(reg1, reg2, reg3);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+ Register reg3, Register reg4) {
+ OperandStackDepthIncrement(4);
+ __ Push(reg1, reg2, reg3, reg4);
+}
+
+void FullCodeGenerator::PopOperands(Register reg1, Register reg2) {
+ OperandStackDepthDecrement(2);
+ __ Pop(reg1, reg2);
+}
+
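+ // Debug-only consistency check: the tracked operand_stack_depth_ must
+ // match the real frame layout, i.e.
+ //   fp - sp == kFixedFrameSizeFromFp + operand_stack_depth_ * kPointerSize,
+ // so any unbalanced PushOperand/PopOperand trips kUnexpectedStackDepth.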
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+ if (FLAG_debug_code) {
+ int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+ operand_stack_depth_ * kPointerSize;
+ __ Subu(v0, fp, sp);
+ __ Assert(eq, kUnexpectedStackDepth, v0, Operand(expected_diff));
+ }
+}
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
@@ -2207,38 +2100,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ li(LoadDescriptor::NameRegister(), Operand(key->value()));
__ li(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object.
- SetExpressionPosition(prop);
-
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- DCHECK(prop->IsSuperAccess());
-
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object, key.
- SetExpressionPosition(prop);
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -2254,7 +2116,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// Get the arguments.
Register left = a1;
Register right = a0;
- __ pop(left);
+ PopOperand(left);
__ mov(a0, result_register());
// Perform combined smi check on both operands.
@@ -2264,8 +2126,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2334,27 +2195,17 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- // Constructor is in v0.
- DCHECK(lit != NULL);
- __ push(v0);
-
- // No access check is needed here since the constructor is created by the
- // class literal.
- Register scratch = a1;
- __ lw(scratch,
- FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
- __ push(scratch);
-
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
+ Register scratch = a1;
if (property->is_static()) {
__ lw(scratch, MemOperand(sp, kPointerSize)); // constructor
} else {
__ lw(scratch, MemOperand(sp, 0)); // prototype
}
- __ push(scratch);
+ PushOperand(scratch);
EmitPropertyKey(property, lit->GetIdForProperty(i));
// The static prototype property is read-only. We handle the non-computed
@@ -2377,37 +2228,32 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
case ObjectLiteral::Property::GETTER:
- __ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
- __ push(a0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
- __ push(a0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
UNREACHABLE();
}
}
-
- // Set both the prototype and constructor to have fast properties, and also
- // freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
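// The constructor (at sp + kPointerSize) and its prototype (at sp) are now
// expected to be pushed by the platform-independent caller, so this helper
// only walks the property list; the explicit kFinalizeClassDefinition call
// is gone along with that setup code.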
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ mov(a0, result_register());
- __ pop(a1);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ PopOperand(a1);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2430,10 +2276,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_PROPERTY: {
- __ push(result_register()); // Preserve value.
+ PushOperand(result_register()); // Preserve value.
VisitForAccumulatorValue(prop->obj());
__ mov(StoreDescriptor::ReceiverRegister(), result_register());
- __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
EmitLoadStoreICSlot(slot);
@@ -2441,7 +2287,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_SUPER_PROPERTY: {
- __ Push(v0);
+ PushOperand(v0);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2458,7 +2304,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_SUPER_PROPERTY: {
- __ Push(v0);
+ PushOperand(v0);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2478,12 +2324,12 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_PROPERTY: {
- __ push(result_register()); // Preserve value.
+ PushOperand(result_register()); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ mov(StoreDescriptor::NameRegister(), result_register());
- __ Pop(StoreDescriptor::ValueRegister(),
- StoreDescriptor::ReceiverRegister());
+ PopOperands(StoreDescriptor::ValueRegister(),
+ StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2567,16 +2413,17 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
(var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
- __ li(a1, Operand(var->name()));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(v0, cp, a1, a0); // Value, context, name, language mode.
- __ CallRuntime(Runtime::kStoreLookupSlot);
+ __ Push(var->name());
+ __ Push(v0);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreLookupSlot_Strict
+ : Runtime::kStoreLookupSlot_Sloppy);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, a1);
- if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ lw(a2, location);
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
@@ -2622,7 +2469,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(StoreDescriptor::ValueRegister(), result_register());
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
@@ -2639,10 +2486,11 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
- __ Push(key->value());
- __ Push(v0);
- __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy));
+ PushOperand(key->value());
+ PushOperand(v0);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy);
}
@@ -2652,10 +2500,10 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// stack : receiver ('this'), home_object, key
DCHECK(prop != NULL);
- __ Push(v0);
- __ CallRuntime((is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy));
+ PushOperand(v0);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy);
}
@@ -2667,7 +2515,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// - a1 is the key,
// - a2 is the receiver.
__ mov(StoreDescriptor::ValueRegister(), result_register());
- __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+ PopOperands(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(a0));
Handle<Code> ic =
@@ -2702,7 +2551,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ Move(LoadDescriptor::NameRegister(), v0);
- __ pop(LoadDescriptor::ReceiverRegister());
+ PopOperand(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
@@ -2738,7 +2587,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ push(at);
+ PushOperand(at);
convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
@@ -2749,7 +2598,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
__ lw(at, MemOperand(sp, 0));
- __ push(at);
+ PushOperand(at);
__ sw(v0, MemOperand(sp, kPointerSize));
convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
@@ -2773,9 +2622,8 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
VisitForAccumulatorValue(super_ref->home_object());
__ mov(scratch, v0);
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(scratch, v0, v0, scratch);
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
+ PushOperands(scratch, v0, v0, scratch);
+ PushOperand(key->value());
// Stack here:
// - home_object
@@ -2783,8 +2631,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ sw(v0, MemOperand(sp, kPointerSize));
@@ -2813,7 +2660,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Push the target function under the receiver.
__ lw(at, MemOperand(sp, 0));
- __ push(at);
+ PushOperand(at);
__ sw(v0, MemOperand(sp, kPointerSize));
EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -2833,9 +2680,8 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
VisitForAccumulatorValue(super_ref->home_object());
__ Move(scratch, v0);
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(scratch, v0, v0, scratch);
+ PushOperands(scratch, v0, v0, scratch);
VisitForStackValue(prop->key());
- __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2843,8 +2689,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ sw(v0, MemOperand(sp, kPointerSize));
@@ -2867,12 +2712,23 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Record source position of the IC call.
SetCallPosition(expr);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+ if (expr->tail_call_mode() == TailCallMode::kAllow) {
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceTailCall);
+ }
+ // Update profiling counters before the tail call since we will
+ // not return to this function.
+ EmitProfilingCounterHandlingForReturnSequence(true);
+ }
+ Handle<Code> ic =
+ CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+ .code();
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
CallIC(ic);
+ OperandStackDepthDecrement(arg_count + 1);
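+ // The callee and its arg_count arguments pushed above are consumed by the
+ // call, hence the arg_count + 1 decrement of the tracked depth.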
RecordJSReturnSite(expr);
// Restore context register.
@@ -2918,11 +2774,9 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in v0)
// and the object holding it (returned in v1).
- DCHECK(!context_register().is(a2));
- __ li(a2, Operand(callee->name()));
- __ Push(context_register(), a2);
- __ CallRuntime(Runtime::kLoadLookupSlot);
- __ Push(v0, v1); // Function, receiver.
+ __ Push(callee->name());
+ __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+ PushOperands(v0, v1); // Function, receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the
@@ -2944,7 +2798,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
VisitForStackValue(callee);
// refEnv.WithBaseObject()
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ push(a2); // Reserved receiver slot.
+ PushOperand(a2); // Reserved receiver slot.
}
}
@@ -2976,7 +2830,10 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
SetCallPosition(expr);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
- __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ expr->tail_call_mode()),
+ RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3017,6 +2874,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3037,7 +2895,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
FieldMemOperand(result_register(), HeapObject::kMapOffset));
__ lw(result_register(),
FieldMemOperand(result_register(), Map::kPrototypeOffset));
- __ Push(result_register());
+ PushOperand(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3059,6 +2917,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));
__ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -3112,81 +2971,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a1, Operand(SIMD128_VALUE_TYPE), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, a2);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ Branch(if_true, hs, a2, Operand(FIRST_FUNCTION_TYPE));
- __ Branch(if_false);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
- __ lw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ lw(a1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ li(t0, 0x80000000);
- Label not_nan;
- __ Branch(&not_nan, ne, a2, Operand(t0));
- __ mov(t0, zero_reg);
- __ mov(a2, a1);
- __ bind(&not_nan);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a2, Operand(t0), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3276,65 +3060,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in a1 and the formal
- // parameter count in a0.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a1, v0);
- __ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
- Label exit;
- // Get the number of formal parameters.
- __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&exit, ne, a3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ lw(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3404,28 +3129,6 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = nullptr;
- Label* if_false = nullptr;
- Label* fall_through = nullptr;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a1, Operand(JS_DATE_TYPE), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3437,7 +3140,7 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(index, value);
+ PopOperands(index, value);
if (FLAG_debug_code) {
__ SmiTst(value, at);
@@ -3474,7 +3177,7 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(index, value);
+ PopOperands(index, value);
if (FLAG_debug_code) {
__ SmiTst(value, at);
@@ -3500,35 +3203,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(a1); // v0 = value. a1 = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(a1, &done);
-
- // If the object is not a value type, return the value.
- __ GetObjectType(a1, a2, a2);
- __ Branch(&done, ne, a2, Operand(JS_VALUE_TYPE));
-
- // Store the value.
- __ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset));
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ mov(a2, v0);
- __ RecordWriteField(
- a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3546,26 +3220,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into v0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- Label convert, done_convert;
- __ JumpIfSmi(v0, &convert);
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ GetObjectType(v0, a1, a1);
- __ Branch(&done_convert, le, a1, Operand(LAST_NAME_TYPE));
- __ bind(&convert);
- __ Push(v0);
- __ CallRuntime(Runtime::kToName);
- __ bind(&done_convert);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3597,7 +3251,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Register index = a0;
Register result = v0;
- __ pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3645,7 +3299,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
Register scratch = a3;
Register result = v0;
- __ pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3695,6 +3349,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
// Call the target.
__ li(a0, Operand(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(argc + 1);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -3748,240 +3403,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- empty_separator_loop, one_char_separator_loop,
- one_char_separator_loop_entry, long_separator_loop;
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(0));
-
- // All aliases of the same register have disjoint lifetimes.
- Register array = v0;
- Register elements = no_reg; // Will be v0.
- Register result = no_reg; // Will be v0.
- Register separator = a1;
- Register array_length = a2;
- Register result_pos = no_reg; // Will be a2.
- Register string_length = a3;
- Register string = t0;
- Register element = t1;
- Register elements_end = t2;
- Register scratch1 = t3;
- Register scratch2 = t5;
- Register scratch3 = t4;
-
- // Separator operand is on the stack.
- __ pop(separator);
-
- // Check that the array is a JSArray.
- __ JumpIfSmi(array, &bailout);
- __ GetObjectType(array, scratch1, scratch2);
- __ Branch(&bailout, ne, scratch2, Operand(JS_ARRAY_TYPE));
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch1, scratch2, &bailout);
-
- // If the array has length zero, return the empty string.
- __ lw(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length);
- __ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
- __ LoadRoot(v0, Heap::kempty_stringRootIndex);
- __ Branch(&done);
-
- __ bind(&non_trivial_array);
-
- // Get the FixedArray containing array's elements.
- elements = array;
- __ lw(elements, FieldMemOperand(array, JSArray::kElementsOffset));
- array = no_reg; // End of array's live range.
-
- // Check that all array elements are sequential one-byte strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ mov(string_length, zero_reg);
- __ Addu(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(elements_end, array_length, kPointerSizeLog2);
- __ Addu(elements_end, element, elements_end);
- // Loop condition: while (element < elements_end).
- // Live values in registers:
- // elements: Fixed array of strings.
- // array_length: Length of the fixed array of strings (not smi)
- // separator: Separator string
- // string_length: Accumulated sum of string lengths (smi).
- // element: Current array element.
- // elements_end: Array end.
- if (generate_debug_code_) {
- __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin, array_length,
- Operand(zero_reg));
- }
- __ bind(&loop);
- __ lw(string, MemOperand(element));
- __ Addu(element, element, kPointerSize);
- __ JumpIfSmi(string, &bailout);
- __ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
- __ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ AddBranchOvf(string_length, string_length, Operand(scratch1), &bailout);
- __ Branch(&loop, lt, element, Operand(elements_end));
-
- // If array_length is 1, return elements[0], a string.
- __ Branch(&not_size_one_array, ne, array_length, Operand(1));
- __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ Branch(&done);
-
- __ bind(&not_size_one_array);
-
- // Live values in registers:
- // separator: Separator string
- // array_length: Length of the array.
- // string_length: Sum of string lengths (smi).
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat one-byte string.
- __ JumpIfSmi(separator, &bailout);
- __ lw(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
-
- // Add (separator length times array_length) - separator length to the
- // string_length to get the length of the result string. array_length is not
- // smi but the other values are, so the result is a smi.
- __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ Subu(string_length, string_length, Operand(scratch1));
- __ Mul(scratch3, scratch2, array_length, scratch1);
- // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
- // zero.
- __ Branch(&bailout, ne, scratch3, Operand(zero_reg));
- __ And(scratch3, scratch2, Operand(0x80000000));
- __ Branch(&bailout, ne, scratch3, Operand(zero_reg));
- __ AddBranchOvf(string_length, string_length, Operand(scratch2), &bailout);
- __ SmiUntag(string_length);
-
- // Bailout for large object allocations.
- __ Branch(&bailout, gt, string_length,
- Operand(Page::kMaxRegularHeapObjectSize));
-
- // Get first element in the array to free up the elements register to be used
- // for the result.
- __ Addu(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- result = elements; // End of live range for elements.
- elements = no_reg;
- // Live values in registers:
- // element: First array element
- // separator: Separator string
- // string_length: Length of result string (not smi)
- // array_length: Length of the array.
- __ AllocateOneByteString(result, string_length, scratch1, scratch2,
- elements_end, &bailout);
- // Prepare for looping. Set up elements_end to end of the array. Set
- // result_pos to the position of the result where to write the first
- // character.
- __ sll(elements_end, array_length, kPointerSizeLog2);
- __ Addu(elements_end, element, elements_end);
- result_pos = array_length; // End of live range for array_length.
- array_length = no_reg;
- __ Addu(result_pos,
- result,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // Check the length of the separator.
- __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ li(at, Operand(Smi::FromInt(1)));
- __ Branch(&one_char_separator, eq, scratch1, Operand(at));
- __ Branch(&long_separator, gt, scratch1, Operand(at));
-
- // Empty separator case.
- __ bind(&empty_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
-
- // Copy next array element to the result.
- __ lw(string, MemOperand(element));
- __ Addu(element, element, kPointerSize);
- __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(string, result_pos, string_length, scratch1);
- // End while (element < elements_end).
- __ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
- DCHECK(result.is(v0));
- __ Branch(&done);
-
- // One-character separator case.
- __ bind(&one_char_separator);
- // Replace separator with its one-byte character value.
- __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator.
- __ jmp(&one_char_separator_loop_entry);
-
- __ bind(&one_char_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Single separator one-byte char (in lower byte).
-
- // Copy the separator character to the result.
- __ sb(separator, MemOperand(result_pos));
- __ Addu(result_pos, result_pos, 1);
-
- // Copy next array element to the result.
- __ bind(&one_char_separator_loop_entry);
- __ lw(string, MemOperand(element));
- __ Addu(element, element, kPointerSize);
- __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(string, result_pos, string_length, scratch1);
- // End while (element < elements_end).
- __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
- DCHECK(result.is(v0));
- __ Branch(&done);
-
- // Long separator case (separator is more than one character). Entry is at the
- // label long_separator below.
- __ bind(&long_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Separator string.
-
- // Copy the separator to the result.
- __ lw(string_length, FieldMemOperand(separator, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ Addu(string,
- separator,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
-
- __ bind(&long_separator);
- __ lw(string, MemOperand(element));
- __ Addu(element, element, kPointerSize);
- __ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(string, result_pos, string_length, scratch1);
- // End while (element < elements_end).
- __ Branch(&long_separator_loop, lt, element, Operand(elements_end));
- DCHECK(result.is(v0));
- __ Branch(&done);
-
- __ bind(&bailout);
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -4014,7 +3435,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject);
+ CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(v0);
@@ -4024,7 +3445,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push undefined as the receiver.
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ push(v0);
+ PushOperand(v0);
__ LoadNativeContextSlot(expr->context_index(), v0);
}
@@ -4039,6 +3460,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ li(a0, Operand(arg_count));
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
}
@@ -4052,7 +3474,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Push the target function under the receiver.
__ lw(at, MemOperand(sp, 0));
- __ push(at);
+ PushOperand(at);
__ sw(v0, MemOperand(sp, kPointerSize));
// Push the arguments ("left-to-right").
@@ -4088,6 +3510,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the C runtime function.
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
+ OperandStackDepthDecrement(arg_count);
context()->Plug(v0);
}
}
@@ -4105,9 +3528,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(v0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4128,9 +3551,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- DCHECK(!context_register().is(a2));
- __ li(a2, Operand(var->name()));
- __ Push(context_register(), a2);
+ __ Push(var->name());
__ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(v0);
}
@@ -4175,6 +3596,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_false,
&materialize_true,
&materialize_true);
+ if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
@@ -4225,7 +3647,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
__ li(at, Operand(Smi::FromInt(0)));
- __ push(at);
+ PushOperand(at);
}
switch (assign_type) {
case NAMED_PROPERTY: {
@@ -4240,10 +3662,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
+ PushOperand(result_register());
const Register scratch = a1;
__ lw(scratch, MemOperand(sp, kPointerSize));
- __ Push(scratch, result_register());
+ PushOperands(scratch, result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
@@ -4256,9 +3678,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
const Register scratch1 = t0;
__ Move(scratch, result_register());
VisitForAccumulatorValue(prop->key());
- __ Push(scratch, result_register());
+ PushOperands(scratch, result_register());
__ lw(scratch1, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch1, scratch, result_register());
+ PushOperands(scratch1, scratch, result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
@@ -4344,7 +3766,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(v0);
+ PushOperand(v0);
break;
case NAMED_PROPERTY:
__ sw(v0, MemOperand(sp, kPointerSize));
@@ -4368,9 +3790,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetExpressionPosition(expr);
-
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD,
- strength(language_mode())).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4404,7 +3824,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::ValueRegister(), result_register());
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4441,8 +3861,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_PROPERTY: {
__ mov(StoreDescriptor::ValueRegister(), result_register());
- __ Pop(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister());
+ PopOperands(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
@@ -4497,8 +3917,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ LoadRoot(at, Heap::kFalseValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
} else if (String::Equals(check, factory->undefined_string())) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(if_true, eq, v0, Operand(at));
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(if_false, eq, v0, Operand(at));
__ JumpIfSmi(v0, if_false);
// Check for undetectable objects => true.
__ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
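// Undefined itself carries the undetectable bit, so the map check below
// covers it; null presumably carries that bit as well in this V8 version,
// so it is excluded up front to keep typeof null reporting "object".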
@@ -4564,7 +3984,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty);
+ CallRuntimeWithOperands(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(t0, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
@@ -4573,7 +3993,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
__ mov(a0, result_register());
- __ pop(a1);
+ PopOperand(a1);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4586,7 +4006,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForAccumulatorValue(expr->right());
Condition cc = CompareIC::ComputeCondition(op);
__ mov(a0, result_register());
- __ pop(a1);
+ PopOperand(a1);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
@@ -4598,8 +4018,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&slow_case);
}
- Handle<Code> ic = CodeFactory::CompareIC(
- isolate(), op, strength(language_mode())).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4686,7 +4105,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
DCHECK(closure_scope->is_function_scope());
__ lw(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
- __ push(at);
+ PushOperand(at);
}
@@ -4695,23 +4114,12 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
void FullCodeGenerator::EnterFinallyBlock() {
DCHECK(!result_register().is(a1));
- // Store result register while executing finally block.
- __ push(result_register());
- // Cook return address in link register to stack (smi encoded Code* delta).
- __ Subu(a1, ra, Operand(masm_->CodeObject()));
- DCHECK_EQ(1, kSmiTagSize + kSmiShiftSize);
- STATIC_ASSERT(0 == kSmiTag);
- __ Addu(a1, a1, Operand(a1)); // Convert to smi.
-
- // Store result register while executing finally block.
- __ push(a1);
-
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ li(at, Operand(pending_message_obj));
__ lw(a1, MemOperand(at));
- __ push(a1);
+ PushOperand(a1);
ClearPendingMessage();
}
@@ -4720,21 +4128,11 @@ void FullCodeGenerator::EnterFinallyBlock() {
void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(a1));
// Restore pending message from stack.
- __ pop(a1);
+ PopOperand(a1);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ li(at, Operand(pending_message_obj));
__ sw(a1, MemOperand(at));
-
- // Restore result register from stack.
- __ pop(a1);
-
- // Uncook return address and return.
- __ pop(result_register());
- DCHECK_EQ(1, kSmiTagSize + kSmiShiftSize);
- __ sra(a1, a1, 1); // Un-smi-tag value.
- __ Addu(at, a1, Operand(masm_->CodeObject()));
- __ Jump(at);
}
@@ -4754,6 +4152,32 @@ void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
Operand(SmiFromSlot(slot)));
}
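+ // Every try...finally funnels through here: the finally body leaves the
+ // saved accumulator and a command token on the operand stack, and the
+ // token selects which deferred control transfer (return, rethrow,
+ // continue or break) still has to be completed.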
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+ DCHECK(!result_register().is(a1));
+ __ Pop(result_register()); // Restore the accumulator.
+ __ Pop(a1); // Get the token.
+ for (DeferredCommand cmd : commands_) {
+ Label skip;
+ __ li(at, Operand(Smi::FromInt(cmd.token)));
+ __ Branch(&skip, ne, a1, Operand(at));
+ switch (cmd.command) {
+ case kReturn:
+ codegen_->EmitUnwindAndReturn();
+ break;
+ case kThrow:
+ __ Push(result_register());
+ __ CallRuntime(Runtime::kReThrow);
+ break;
+ case kContinue:
+ codegen_->EmitContinue(cmd.target);
+ break;
+ case kBreak:
+ codegen_->EmitBreak(cmd.target);
+ break;
+ }
+ __ bind(&skip);
+ }
+}
#undef __
diff --git a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
index 44dd791a59..c85dee4644 100644
--- a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -27,8 +27,7 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
-
+#define __ ACCESS_MASM(masm())
// A patch site is a location in the code which it is possible to patch. This
// class has a number of methods to emit the code which is patchable and the
@@ -86,6 +85,7 @@ class JumpPatchSite BASE_EMBEDDED {
}
private:
+ MacroAssembler* masm() { return masm_; }
MacroAssembler* masm_;
Label patch_site_;
#ifdef DEBUG
@@ -118,13 +118,6 @@ void FullCodeGenerator::Generate() {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop-at");
- }
-#endif
-
if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ld(a2, MemOperand(sp, receiver_offset));
@@ -145,6 +138,7 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
+ OperandStackDepthIncrement(locals_count);
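+ // Stack-allocated locals sit between fp and sp just like pushed operands,
+ // so they count toward the tracked operand stack depth from the start.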
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
@@ -272,21 +266,12 @@ void FullCodeGenerator::Generate() {
Variable* rest_param = scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ li(RestParamAccessDescriptor::parameter_count(),
- Operand(Smi::FromInt(num_parameters)));
- __ Daddu(RestParamAccessDescriptor::parameter_pointer(), fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ li(RestParamAccessDescriptor::rest_parameter_index(),
- Operand(Smi::FromInt(rest_index)));
- function_in_register_a1 = false;
-
- RestParamAccessStub stub(isolate());
+ if (!function_in_register_a1) {
+ __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ FastNewRestParameterStub stub(isolate());
__ CallStub(&stub);
-
+ function_in_register_a1 = false;
SetVar(rest_param, v0, a1, a2);
}
@@ -294,28 +279,20 @@ void FullCodeGenerator::Generate() {
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
- DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
if (!function_in_register_a1) {
// Load this again, if it's used by the local context below.
__ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ li(ArgumentsAccessNewDescriptor::parameter_count(),
- Operand(Smi::FromInt(num_parameters)));
- __ Daddu(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
-
- // Arguments to ArgumentsAccessStub:
- // function, parameter pointer, parameter count.
- // The stub will rewrite parameter pointer and parameter count if the
- // previous stack frame was an arguments adapter frame.
- bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
- ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
- is_unmapped, literal()->has_duplicate_parameters());
- ArgumentsAccessStub stub(isolate(), type);
- __ CallStub(&stub);
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
+ FastNewStrictArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ } else if (literal()->has_duplicate_parameters()) {
+ __ Push(a1);
+ __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+ } else {
+ FastNewSloppyArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ }
SetVar(arguments, v0, a1, a2);
}
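// Three-way split for the arguments object: strict functions and functions
// with non-simple parameter lists get unmapped arguments from a stub,
// duplicate parameter names fall back to the generic runtime path, and
// everything else takes the fast sloppy-arguments stub.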
@@ -430,6 +407,30 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
}
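+ // Factored out of EmitReturnSequence so real tail calls can share the
+ // interrupt/profiling bookkeeping; for a tail call v0 need not be saved
+ // around the InterruptCheck, since this frame is never returned to.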
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+ bool is_tail_call) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ Branch(&ok, ge, a3, Operand(zero_reg));
+ // Don't need to save result register if we are going to do a tail call.
+ if (!is_tail_call) {
+ __ push(v0);
+ }
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+ if (!is_tail_call) {
+ __ pop(v0);
+ }
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+}
void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
@@ -443,24 +444,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(v0);
__ CallRuntime(Runtime::kTraceExit);
}
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ Branch(&ok, ge, a3, Operand(zero_reg));
- __ push(v0);
- __ Call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- __ pop(v0);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ EmitProfilingCounterHandlingForReturnSequence(false);
// Make sure that the constant pool is not emitted inside of the return
// sequence.
@@ -482,7 +466,7 @@ void FullCodeGenerator::EmitReturnSequence() {
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
- __ push(result_register());
+ codegen()->PushOperand(result_register());
}
@@ -499,7 +483,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Heap::RootListIndex index) const {
__ LoadRoot(result_register(), index);
- __ push(result_register());
+ codegen()->PushOperand(result_register());
}
@@ -534,7 +518,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
// Immediates cannot be pushed directly.
__ li(result_register(), Operand(lit));
- __ push(result_register());
+ codegen()->PushOperand(result_register());
}
@@ -543,7 +527,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ Branch(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -568,41 +552,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
}
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
DCHECK(count > 0);
- if (count > 1) __ Drop(count - 1);
+ if (count > 1) codegen()->DropOperands(count - 1);
__ sd(reg, MemOperand(sp, 0));
}
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
DCHECK(materialize_true == materialize_false);
@@ -626,6 +583,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
+ codegen()->OperandStackDepthIncrement(1);
Label done;
__ bind(materialize_true);
__ LoadRoot(at, Heap::kTrueValueRootIndex);
@@ -657,7 +615,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
__ LoadRoot(at, value_root_index);
- __ push(at);
+ codegen()->PushOperand(at);
}
@@ -782,7 +740,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
+ if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
__ ld(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ LoadRoot(a4, Heap::kWithContextMapRootIndex);
@@ -904,11 +862,11 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ li(a2, Operand(variable->name()));
- __ Push(a2);
+ PushOperand(a2);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -983,8 +941,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetExpressionPosition(clause);
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
- strength(language_mode())).code();
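+      // The Strength parameter is gone here (and for BinaryOpIC below),
+      // presumably because strong mode was removed.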
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1005,7 +963,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Discard the test value and jump to the default if present, otherwise to
// the end of the statement.
__ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
+ DropOperands(1); // Switch value is no longer needed.
if (default_clause == NULL) {
__ Branch(nested_statement.break_label());
} else {
@@ -1040,19 +998,20 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// over the loop. See ECMA-262 version 5, section 12.6.4.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
- __ mov(a0, result_register()); // Result as param to InvokeBuiltin below.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&exit, eq, a0, Operand(at));
- Register null_value = a5;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ Branch(&exit, eq, a0, Operand(null_value));
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
- __ mov(a0, v0);
- // Convert the object to a JS object.
+ __ mov(a0, result_register());
+ OperandStackDepthIncrement(ForIn::kElementCount);
+
+ // If the object is null or undefined, skip over the loop, otherwise convert
+ // it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
Label convert, done_convert;
__ JumpIfSmi(a0, &convert);
__ GetObjectType(a0, a1, a1);
- __ Branch(&done_convert, ge, a1, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ Branch(USE_DELAY_SLOT, &done_convert, ge, a1,
+ Operand(FIRST_JS_RECEIVER_TYPE));
+ __ LoadRoot(at, Heap::kNullValueRootIndex); // In delay slot.
+ __ Branch(USE_DELAY_SLOT, &exit, eq, a0, Operand(at));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex); // In delay slot.
+ __ Branch(&exit, eq, a0, Operand(at));
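+  // The USE_DELAY_SLOT pattern folds each LoadRoot into the preceding
+  // branch's delay slot: on MIPS the delay-slot instruction executes whether
+  // or not the branch is taken, so the root for the *next* comparison is
+  // always loaded.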
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
@@ -1061,16 +1020,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(a0);
- // Check for proxies.
- Label call_runtime;
- __ GetObjectType(a0, a1, a1);
- __ Branch(&call_runtime, eq, a1, Operand(JS_PROXY_TYPE));
-
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- __ CheckEnumCache(null_value, &call_runtime);
+  // Note: Proxies never have an enum cache, so they will always take the
+  // slow path.
+ Label call_runtime;
+ __ CheckEnumCache(&call_runtime);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
@@ -1081,7 +1038,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(a0); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast);
+ __ CallRuntime(Runtime::kForInEnumerate);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1116,16 +1073,18 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register v0. Iterate through that.
__ bind(&fixed_array);
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(a1);
__ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- int vector_index = SmiFromSlot(slot)->value();
__ sd(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(vector_index)));
__ li(a1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(a1, v0); // Smi and array
__ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ Push(a1); // Fixed array length (as smi).
+ PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ li(a0, Operand(Smi::FromInt(0)));
- __ Push(a1, a0); // Fixed array length (as smi) and initial index.
+ __ Push(a0); // Initial index.
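+  // The loop now keeps five operand-stack slots (ForIn::kElementCount):
+  // enumerable object, Smi(1)-or-map flag, enum/fixed array, length (smi)
+  // and index (smi, on top); the break path below drops all five.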
// Generate code for doing the condition check.
__ bind(&loop);
@@ -1154,6 +1113,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Branch(&update_each, eq, a4, Operand(a2));
+  // We might get here from TurboFan or Crankshaft when something in the
+  // for-in loop body deopts, and only now notice in fullcodegen that we
+  // can no longer use the enum cache, i.e. we have left fast mode. Better
+  // record this information here, in case we later OSR back into this loop
+  // or reoptimize the whole function w/o rerunning the loop with the
+  // slow-mode object in fullcodegen (which would result in a deopt loop).
+ __ EmitLoadTypeFeedbackVector(a0);
+ __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ __ sd(a2, FieldMemOperand(a0, FixedArray::OffsetOfElementAt(vector_index)));
+
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
@@ -1191,7 +1160,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ Drop(5);
+ DropOperands(5);
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1438,12 +1407,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// by eval-introduced variables.
EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ bind(&slow);
- __ li(a1, Operand(var->name()));
- __ Push(cp, a1); // Context and name.
+ __ Push(var->name());
Runtime::FunctionId function_id =
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotNoReferenceError;
+ : Runtime::kLoadLookupSlotInsideTypeof;
__ CallRuntime(function_id);
__ bind(&done);
context()->Plug(v0);
@@ -1468,7 +1436,7 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
__ LoadRoot(a1, Heap::kNullValueRootIndex);
- __ push(a1);
+ PushOperand(a1);
} else {
VisitForStackValue(expression);
if (NeedsHomeObject(expression)) {
@@ -1512,7 +1480,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
- __ push(v0); // Save result on stack.
+ PushOperand(v0); // Save result on stack.
result_saved = true;
}
switch (property->kind()) {
@@ -1545,7 +1513,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
// Duplicate receiver on stack.
__ ld(a0, MemOperand(sp));
- __ push(a0);
+ PushOperand(a0);
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
@@ -1553,19 +1521,19 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
          __ li(a0, Operand(Smi::FromInt(SLOPPY)));  // Language mode.
- __ push(a0);
- __ CallRuntime(Runtime::kSetProperty);
+ PushOperand(a0);
+ CallRuntimeWithOperands(Runtime::kSetProperty);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
case ObjectLiteral::Property::PROTOTYPE:
// Duplicate receiver on stack.
__ ld(a0, MemOperand(sp));
- __ push(a0);
+ PushOperand(a0);
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
break;
@@ -1588,13 +1556,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
it != accessor_table.end();
++it) {
__ ld(a0, MemOperand(sp)); // Duplicate receiver.
- __ push(a0);
+ PushOperand(a0);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
__ li(a0, Operand(Smi::FromInt(NONE)));
- __ push(a0);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+ PushOperand(a0);
+ CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1611,18 +1579,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Expression* value = property->value();
if (!result_saved) {
- __ push(v0); // Save result on the stack
+ PushOperand(v0); // Save result on the stack
result_saved = true;
}
__ ld(a0, MemOperand(sp)); // Duplicate receiver.
- __ push(a0);
+ PushOperand(a0);
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
} else {
@@ -1637,11 +1605,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
case ObjectLiteral::Property::COMPUTED:
if (property->emit_store()) {
- __ li(a0, Operand(Smi::FromInt(NONE)));
- __ push(a0);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
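+          // kDefineDataPropertyInLiteral takes an extra flag saying whether
+          // the runtime should infer the function .name from the property
+          // key, presumably for anonymous function and class values.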
+ PushOperand(Smi::FromInt(NONE));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
@@ -1650,15 +1618,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
- __ li(a0, Operand(Smi::FromInt(NONE)));
- __ push(a0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ li(a0, Operand(Smi::FromInt(NONE)));
- __ push(a0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1716,14 +1682,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
int array_index = 0;
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- if (subexpr->IsSpread()) break;
+ DCHECK(!subexpr->IsSpread());
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(v0); // array literal
+ PushOperand(v0); // array literal
result_saved = true;
}
@@ -1746,21 +1712,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   // (inclusive) and these elements get appended to the array. Note that the
   // number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Pop(v0);
+ PopOperand(v0);
result_saved = false;
}
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- __ Push(v0);
- if (subexpr->IsSpread()) {
- VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement);
- }
+ PushOperand(v0);
+ DCHECK(!subexpr->IsSpread());
+ VisitForStackValue(subexpr);
+ CallRuntimeWithOperands(Runtime::kAppendElement);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1801,11 +1762,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
property->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
property->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
const Register scratch = a1;
__ ld(scratch, MemOperand(sp, kPointerSize));
- __ Push(scratch, result_register());
+ PushOperands(scratch, result_register());
}
break;
case KEYED_SUPER_PROPERTY: {
@@ -1816,11 +1777,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
property->obj()->AsSuperPropertyReference()->home_object());
__ Move(scratch, result_register());
VisitForAccumulatorValue(property->key());
- __ Push(scratch, result_register());
+ PushOperands(scratch, result_register());
if (expr->is_compound()) {
const Register scratch1 = a4;
__ ld(scratch1, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch1, scratch, result_register());
+ PushOperands(scratch1, scratch, result_register());
}
break;
}
@@ -1868,7 +1829,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- __ push(v0); // Left operand goes on the stack.
+ PushOperand(v0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
AccumulatorValueContext context(this);
@@ -1934,8 +1895,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ jmp(&suspend);
__ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
__ RecordGeneratorContinuation();
- __ jmp(&resume);
+ __ pop(a1);
+ __ Branch(&resume, ne, a1,
+ Operand(Smi::FromInt(JSGeneratorObject::RETURN)));
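+  // Any resume mode other than RETURN jumps to &resume; a return-resume
+  // falls through, boxes the input value into a {value, done: true}
+  // iterator result and unwinds.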
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
__ bind(&suspend);
VisitForAccumulatorValue(expr->generator_object());
@@ -1952,7 +1921,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
- __ pop(result_register());
+ PopOperand(result_register());
EmitReturnSequence();
__ bind(&resume);
@@ -1961,125 +1930,15 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
}
case Yield::kFinal: {
- VisitForAccumulatorValue(expr->generator_object());
- __ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
- __ sd(a1, FieldMemOperand(result_register(),
- JSGeneratorObject::kContinuationOffset));
// Pop value from top-of-stack slot, box result into result register.
+ OperandStackDepthDecrement(1);
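+      // EmitCreateIteratorResult presumably pops the value with a raw
+      // __ pop(), so only the tracked depth needs adjusting here.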
EmitCreateIteratorResult(true);
- EmitUnwindBeforeReturn();
- EmitReturnSequence();
+ EmitUnwindAndReturn();
break;
}
- case Yield::kDelegating: {
- VisitForStackValue(expr->generator_object());
-
- // Initial stack layout is as follows:
- // [sp + 1 * kPointerSize] iter
- // [sp + 0 * kPointerSize] g
-
- Label l_catch, l_try, l_suspend, l_continuation, l_resume;
- Label l_next, l_call;
- Register load_receiver = LoadDescriptor::ReceiverRegister();
- Register load_name = LoadDescriptor::NameRegister();
- // Initial send value is undefined.
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ Branch(&l_next);
-
- // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
- __ bind(&l_catch);
- __ mov(a0, v0);
- __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
- __ ld(a3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(a2, a3, a0); // "throw", iter, except
- __ jmp(&l_call);
-
- // try { received = %yield result }
- // Shuffle the received result above a try handler and yield it without
- // re-boxing.
- __ bind(&l_try);
- __ pop(a0); // result
- int handler_index = NewHandlerTableEntry();
- EnterTryBlock(handler_index, &l_catch);
- const int try_block_size = TryCatch::kElementCount * kPointerSize;
- __ push(a0); // result
-
- __ jmp(&l_suspend);
- __ bind(&l_continuation);
- __ RecordGeneratorContinuation();
- __ mov(a0, v0);
- __ jmp(&l_resume);
-
- __ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + try_block_size;
- __ ld(a0, MemOperand(sp, generator_object_depth));
- __ push(a0); // g
- __ Push(Smi::FromInt(handler_index)); // handler-index
- DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
- __ li(a1, Operand(Smi::FromInt(l_continuation.pos())));
- __ sd(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset));
- __ sd(cp, FieldMemOperand(a0, JSGeneratorObject::kContextOffset));
- __ mov(a1, cp);
- __ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
- kRAHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ pop(v0); // result
- EmitReturnSequence();
- __ mov(a0, v0);
- __ bind(&l_resume); // received in a0
- ExitTryBlock(handler_index);
-
- // receiver = iter; f = 'next'; arg = received;
- __ bind(&l_next);
- __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next"
- __ ld(a3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(load_name, a3, a0); // "next", iter, received
-
- // result = receiver[f](arg);
- __ bind(&l_call);
- __ ld(load_receiver, MemOperand(sp, kPointerSize));
- __ ld(load_name, MemOperand(sp, 2 * kPointerSize));
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
- CallIC(ic, TypeFeedbackId::None());
- __ mov(a0, v0);
- __ mov(a1, a0);
- __ sd(a1, MemOperand(sp, 2 * kPointerSize));
- SetCallPosition(expr);
- __ li(a0, Operand(1));
- __ Call(
- isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
-
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The function is still on the stack; drop it.
-
- // if (!result.done) goto l_try;
- __ Move(load_receiver, v0);
-
- __ push(load_receiver); // save result
- __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF); // v0=result.done
- __ mov(a0, v0);
- Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(bool_ic);
- __ LoadRoot(at, Heap::kTrueValueRootIndex);
- __ Branch(&l_try, ne, result_register(), Operand(at));
-
- // result.value
- __ pop(load_receiver); // result
- __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF); // v0=result.value
- context()->DropAndPlug(2, v0); // drop iter and g
- break;
- }
+ case Yield::kDelegating:
+ UNREACHABLE();
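+      // yield* appears to be desugared before full-codegen runs now, so the
+      // delegating case can no longer reach this backend.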
}
}
@@ -2093,7 +1952,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// a1 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- __ pop(a1);
+ PopOperand(a1);
+
+ // Store input value into generator object.
+ __ sd(result_register(),
+ FieldMemOperand(a1, JSGeneratorObject::kInputOffset));
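+  // The generator now references the input value; the write barrier below
+  // keeps incremental marking and the store buffer aware of the new edge.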
+ __ mov(a2, result_register());
+ __ RecordWriteField(a1, JSGeneratorObject::kInputOffset, a2, a3,
+ kRAHasBeenSaved, kDontSaveFPRegs);
// Load suspended function and context.
__ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
@@ -2148,6 +2014,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Daddu(a3, a3, Operand(a2));
__ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
__ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
__ Jump(a3);
__ bind(&slow_resume);
}
@@ -2161,6 +2028,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(a2);
__ Branch(&push_operand_holes);
__ bind(&call_resume);
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
DCHECK(!result_register().is(a1));
__ Push(a1, result_register());
__ Push(Smi::FromInt(resume_mode));
@@ -2172,6 +2040,36 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
context()->Plug(result_register());
}
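+// Push/Pop wrappers that keep operand_stack_depth_ in sync with the machine
+// stack. The patch routes operand pushes and pops through these (and through
+// PushOperand/PopOperand/DropOperands) so that EmitOperandStackDepthCheck
+// below can verify the tracked depth against sp in debug builds.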
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
+ OperandStackDepthIncrement(2);
+ __ Push(reg1, reg2);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+ Register reg3) {
+ OperandStackDepthIncrement(3);
+ __ Push(reg1, reg2, reg3);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+ Register reg3, Register reg4) {
+ OperandStackDepthIncrement(4);
+ __ Push(reg1, reg2, reg3, reg4);
+}
+
+void FullCodeGenerator::PopOperands(Register reg1, Register reg2) {
+ OperandStackDepthDecrement(2);
+ __ Pop(reg1, reg2);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+ if (FLAG_debug_code) {
+ int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+ operand_stack_depth_ * kPointerSize;
+ __ Dsubu(v0, fp, sp);
+ __ Assert(eq, kUnexpectedStackDepth, v0, Operand(expected_diff));
+ }
+}
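+// E.g. with kPointerSize == 8 on MIPS64, a tracked depth of 3 makes the
+// assert expect fp - sp == StandardFrameConstants::kFixedFrameSizeFromFp + 24.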
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
@@ -2206,40 +2104,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ li(LoadDescriptor::NameRegister(), Operand(key->value()));
__ li(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object.
- SetExpressionPosition(prop);
-
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- DCHECK(prop->IsSuperAccess());
-
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- // Call keyed load IC. It has register arguments receiver and key.
- SetExpressionPosition(prop);
-
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object, key.
- SetExpressionPosition(prop);
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -2255,7 +2120,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// Get the arguments.
Register left = a1;
Register right = a0;
- __ pop(left);
+ PopOperand(left);
__ mov(a0, result_register());
// Perform combined smi check on both operands.
@@ -2265,8 +2130,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2336,27 +2200,17 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- // Constructor is in v0.
- DCHECK(lit != NULL);
- __ push(v0);
-
- // No access check is needed here since the constructor is created by the
- // class literal.
- Register scratch = a1;
- __ ld(scratch,
- FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
- __ push(scratch);
-
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
+ Register scratch = a1;
if (property->is_static()) {
__ ld(scratch, MemOperand(sp, kPointerSize)); // constructor
} else {
__ ld(scratch, MemOperand(sp, 0)); // prototype
}
- __ push(scratch);
+ PushOperand(scratch);
EmitPropertyKey(property, lit->GetIdForProperty(i));
// The static prototype property is read only. We handle the non computed
@@ -2379,37 +2233,32 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
case ObjectLiteral::Property::GETTER:
- __ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
- __ push(a0);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ li(a0, Operand(Smi::FromInt(DONT_ENUM)));
- __ push(a0);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
UNREACHABLE();
}
}
-
- // Set both the prototype and constructor to have fast properties, and also
- // freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ mov(a0, result_register());
- __ pop(a1);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ PopOperand(a1);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2432,10 +2281,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_PROPERTY: {
- __ push(result_register()); // Preserve value.
+ PushOperand(result_register()); // Preserve value.
VisitForAccumulatorValue(prop->obj());
__ mov(StoreDescriptor::ReceiverRegister(), result_register());
- __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
EmitLoadStoreICSlot(slot);
@@ -2443,7 +2292,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_SUPER_PROPERTY: {
- __ Push(v0);
+ PushOperand(v0);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2460,7 +2309,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_SUPER_PROPERTY: {
- __ Push(v0);
+ PushOperand(v0);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2480,12 +2329,12 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_PROPERTY: {
- __ push(result_register()); // Preserve value.
+ PushOperand(result_register()); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ Move(StoreDescriptor::NameRegister(), result_register());
- __ Pop(StoreDescriptor::ValueRegister(),
- StoreDescriptor::ReceiverRegister());
+ PopOperands(StoreDescriptor::ValueRegister(),
+ StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2568,21 +2417,17 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
} else if (!var->is_const_mode() ||
(var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
- // Assignment to var.
- __ li(a4, Operand(var->name()));
- __ li(a3, Operand(Smi::FromInt(language_mode())));
- // jssp[0] : language mode.
- // jssp[8] : name.
- // jssp[16] : context.
- // jssp[24] : value.
- __ Push(v0, cp, a4, a3);
- __ CallRuntime(Runtime::kStoreLookupSlot);
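+      // The language mode is now baked into the runtime function id, and the
+      // context is presumably recovered by the runtime itself; only the name
+      // and the value are pushed.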
+ __ Push(var->name());
+ __ Push(v0);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreLookupSlot_Strict
+ : Runtime::kStoreLookupSlot_Sloppy);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, a1);
- if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ ld(a2, location);
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
@@ -2628,7 +2473,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(StoreDescriptor::ValueRegister(), result_register());
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
@@ -2645,10 +2490,11 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
- __ Push(key->value());
- __ Push(v0);
- __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy));
+ PushOperand(key->value());
+ PushOperand(v0);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy);
}
@@ -2658,10 +2504,10 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// stack : receiver ('this'), home_object, key
DCHECK(prop != NULL);
- __ Push(v0);
- __ CallRuntime((is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy));
+ PushOperand(v0);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy);
}
@@ -2673,7 +2519,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// - a1 is the key,
// - a2 is the receiver.
__ mov(StoreDescriptor::ValueRegister(), result_register());
- __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+ PopOperands(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(a0));
Handle<Code> ic =
@@ -2708,7 +2555,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ Move(LoadDescriptor::NameRegister(), v0);
- __ pop(LoadDescriptor::ReceiverRegister());
+ PopOperand(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
@@ -2744,7 +2591,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ push(at);
+ PushOperand(at);
convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
@@ -2755,7 +2602,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
__ ld(at, MemOperand(sp, 0));
- __ push(at);
+ PushOperand(at);
__ sd(v0, MemOperand(sp, kPointerSize));
convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
@@ -2779,9 +2626,8 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
VisitForAccumulatorValue(super_ref->home_object());
__ mov(scratch, v0);
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(scratch, v0, v0, scratch);
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
+ PushOperands(scratch, v0, v0, scratch);
+ PushOperand(key->value());
// Stack here:
// - home_object
@@ -2789,8 +2635,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ sd(v0, MemOperand(sp, kPointerSize));
@@ -2819,7 +2664,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Push the target function under the receiver.
__ ld(at, MemOperand(sp, 0));
- __ push(at);
+ PushOperand(at);
__ sd(v0, MemOperand(sp, kPointerSize));
EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -2839,9 +2684,8 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
VisitForAccumulatorValue(super_ref->home_object());
__ Move(scratch, v0);
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(scratch, v0, v0, scratch);
+ PushOperands(scratch, v0, v0, scratch);
VisitForStackValue(prop->key());
- __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2849,8 +2693,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ sd(v0, MemOperand(sp, kPointerSize));
@@ -2873,12 +2716,24 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
// Record source position of the IC call.
SetCallPosition(expr);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
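+  // TailCallMode::kAllow is presumably set for calls in tail position when
+  // ES6 proper tail calls are enabled (--harmony-tailcalls at the time).
+  // Such calls never return here, so bookkeeping that normally happens in
+  // the return sequence must happen before the call.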
+ if (expr->tail_call_mode() == TailCallMode::kAllow) {
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceTailCall);
+ }
+ // Update profiling counters before the tail call since we will
+ // not return to this function.
+ EmitProfilingCounterHandlingForReturnSequence(true);
+ }
+ Handle<Code> ic =
+ CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+ .code();
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
CallIC(ic);
+ OperandStackDepthDecrement(arg_count + 1);
+
RecordJSReturnSite(expr);
// Restore context register.
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2923,11 +2778,9 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in v0)
// and the object holding it (returned in v1).
- DCHECK(!context_register().is(a2));
- __ li(a2, Operand(callee->name()));
- __ Push(context_register(), a2);
- __ CallRuntime(Runtime::kLoadLookupSlot);
- __ Push(v0, v1); // Function, receiver.
+ __ Push(callee->name());
+ __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+ PushOperands(v0, v1); // Function, receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the
@@ -2949,7 +2802,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
VisitForStackValue(callee);
// refEnv.WithBaseObject()
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ push(a2); // Reserved receiver slot.
+ PushOperand(a2); // Reserved receiver slot.
}
}
@@ -2981,7 +2834,10 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
SetCallPosition(expr);
__ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
- __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ expr->tail_call_mode()),
+ RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
// Restore context register.
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3022,6 +2878,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3042,7 +2899,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
FieldMemOperand(result_register(), HeapObject::kMapOffset));
__ ld(result_register(),
FieldMemOperand(result_register(), Map::kPrototypeOffset));
- __ Push(result_register());
+ PushOperand(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3064,6 +2921,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ ld(a1, MemOperand(sp, arg_count * kPointerSize));
__ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -3117,81 +2975,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a1, Operand(SIMD128_VALUE_TYPE), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, a2);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ Branch(if_true, hs, a2, Operand(FIRST_FUNCTION_TYPE));
- __ Branch(if_false);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
- __ lwu(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ lwu(a1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ li(a4, 0x80000000);
- Label not_nan;
- __ Branch(&not_nan, ne, a2, Operand(a4));
- __ mov(a4, zero_reg);
- __ mov(a2, a1);
- __ bind(&not_nan);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a2, Operand(a4), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3281,65 +3064,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in a1 and the formal
- // parameter count in a0.
- VisitForAccumulatorValue(args->at(0));
- __ mov(a1, v0);
- __ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
- Label exit;
- // Get the number of formal parameters.
- __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&exit, ne, a3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ ld(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3409,28 +3133,6 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = nullptr;
- Label* if_false = nullptr;
- Label* fall_through = nullptr;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a1, Operand(JS_DATE_TYPE), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3442,7 +3144,7 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(index, value);
+ PopOperands(index, value);
if (FLAG_debug_code) {
__ SmiTst(value, at);
@@ -3479,7 +3181,7 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(index, value);
+ PopOperands(index, value);
if (FLAG_debug_code) {
__ SmiTst(value, at);
@@ -3506,35 +3208,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(a1); // v0 = value. a1 = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(a1, &done);
-
- // If the object is not a value type, return the value.
- __ GetObjectType(a1, a2, a2);
- __ Branch(&done, ne, a2, Operand(JS_VALUE_TYPE));
-
- // Store the value.
- __ sd(v0, FieldMemOperand(a1, JSValue::kValueOffset));
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ mov(a2, v0);
- __ RecordWriteField(
- a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3552,26 +3225,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into v0 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- Label convert, done_convert;
- __ JumpIfSmi(v0, &convert);
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ GetObjectType(v0, a1, a1);
- __ Branch(&done_convert, le, a1, Operand(LAST_NAME_TYPE));
- __ bind(&convert);
- __ Push(v0);
- __ CallRuntime(Runtime::kToName);
- __ bind(&done_convert);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3603,7 +3256,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Register index = a0;
Register result = v0;
- __ pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3651,7 +3304,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
Register scratch = a3;
Register result = v0;
- __ pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3701,6 +3354,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
// Call the target.
__ li(a0, Operand(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(argc + 1);
// Restore context register.
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -3754,242 +3408,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- empty_separator_loop, one_char_separator_loop,
- one_char_separator_loop_entry, long_separator_loop;
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(0));
-
- // All aliases of the same register have disjoint lifetimes.
- Register array = v0;
- Register elements = no_reg; // Will be v0.
- Register result = no_reg; // Will be v0.
- Register separator = a1;
- Register array_length = a2;
- Register result_pos = no_reg; // Will be a2.
- Register string_length = a3;
- Register string = a4;
- Register element = a5;
- Register elements_end = a6;
- Register scratch1 = a7;
- Register scratch2 = t1;
- Register scratch3 = t0;
-
- // Separator operand is on the stack.
- __ pop(separator);
-
- // Check that the array is a JSArray.
- __ JumpIfSmi(array, &bailout);
- __ GetObjectType(array, scratch1, scratch2);
- __ Branch(&bailout, ne, scratch2, Operand(JS_ARRAY_TYPE));
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch1, scratch2, &bailout);
-
- // If the array has length zero, return the empty string.
- __ ld(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length);
- __ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
- __ LoadRoot(v0, Heap::kempty_stringRootIndex);
- __ Branch(&done);
-
- __ bind(&non_trivial_array);
-
- // Get the FixedArray containing array's elements.
- elements = array;
- __ ld(elements, FieldMemOperand(array, JSArray::kElementsOffset));
- array = no_reg; // End of array's live range.
-
- // Check that all array elements are sequential one-byte strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ mov(string_length, zero_reg);
- __ Daddu(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ dsll(elements_end, array_length, kPointerSizeLog2);
- __ Daddu(elements_end, element, elements_end);
- // Loop condition: while (element < elements_end).
- // Live values in registers:
- // elements: Fixed array of strings.
- // array_length: Length of the fixed array of strings (not smi)
- // separator: Separator string
- // string_length: Accumulated sum of string lengths (smi).
- // element: Current array element.
- // elements_end: Array end.
- if (generate_debug_code_) {
- __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin, array_length,
- Operand(zero_reg));
- }
- __ bind(&loop);
- __ ld(string, MemOperand(element));
- __ Daddu(element, element, kPointerSize);
- __ JumpIfSmi(string, &bailout);
- __ ld(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
- __ ld(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ DadduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
- __ BranchOnOverflow(&bailout, scratch3);
- __ Branch(&loop, lt, element, Operand(elements_end));
-
- // If array_length is 1, return elements[0], a string.
- __ Branch(&not_size_one_array, ne, array_length, Operand(1));
- __ ld(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ Branch(&done);
-
- __ bind(&not_size_one_array);
-
- // Live values in registers:
- // separator: Separator string
- // array_length: Length of the array.
- // string_length: Sum of string lengths (smi).
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat one-byte string.
- __ JumpIfSmi(separator, &bailout);
- __ ld(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
-
- // Add (separator length times array_length) - separator length to the
- // string_length to get the length of the result string. array_length is not
- // smi but the other values are, so the result is a smi.
- __ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ Dsubu(string_length, string_length, Operand(scratch1));
- __ SmiUntag(scratch1);
- __ Dmul(scratch2, array_length, scratch1);
- // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
- // zero.
- __ dsra32(scratch1, scratch2, 0);
- __ Branch(&bailout, ne, scratch2, Operand(zero_reg));
- __ SmiUntag(string_length);
- __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
- __ BranchOnOverflow(&bailout, scratch3);
-
- // Bailout for large object allocations.
- __ Branch(&bailout, gt, string_length,
- Operand(Page::kMaxRegularHeapObjectSize));
-
- // Get first element in the array to free up the elements register to be used
- // for the result.
- __ Daddu(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- result = elements; // End of live range for elements.
- elements = no_reg;
- // Live values in registers:
- // element: First array element
- // separator: Separator string
- // string_length: Length of result string (not smi)
- // array_length: Length of the array.
- __ AllocateOneByteString(result, string_length, scratch1, scratch2,
- elements_end, &bailout);
- // Prepare for looping. Set up elements_end to point at the end of the
- // array, and result_pos to the position in the result where the first
- // character will be written.
- __ dsll(elements_end, array_length, kPointerSizeLog2);
- __ Daddu(elements_end, element, elements_end);
- result_pos = array_length; // End of live range for array_length.
- array_length = no_reg;
- __ Daddu(result_pos,
- result,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // Check the length of the separator.
- __ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ li(at, Operand(Smi::FromInt(1)));
- __ Branch(&one_char_separator, eq, scratch1, Operand(at));
- __ Branch(&long_separator, gt, scratch1, Operand(at));
-
- // Empty separator case.
- __ bind(&empty_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
-
- // Copy next array element to the result.
- __ ld(string, MemOperand(element));
- __ Daddu(element, element, kPointerSize);
- __ ld(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(string, result_pos, string_length, scratch1);
- // End while (element < elements_end).
- __ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
- DCHECK(result.is(v0));
- __ Branch(&done);
-
- // One-character separator case.
- __ bind(&one_char_separator);
- // Replace separator with its one-byte character value.
- __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator.
- __ jmp(&one_char_separator_loop_entry);
-
- __ bind(&one_char_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Single separator one-byte char (in lower byte).
-
- // Copy the separator character to the result.
- __ sb(separator, MemOperand(result_pos));
- __ Daddu(result_pos, result_pos, 1);
-
- // Copy next array element to the result.
- __ bind(&one_char_separator_loop_entry);
- __ ld(string, MemOperand(element));
- __ Daddu(element, element, kPointerSize);
- __ ld(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(string, result_pos, string_length, scratch1);
- // End while (element < elements_end).
- __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
- DCHECK(result.is(v0));
- __ Branch(&done);
-
- // Long separator case (separator is more than one character). Entry is at the
- // label long_separator below.
- __ bind(&long_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Separator string.
-
- // Copy the separator to the result.
- __ ld(string_length, FieldMemOperand(separator, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ Daddu(string,
- separator,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
-
- __ bind(&long_separator);
- __ ld(string, MemOperand(element));
- __ Daddu(element, element, kPointerSize);
- __ ld(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
- __ CopyBytes(string, result_pos, string_length, scratch1);
- // End while (element < elements_end).
- __ Branch(&long_separator_loop, lt, element, Operand(elements_end));
- DCHECK(result.is(v0));
- __ Branch(&done);
-
- __ bind(&bailout);
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- context()->Plug(v0);
-}
-
-
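For reference, the overflow discipline in the deleted one-byte join fast path can be modeled on the host side as follows. This is a sketch with invented names (JoinedLength is not a V8 API); it mirrors the dsra32 high-bits test and the overflow-checked add that guarded the smi result.

    #include <cstdint>
    #include <optional>

    // Length of the joined string, bailing out (nullopt) exactly where the
    // stub branched to &bailout: whenever the running total no longer fits
    // in a positive 32-bit smi payload.
    std::optional<int32_t> JoinedLength(int32_t sum_of_string_lengths,
                                        int32_t separator_length,
                                        int32_t array_length) {
      // The separator appears (array_length - 1) times between the elements.
      int64_t total =
          static_cast<int64_t>(separator_length) * array_length -
          separator_length + sum_of_string_lengths;
      // "No overflow if the upper 33 bits of the 64-bit result are zero":
      // the total must be non-negative and below 2^31.
      if (total < 0 || (total >> 31) != 0) return std::nullopt;
      return static_cast<int32_t>(total);
    }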
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -4022,7 +3440,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject);
+ CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(v0);
@@ -4032,7 +3450,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push undefined as the receiver.
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
- __ push(v0);
+ PushOperand(v0);
__ LoadNativeContextSlot(expr->context_index(), v0);
}
@@ -4047,6 +3465,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ li(a0, Operand(arg_count));
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
}
@@ -4060,7 +3479,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Push the target function under the receiver.
__ ld(at, MemOperand(sp, 0));
- __ push(at);
+ PushOperand(at);
__ sd(v0, MemOperand(sp, kPointerSize));
// Push the arguments ("left-to-right").
@@ -4095,6 +3514,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the C runtime function.
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
+ OperandStackDepthDecrement(arg_count);
context()->Plug(v0);
}
}
@@ -4112,9 +3532,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(v0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4136,8 +3556,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
DCHECK(!context_register().is(a2));
- __ li(a2, Operand(var->name()));
- __ Push(context_register(), a2);
+ __ Push(var->name());
__ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(v0);
}
@@ -4182,6 +3601,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_false,
&materialize_true,
&materialize_true);
+ if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
__ LoadRoot(v0, Heap::kTrueValueRootIndex);
@@ -4232,7 +3652,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
__ li(at, Operand(Smi::FromInt(0)));
- __ push(at);
+ PushOperand(at);
}
switch (assign_type) {
case NAMED_PROPERTY: {
@@ -4247,10 +3667,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
+ PushOperand(result_register());
const Register scratch = a1;
__ ld(scratch, MemOperand(sp, kPointerSize));
- __ Push(scratch, result_register());
+ PushOperands(scratch, result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
@@ -4263,9 +3683,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
const Register scratch1 = a4;
__ Move(scratch, result_register());
VisitForAccumulatorValue(prop->key());
- __ Push(scratch, result_register());
+ PushOperands(scratch, result_register());
__ ld(scratch1, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch1, scratch, result_register());
+ PushOperands(scratch1, scratch, result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
@@ -4353,7 +3773,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(v0);
+ PushOperand(v0);
break;
case NAMED_PROPERTY:
__ sd(v0, MemOperand(sp, kPointerSize));
@@ -4377,9 +3797,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetExpressionPosition(expr);
-
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD,
- strength(language_mode())).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4413,7 +3831,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(StoreDescriptor::ValueRegister(), result_register());
__ li(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4450,8 +3868,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case KEYED_PROPERTY: {
__ mov(StoreDescriptor::ValueRegister(), result_register());
- __ Pop(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister());
+ PopOperands(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
@@ -4506,8 +3924,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ LoadRoot(at, Heap::kFalseValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
} else if (String::Equals(check, factory->undefined_string())) {
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(if_true, eq, v0, Operand(at));
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ __ Branch(if_false, eq, v0, Operand(at));
__ JumpIfSmi(v0, if_false);
// Check for undetectable objects => true.
__ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
@@ -4573,7 +3991,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty);
+ CallRuntimeWithOperands(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(a4, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
@@ -4582,7 +4000,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
__ mov(a0, result_register());
- __ pop(a1);
+ PopOperand(a1);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4595,7 +4013,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForAccumulatorValue(expr->right());
Condition cc = CompareIC::ComputeCondition(op);
__ mov(a0, result_register());
- __ pop(a1);
+ PopOperand(a1);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
@@ -4607,8 +4025,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&slow_case);
}
- Handle<Code> ic = CodeFactory::CompareIC(
- isolate(), op, strength(language_mode())).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4697,7 +4114,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
DCHECK(closure_scope->is_function_scope());
__ ld(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
- __ push(at);
+ PushOperand(at);
}
@@ -4706,21 +4123,12 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
void FullCodeGenerator::EnterFinallyBlock() {
DCHECK(!result_register().is(a1));
- // Store result register while executing finally block.
- __ push(result_register());
- // Cook return address in link register to stack (smi encoded Code* delta).
- __ Dsubu(a1, ra, Operand(masm_->CodeObject()));
- __ SmiTag(a1);
-
- // Store result register while executing finally block.
- __ push(a1);
-
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ li(at, Operand(pending_message_obj));
__ ld(a1, MemOperand(at));
- __ push(a1);
+ PushOperand(a1);
ClearPendingMessage();
}
@@ -4729,21 +4137,11 @@ void FullCodeGenerator::EnterFinallyBlock() {
void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(a1));
// Restore pending message from stack.
- __ pop(a1);
+ PopOperand(a1);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ li(at, Operand(pending_message_obj));
__ sd(a1, MemOperand(at));
-
- // Restore result register from stack.
- __ pop(a1);
-
- // Uncook return address and return.
- __ pop(result_register());
-
- __ SmiUntag(a1);
- __ Daddu(at, a1, Operand(masm_->CodeObject()));
- __ Jump(at);
}
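The reworked EnterFinallyBlock/ExitFinallyBlock above no longer "cook" the return address into a smi-encoded code delta; with the deferred-commands mechanism, only the isolate's pending message has to be spilled around the finally body. A host-side sketch of what remains (OperandStack and the pending-message accessors are hypothetical names, not the real V8 types):

    // Sketch: what the finally prologue and epilogue now amount to.
    void EnterFinally(OperandStack& stack, Isolate& isolate) {
      stack.Push(isolate.pending_message());  // PushOperand(a1)
      isolate.clear_pending_message();        // ClearPendingMessage()
    }

    void ExitFinally(OperandStack& stack, Isolate& isolate) {
      isolate.set_pending_message(stack.Pop());  // PopOperand(a1)
    }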
@@ -4763,6 +4161,31 @@ void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
Operand(SmiFromSlot(slot)));
}
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+ __ Pop(result_register()); // Restore the accumulator.
+ __ Pop(a1); // Get the token.
+ for (DeferredCommand cmd : commands_) {
+ Label skip;
+ __ li(at, Operand(Smi::FromInt(cmd.token)));
+ __ Branch(&skip, ne, a1, Operand(at));
+ switch (cmd.command) {
+ case kReturn:
+ codegen_->EmitUnwindAndReturn();
+ break;
+ case kThrow:
+ __ Push(result_register());
+ __ CallRuntime(Runtime::kReThrow);
+ break;
+ case kContinue:
+ codegen_->EmitContinue(cmd.target);
+ break;
+ case kBreak:
+ codegen_->EmitBreak(cmd.target);
+ break;
+ }
+ __ bind(&skip);
+ }
+}
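The DeferredCommands dispatcher added above is the replacement for return-address cooking: any break, continue, return, or throw that has to cross a finally block is recorded as a (token, command) pair, and once the finally body completes, the saved token selects which control transfer to materialize. A minimal host-side analogue (names invented for illustration):

    #include <vector>

    enum class Command { kBreak, kContinue, kReturn, kThrow };

    struct DeferredCommand {
      int token;        // pushed before entering the finally block
      Command command;  // what to do once the finally body completes
    };

    void DispatchDeferred(int saved_token,
                          const std::vector<DeferredCommand>& commands) {
      for (const DeferredCommand& cmd : commands) {
        if (cmd.token != saved_token) continue;  // __ Branch(&skip, ne, ...)
        switch (cmd.command) {
          case Command::kReturn:   /* EmitUnwindAndReturn()    */ break;
          case Command::kThrow:    /* Runtime::kReThrow        */ break;
          case Command::kContinue: /* EmitContinue(cmd.target) */ break;
          case Command::kBreak:    /* EmitBreak(cmd.target)    */ break;
        }
      }
    }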
#undef __
diff --git a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
index d9c324c424..24a2a38733 100644
--- a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -19,7 +19,7 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm())
// A patch site is a location in the code which it is possible to patch. This
// class has a number of methods to emit the code which is patchable and the
@@ -74,6 +74,7 @@ class JumpPatchSite BASE_EMBEDDED {
}
private:
+ MacroAssembler* masm() { return masm_; }
MacroAssembler* masm_;
Label patch_site_;
#ifdef DEBUG
@@ -107,13 +108,6 @@ void FullCodeGenerator::Generate() {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop-at");
- }
-#endif
-
if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadP(r5, MemOperand(sp, receiver_offset), r0);
@@ -142,6 +136,7 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
+ OperandStackDepthIncrement(locals_count);
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
@@ -269,21 +264,12 @@ void FullCodeGenerator::Generate() {
Variable* rest_param = scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ LoadSmiLiteral(RestParamAccessDescriptor::parameter_count(),
- Smi::FromInt(num_parameters));
- __ addi(RestParamAccessDescriptor::parameter_pointer(), fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ LoadSmiLiteral(RestParamAccessDescriptor::rest_parameter_index(),
- Smi::FromInt(rest_index));
- function_in_register_r4 = false;
-
- RestParamAccessStub stub(isolate());
+ if (!function_in_register_r4) {
+ __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ FastNewRestParameterStub stub(isolate());
__ CallStub(&stub);
-
+ function_in_register_r4 = false;
SetVar(rest_param, r3, r4, r5);
}
@@ -291,28 +277,20 @@ void FullCodeGenerator::Generate() {
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
- DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
if (!function_in_register_r4) {
// Load this again, if it's used by the local context below.
__ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ LoadSmiLiteral(ArgumentsAccessNewDescriptor::parameter_count(),
- Smi::FromInt(num_parameters));
- __ addi(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
-
- // Arguments to ArgumentsAccessStub:
- // function, parameter pointer, parameter count.
- // The stub will rewrite parameter pointer and parameter count if the
- // previous stack frame was an arguments adapter frame.
- bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
- ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
- is_unmapped, literal()->has_duplicate_parameters());
- ArgumentsAccessStub stub(isolate(), type);
- __ CallStub(&stub);
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
+ FastNewStrictArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ } else if (literal()->has_duplicate_parameters()) {
+ __ Push(r4);
+ __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+ } else {
+ FastNewSloppyArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ }
SetVar(arguments, r3, r4, r5);
}
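The rewritten arguments-object allocation above replaces the old ArgumentsAccessStub type computation with a three-way choice. As plain C++ (a sketch; only the predicate names are taken from the diff):

    enum class ArgumentsPath { kFastStrict, kGenericRuntime, kFastSloppy };

    ArgumentsPath ChooseArgumentsPath(bool is_strict,
                                      bool has_simple_parameters,
                                      bool has_duplicate_parameters) {
      // Strict functions and functions with non-simple parameter lists get
      // unmapped arguments via FastNewStrictArgumentsStub.
      if (is_strict || !has_simple_parameters)
        return ArgumentsPath::kFastStrict;
      // Duplicate parameters force the generic runtime path
      // (Runtime::kNewSloppyArguments_Generic).
      if (has_duplicate_parameters) return ArgumentsPath::kGenericRuntime;
      // Everything else takes FastNewSloppyArgumentsStub.
      return ArgumentsPath::kFastSloppy;
    }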
@@ -426,6 +404,31 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
}
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+ bool is_tail_call) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
+ weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ cmpi(r6, Operand::Zero());
+ __ bge(&ok);
+ // Don't need to save result register if we are going to do a tail call.
+ if (!is_tail_call) {
+ __ push(r3);
+ }
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+ if (!is_tail_call) {
+ __ pop(r3);
+ }
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+}
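The weight computed above scales the interrupt-budget decrement with the size of the function, clamped to kMaxBackEdgeWeight. A worked example of the same arithmetic (the real constants are per-architecture; the values below are illustrative only):

    #include <algorithm>

    int ReturnSequenceWeight(int pc_offset,
                             int code_size_multiplier /* e.g. 200 */,
                             int max_back_edge_weight /* e.g. 127 */) {
      int distance = pc_offset + code_size_multiplier / 2;  // round to nearest
      return std::min(max_back_edge_weight,
                      std::max(1, distance / code_size_multiplier));
    }
    // With the illustrative constants: pc_offset 0 -> 1, 2000 -> 10,
    // 100000 -> 127 (clamped).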
void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
@@ -439,23 +442,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(r3);
__ CallRuntime(Runtime::kTraceExit);
}
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else {
- int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
- weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ cmpi(r6, Operand::Zero());
- __ bge(&ok);
- __ push(r3);
- __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
- __ pop(r3);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ EmitProfilingCounterHandlingForReturnSequence(false);
// Make sure that the constant pool is not emitted inside of the return
// sequence.
@@ -474,7 +461,7 @@ void FullCodeGenerator::EmitReturnSequence() {
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
- __ push(result_register());
+ codegen()->PushOperand(result_register());
}
@@ -490,7 +477,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Heap::RootListIndex index) const {
__ LoadRoot(result_register(), index);
- __ push(result_register());
+ codegen()->PushOperand(result_register());
}
@@ -522,14 +509,14 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
// Immediates cannot be pushed directly.
__ mov(result_register(), Operand(lit));
- __ push(result_register());
+ codegen()->PushOperand(result_register());
}
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
false_label_);
- DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ b(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -554,40 +541,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
}
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count, Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
DCHECK(count > 0);
- if (count > 1) __ Drop(count - 1);
+ if (count > 1) codegen()->DropOperands(count - 1);
__ StoreP(reg, MemOperand(sp, 0));
}
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
DCHECK(materialize_true == materialize_false);
@@ -616,7 +577,7 @@ void FullCodeGenerator::StackValueContext::Plug(
__ bind(materialize_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ bind(&done);
- __ push(ip);
+ codegen()->PushOperand(ip);
}
@@ -638,7 +599,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
__ LoadRoot(ip, value_root_index);
- __ push(ip);
+ codegen()->PushOperand(ip);
}
@@ -750,7 +711,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
+ if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
__ LoadP(r4, FieldMemOperand(cp, HeapObject::kMapOffset));
__ CompareRoot(r4, Heap::kWithContextMapRootIndex);
@@ -865,11 +826,11 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
__ mov(r5, Operand(variable->name()));
- __ Push(r5);
+ PushOperand(r5);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
- __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -943,8 +904,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetExpressionPosition(clause);
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
- strength(language_mode())).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -967,7 +928,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Discard the test value and jump to the default if present, otherwise to
// the end of the statement.
__ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
+ DropOperands(1); // Switch value is no longer needed.
if (default_clause == NULL) {
__ b(nested_statement.break_label());
} else {
@@ -998,25 +959,21 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. If the object is null or undefined, skip
- // over the loop. See ECMA-262 version 5, section 12.6.4.
+ // Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r3, ip);
- __ beq(&exit);
- Register null_value = r7;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmp(r3, null_value);
- __ beq(&exit);
+ OperandStackDepthIncrement(ForIn::kElementCount);
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
-
- // Convert the object to a JS object.
+ // If the object is null or undefined, skip over the loop, otherwise convert
+ // it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
Label convert, done_convert;
__ JumpIfSmi(r3, &convert);
__ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
__ bge(&done_convert);
+ __ CompareRoot(r3, Heap::kNullValueRootIndex);
+ __ beq(&exit);
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ beq(&exit);
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
@@ -1024,16 +981,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(r3);
- // Check for proxies.
- Label call_runtime;
- __ CompareObjectType(r3, r4, r4, JS_PROXY_TYPE);
- __ beq(&call_runtime);
-
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- __ CheckEnumCache(null_value, &call_runtime);
+ // Note: Proxies never have an enum cache, so they will always take the
+ // slow path.
+ Label call_runtime;
+ __ CheckEnumCache(&call_runtime);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
@@ -1044,7 +999,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(r3); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast);
+ __ CallRuntime(Runtime::kForInEnumerate);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1083,16 +1038,18 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register r3. Iterate through that.
__ bind(&fixed_array);
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(r4);
__ mov(r5, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- int vector_index = SmiFromSlot(slot)->value();
__ StoreP(
r5, FieldMemOperand(r4, FixedArray::OffsetOfElementAt(vector_index)), r0);
__ LoadSmiLiteral(r4, Smi::FromInt(1)); // Smi(1) indicates slow check
__ Push(r4, r3); // Smi and array
__ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
+ __ Push(r4); // Fixed array length (as smi).
+ PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ LoadSmiLiteral(r3, Smi::FromInt(0));
- __ Push(r4, r3); // Fixed array length (as smi) and initial index.
+ __ Push(r3); // Initial index.
// Generate code for doing the condition check.
__ bind(&loop);
@@ -1122,6 +1079,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(r7, r5);
__ beq(&update_each);
+ // We might get here from TurboFan or Crankshaft when something in the
+ // for-in loop body deopts, and only now notice in fullcodegen that we
+ // can no longer use the enum cache, i.e. we have left fast mode. So we
+ // record this information here, in case we later OSR back into this loop
+ // or reoptimize the whole function without rerunning the loop with the
+ // slow-mode object in fullcodegen (which would result in a deopt loop).
+ __ EmitLoadTypeFeedbackVector(r3);
+ __ mov(r5, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ __ StoreP(
+ r5, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(vector_index)), r0);
+
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
@@ -1161,7 +1129,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ Drop(5);
+ DropOperands(5);
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1402,12 +1370,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// by eval-introduced variables.
EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ bind(&slow);
- __ mov(r4, Operand(var->name()));
- __ Push(cp, r4); // Context and name.
+ __ Push(var->name());
Runtime::FunctionId function_id =
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotNoReferenceError;
+ : Runtime::kLoadLookupSlotInsideTypeof;
__ CallRuntime(function_id);
__ bind(&done);
context()->Plug(r3);
@@ -1432,7 +1399,7 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
__ LoadRoot(r4, Heap::kNullValueRootIndex);
- __ push(r4);
+ PushOperand(r4);
} else {
VisitForStackValue(expression);
if (NeedsHomeObject(expression)) {
@@ -1477,7 +1444,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
- __ push(r3); // Save result on stack
+ PushOperand(r3); // Save result on stack
result_saved = true;
}
switch (property->kind()) {
@@ -1509,7 +1476,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
// Duplicate receiver on stack.
__ LoadP(r3, MemOperand(sp));
- __ push(r3);
+ PushOperand(r3);
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
@@ -1517,19 +1484,19 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
__ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY)); // PropertyAttributes
- __ push(r3);
- __ CallRuntime(Runtime::kSetProperty);
+ PushOperand(r3);
+ CallRuntimeWithOperands(Runtime::kSetProperty);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
case ObjectLiteral::Property::PROTOTYPE:
// Duplicate receiver on stack.
__ LoadP(r3, MemOperand(sp));
- __ push(r3);
+ PushOperand(r3);
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
break;
@@ -1551,13 +1518,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end(); ++it) {
__ LoadP(r3, MemOperand(sp)); // Duplicate receiver.
- __ push(r3);
+ PushOperand(r3);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
__ LoadSmiLiteral(r3, Smi::FromInt(NONE));
- __ push(r3);
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+ PushOperand(r3);
+ CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1574,18 +1541,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Expression* value = property->value();
if (!result_saved) {
- __ push(r3); // Save result on the stack
+ PushOperand(r3); // Save result on the stack
result_saved = true;
}
__ LoadP(r3, MemOperand(sp)); // Duplicate receiver.
- __ push(r3);
+ PushOperand(r3);
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
} else {
@@ -1600,11 +1567,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
case ObjectLiteral::Property::COMPUTED:
if (property->emit_store()) {
- __ LoadSmiLiteral(r3, Smi::FromInt(NONE));
- __ push(r3);
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
@@ -1613,15 +1580,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
- __ mov(r3, Operand(Smi::FromInt(NONE)));
- __ push(r3);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ mov(r3, Operand(Smi::FromInt(NONE)));
- __ push(r3);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1680,13 +1645,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
int array_index = 0;
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- if (subexpr->IsSpread()) break;
+ DCHECK(!subexpr->IsSpread());
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(r3);
+ PushOperand(r3);
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
@@ -1708,21 +1673,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements get appended to the array. Note that the
// number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Pop(r3);
+ PopOperand(r3);
result_saved = false;
}
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- __ Push(r3);
- if (subexpr->IsSpread()) {
- VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement);
- }
+ PushOperand(r3);
+ DCHECK(!subexpr->IsSpread());
+ VisitForStackValue(subexpr);
+ CallRuntimeWithOperands(Runtime::kAppendElement);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1763,11 +1723,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
property->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
property->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
const Register scratch = r4;
__ LoadP(scratch, MemOperand(sp, kPointerSize));
- __ Push(scratch, result_register());
+ PushOperands(scratch, result_register());
}
break;
case KEYED_SUPER_PROPERTY: {
@@ -1778,11 +1738,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
property->obj()->AsSuperPropertyReference()->home_object());
__ mr(scratch, result_register());
VisitForAccumulatorValue(property->key());
- __ Push(scratch, result_register());
+ PushOperands(scratch, result_register());
if (expr->is_compound()) {
const Register scratch1 = r5;
__ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch1, scratch, result_register());
+ PushOperands(scratch1, scratch, result_register());
}
break;
}
@@ -1830,7 +1790,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- __ push(r3); // Left operand goes on the stack.
+ PushOperand(r3); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
AccumulatorValueContext context(this);
@@ -1894,8 +1854,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ b(&suspend);
__ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
__ RecordGeneratorContinuation();
- __ b(&resume);
+ __ pop(r4);
+ __ CmpSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::RETURN), r0);
+ __ bne(&resume);
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
__ bind(&suspend);
VisitForAccumulatorValue(expr->generator_object());
@@ -1914,7 +1882,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
- __ pop(result_register());
+ PopOperand(result_register());
EmitReturnSequence();
__ bind(&resume);
@@ -1923,124 +1891,15 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
}
case Yield::kFinal: {
- VisitForAccumulatorValue(expr->generator_object());
- __ LoadSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
- __ StoreP(r4, FieldMemOperand(result_register(),
- JSGeneratorObject::kContinuationOffset),
- r0);
// Pop value from top-of-stack slot, box result into result register.
+ OperandStackDepthDecrement(1);
EmitCreateIteratorResult(true);
- EmitUnwindBeforeReturn();
- EmitReturnSequence();
+ EmitUnwindAndReturn();
break;
}
- case Yield::kDelegating: {
- VisitForStackValue(expr->generator_object());
-
- // Initial stack layout is as follows:
- // [sp + 1 * kPointerSize] iter
- // [sp + 0 * kPointerSize] g
-
- Label l_catch, l_try, l_suspend, l_continuation, l_resume;
- Label l_next, l_call;
- Register load_receiver = LoadDescriptor::ReceiverRegister();
- Register load_name = LoadDescriptor::NameRegister();
-
- // Initial send value is undefined.
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(&l_next);
-
- // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
- __ bind(&l_catch);
- __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
- __ LoadP(r6, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(load_name, r6, r3); // "throw", iter, except
- __ b(&l_call);
-
- // try { received = %yield result }
- // Shuffle the received result above a try handler and yield it without
- // re-boxing.
- __ bind(&l_try);
- __ pop(r3); // result
- int handler_index = NewHandlerTableEntry();
- EnterTryBlock(handler_index, &l_catch);
- const int try_block_size = TryCatch::kElementCount * kPointerSize;
- __ push(r3); // result
-
- __ b(&l_suspend);
- __ bind(&l_continuation);
- __ RecordGeneratorContinuation();
- __ b(&l_resume);
-
- __ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + try_block_size;
- __ LoadP(r3, MemOperand(sp, generator_object_depth));
- __ push(r3); // g
- __ Push(Smi::FromInt(handler_index)); // handler-index
- DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
- __ LoadSmiLiteral(r4, Smi::FromInt(l_continuation.pos()));
- __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
- r0);
- __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
- __ mr(r4, cp);
- __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ pop(r3); // result
- EmitReturnSequence();
- __ bind(&l_resume); // received in r3
- ExitTryBlock(handler_index);
-
- // receiver = iter; f = 'next'; arg = received;
- __ bind(&l_next);
-
- __ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next"
- __ LoadP(r6, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(load_name, r6, r3); // "next", iter, received
-
- // result = receiver[f](arg);
- __ bind(&l_call);
- __ LoadP(load_receiver, MemOperand(sp, kPointerSize));
- __ LoadP(load_name, MemOperand(sp, 2 * kPointerSize));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
- CallIC(ic, TypeFeedbackId::None());
- __ mr(r4, r3);
- __ StoreP(r4, MemOperand(sp, 2 * kPointerSize));
- SetCallPosition(expr);
- __ li(r3, Operand(1));
- __ Call(
- isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
-
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The function is still on the stack; drop it.
-
- // if (!result.done) goto l_try;
- __ Move(load_receiver, r3);
-
- __ push(load_receiver); // save result
- __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF); // r3=result.done
- Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(bool_ic);
- __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
- __ bne(&l_try);
-
- // result.value
- __ pop(load_receiver); // result
- __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF); // r3=result.value
- context()->DropAndPlug(2, r3); // drop iter and g
- break;
- }
+ case Yield::kDelegating:
+ UNREACHABLE();
}
}
@@ -2054,7 +1913,14 @@ void FullCodeGenerator::EmitGeneratorResume(
// r4 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- __ pop(r4);
+ PopOperand(r4);
+
+ // Store input value into generator object.
+ __ StoreP(result_register(),
+ FieldMemOperand(r4, JSGeneratorObject::kInputOffset), r0);
+ __ mr(r5, result_register());
+ __ RecordWriteField(r4, JSGeneratorObject::kInputOffset, r5, r6,
+ kLRHasBeenSaved, kDontSaveFPRegs);
// Load suspended function and context.
__ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
@@ -2121,6 +1987,7 @@ void FullCodeGenerator::EmitGeneratorResume(
Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
__ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset),
r0);
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
__ Jump(ip);
__ bind(&slow_resume);
}
@@ -2137,6 +2004,7 @@ void FullCodeGenerator::EmitGeneratorResume(
__ bdnz(&operand_loop);
__ bind(&call_resume);
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
DCHECK(!result_register().is(r4));
__ Push(r4, result_register());
__ Push(Smi::FromInt(resume_mode));
@@ -2148,6 +2016,37 @@ void FullCodeGenerator::EmitGeneratorResume(
context()->Plug(result_register());
}
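The generator changes in this hunk and in the yield hunks above establish a small handshake: every resume path pushes the resume mode (a smi) before jumping to the continuation, and the continuation pops it to decide whether to keep executing or to box the input as the final iterator result. Sketched as C++ pseudocode (JSGeneratorObject's NEXT and RETURN modes appear in the diff; the function and Value type are illustrative):

    enum ResumeMode { NEXT, RETURN, THROW };  // as used via JSGeneratorObject
    struct Value {};  // placeholder for the resumed-with value

    void Continuation(ResumeMode mode /* popped from the operand stack */,
                      Value input /* in result_register() */) {
      if (mode == RETURN) {
        // push(input); EmitCreateIteratorResult(/*done=*/true);
        // EmitUnwindAndReturn();
      } else {
        // fall through to &resume and continue the generator body
      }
    }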
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
+ OperandStackDepthIncrement(2);
+ __ Push(reg1, reg2);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+ Register reg3) {
+ OperandStackDepthIncrement(3);
+ __ Push(reg1, reg2, reg3);
+}
+
+void FullCodeGenerator::PushOperands(Register reg1, Register reg2,
+ Register reg3, Register reg4) {
+ OperandStackDepthIncrement(4);
+ __ Push(reg1, reg2, reg3, reg4);
+}
+
+void FullCodeGenerator::PopOperands(Register reg1, Register reg2) {
+ OperandStackDepthDecrement(2);
+ __ Pop(reg1, reg2);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+ if (FLAG_debug_code) {
+ int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+ operand_stack_depth_ * kPointerSize;
+ __ sub(r3, fp, sp);
+ __ cmpi(r3, Operand(expected_diff));
+ __ Assert(eq, kUnexpectedStackDepth);
+ }
+}
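The new Push/Pop/Drop "Operand" wrappers used throughout this patch exist to keep operand_stack_depth_ in sync with the machine stack, and EmitOperandStackDepthCheck asserts the resulting invariant under --debug-code. A host-side model of the bookkeeping (hypothetical names):

    #include <cassert>
    #include <cstdint>

    struct OperandStackModel {
      int depth = 0;  // operand_stack_depth_

      void PushOperand() { ++depth; }  // OperandStackDepthIncrement(1)
      void PopOperand() { --depth; }   // OperandStackDepthDecrement(1)

      // The invariant EmitOperandStackDepthCheck asserts: everything between
      // fp and sp is the fixed frame plus the operands we believe we pushed.
      void Check(intptr_t fp, intptr_t sp, int fixed_frame_size_from_fp,
                 int pointer_size) const {
        assert(fp - sp == fixed_frame_size_from_fp +
                              static_cast<intptr_t>(depth) * pointer_size);
      }
    };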
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
@@ -2181,37 +2080,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
__ mov(LoadDescriptor::SlotRegister(),
Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object.
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- DCHECK(prop->IsSuperAccess());
-
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object, key.
- SetExpressionPosition(prop);
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -2227,7 +2096,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// Get the arguments.
Register left = r4;
Register right = r3;
- __ pop(left);
+ PopOperand(left);
// Perform combined smi check on both operands.
__ orx(scratch1, left, right);
@@ -2236,8 +2105,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ b(&done);
@@ -2342,27 +2210,17 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- // Constructor is in r3.
- DCHECK(lit != NULL);
- __ push(r3);
-
- // No access check is needed here since the constructor is created by the
- // class literal.
- Register scratch = r4;
- __ LoadP(scratch,
- FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
- __ push(scratch);
-
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
+ Register scratch = r4;
if (property->is_static()) {
__ LoadP(scratch, MemOperand(sp, kPointerSize)); // constructor
} else {
__ LoadP(scratch, MemOperand(sp, 0)); // prototype
}
- __ push(scratch);
+ PushOperand(scratch);
EmitPropertyKey(property, lit->GetIdForProperty(i));
// The static prototype property is read only. We handle the non computed
@@ -2385,36 +2243,31 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
case ObjectLiteral::Property::GETTER:
- __ mov(r3, Operand(Smi::FromInt(DONT_ENUM)));
- __ push(r3);
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ mov(r3, Operand(Smi::FromInt(DONT_ENUM)));
- __ push(r3);
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
UNREACHABLE();
}
}
-
- // Set both the prototype and constructor to have fast properties, and also
- // freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
- __ pop(r4);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ PopOperand(r4);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2437,10 +2290,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_PROPERTY: {
- __ push(r3); // Preserve value.
+ PushOperand(r3); // Preserve value.
VisitForAccumulatorValue(prop->obj());
__ Move(StoreDescriptor::ReceiverRegister(), r3);
- __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
EmitLoadStoreICSlot(slot);
@@ -2448,7 +2301,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_SUPER_PROPERTY: {
- __ Push(r3);
+ PushOperand(r3);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2465,7 +2318,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_SUPER_PROPERTY: {
- __ Push(r3);
+ PushOperand(r3);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2485,12 +2338,12 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_PROPERTY: {
- __ push(r3); // Preserve value.
+ PushOperand(r3); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ Move(StoreDescriptor::NameRegister(), r3);
- __ Pop(StoreDescriptor::ValueRegister(),
- StoreDescriptor::ReceiverRegister());
+ PopOperands(StoreDescriptor::ValueRegister(),
+ StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2573,17 +2426,17 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
(var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
- __ push(r3); // Value.
- __ mov(r4, Operand(var->name()));
- __ mov(r3, Operand(Smi::FromInt(language_mode())));
- __ Push(cp, r4, r3); // Context, name, language mode.
- __ CallRuntime(Runtime::kStoreLookupSlot);
+ __ Push(var->name());
+ __ Push(r3);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreLookupSlot_Strict
+ : Runtime::kStoreLookupSlot_Sloppy);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
MemOperand location = VarOperand(var, r4);
- if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ LoadP(r5, location);
__ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
@@ -2628,7 +2481,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
@@ -2645,10 +2498,11 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
- __ Push(key->value());
- __ Push(r3);
- __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy));
+ PushOperand(key->value());
+ PushOperand(r3);
+ CallRuntimeWithOperands((is_strict(language_mode())
+ ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy));
}
@@ -2658,16 +2512,17 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// stack : receiver ('this'), home_object, key
DCHECK(prop != NULL);
- __ Push(r3);
- __ CallRuntime((is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy));
+ PushOperand(r3);
+ CallRuntimeWithOperands((is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy));
}
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+ PopOperands(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
DCHECK(StoreDescriptor::ValueRegister().is(r3));
Handle<Code> ic =
@@ -2702,7 +2557,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ Move(LoadDescriptor::NameRegister(), r3);
- __ pop(LoadDescriptor::ReceiverRegister());
+ PopOperand(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
@@ -2738,7 +2593,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ push(r0);
+ PushOperand(r0);
convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
@@ -2749,7 +2604,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
__ LoadP(r0, MemOperand(sp, 0));
- __ push(r0);
+ PushOperand(r0);
__ StoreP(r3, MemOperand(sp, kPointerSize));
convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
@@ -2773,9 +2628,8 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
VisitForAccumulatorValue(super_ref->home_object());
__ mr(scratch, r3);
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(scratch, r3, r3, scratch);
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
+ PushOperands(scratch, r3, r3, scratch);
+ PushOperand(key->value());
// Stack here:
// - home_object
@@ -2783,8 +2637,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -2812,7 +2665,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
// Push the target function under the receiver.
__ LoadP(ip, MemOperand(sp, 0));
- __ push(ip);
+ PushOperand(ip);
__ StoreP(r3, MemOperand(sp, kPointerSize));
EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -2832,9 +2685,8 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
VisitForAccumulatorValue(super_ref->home_object());
__ mr(scratch, r3);
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(scratch, r3, r3, scratch);
+ PushOperands(scratch, r3, r3, scratch);
VisitForStackValue(prop->key());
- __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2842,8 +2694,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -2865,12 +2716,23 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
SetCallPosition(expr);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+ if (expr->tail_call_mode() == TailCallMode::kAllow) {
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceTailCall);
+ }
+ // Update profiling counters before the tail call since we will
+ // not return to this function.
+ EmitProfilingCounterHandlingForReturnSequence(true);
+ }
+ Handle<Code> ic =
+ CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+ .code();
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
CallIC(ic);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
// Restore context register.
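
The recurring edit in this hunk and its neighbors is mechanical: raw __ push/__ pop emissions become PushOperand/PopOperand, runtime calls that consume stack values become CallRuntimeWithOperands, and stubs or builtins that pop their own arguments are followed by an explicit OperandStackDepthDecrement(arg_count + 1). The wrappers emit the same machine code as before but additionally keep a static count of operand-stack values so full-codegen can check the expected depth. A minimal sketch of that bookkeeping pattern; all names other than PushOperand/PopOperand/OperandStackDepth* are invented for illustration:

    #include <cassert>
    #include <cstdio>

    // Sketch only: the invariant of interest is
    // "tracked depth == values actually on the stack".
    class Emitter {
     public:
      void PushOperand(int value) {
        machine_stack_[machine_depth_++] = value;  // stands in for __ push(reg)
        OperandStackDepthIncrement(1);
      }
      int PopOperand() {
        int value = machine_stack_[--machine_depth_];  // stands in for __ pop(reg)
        OperandStackDepthDecrement(1);
        return value;
      }
      // A stub or builtin that consumes its arguments plus the receiver,
      // mirroring OperandStackDepthDecrement(arg_count + 1) after CallIC above.
      void CallConsuming(int arg_count) {
        machine_depth_ -= arg_count + 1;  // the callee popped them
        OperandStackDepthDecrement(arg_count + 1);
      }
      void AssertDepth(int expected) const {
        assert(tracked_depth_ == expected);
        assert(tracked_depth_ == machine_depth_);
      }

     private:
      void OperandStackDepthIncrement(int n) { tracked_depth_ += n; }
      void OperandStackDepthDecrement(int n) { tracked_depth_ -= n; }
      int machine_stack_[64] = {0};
      int machine_depth_ = 0;
      int tracked_depth_ = 0;
    };

    int main() {
      Emitter e;
      e.PushOperand(1);    // receiver
      e.PushOperand(2);    // argument
      e.CallConsuming(1);  // callee pops argument + receiver
      e.AssertDepth(0);
      std::printf("depth invariant holds\n");
    }
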
@@ -2915,11 +2777,9 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in r3) and
// the object holding it (returned in r4).
- DCHECK(!context_register().is(r5));
- __ mov(r5, Operand(callee->name()));
- __ Push(context_register(), r5);
- __ CallRuntime(Runtime::kLoadLookupSlot);
- __ Push(r3, r4); // Function, receiver.
+ __ Push(callee->name());
+ __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+ PushOperands(r3, r4); // Function, receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the function
@@ -2941,7 +2801,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
VisitForStackValue(callee);
// refEnv.WithBaseObject()
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ push(r5); // Reserved receiver slot.
+ PushOperand(r5); // Reserved receiver slot.
}
}
@@ -2975,7 +2835,10 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
SetCallPosition(expr);
__ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ mov(r3, Operand(arg_count));
- __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ expr->tail_call_mode()),
+ RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3016,6 +2879,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -3036,7 +2900,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
FieldMemOperand(result_register(), HeapObject::kMapOffset));
__ LoadP(result_register(),
FieldMemOperand(result_register(), Map::kPrototypeOffset));
- __ Push(result_register());
+ PushOperand(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3058,6 +2922,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ LoadP(r4, MemOperand(sp, arg_count * kPointerSize));
__ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -3110,87 +2975,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r4, r4, SIMD128_VALUE_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r4, r5, FIRST_FUNCTION_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ge, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ CheckMap(r3, r4, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
-#if V8_TARGET_ARCH_PPC64
- __ LoadP(r4, FieldMemOperand(r3, HeapNumber::kValueOffset));
- __ li(r5, Operand(1));
- __ rotrdi(r5, r5, 1); // r5 = 0x80000000_00000000
- __ cmp(r4, r5);
-#else
- __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset));
- __ lwz(r4, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
- Label skip;
- __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
- __ cmp(r5, r0);
- __ bne(&skip);
- __ cmpi(r4, Operand::Zero());
- __ bind(&skip);
-#endif
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3279,66 +3063,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ pop(r4);
- __ cmp(r3, r4);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in r4 and the formal
- // parameter count in r3.
- VisitForAccumulatorValue(args->at(0));
- __ mr(r4, r3);
- __ LoadSmiLiteral(r3, Smi::FromInt(info_->scope()->num_parameters()));
- ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(r3);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
- Label exit;
- // Get the number of formal parameters.
- __ LoadSmiLiteral(r3, Smi::FromInt(info_->scope()->num_parameters()));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ bne(&exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3409,28 +3133,6 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = nullptr;
- Label* if_false = nullptr;
- Label* fall_through = nullptr;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r4, r4, JS_DATE_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3442,7 +3144,7 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(index, value);
+ PopOperands(index, value);
if (FLAG_debug_code) {
__ TestIfSmi(value, r0);
@@ -3474,7 +3176,7 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(index, value);
+ PopOperands(index, value);
if (FLAG_debug_code) {
__ TestIfSmi(value, r0);
@@ -3495,34 +3197,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(r4); // r3 = value. r4 = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(r4, &done);
-
- // If the object is not a value type, return the value.
- __ CompareObjectType(r4, r5, r5, JS_VALUE_TYPE);
- __ bne(&done);
-
- // Store the value.
- __ StoreP(r3, FieldMemOperand(r4, JSValue::kValueOffset), r0);
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ mr(r5, r3);
- __ RecordWriteField(r4, JSValue::kValueOffset, r5, r6, kLRHasBeenSaved,
- kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3540,26 +3214,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into r3 and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- Label convert, done_convert;
- __ JumpIfSmi(r3, &convert);
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ CompareObjectType(r3, r4, r4, LAST_NAME_TYPE);
- __ ble(&done_convert);
- __ bind(&convert);
- __ Push(r3);
- __ CallRuntime(Runtime::kToName);
- __ bind(&done_convert);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3588,7 +3242,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Register index = r3;
Register result = r6;
- __ pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3630,7 +3284,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
Register scratch = r6;
Register result = r3;
- __ pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3675,6 +3329,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
// Call the target.
__ mov(r3, Operand(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(argc + 1);
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -3729,261 +3384,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator, non_trivial_array,
- not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
- one_char_separator_loop_entry, long_separator_loop;
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- VisitForStackValue(args->at(1));
- VisitForAccumulatorValue(args->at(0));
-
- // All aliases of the same register have disjoint lifetimes.
- Register array = r3;
- Register elements = no_reg; // Will be r3.
- Register result = no_reg; // Will be r3.
- Register separator = r4;
- Register array_length = r5;
- Register result_pos = no_reg; // Will be r5
- Register string_length = r6;
- Register string = r7;
- Register element = r8;
- Register elements_end = r9;
- Register scratch1 = r10;
- Register scratch2 = r11;
-
- // Separator operand is on the stack.
- __ pop(separator);
-
- // Check that the array is a JSArray.
- __ JumpIfSmi(array, &bailout);
- __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
- __ bne(&bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch1, scratch2, &bailout);
-
- // If the array has length zero, return the empty string.
- __ LoadP(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length);
- __ cmpi(array_length, Operand::Zero());
- __ bne(&non_trivial_array);
- __ LoadRoot(r3, Heap::kempty_stringRootIndex);
- __ b(&done);
-
- __ bind(&non_trivial_array);
-
- // Get the FixedArray containing array's elements.
- elements = array;
- __ LoadP(elements, FieldMemOperand(array, JSArray::kElementsOffset));
- array = no_reg; // End of array's live range.
-
- // Check that all array elements are sequential one-byte strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ li(string_length, Operand::Zero());
- __ addi(element, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ShiftLeftImm(elements_end, array_length, Operand(kPointerSizeLog2));
- __ add(elements_end, element, elements_end);
- // Loop condition: while (element < elements_end).
- // Live values in registers:
- // elements: Fixed array of strings.
- // array_length: Length of the fixed array of strings (not smi)
- // separator: Separator string
- // string_length: Accumulated sum of string lengths (smi).
- // element: Current array element.
- // elements_end: Array end.
- if (generate_debug_code_) {
- __ cmpi(array_length, Operand::Zero());
- __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
- }
- __ bind(&loop);
- __ LoadP(string, MemOperand(element));
- __ addi(element, element, Operand(kPointerSize));
- __ JumpIfSmi(string, &bailout);
- __ LoadP(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
- __ LoadP(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
-
- __ AddAndCheckForOverflow(string_length, string_length, scratch1, scratch2,
- r0);
- __ BranchOnOverflow(&bailout);
-
- __ cmp(element, elements_end);
- __ blt(&loop);
-
- // If array_length is 1, return elements[0], a string.
- __ cmpi(array_length, Operand(1));
- __ bne(&not_size_one_array);
- __ LoadP(r3, FieldMemOperand(elements, FixedArray::kHeaderSize));
- __ b(&done);
-
- __ bind(&not_size_one_array);
-
- // Live values in registers:
- // separator: Separator string
- // array_length: Length of the array.
- // string_length: Sum of string lengths (smi).
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat one-byte string.
- __ JumpIfSmi(separator, &bailout);
- __ LoadP(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
-
- // Add (separator length times array_length) - separator length to the
- // string_length to get the length of the result string.
- __ LoadP(scratch1,
- FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ sub(string_length, string_length, scratch1);
-#if V8_TARGET_ARCH_PPC64
- __ SmiUntag(scratch1, scratch1);
- __ Mul(scratch2, array_length, scratch1);
- // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
- // zero.
- __ ShiftRightImm(ip, scratch2, Operand(31), SetRC);
- __ bne(&bailout, cr0);
- __ SmiTag(scratch2, scratch2);
-#else
- // array_length is not smi but the other values are, so the result is a smi
- __ mullw(scratch2, array_length, scratch1);
- __ mulhw(ip, array_length, scratch1);
- // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
- // zero.
- __ cmpi(ip, Operand::Zero());
- __ bne(&bailout);
- __ cmpwi(scratch2, Operand::Zero());
- __ blt(&bailout);
-#endif
-
- __ AddAndCheckForOverflow(string_length, string_length, scratch2, scratch1,
- r0);
- __ BranchOnOverflow(&bailout);
- __ SmiUntag(string_length);
-
- // Bailout for large object allocations.
- __ Cmpi(string_length, Operand(Page::kMaxRegularHeapObjectSize), r0);
- __ bgt(&bailout);
-
- // Get first element in the array to free up the elements register to be used
- // for the result.
- __ addi(element, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- result = elements; // End of live range for elements.
- elements = no_reg;
- // Live values in registers:
- // element: First array element
- // separator: Separator string
- // string_length: Length of result string (not smi)
- // array_length: Length of the array.
- __ AllocateOneByteString(result, string_length, scratch1, scratch2,
- elements_end, &bailout);
- // Prepare for looping. Set up elements_end to end of the array. Set
- // result_pos to the position of the result where to write the first
- // character.
- __ ShiftLeftImm(elements_end, array_length, Operand(kPointerSizeLog2));
- __ add(elements_end, element, elements_end);
- result_pos = array_length; // End of live range for array_length.
- array_length = no_reg;
- __ addi(result_pos, result,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- // Check the length of the separator.
- __ LoadP(scratch1,
- FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ CmpSmiLiteral(scratch1, Smi::FromInt(1), r0);
- __ beq(&one_char_separator);
- __ bgt(&long_separator);
-
- // Empty separator case
- __ bind(&empty_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
-
- // Copy next array element to the result.
- __ LoadP(string, MemOperand(element));
- __ addi(element, element, Operand(kPointerSize));
- __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ addi(string, string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
- __ cmp(element, elements_end);
- __ blt(&empty_separator_loop); // End while (element < elements_end).
- DCHECK(result.is(r3));
- __ b(&done);
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Replace separator with its one-byte character value.
- __ lbz(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ b(&one_char_separator_loop_entry);
-
- __ bind(&one_char_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Single separator one-byte char (in lower byte).
-
- // Copy the separator character to the result.
- __ stb(separator, MemOperand(result_pos));
- __ addi(result_pos, result_pos, Operand(1));
-
- // Copy next array element to the result.
- __ bind(&one_char_separator_loop_entry);
- __ LoadP(string, MemOperand(element));
- __ addi(element, element, Operand(kPointerSize));
- __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ addi(string, string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
- __ cmpl(element, elements_end);
- __ blt(&one_char_separator_loop); // End while (element < elements_end).
- DCHECK(result.is(r3));
- __ b(&done);
-
- // Long separator case (separator is more than one character). Entry is at the
- // label long_separator below.
- __ bind(&long_separator_loop);
- // Live values in registers:
- // result_pos: the position to which we are currently copying characters.
- // element: Current array element.
- // elements_end: Array end.
- // separator: Separator string.
-
- // Copy the separator to the result.
- __ LoadP(string_length, FieldMemOperand(separator, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ addi(string, separator,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
-
- __ bind(&long_separator);
- __ LoadP(string, MemOperand(element));
- __ addi(element, element, Operand(kPointerSize));
- __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
- __ SmiUntag(string_length);
- __ addi(string, string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch1);
- __ cmpl(element, elements_end);
- __ blt(&long_separator_loop); // End while (element < elements_end).
- DCHECK(result.is(r3));
- __ b(&done);
-
- __ bind(&bailout);
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ bind(&done);
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -4016,7 +3416,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ b(&done);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject);
+ CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(r3);
@@ -4026,7 +3426,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push undefined as the receiver.
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ push(r3);
+ PushOperand(r3);
__ LoadNativeContextSlot(expr->context_index(), r3);
}
@@ -4041,6 +3441,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ mov(r3, Operand(arg_count));
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
}
@@ -4054,7 +3455,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Push the target function under the receiver.
__ LoadP(ip, MemOperand(sp, 0));
- __ push(ip);
+ PushOperand(ip);
__ StoreP(r3, MemOperand(sp, kPointerSize));
// Push the arguments ("left-to-right").
@@ -4090,6 +3491,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the C runtime function.
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
+ OperandStackDepthDecrement(arg_count);
context()->Plug(r3);
}
}
@@ -4107,9 +3509,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(r3);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4130,9 +3532,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- DCHECK(!context_register().is(r5));
- __ mov(r5, Operand(var->name()));
- __ Push(context_register(), r5);
+ __ Push(var->name());
__ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(r3);
}
@@ -4173,6 +3573,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Label materialize_true, materialize_false, done;
VisitForControl(expr->expression(), &materialize_false,
&materialize_true, &materialize_true);
+ if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
__ LoadRoot(r3, Heap::kTrueValueRootIndex);
@@ -4223,7 +3624,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
__ LoadSmiLiteral(ip, Smi::FromInt(0));
- __ push(ip);
+ PushOperand(ip);
}
switch (assign_type) {
case NAMED_PROPERTY: {
@@ -4238,10 +3639,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
+ PushOperand(result_register());
const Register scratch = r4;
__ LoadP(scratch, MemOperand(sp, kPointerSize));
- __ Push(scratch, result_register());
+ PushOperands(scratch, result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
@@ -4254,9 +3655,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
const Register scratch1 = r5;
__ mr(scratch, result_register());
VisitForAccumulatorValue(prop->key());
- __ Push(scratch, result_register());
+ PushOperands(scratch, result_register());
__ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch1, scratch, result_register());
+ PushOperands(scratch1, scratch, result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
@@ -4343,7 +3744,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(r3);
+ PushOperand(r3);
break;
case NAMED_PROPERTY:
__ StoreP(r3, MemOperand(sp, kPointerSize));
@@ -4367,8 +3768,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetExpressionPosition(expr);
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD,
- strength(language_mode())).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), Token::ADD).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4402,7 +3802,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4438,8 +3838,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ Pop(StoreDescriptor::ReceiverRegister(),
- StoreDescriptor::NameRegister());
+ PopOperands(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
@@ -4495,8 +3895,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ CompareRoot(r3, Heap::kFalseValueRootIndex);
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->undefined_string())) {
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ beq(if_true);
+ __ CompareRoot(r3, Heap::kNullValueRootIndex);
+ __ beq(if_false);
__ JumpIfSmi(r3, if_false);
// Check for undetectable objects => true.
__ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
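
The flipped comparison above changes how typeof x == "undefined" is compiled: rather than answering true for the undefined oddball directly, the code now answers false for null and lets the undetectable-map check that follows handle undefined and undetectable objects together. A sketch of the resulting branch order, under the assumption that the undefined oddball and document.all-style objects carry an undetectable map bit while null must be excluded because typeof null is "object":

    #include <cassert>

    // Hypothetical value model, only for illustrating the branch order; the
    // names (TaggedValue, is_undetectable) are not V8's.
    struct TaggedValue {
      bool is_null;
      bool is_smi;
      bool is_undetectable;  // map bit: undefined oddball, document.all, ...
    };

    // Mirrors the emitted compare/branch sequence for typeof v == "undefined".
    bool TypeofIsUndefined(const TaggedValue& v) {
      if (v.is_null) return false;  // CompareRoot(kNullValueRootIndex) -> if_false
      if (v.is_smi) return false;   // JumpIfSmi -> if_false
      return v.is_undetectable;     // undetectable map bit -> if_true/if_false
    }

    int main() {
      assert(!TypeofIsUndefined({true, false, false}));  // null is "object"
      assert(!TypeofIsUndefined({false, true, false}));  // smi is "number"
      assert(TypeofIsUndefined({false, false, true}));   // undefined et al.
    }
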
@@ -4563,7 +3963,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty);
+ CallRuntimeWithOperands(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(r3, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
@@ -4571,7 +3971,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
- __ pop(r4);
+ PopOperand(r4);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4583,7 +3983,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cond = CompareIC::ComputeCondition(op);
- __ pop(r4);
+ PopOperand(r4);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
@@ -4596,8 +3996,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&slow_case);
}
- Handle<Code> ic = CodeFactory::CompareIC(
- isolate(), op, strength(language_mode())).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4681,7 +4080,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
DCHECK(closure_scope->is_function_scope());
__ LoadP(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
- __ push(ip);
+ PushOperand(ip);
}
@@ -4690,23 +4089,12 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
void FullCodeGenerator::EnterFinallyBlock() {
DCHECK(!result_register().is(r4));
- // Store result register while executing finally block.
- __ push(result_register());
- // Cook return address in link register to stack (smi encoded Code* delta)
- __ mflr(r4);
- __ mov(ip, Operand(masm_->CodeObject()));
- __ sub(r4, r4, ip);
- __ SmiTag(r4);
-
- // Store result register while executing finally block.
- __ push(r4);
-
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(ip, Operand(pending_message_obj));
__ LoadP(r4, MemOperand(ip));
- __ push(r4);
+ PushOperand(r4);
ClearPendingMessage();
}
@@ -4715,22 +4103,11 @@ void FullCodeGenerator::EnterFinallyBlock() {
void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(r4));
// Restore pending message from stack.
- __ pop(r4);
+ PopOperand(r4);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(ip, Operand(pending_message_obj));
__ StoreP(r4, MemOperand(ip));
-
- // Restore result register from stack.
- __ pop(r4);
-
- // Uncook return address and return.
- __ pop(result_register());
- __ SmiUntag(r4);
- __ mov(ip, Operand(masm_->CodeObject()));
- __ add(ip, ip, r4);
- __ mtctr(ip);
- __ bctr();
}
@@ -4750,6 +4127,32 @@ void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
Operand(SmiFromSlot(slot)));
}
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+ DCHECK(!result_register().is(r4));
+ // Restore the accumulator (r3) and token (r4).
+ __ Pop(r4, result_register());
+ for (DeferredCommand cmd : commands_) {
+ Label skip;
+ __ CmpSmiLiteral(r4, Smi::FromInt(cmd.token), r0);
+ __ bne(&skip);
+ switch (cmd.command) {
+ case kReturn:
+ codegen_->EmitUnwindAndReturn();
+ break;
+ case kThrow:
+ __ Push(result_register());
+ __ CallRuntime(Runtime::kReThrow);
+ break;
+ case kContinue:
+ codegen_->EmitContinue(cmd.target);
+ break;
+ case kBreak:
+ codegen_->EmitBreak(cmd.target);
+ break;
+ }
+ __ bind(&skip);
+ }
+}
#undef __
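
The new DeferredCommands::EmitCommands above is the dispatch half of the reworked try-finally protocol: each way control can leave the protected region (return, throw, break, continue) gets a Smi token pushed alongside the accumulator, and once the finally body has run, the tokens are compared one by one to replay the pending action. A plain C++ sketch of that token dispatch; the emitted code is an unrolled version of this loop, with CmpSmiLiteral/bne in place of the equality test:

    #include <cstdio>
    #include <vector>

    // One record per way control can leave the protected region.
    enum Command { kReturn, kThrow, kContinue, kBreak };
    struct DeferredCommand {
      Command command;
      int token;  // the Smi literal compared against at runtime
    };

    void DispatchAfterFinally(const std::vector<DeferredCommand>& commands,
                              int pending_token) {
      for (const DeferredCommand& cmd : commands) {
        if (cmd.token != pending_token) continue;  // __ bne(&skip)
        switch (cmd.command) {
          case kReturn:   std::printf("unwind and return\n"); return;
          case kThrow:    std::printf("re-throw accumulator\n"); return;
          case kContinue: std::printf("jump to continue target\n"); return;
          case kBreak:    std::printf("jump to break target\n"); return;
        }
      }
    }

    int main() {
      std::vector<DeferredCommand> cmds = {{kReturn, 0}, {kBreak, 1}};
      DispatchAfterFinally(cmds, 1);  // prints the break case
    }
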
diff --git a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
index 615eb67ba6..910b2cf9f0 100644
--- a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
+++ b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
@@ -16,8 +16,7 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
-
+#define __ ACCESS_MASM(masm())
class JumpPatchSite BASE_EMBEDDED {
public:
@@ -67,6 +66,7 @@ class JumpPatchSite BASE_EMBEDDED {
__ j(cc, target, near_jump);
}
+ MacroAssembler* masm() { return masm_; }
MacroAssembler* masm_;
Label patch_site_;
#ifdef DEBUG
@@ -98,13 +98,6 @@ void FullCodeGenerator::Generate() {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
__ movp(rcx, args.GetReceiverOperand());
@@ -125,6 +118,7 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
+ OperandStackDepthIncrement(locals_count);
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
} else if (locals_count > 1) {
@@ -254,21 +248,12 @@ void FullCodeGenerator::Generate() {
Variable* rest_param = scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ Move(RestParamAccessDescriptor::parameter_count(),
- Smi::FromInt(num_parameters));
- __ leap(RestParamAccessDescriptor::parameter_pointer(),
- Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
- __ Move(RestParamAccessDescriptor::rest_parameter_index(),
- Smi::FromInt(rest_index));
- function_in_register = false;
-
- RestParamAccessStub stub(isolate());
+ if (!function_in_register) {
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ FastNewRestParameterStub stub(isolate());
__ CallStub(&stub);
-
+ function_in_register = false;
SetVar(rest_param, rax, rbx, rdx);
}
@@ -278,27 +263,19 @@ void FullCodeGenerator::Generate() {
// Arguments object must be allocated after the context object, in
// case the "arguments" or ".arguments" variables are in the context.
Comment cmnt(masm_, "[ Allocate arguments object");
- DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
if (!function_in_register) {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
- // The receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ Move(ArgumentsAccessNewDescriptor::parameter_count(),
- Smi::FromInt(num_parameters));
- __ leap(ArgumentsAccessNewDescriptor::parameter_pointer(),
- Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
-
- // Arguments to ArgumentsAccessStub:
- // function, parameter pointer, parameter count.
- // The stub will rewrite parameter pointer and parameter count if the
- // previous stack frame was an arguments adapter frame.
- bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
- ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
- is_unmapped, literal()->has_duplicate_parameters());
- ArgumentsAccessStub stub(isolate(), type);
- __ CallStub(&stub);
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
+ FastNewStrictArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ } else if (literal()->has_duplicate_parameters()) {
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+ } else {
+ FastNewSloppyArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ }
SetVar(arguments, rax, rbx, rdx);
}
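
The arguments-object hunk replaces the parameterized ArgumentsAccessStub with a three-way choice made at compile time: strict-mode functions or non-simple parameter lists use FastNewStrictArgumentsStub, duplicate parameter names fall back to the Runtime::kNewSloppyArguments_Generic call, and everything else takes FastNewSloppyArgumentsStub. A sketch of that selection, with a hypothetical enum standing in for the three emitted call sites:

    #include <cassert>

    // Hypothetical labels for the three code paths chosen in Generate().
    enum ArgumentsPath { kStrictStub, kGenericRuntime, kSloppyStub };

    ArgumentsPath ChooseArgumentsPath(bool is_strict, bool has_simple_parameters,
                                      bool has_duplicate_parameters) {
      if (is_strict || !has_simple_parameters) return kStrictStub;
      if (has_duplicate_parameters) return kGenericRuntime;  // slow, but rare
      return kSloppyStub;  // fast path: mapped sloppy arguments
    }

    int main() {
      assert(ChooseArgumentsPath(true, true, false) == kStrictStub);
      assert(ChooseArgumentsPath(false, true, true) == kGenericRuntime);
      assert(ChooseArgumentsPath(false, true, false) == kSloppyStub);
    }
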
@@ -405,6 +382,30 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
}
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+ bool is_tail_call) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ j(positive, &ok, Label::kNear);
+ // Don't need to save result register if we are going to do a tail call.
+ if (!is_tail_call) {
+ __ Push(rax);
+ }
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+ if (!is_tail_call) {
+ __ Pop(rax);
+ }
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+}
void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
@@ -416,24 +417,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ Push(rax);
__ CallRuntime(Runtime::kTraceExit);
}
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ j(positive, &ok, Label::kNear);
- __ Push(rax);
- __ call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- __ Pop(rax);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ EmitProfilingCounterHandlingForReturnSequence(false);
SetReturnPosition(literal());
__ leave();
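
EmitProfilingCounterHandlingForReturnSequence factors the return-sequence counter bookkeeping out of EmitReturnSequence so the new tail-call path in EmitCall can reuse it, skipping the save/restore of rax when no return follows. The weight formula is unchanged: FLAG_interrupt_budget / FLAG_self_opt_count for functions that should self-optimize, otherwise min(kMaxBackEdgeWeight, max(1, pc_offset / kCodeSizeMultiplier)). A worked sketch of that arithmetic with illustrative constants (the real values come from V8 flags and per-architecture tuning):

    #include <algorithm>
    #include <cstdio>

    // Assumed illustrative constants, not V8's actual tuning.
    const int kMaxBackEdgeWeight = 127;
    const int kCodeSizeMultiplier = 200;

    int ReturnSequenceWeight(bool should_self_optimize, int interrupt_budget,
                             int self_opt_count, int pc_offset) {
      if (should_self_optimize) return interrupt_budget / self_opt_count;
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, pc_offset / kCodeSizeMultiplier));
    }

    int main() {
      // A 5000-byte function "pays" 25 ticks on return; a tiny one pays 1.
      std::printf("%d\n", ReturnSequenceWeight(false, 0, 1, 5000));  // 25
      std::printf("%d\n", ReturnSequenceWeight(false, 0, 1, 100));   // 1
    }
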
@@ -448,7 +432,7 @@ void FullCodeGenerator::EmitReturnSequence() {
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand operand = codegen()->VarOperand(var, result_register());
- __ Push(operand);
+ codegen()->PushOperand(operand);
}
@@ -464,6 +448,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Heap::RootListIndex index) const {
+ codegen()->OperandStackDepthIncrement(1);
__ PushRoot(index);
}
@@ -501,6 +486,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ codegen()->OperandStackDepthIncrement(1);
if (lit->IsSmi()) {
__ SafePush(Smi::cast(*lit));
} else {
@@ -514,7 +500,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ jmp(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -539,41 +525,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
}
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
DCHECK(count > 0);
- if (count > 1) __ Drop(count - 1);
+ if (count > 1) codegen()->DropOperands(count - 1);
__ movp(Operand(rsp, 0), reg);
}
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
DCHECK(materialize_true == materialize_false);
@@ -597,6 +556,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
+ codegen()->OperandStackDepthIncrement(1);
Label done;
__ bind(materialize_true);
__ Push(isolate()->factory()->true_value());
@@ -622,6 +582,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ codegen()->OperandStackDepthIncrement(1);
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
__ PushRoot(value_root_index);
@@ -743,7 +704,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current context.
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
+ if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
__ movp(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
__ CompareRoot(rbx, Heap::kWithContextMapRootIndex);
@@ -859,10 +820,10 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
- __ Push(variable->name());
+ PushOperand(variable->name());
VisitForStackValue(declaration->fun());
- __ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -936,8 +897,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetExpressionPosition(clause);
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
- strength(language_mode())).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -959,7 +920,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Discard the test value and jump to the default if present, otherwise to
// the end of the statement.
__ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
+ DropOperands(1); // Switch value is no longer needed.
if (default_clause == NULL) {
__ jmp(nested_statement.break_label());
} else {
@@ -990,24 +951,21 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. If the object is null or undefined, skip
- // over the loop. See ECMA-262 version 5, section 12.6.4.
+ // Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(equal, &exit);
- Register null_value = rdi;
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpp(rax, null_value);
- __ j(equal, &exit);
-
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+ OperandStackDepthIncrement(ForIn::kElementCount);
- // Convert the object to a JS object.
+  // If the object is null or undefined, skip over the loop; otherwise,
+  // convert it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
Label convert, done_convert;
__ JumpIfSmi(rax, &convert, Label::kNear);
__ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
__ j(above_equal, &done_convert, Label::kNear);
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, &exit);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &exit);
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
@@ -1015,16 +973,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ Push(rax);
- // Check for proxies.
- Label call_runtime;
- __ CmpObjectType(rax, JS_PROXY_TYPE, rcx);
- __ j(equal, &call_runtime);
-
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
- __ CheckEnumCache(null_value, &call_runtime);
+  // Note: Proxies never have an enum cache, so they will always take the
+  // slow path.
+ Label call_runtime;
+ __ CheckEnumCache(&call_runtime);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
@@ -1035,7 +991,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ Push(rax); // Duplicate the enumerable object on the stack.
- __ CallRuntime(Runtime::kGetPropertyNamesFast);
+ __ CallRuntime(Runtime::kForInEnumerate);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
@@ -1074,8 +1030,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&fixed_array);
  // No need for a write barrier; we are storing a Smi in the feedback vector.
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(rbx);
- int vector_index = SmiFromSlot(slot)->value();
__ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(vector_index)),
TypeFeedbackVector::MegamorphicSentinel(isolate()));
__ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
@@ -1083,6 +1039,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(rax); // Array
__ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ Push(rax); // Fixed array length (as smi).
+ PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ Push(Smi::FromInt(0)); // Initial index.
// Generate code for doing the condition check.
@@ -1112,6 +1069,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmpp(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
+  // We might get here from TurboFan or Crankshaft when something in the
+  // for-in loop body deopts, and only now notice in fullcodegen that we
+  // can no longer use the enum cache, i.e. we have left fast mode. Record
+  // this information here, in case we later OSR back into this loop or
+  // reoptimize the whole function without rerunning the loop with the
+  // slow-mode object in fullcodegen (which would result in a deopt loop).
+ __ EmitLoadTypeFeedbackVector(rdx);
+ __ Move(FieldOperand(rdx, FixedArray::OffsetOfElementAt(vector_index)),
+ TypeFeedbackVector::MegamorphicSentinel(isolate()));
+
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
@@ -1149,6 +1116,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
__ addp(rsp, Immediate(5 * kPointerSize));
+ OperandStackDepthDecrement(ForIn::kElementCount);
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
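
Two related for-in changes land in these hunks: the null/undefined early exits move behind the JS-receiver type check so primitives are converted by ToObjectStub on the same path, and the slow (fixed-array) path now stores the megamorphic sentinel into the feedback vector, both when the enum cache is first missed and when a deopted loop discovers mid-iteration that it has left fast mode. A sketch of the rewritten prologue decision, with hypothetical predicates standing in for the emitted checks:

    #include <cstdio>

    // Hypothetical model of the enumerable value; not V8's object model.
    struct Enumerable {
      bool is_null_or_undefined;
      bool is_js_receiver;  // already an object, no conversion needed
      bool has_enum_cache;  // map carries a valid enum cache (never for proxies)
    };

    // Mirrors the rewritten VisitForInStatement prologue.
    const char* ForInPrologue(const Enumerable& e) {
      if (!e.is_js_receiver) {
        if (e.is_null_or_undefined) return "skip loop";  // exit before ToObject
        // otherwise ToObjectStub converts the primitive to a wrapper here
      }
      // Proxies never have an enum cache, so they take the runtime path,
      // which also records the megamorphic sentinel in the feedback vector.
      return e.has_enum_cache ? "use enum cache" : "Runtime::kForInEnumerate";
    }

    int main() {
      std::printf("%s\n", ForInPrologue({true, false, false}));  // skip loop
      std::printf("%s\n", ForInPrologue({false, true, true}));   // enum cache
      std::printf("%s\n", ForInPrologue({false, true, false}));  // runtime
    }
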
@@ -1391,12 +1359,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// by eval-introduced variables.
EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ bind(&slow);
- __ Push(rsi); // Context.
__ Push(var->name());
Runtime::FunctionId function_id =
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotNoReferenceError;
+ : Runtime::kLoadLookupSlotInsideTypeof;
__ CallRuntime(function_id);
__ bind(&done);
context()->Plug(rax);
@@ -1421,6 +1388,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
+ OperandStackDepthIncrement(1);
__ PushRoot(Heap::kNullValueRootIndex);
} else {
VisitForStackValue(expression);
@@ -1469,7 +1437,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
- __ Push(rax); // Save result on the stack
+ PushOperand(rax); // Save result on the stack
result_saved = true;
}
switch (property->kind()) {
@@ -1499,24 +1467,24 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
}
- __ Push(Operand(rsp, 0)); // Duplicate receiver.
+ PushOperand(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
- __ Push(Smi::FromInt(SLOPPY)); // Language mode
- __ CallRuntime(Runtime::kSetProperty);
+ PushOperand(Smi::FromInt(SLOPPY)); // Language mode
+ CallRuntimeWithOperands(Runtime::kSetProperty);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
case ObjectLiteral::Property::PROTOTYPE:
- __ Push(Operand(rsp, 0)); // Duplicate receiver.
+ PushOperand(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
break;
@@ -1538,12 +1506,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end();
++it) {
- __ Push(Operand(rsp, 0)); // Duplicate receiver.
+ PushOperand(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
- __ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1560,17 +1528,17 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Expression* value = property->value();
if (!result_saved) {
- __ Push(rax); // Save result on the stack
+ PushOperand(rax); // Save result on the stack
result_saved = true;
}
- __ Push(Operand(rsp, 0)); // Duplicate receiver.
+ PushOperand(Operand(rsp, 0)); // Duplicate receiver.
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
} else {
@@ -1585,10 +1553,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
case ObjectLiteral::Property::COMPUTED:
if (property->emit_store()) {
- __ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
@@ -1597,13 +1566,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
- __ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ Push(Smi::FromInt(NONE));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1661,14 +1630,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
int array_index = 0;
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- if (subexpr->IsSpread()) break;
+ DCHECK(!subexpr->IsSpread());
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ Push(rax); // array literal
+ PushOperand(rax); // array literal
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
@@ -1689,21 +1658,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
  // (inclusive) and these elements get appended to the array. Note that the
  // number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Pop(rax);
+ PopOperand(rax);
result_saved = false;
}
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- __ Push(rax);
- if (subexpr->IsSpread()) {
- VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement);
- }
+ PushOperand(rax);
+ DCHECK(!subexpr->IsSpread());
+ VisitForStackValue(subexpr);
+ CallRuntimeWithOperands(Runtime::kAppendElement);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1744,10 +1708,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
property->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
property->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
- __ Push(MemOperand(rsp, kPointerSize));
- __ Push(result_register());
+ PushOperand(MemOperand(rsp, kPointerSize));
+ PushOperand(result_register());
}
break;
case KEYED_SUPER_PROPERTY:
@@ -1756,11 +1720,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(
property->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(property->key());
- __ Push(result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
- __ Push(MemOperand(rsp, 2 * kPointerSize));
- __ Push(MemOperand(rsp, 2 * kPointerSize));
- __ Push(result_register());
+ PushOperand(MemOperand(rsp, 2 * kPointerSize));
+ PushOperand(MemOperand(rsp, 2 * kPointerSize));
+ PushOperand(result_register());
}
break;
case KEYED_PROPERTY: {
@@ -1806,7 +1770,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- __ Push(rax); // Left operand goes on the stack.
+ PushOperand(rax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
AccumulatorValueContext context(this);
@@ -1871,8 +1835,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ jmp(&suspend);
__ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
__ RecordGeneratorContinuation();
- __ jmp(&resume);
+ __ Pop(rbx);
+ __ SmiCompare(rbx, Smi::FromInt(JSGeneratorObject::RETURN));
+ __ j(not_equal, &resume);
+ __ Push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
__ bind(&suspend);
VisitForAccumulatorValue(expr->generator_object());
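
A suspended generator's continuation point now finds the resume mode on top of the stack, with the sent value in the result register: a next-style resume falls through to the resume label as before, while a return-style resume boxes the value into an iterator result and unwinds. A sketch of that dispatch, assuming only the two modes visible in this hunk (JSGeneratorObject also defines a throw mode not shown here):

    #include <cstdio>

    enum ResumeMode { kNext, kReturn };  // subset for illustration

    struct IterResult { int value; bool done; };

    // Mirrors the code after RecordGeneratorContinuation: pop the mode,
    // branch on RETURN, otherwise continue at &resume.
    IterResult OnGeneratorContinuation(ResumeMode mode, int sent_value) {
      if (mode == kReturn) {
        // EmitCreateIteratorResult(true) + EmitUnwindAndReturn()
        return {sent_value, true};
      }
      // __ bind(&resume): the generator body keeps running with sent_value.
      std::printf("resuming body with %d\n", sent_value);
      return {0, false};
    }

    int main() {
      IterResult r = OnGeneratorContinuation(kReturn, 42);
      std::printf("{value: %d, done: %s}\n", r.value, r.done ? "true" : "false");
    }
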
@@ -1892,7 +1864,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
- __ Pop(result_register());
+ PopOperand(result_register());
EmitReturnSequence();
__ bind(&resume);
@@ -1901,131 +1873,21 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
}
case Yield::kFinal: {
- VisitForAccumulatorValue(expr->generator_object());
- __ Move(FieldOperand(result_register(),
- JSGeneratorObject::kContinuationOffset),
- Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
// Pop value from top-of-stack slot, box result into result register.
+ OperandStackDepthDecrement(1);
EmitCreateIteratorResult(true);
- EmitUnwindBeforeReturn();
- EmitReturnSequence();
+ EmitUnwindAndReturn();
break;
}
- case Yield::kDelegating: {
- VisitForStackValue(expr->generator_object());
-
- // Initial stack layout is as follows:
- // [sp + 1 * kPointerSize] iter
- // [sp + 0 * kPointerSize] g
-
- Label l_catch, l_try, l_suspend, l_continuation, l_resume;
- Label l_next, l_call, l_loop;
- Register load_receiver = LoadDescriptor::ReceiverRegister();
- Register load_name = LoadDescriptor::NameRegister();
-
- // Initial send value is undefined.
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ jmp(&l_next);
-
- // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
- __ bind(&l_catch);
- __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
- __ Push(load_name);
- __ Push(Operand(rsp, 2 * kPointerSize)); // iter
- __ Push(rax); // exception
- __ jmp(&l_call);
-
- // try { received = %yield result }
- // Shuffle the received result above a try handler and yield it without
- // re-boxing.
- __ bind(&l_try);
- __ Pop(rax); // result
- int handler_index = NewHandlerTableEntry();
- EnterTryBlock(handler_index, &l_catch);
- const int try_block_size = TryCatch::kElementCount * kPointerSize;
- __ Push(rax); // result
-
- __ jmp(&l_suspend);
- __ bind(&l_continuation);
- __ RecordGeneratorContinuation();
- __ jmp(&l_resume);
-
- __ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + try_block_size;
- __ movp(rax, Operand(rsp, generator_object_depth));
- __ Push(rax); // g
- __ Push(Smi::FromInt(handler_index)); // handler-index
- DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
- __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
- Smi::FromInt(l_continuation.pos()));
- __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
- __ movp(rcx, rsi);
- __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
- kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
- __ movp(context_register(),
- Operand(rbp, StandardFrameConstants::kContextOffset));
- __ Pop(rax); // result
- EmitReturnSequence();
- __ bind(&l_resume); // received in rax
- ExitTryBlock(handler_index);
-
- // receiver = iter; f = 'next'; arg = received;
- __ bind(&l_next);
-
- __ LoadRoot(load_name, Heap::knext_stringRootIndex);
- __ Push(load_name); // "next"
- __ Push(Operand(rsp, 2 * kPointerSize)); // iter
- __ Push(rax); // received
-
- // result = receiver[f](arg);
- __ bind(&l_call);
- __ movp(load_receiver, Operand(rsp, kPointerSize));
- __ Move(LoadDescriptor::SlotRegister(),
- SmiFromSlot(expr->KeyedLoadFeedbackSlot()));
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
- CallIC(ic, TypeFeedbackId::None());
- __ movp(rdi, rax);
- __ movp(Operand(rsp, 2 * kPointerSize), rdi);
-
- SetCallPosition(expr);
- __ Set(rax, 1);
- __ Call(
- isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
-
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The function is still on the stack; drop it.
-
- // if (!result.done) goto l_try;
- __ bind(&l_loop);
- __ Move(load_receiver, rax);
- __ Push(load_receiver); // save result
- __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
- __ Move(LoadDescriptor::SlotRegister(),
- SmiFromSlot(expr->DoneFeedbackSlot()));
- CallLoadIC(NOT_INSIDE_TYPEOF); // rax=result.done
- Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(bool_ic);
- __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
- __ j(not_equal, &l_try);
-
- // result.value
- __ Pop(load_receiver); // result
- __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
- __ Move(LoadDescriptor::SlotRegister(),
- SmiFromSlot(expr->ValueFeedbackSlot()));
- CallLoadIC(NOT_INSIDE_TYPEOF); // result.value in rax
- context()->DropAndPlug(2, rax); // drop iter and g
- break;
- }
+ case Yield::kDelegating:
+ UNREACHABLE();
}
}
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
- Expression *value,
+void FullCodeGenerator::EmitGeneratorResume(
+ Expression* generator, Expression* value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in rax, and is ultimately read by the resumed generator, as
// if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
@@ -2033,7 +1895,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// rbx will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- __ Pop(rbx);
+ PopOperand(rbx);
+
+ // Store input value into generator object.
+ __ movp(FieldOperand(rbx, JSGeneratorObject::kInputOffset),
+ result_register());
+ __ movp(rcx, result_register());
+ __ RecordWriteField(rbx, JSGeneratorObject::kInputOffset, rcx, rdx,
+ kDontSaveFPRegs);
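// The input value is copied into rcx first because RecordWriteField clobbers
// its value and scratch registers (the same pattern the removed kContextOffset
// barrier used); rax itself must stay live for the resumed generator.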
// Load suspended function and context.
__ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
@@ -2083,6 +1952,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ addp(rdx, rcx);
__ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
__ jmp(rdx);
__ bind(&slow_resume);
}
@@ -2096,6 +1966,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ Push(rcx);
__ jmp(&push_operand_holes);
__ bind(&call_resume);
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
__ Push(rbx);
__ Push(result_register());
__ Push(Smi::FromInt(resume_mode));
@@ -2107,6 +1978,21 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
context()->Plug(result_register());
}
+void FullCodeGenerator::PushOperand(MemOperand operand) {
+ OperandStackDepthIncrement(1);
+ __ Push(operand);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+ if (FLAG_debug_code) {
+ int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+ operand_stack_depth_ * kPointerSize;
+ __ movp(rax, rbp);
+ __ subp(rax, rsp);
+ __ cmpp(rax, Immediate(expected_diff));
+ __ Assert(equal, kUnexpectedStackDepth);
+ }
+}
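// A rough C++ sketch of the invariant the check above enforces, assuming
// operand_stack_depth_ counts pointer-sized operand slots:
//
//   intptr_t actual   = rbp - rsp;
//   intptr_t expected = StandardFrameConstants::kFixedFrameSizeFromFp +
//                       operand_stack_depth_ * kPointerSize;
//   DCHECK_EQ(expected, actual);  // Assert(equal, kUnexpectedStackDepth)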
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
@@ -2134,42 +2020,13 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetExpressionPosition(prop);
Literal* key = prop->key()->AsLiteral();
+ DCHECK(!key->value()->IsSmi());
DCHECK(!prop->IsSuperAccess());
__ Move(LoadDescriptor::NameRegister(), key->value());
__ Move(LoadDescriptor::SlotRegister(),
SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- DCHECK(prop->IsSuperAccess());
-
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
- __ Move(LoadDescriptor::SlotRegister(),
- SmiFromSlot(prop->PropertyFeedbackSlot()));
- CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object, key.
- SetExpressionPosition(prop);
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -2181,7 +2038,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier.
Label done, stub_call, smi_case;
- __ Pop(rdx);
+ PopOperand(rdx);
__ movp(rcx, rax);
__ orp(rax, rdx);
JumpPatchSite patch_site(masm_);
@@ -2189,8 +2046,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ movp(rax, rcx);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2235,24 +2091,14 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- // Constructor is in rax.
- DCHECK(lit != NULL);
- __ Push(rax);
-
- // No access check is needed here since the constructor is created by the
- // class literal.
- Register scratch = rbx;
- __ movp(scratch, FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset));
- __ Push(scratch);
-
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
if (property->is_static()) {
- __ Push(Operand(rsp, kPointerSize)); // constructor
+ PushOperand(Operand(rsp, kPointerSize)); // constructor
} else {
- __ Push(Operand(rsp, 0)); // prototype
+ PushOperand(Operand(rsp, 0)); // prototype
}
EmitPropertyKey(property, lit->GetIdForProperty(i));
@@ -2276,34 +2122,31 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
case ObjectLiteral::Property::GETTER:
- __ Push(Smi::FromInt(DONT_ENUM));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ Push(Smi::FromInt(DONT_ENUM));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
default:
UNREACHABLE();
}
}
-
- // Set both the prototype and constructor to have fast properties, and also
- // freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
- __ Pop(rdx);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ PopOperand(rdx);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2326,10 +2169,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_PROPERTY: {
- __ Push(rax); // Preserve value.
+ PushOperand(rax); // Preserve value.
VisitForAccumulatorValue(prop->obj());
__ Move(StoreDescriptor::ReceiverRegister(), rax);
- __ Pop(StoreDescriptor::ValueRegister()); // Restore value.
+ PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
__ Move(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
EmitLoadStoreICSlot(slot);
@@ -2337,7 +2180,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_SUPER_PROPERTY: {
- __ Push(rax);
+ PushOperand(rax);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2354,7 +2197,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_SUPER_PROPERTY: {
- __ Push(rax);
+ PushOperand(rax);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2374,12 +2217,12 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_PROPERTY: {
- __ Push(rax); // Preserve value.
+ PushOperand(rax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ Move(StoreDescriptor::NameRegister(), rax);
- __ Pop(StoreDescriptor::ReceiverRegister());
- __ Pop(StoreDescriptor::ValueRegister()); // Restore value.
+ PopOperand(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2456,17 +2299,17 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
(var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
- __ Push(rax); // Value.
- __ Push(rsi); // Context.
__ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreLookupSlot);
+ __ Push(rax);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreLookupSlot_Strict
+ : Runtime::kStoreLookupSlot_Sloppy);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, rcx);
- if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
@@ -2511,7 +2354,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop->key()->IsLiteral());
__ Move(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
- __ Pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
@@ -2528,10 +2371,11 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
- __ Push(key->value());
- __ Push(rax);
- __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy));
+ PushOperand(key->value());
+ PushOperand(rax);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy);
}
@@ -2541,17 +2385,17 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// stack : receiver ('this'), home_object, key
DCHECK(prop != NULL);
- __ Push(rax);
- __ CallRuntime((is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy));
+ PushOperand(rax);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy);
}
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- __ Pop(StoreDescriptor::NameRegister()); // Key.
- __ Pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::NameRegister()); // Key.
+ PopOperand(StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(rax));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2586,7 +2430,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ Move(LoadDescriptor::NameRegister(), rax);
- __ Pop(LoadDescriptor::ReceiverRegister());
+ PopOperand(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
@@ -2621,7 +2465,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
// Push undefined as receiver. This is patched in the Call builtin if it
// is a sloppy mode method.
- __ Push(isolate()->factory()->undefined_value());
+ PushOperand(isolate()->factory()->undefined_value());
convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
@@ -2631,7 +2475,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ Push(Operand(rsp, 0));
+ PushOperand(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
@@ -2653,11 +2497,10 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(rax);
- __ Push(rax);
- __ Push(Operand(rsp, kPointerSize * 2));
- __ Push(key->value());
- __ Push(Smi::FromInt(language_mode()));
+ PushOperand(rax);
+ PushOperand(rax);
+ PushOperand(Operand(rsp, kPointerSize * 2));
+ PushOperand(key->value());
// Stack here:
// - home_object
@@ -2665,8 +2508,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ movp(Operand(rsp, kPointerSize), rax);
@@ -2694,7 +2536,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ Push(Operand(rsp, 0));
+ PushOperand(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -2712,11 +2554,10 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(rax);
- __ Push(rax);
- __ Push(Operand(rsp, kPointerSize * 2));
+ PushOperand(rax);
+ PushOperand(rax);
+ PushOperand(Operand(rsp, kPointerSize * 2));
VisitForStackValue(prop->key());
- __ Push(Smi::FromInt(language_mode()));
// Stack here:
// - home_object
@@ -2724,8 +2565,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ movp(Operand(rsp, kPointerSize), rax);
@@ -2747,12 +2587,23 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
SetCallPosition(expr);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+ if (expr->tail_call_mode() == TailCallMode::kAllow) {
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceTailCall);
+ }
+ // Update profiling counters before the tail call since we will
+ // not return to this function.
+ EmitProfilingCounterHandlingForReturnSequence(true);
+ }
+ Handle<Code> ic =
+ CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+ .code();
__ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
CallIC(ic);
+ OperandStackDepthDecrement(arg_count + 1);
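// Each call site in this patch pairs the masm call with an
// OperandStackDepthDecrement matching the slots the callee consumes, keeping
// the tracked depth in sync with rsp.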
RecordJSReturnSite(expr);
@@ -2797,11 +2648,10 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in rax) and
// the object holding it (returned in rdx).
- __ Push(context_register());
__ Push(callee->name());
- __ CallRuntime(Runtime::kLoadLookupSlot);
- __ Push(rax); // Function.
- __ Push(rdx); // Receiver.
+ __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+ PushOperand(rax); // Function.
+ PushOperand(rdx); // Receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the function
@@ -2821,6 +2671,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
} else {
VisitForStackValue(callee);
// refEnv.WithBaseObject()
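// PushRoot below does not go through PushOperand, so the operand-stack depth
// is bumped by hand (the same pattern appears in EmitLoadJSRuntimeFunction).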
+ OperandStackDepthIncrement(1);
__ PushRoot(Heap::kUndefinedValueRootIndex);
}
}
@@ -2852,7 +2703,10 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
SetCallPosition(expr);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
__ Set(rax, arg_count);
- __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ expr->tail_call_mode()),
+ RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
// Restore context register.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2893,6 +2747,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2911,7 +2766,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ AssertFunction(result_register());
__ movp(result_register(),
FieldOperand(result_register(), HeapObject::kMapOffset));
- __ Push(FieldOperand(result_register(), Map::kPrototypeOffset));
+ PushOperand(FieldOperand(result_register(), Map::kPrototypeOffset));
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -2933,6 +2788,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ movp(rdi, Operand(rsp, arg_count * kPointerSize));
__ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -2986,77 +2842,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, SIMD128_VALUE_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, FIRST_FUNCTION_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(above_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
- __ CheckMap(rax, map, if_false, DO_SMI_CHECK);
- __ cmpl(FieldOperand(rax, HeapNumber::kExponentOffset),
- Immediate(0x1));
- __ j(no_overflow, if_false);
- __ cmpl(FieldOperand(rax, HeapNumber::kMantissaOffset),
- Immediate(0x00000000));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3146,68 +2931,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ Pop(rbx);
- __ cmpp(rax, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in rdx and the formal
- // parameter count in rax.
- VisitForAccumulatorValue(args->at(0));
- __ movp(rdx, rax);
- __ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
- ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label exit;
- // Get the number of formal parameters.
- __ Move(rax, Smi::FromInt(info_->scope()->num_parameters()));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &exit, Label::kNear);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ movp(rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- __ AssertSmi(rax);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3276,28 +2999,6 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = nullptr;
- Label* if_false = nullptr;
- Label* fall_through = nullptr;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, JS_DATE_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3309,8 +3010,8 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(value);
- __ Pop(index);
+ PopOperand(value);
+ PopOperand(index);
if (FLAG_debug_code) {
__ Check(__ CheckSmi(value), kNonSmiValue);
@@ -3342,8 +3043,8 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ Pop(value);
- __ Pop(index);
+ PopOperand(value);
+ PopOperand(index);
if (FLAG_debug_code) {
__ Check(__ CheckSmi(value), kNonSmiValue);
@@ -3364,34 +3065,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ Pop(rbx); // rax = value. rbx = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(rbx, &done);
-
- // If the object is not a value type, return the value.
- __ CmpObjectType(rbx, JS_VALUE_TYPE, rcx);
- __ j(not_equal, &done);
-
- // Store the value.
- __ movp(FieldOperand(rbx, JSValue::kValueOffset), rax);
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ movp(rdx, rax);
- __ RecordWriteField(rbx, JSValue::kValueOffset, rdx, rcx, kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3409,27 +3082,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into rax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- // Convert the object to a name.
- Label convert, done_convert;
- __ JumpIfSmi(rax, &convert, Label::kNear);
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ CmpObjectType(rax, LAST_NAME_TYPE, rcx);
- __ j(below_equal, &done_convert, Label::kNear);
- __ bind(&convert);
- __ Push(rax);
- __ CallRuntime(Runtime::kToName);
- __ bind(&done_convert);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3460,7 +3112,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Register index = rax;
Register result = rdx;
- __ Pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3507,7 +3159,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
Register scratch = rdx;
Register result = rax;
- __ Pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3557,6 +3209,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
// Call the target.
__ Set(rax, argc);
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(argc + 1);
// Restore context register.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -3613,296 +3266,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
- Label bailout, return_result, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- // We will leave the separator on the stack until the end of the function.
- VisitForStackValue(args->at(1));
- // Load this to rax (= array)
- VisitForAccumulatorValue(args->at(0));
- // All aliases of the same register have disjoint lifetimes.
- Register array = rax;
- Register elements = no_reg; // Will be rax.
-
- Register index = rdx;
-
- Register string_length = rcx;
-
- Register string = rsi;
-
- Register scratch = rbx;
-
- Register array_length = rdi;
- Register result_pos = no_reg; // Will be rdi.
-
- Operand separator_operand = Operand(rsp, 2 * kPointerSize);
- Operand result_operand = Operand(rsp, 1 * kPointerSize);
- Operand array_length_operand = Operand(rsp, 0 * kPointerSize);
- // Separator operand is already pushed. Make room for the two
- // other stack fields, and clear the direction flag in anticipation
- // of calling CopyBytes.
- __ subp(rsp, Immediate(2 * kPointerSize));
- __ cld();
- // Check that the array is a JSArray
- __ JumpIfSmi(array, &bailout);
- __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch, &bailout);
-
- // Array has fast elements, so its length must be a smi.
- // If the array has length zero, return the empty string.
- __ movp(array_length, FieldOperand(array, JSArray::kLengthOffset));
- __ SmiCompare(array_length, Smi::FromInt(0));
- __ j(not_zero, &non_trivial_array);
- __ LoadRoot(rax, Heap::kempty_stringRootIndex);
- __ jmp(&return_result);
-
- // Save the array length on the stack.
- __ bind(&non_trivial_array);
- __ SmiToInteger32(array_length, array_length);
- __ movl(array_length_operand, array_length);
-
- // Save the FixedArray containing array's elements.
- // End of array's live range.
- elements = array;
- __ movp(elements, FieldOperand(array, JSArray::kElementsOffset));
- array = no_reg;
-
-
- // Check that all array elements are sequential one-byte strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ Set(index, 0);
- __ Set(string_length, 0);
- // Loop condition: while (index < array_length).
- // Live loop registers: index(int32), array_length(int32), string(String*),
- // scratch, string_length(int32), elements(FixedArray*).
- if (generate_debug_code_) {
- __ cmpp(index, array_length);
- __ Assert(below, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
- }
- __ bind(&loop);
- __ movp(string, FieldOperand(elements,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(string, &bailout);
- __ movp(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ andb(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
- __ j(not_equal, &bailout);
- __ AddSmiField(string_length,
- FieldOperand(string, SeqOneByteString::kLengthOffset));
- __ j(overflow, &bailout);
- __ incl(index);
- __ cmpl(index, array_length);
- __ j(less, &loop);
-
- // Live registers:
- // string_length: Sum of string lengths.
- // elements: FixedArray of strings.
- // index: Array length.
- // array_length: Array length.
-
- // If array_length is 1, return elements[0], a string.
- __ cmpl(array_length, Immediate(1));
- __ j(not_equal, &not_size_one_array);
- __ movp(rax, FieldOperand(elements, FixedArray::kHeaderSize));
- __ jmp(&return_result);
-
- __ bind(&not_size_one_array);
-
- // End of array_length live range.
- result_pos = array_length;
- array_length = no_reg;
-
- // Live registers:
- // string_length: Sum of string lengths.
- // elements: FixedArray of strings.
- // index: Array length.
-
- // Check that the separator is a sequential one-byte string.
- __ movp(string, separator_operand);
- __ JumpIfSmi(string, &bailout);
- __ movp(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ andb(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
- __ j(not_equal, &bailout);
-
- // Live registers:
- // string_length: Sum of string lengths.
- // elements: FixedArray of strings.
- // index: Array length.
- // string: Separator string.
-
- // Add (separator length times (array_length - 1)) to string_length.
- __ SmiToInteger32(scratch,
- FieldOperand(string, SeqOneByteString::kLengthOffset));
- __ decl(index);
- __ imull(scratch, index);
- __ j(overflow, &bailout);
- __ addl(string_length, scratch);
- __ j(overflow, &bailout);
- __ jmp(&bailout);
-
- // Bailout for large object allocations.
- __ cmpl(string_length, Immediate(Page::kMaxRegularHeapObjectSize));
- __ j(greater, &bailout);
-
- // Live registers and stack values:
- // string_length: Total length of result string.
- // elements: FixedArray of strings.
- __ AllocateOneByteString(result_pos, string_length, scratch, index, string,
- &bailout);
- __ movp(result_operand, result_pos);
- __ leap(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
-
- __ movp(string, separator_operand);
- __ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
- Smi::FromInt(1));
- __ j(equal, &one_char_separator);
- __ j(greater, &long_separator);
-
-
- // Empty separator case:
- __ Set(index, 0);
- __ movl(scratch, array_length_operand);
- __ jmp(&loop_1_condition);
- // Loop condition: while (index < array_length).
- __ bind(&loop_1);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // elements: the FixedArray of strings we are joining.
- // scratch: array length.
-
- // Get string = array[index].
- __ movp(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ SmiToInteger32(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ leap(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(result_pos, string, string_length);
- __ incl(index);
- __ bind(&loop_1_condition);
- __ cmpl(index, scratch);
- __ j(less, &loop_1); // Loop while (index < array_length).
- __ jmp(&done);
-
- // Generic bailout code used from several places.
- __ bind(&bailout);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- __ jmp(&return_result);
-
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Get the separator one-byte character value.
- // Register "string" holds the separator.
- __ movzxbl(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ Set(index, 0);
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_2_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_2);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // elements: The FixedArray of strings we are joining.
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // scratch: Separator character.
-
- // Copy the separator character to the result.
- __ movb(Operand(result_pos, 0), scratch);
- __ incp(result_pos);
-
- __ bind(&loop_2_entry);
- // Get string = array[index].
- __ movp(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ SmiToInteger32(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ leap(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(result_pos, string, string_length);
- __ incl(index);
- __ cmpl(index, array_length_operand);
- __ j(less, &loop_2); // End while (index < length).
- __ jmp(&done);
-
-
- // Long separator case (separator is more than one character).
- __ bind(&long_separator);
-
- // Make elements point to end of elements array, and index
- // count from -array_length to zero, so we don't need to maintain
- // a loop limit.
- __ movl(index, array_length_operand);
- __ leap(elements, FieldOperand(elements, index, times_pointer_size,
- FixedArray::kHeaderSize));
- __ negq(index);
-
- // Replace separator string with pointer to its first character, and
- // make scratch be its length.
- __ movp(string, separator_operand);
- __ SmiToInteger32(scratch,
- FieldOperand(string, String::kLengthOffset));
- __ leap(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ movp(separator_operand, string);
-
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_3_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_3);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // scratch: Separator length.
- // separator_operand (rsp[0x10]): Address of first char of separator.
-
- // Copy the separator to the result.
- __ movp(string, separator_operand);
- __ movl(string_length, scratch);
- __ CopyBytes(result_pos, string, string_length, 2);
-
- __ bind(&loop_3_entry);
- // Get string = array[index].
- __ movp(string, Operand(elements, index, times_pointer_size, 0));
- __ SmiToInteger32(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ leap(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(result_pos, string, string_length);
- __ incq(index);
- __ j(not_equal, &loop_3); // Loop while (index < 0).
-
- __ bind(&done);
- __ movp(rax, result_operand);
-
- __ bind(&return_result);
- // Drop temp values from the stack, and restore context register.
- __ addp(rsp, Immediate(3 * kPointerSize));
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -3934,7 +3297,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done, Label::kNear);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject);
+ CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(rax);
@@ -3943,6 +3306,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push the builtins object as receiver.
+ OperandStackDepthIncrement(1);
__ PushRoot(Heap::kUndefinedValueRootIndex);
__ LoadNativeContextSlot(expr->context_index(), rax);
@@ -3958,6 +3322,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Set(rax, arg_count);
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
}
@@ -3971,7 +3336,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
EmitLoadJSRuntimeFunction(expr);
// Push the target function under the receiver.
- __ Push(Operand(rsp, 0));
+ PushOperand(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
// Push the arguments ("left-to-right").
@@ -4006,6 +3371,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the C runtime.
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(function, arg_count);
+ OperandStackDepthDecrement(arg_count);
context()->Plug(rax);
}
}
@@ -4023,9 +3389,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(rax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4047,7 +3413,6 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- __ Push(context_register());
__ Push(var->name());
__ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(rax);
@@ -4093,6 +3458,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_false,
&materialize_true,
&materialize_true);
+ if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
if (context()->IsAccumulatorValue()) {
@@ -4148,7 +3514,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
- __ Push(Smi::FromInt(0));
+ PushOperand(Smi::FromInt(0));
}
switch (assign_type) {
case NAMED_PROPERTY: {
@@ -4162,9 +3528,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- __ Push(result_register());
- __ Push(MemOperand(rsp, kPointerSize));
- __ Push(result_register());
+ PushOperand(result_register());
+ PushOperand(MemOperand(rsp, kPointerSize));
+ PushOperand(result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
@@ -4174,10 +3540,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
- __ Push(result_register());
- __ Push(MemOperand(rsp, 2 * kPointerSize));
- __ Push(MemOperand(rsp, 2 * kPointerSize));
- __ Push(result_register());
+ PushOperand(result_register());
+ PushOperand(MemOperand(rsp, 2 * kPointerSize));
+ PushOperand(MemOperand(rsp, 2 * kPointerSize));
+ PushOperand(result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
@@ -4266,7 +3632,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ Push(rax);
+ PushOperand(rax);
break;
case NAMED_PROPERTY:
__ movp(Operand(rsp, kPointerSize), rax);
@@ -4290,8 +3656,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
__ movp(rdx, rax);
__ Move(rax, Smi::FromInt(1));
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
- strength(language_mode())).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), expr->binary_op()).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4326,7 +3692,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ Move(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
- __ Pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4362,8 +3728,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ Pop(StoreDescriptor::NameRegister());
- __ Pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::NameRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
@@ -4417,8 +3783,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ CompareRoot(rax, Heap::kFalseValueRootIndex);
Split(equal, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->undefined_string())) {
- __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
- __ j(equal, if_true);
+ __ CompareRoot(rax, Heap::kNullValueRootIndex);
+ __ j(equal, if_false);
__ JumpIfSmi(rax, if_false);
// Check for undetectable objects => true.
__ movp(rdx, FieldOperand(rax, HeapObject::kMapOffset));
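// Structurally, the rewritten test for typeof x == "undefined" is now:
//
//   if (x == null_value) goto if_false;   // null excluded explicitly
//   if (x is Smi)        goto if_false;
//   split on x->map()->is_undetectable(); // undefined and undetectable
//                                         // objects answer true
//
// presumably because null's map is also marked undetectable in this V8
// version, so the map-bit test alone would misreport typeof null.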
@@ -4484,7 +3850,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty);
+ CallRuntimeWithOperands(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
@@ -4492,7 +3858,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
- __ Pop(rdx);
+ PopOperand(rdx);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4504,7 +3870,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = CompareIC::ComputeCondition(op);
- __ Pop(rdx);
+ PopOperand(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
@@ -4518,8 +3884,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&slow_case);
}
- Handle<Code> ic = CodeFactory::CompareIC(
- isolate(), op, strength(language_mode())).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -4598,15 +3963,15 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// as their closure, not the anonymous closure containing the global
// code.
__ movp(rax, NativeContextOperand());
- __ Push(ContextOperand(rax, Context::CLOSURE_INDEX));
+ PushOperand(ContextOperand(rax, Context::CLOSURE_INDEX));
} else if (closure_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ Push(ContextOperand(rsi, Context::CLOSURE_INDEX));
+ PushOperand(ContextOperand(rsi, Context::CLOSURE_INDEX));
} else {
DCHECK(closure_scope->is_function_scope());
- __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ PushOperand(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
}
@@ -4617,22 +3982,12 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
void FullCodeGenerator::EnterFinallyBlock() {
DCHECK(!result_register().is(rdx));
- DCHECK(!result_register().is(rcx));
- // Cook return address on top of stack (smi encoded Code* delta)
- __ PopReturnAddressTo(rdx);
- __ Move(rcx, masm_->CodeObject());
- __ subp(rdx, rcx);
- __ Integer32ToSmi(rdx, rdx);
- __ Push(rdx);
-
- // Store result register while executing finally block.
- __ Push(result_register());
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Load(rdx, pending_message_obj);
- __ Push(rdx);
+ PushOperand(rdx);
ClearPendingMessage();
}
@@ -4640,22 +3995,11 @@ void FullCodeGenerator::EnterFinallyBlock() {
void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(rdx));
- DCHECK(!result_register().is(rcx));
// Restore pending message from stack.
- __ Pop(rdx);
+ PopOperand(rdx);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Store(pending_message_obj, rdx);
-
- // Restore result register from stack.
- __ Pop(result_register());
-
- // Uncook return address.
- __ Pop(rdx);
- __ SmiToInteger32(rdx, rdx);
- __ Move(rcx, masm_->CodeObject());
- __ addp(rdx, rcx);
- __ jmp(rdx);
}
@@ -4673,6 +4017,31 @@ void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
__ Move(VectorStoreICTrampolineDescriptor::SlotRegister(), SmiFromSlot(slot));
}
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+ __ Pop(result_register()); // Restore the accumulator.
+ __ Pop(rdx); // Get the token.
+ for (DeferredCommand cmd : commands_) {
+ Label skip;
+ __ SmiCompare(rdx, Smi::FromInt(cmd.token));
+ __ j(not_equal, &skip);
+ switch (cmd.command) {
+ case kReturn:
+ codegen_->EmitUnwindAndReturn();
+ break;
+ case kThrow:
+ __ Push(result_register());
+ __ CallRuntime(Runtime::kReThrow);
+ break;
+ case kContinue:
+ codegen_->EmitContinue(cmd.target);
+ break;
+ case kBreak:
+ codegen_->EmitBreak(cmd.target);
+ break;
+ }
+ __ bind(&skip);
+ }
+}
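// A sketch of the deferred-command dispatch emitted above: the try/finally
// machinery leaves the saved accumulator and a smi token on the operand
// stack, and each recorded command claims its token:
//
//   result = pop(); token = pop();
//   for (cmd : commands_)
//     if (token == cmd.token)
//       return / rethrow(result) / continue(target) / break(target);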
#undef __
@@ -4753,7 +4122,6 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
return OSR_AFTER_STACK_CHECK;
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
index c38230ad1e..36b7c5d636 100644
--- a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
+++ b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
@@ -17,8 +17,7 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
-
+#define __ ACCESS_MASM(masm())
class JumpPatchSite BASE_EMBEDDED {
public:
@@ -68,6 +67,7 @@ class JumpPatchSite BASE_EMBEDDED {
__ j(cc, target, distance);
}
+ MacroAssembler* masm() { return masm_; }
MacroAssembler* masm_;
Label patch_site_;
#ifdef DEBUG
@@ -99,13 +99,6 @@ void FullCodeGenerator::Generate() {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
-#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
-#endif
-
if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
__ mov(ecx, Operand(esp, receiver_offset));
@@ -126,6 +119,7 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
DCHECK(!IsGeneratorFunction(literal()->kind()) || locals_count == 0);
+ OperandStackDepthIncrement(locals_count);
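// The locals pushed by the prologue below count as operand-stack slots under
// the new bookkeeping, so the tracked depth is raised up front.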
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -256,48 +250,33 @@ void FullCodeGenerator::Generate() {
Variable* rest_param = scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ mov(RestParamAccessDescriptor::parameter_count(),
- Immediate(Smi::FromInt(num_parameters)));
- __ lea(RestParamAccessDescriptor::parameter_pointer(),
- Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(RestParamAccessDescriptor::rest_parameter_index(),
- Immediate(Smi::FromInt(rest_index)));
- function_in_register = false;
-
- RestParamAccessStub stub(isolate());
+ if (!function_in_register) {
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ FastNewRestParameterStub stub(isolate());
__ CallStub(&stub);
+ function_in_register = false;
SetVar(rest_param, eax, ebx, edx);
}
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
- // Function uses arguments object.
+ // Arguments object must be allocated after the context object, in
+ // case the "arguments" or ".arguments" variables are in the context.
Comment cmnt(masm_, "[ Allocate arguments object");
- DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
if (!function_in_register) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
- // Receiver is just before the parameters on the caller's stack.
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
- __ mov(ArgumentsAccessNewDescriptor::parameter_count(),
- Immediate(Smi::FromInt(num_parameters)));
- __ lea(ArgumentsAccessNewDescriptor::parameter_pointer(),
- Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
-
- // Arguments to ArgumentsAccessStub:
- // function, parameter pointer, parameter count.
- // The stub will rewrite parameter pointer and parameter count if the
- // previous stack frame was an arguments adapter frame.
- bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
- ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
- is_unmapped, literal()->has_duplicate_parameters());
- ArgumentsAccessStub stub(isolate(), type);
- __ CallStub(&stub);
+ if (is_strict(language_mode()) || !has_simple_parameters()) {
+ FastNewStrictArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ } else if (literal()->has_duplicate_parameters()) {
+ __ Push(edi);
+ __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
+ } else {
+ FastNewSloppyArgumentsStub stub(isolate());
+ __ CallStub(&stub);
+ }
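+ // In short: strict functions and functions without simple parameters get
+ // unmapped arguments via FastNewStrictArgumentsStub, duplicate parameter
+ // names force the generic runtime path, and everything else takes the fast
+ // sloppy (mapped) arguments stub; all three consume the closure that was
+ // just (re)loaded into edi.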
SetVar(arguments, eax, ebx, edx);
}
@@ -398,6 +377,30 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
}
+void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
+ bool is_tail_call) {
+ // Pretend that the exit is a backwards jump to the entry.
+ int weight = 1;
+ if (info_->ShouldSelfOptimize()) {
+ weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+ } else {
+ int distance = masm_->pc_offset();
+ weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+ }
+ EmitProfilingCounterDecrement(weight);
+ Label ok;
+ __ j(positive, &ok, Label::kNear);
+ // Don't need to save result register if we are going to do a tail call.
+ if (!is_tail_call) {
+ __ push(eax);
+ }
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+ if (!is_tail_call) {
+ __ pop(eax);
+ }
+ EmitProfilingCounterReset();
+ __ bind(&ok);
+}
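+// As a worked example of the weight computation above (a sketch only;
+// kCodeSizeMultiplier and kMaxBackEdgeWeight are platform constants defined
+// elsewhere): if the return sits at masm_->pc_offset() == 1200 and the
+// multiplier were 100, weight = Min(kMaxBackEdgeWeight, Max(1, 1200 / 100))
+// = 12, so larger functions drain the interrupt budget faster per return.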
void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
@@ -410,24 +413,7 @@ void FullCodeGenerator::EmitReturnSequence() {
__ push(eax);
__ CallRuntime(Runtime::kTraceExit);
}
- // Pretend that the exit is a backwards jump to the entry.
- int weight = 1;
- if (info_->ShouldSelfOptimize()) {
- weight = FLAG_interrupt_budget / FLAG_self_opt_count;
- } else {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
- }
- EmitProfilingCounterDecrement(weight);
- Label ok;
- __ j(positive, &ok, Label::kNear);
- __ push(eax);
- __ call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- __ pop(eax);
- EmitProfilingCounterReset();
- __ bind(&ok);
+ EmitProfilingCounterHandlingForReturnSequence(false);
SetReturnPosition(literal());
__ leave();
@@ -443,7 +429,7 @@ void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand operand = codegen()->VarOperand(var, result_register());
// Memory operands can be pushed directly.
- __ push(operand);
+ codegen()->PushOperand(operand);
}
@@ -484,6 +470,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ codegen()->OperandStackDepthIncrement(1);
if (lit->IsSmi()) {
__ SafePush(Immediate(lit));
} else {
@@ -497,7 +484,7 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ jmp(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -522,41 +509,14 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
}
-void FullCodeGenerator::EffectContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
-}
-
-
-void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
- DCHECK(count > 0);
- __ Drop(count);
- __ Move(result_register(), reg);
-}
-
-
void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
DCHECK(count > 0);
- if (count > 1) __ Drop(count - 1);
+ if (count > 1) codegen()->DropOperands(count - 1);
__ mov(Operand(esp, 0), reg);
}
-void FullCodeGenerator::TestContext::DropAndPlug(int count,
- Register reg) const {
- DCHECK(count > 0);
- // For simplicity we always test the accumulator register.
- __ Drop(count);
- __ Move(result_register(), reg);
- codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
- codegen()->DoTest(this);
-}
-
-
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
DCHECK(materialize_true == materialize_false);
@@ -580,6 +540,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
+ codegen()->OperandStackDepthIncrement(1);
Label done;
__ bind(materialize_true);
__ push(Immediate(isolate()->factory()->true_value()));
@@ -606,6 +567,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ codegen()->OperandStackDepthIncrement(1);
Handle<Object> value = flag
? isolate()->factory()->true_value()
: isolate()->factory()->false_value();
@@ -728,7 +690,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current context.
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
- if (generate_debug_code_) {
+ if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
__ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
__ cmp(ebx, isolate()->factory()->with_context_map());
@@ -838,11 +800,10 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
- __ push(Immediate(variable->name()));
+ PushOperand(variable->name());
VisitForStackValue(declaration->fun());
- __ push(
- Immediate(Smi::FromInt(variable->DeclarationPropertyAttributes())));
- __ CallRuntime(Runtime::kDeclareLookupSlot);
+ PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
+ CallRuntimeWithOperands(Runtime::kDeclareLookupSlot);
break;
}
}
@@ -915,8 +876,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
}
SetExpressionPosition(clause);
- Handle<Code> ic = CodeFactory::CompareIC(isolate(), Token::EQ_STRICT,
- strength(language_mode())).code();
+ Handle<Code> ic =
+ CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -938,7 +899,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Discard the test value and jump to the default if present, otherwise to
// the end of the statement.
__ bind(&next_test);
- __ Drop(1); // Switch value is no longer needed.
+ DropOperands(1); // Switch value is no longer needed.
if (default_clause == NULL) {
__ jmp(nested_statement.break_label());
} else {
@@ -969,22 +930,21 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ForIn loop_statement(this, stmt);
increment_loop_depth();
- // Get the object to enumerate over. If the object is null or undefined, skip
- // over the loop. See ECMA-262 version 5, section 12.6.4.
+ // Get the object to enumerate over.
SetExpressionAsStatementPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
- __ cmp(eax, isolate()->factory()->undefined_value());
- __ j(equal, &exit);
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, &exit);
-
- PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+ OperandStackDepthIncrement(ForIn::kElementCount);
- // Convert the object to a JS object.
+ // If the object is null or undefined, skip over the loop, otherwise convert
+ // it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
Label convert, done_convert;
__ JumpIfSmi(eax, &convert, Label::kNear);
__ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
__ j(above_equal, &done_convert, Label::kNear);
+ __ cmp(eax, isolate()->factory()->undefined_value());
+ __ j(equal, &exit);
+ __ cmp(eax, isolate()->factory()->null_value());
+ __ j(equal, &exit);
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
@@ -992,15 +952,13 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ push(eax);
- // Check for proxies.
- Label call_runtime, use_cache, fixed_array;
- __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
- __ j(equal, &call_runtime);
-
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
+ // Note: Proxies never have an enum cache, so they will always take the
+ // slow path.
+ Label call_runtime, use_cache, fixed_array;
__ CheckEnumCache(&call_runtime);
__ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
@@ -1009,7 +967,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(eax);
- __ CallRuntime(Runtime::kGetPropertyNamesFast);
+ __ CallRuntime(Runtime::kForInEnumerate);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->meta_map());
@@ -1043,14 +1001,15 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&fixed_array);
// No need for a write barrier, we are storing a Smi in the feedback vector.
+ int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadTypeFeedbackVector(ebx);
- int vector_index = SmiFromSlot(slot)->value();
__ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ push(Immediate(Smi::FromInt(1))); // Smi(1) indicates slow check
__ push(eax); // Array
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ push(eax); // Fixed array length (as smi).
+ PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
__ push(Immediate(Smi::FromInt(0))); // Initial index.
// Generate code for doing the condition check.
@@ -1076,6 +1035,16 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(edx, FieldOperand(ecx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
+ // We might get here from TurboFan or Crankshaft when something in the
+ // for-in loop body deopts, and only now notice in fullcodegen that we
+ // can no longer use the enum cache, i.e. we left fast mode. So we better
+ // record this information here, in case we later OSR back into this loop
+ // or reoptimize the whole function without rerunning the loop with the
+ // slow-mode object in fullcodegen (which would result in a deopt loop).
+ __ EmitLoadTypeFeedbackVector(edx);
+ __ mov(FieldOperand(edx, FixedArray::OffsetOfElementAt(vector_index)),
+ Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
@@ -1113,6 +1082,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
__ add(esp, Immediate(5 * kPointerSize));
+ OperandStackDepthDecrement(ForIn::kElementCount);
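+ // The five slots dropped above are the for-in state pushed earlier
+ // (presumably ForIn::kElementCount == 5): the enumerable object, the cache
+ // type (map or Smi(1)), the cache array, its length, and the current index.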
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1355,12 +1325,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
// by eval-introduced variables.
EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
__ bind(&slow);
- __ push(esi); // Context.
__ push(Immediate(var->name()));
Runtime::FunctionId function_id =
typeof_mode == NOT_INSIDE_TYPEOF
? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotNoReferenceError;
+ : Runtime::kLoadLookupSlotInsideTypeof;
__ CallRuntime(function_id);
__ bind(&done);
context()->Plug(eax);
@@ -1385,7 +1354,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
- __ push(Immediate(isolate()->factory()->null_value()));
+ PushOperand(isolate()->factory()->null_value());
} else {
VisitForStackValue(expression);
if (NeedsHomeObject(expression)) {
@@ -1435,7 +1404,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
- __ push(eax); // Save result on the stack
+ PushOperand(eax); // Save result on the stack
result_saved = true;
}
switch (property->kind()) {
@@ -1464,24 +1433,24 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
}
- __ push(Operand(esp, 0)); // Duplicate receiver.
+ PushOperand(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
- __ push(Immediate(Smi::FromInt(SLOPPY))); // Language mode
- __ CallRuntime(Runtime::kSetProperty);
+ PushOperand(Smi::FromInt(SLOPPY)); // Language mode
+ CallRuntimeWithOperands(Runtime::kSetProperty);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
case ObjectLiteral::Property::PROTOTYPE:
- __ push(Operand(esp, 0)); // Duplicate receiver.
+ PushOperand(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
break;
@@ -1503,14 +1472,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end();
++it) {
- __ push(Operand(esp, 0)); // Duplicate receiver.
+ PushOperand(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
- __ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1527,17 +1496,17 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Expression* value = property->value();
if (!result_saved) {
- __ push(eax); // Save result on the stack
+ PushOperand(eax); // Save result on the stack
result_saved = true;
}
- __ push(Operand(esp, 0)); // Duplicate receiver.
+ PushOperand(Operand(esp, 0)); // Duplicate receiver.
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
DCHECK(!property->is_computed_name());
VisitForStackValue(value);
DCHECK(property->emit_store());
- __ CallRuntime(Runtime::kInternalSetPrototype);
+ CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
NO_REGISTERS);
} else {
@@ -1552,10 +1521,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
case ObjectLiteral::Property::COMPUTED:
if (property->emit_store()) {
- __ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineDataPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
} else {
- __ Drop(3);
+ DropOperands(3);
}
break;
@@ -1564,13 +1534,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
- __ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ push(Immediate(Smi::FromInt(NONE)));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(NONE));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
@@ -1628,14 +1598,14 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
int array_index = 0;
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- if (subexpr->IsSpread()) break;
+ DCHECK(!subexpr->IsSpread());
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(eax); // array literal.
+ PushOperand(eax); // array literal.
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
@@ -1656,21 +1626,16 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements get appended to the array. Note that the
// number of elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- __ Pop(eax);
+ PopOperand(eax);
result_saved = false;
}
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- __ Push(eax);
- if (subexpr->IsSpread()) {
- VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
- CALL_FUNCTION);
- } else {
- VisitForStackValue(subexpr);
- __ CallRuntime(Runtime::kAppendElement);
- }
+ PushOperand(eax);
+ DCHECK(!subexpr->IsSpread());
+ VisitForStackValue(subexpr);
+ CallRuntimeWithOperands(Runtime::kAppendElement);
PrepareForBailoutForId(expr->GetIdForElement(array_index), NO_REGISTERS);
}
@@ -1702,10 +1667,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
property->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
property->obj()->AsSuperPropertyReference()->home_object());
- __ push(result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
- __ push(MemOperand(esp, kPointerSize));
- __ push(result_register());
+ PushOperand(MemOperand(esp, kPointerSize));
+ PushOperand(result_register());
}
break;
case NAMED_PROPERTY:
@@ -1723,11 +1688,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(
property->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(property->key());
- __ Push(result_register());
+ PushOperand(result_register());
if (expr->is_compound()) {
- __ push(MemOperand(esp, 2 * kPointerSize));
- __ push(MemOperand(esp, 2 * kPointerSize));
- __ push(result_register());
+ PushOperand(MemOperand(esp, 2 * kPointerSize));
+ PushOperand(MemOperand(esp, 2 * kPointerSize));
+ PushOperand(result_register());
}
break;
case KEYED_PROPERTY: {
@@ -1774,7 +1739,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- __ push(eax); // Left operand goes on the stack.
+ PushOperand(eax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
if (ShouldInlineSmiCase(op)) {
@@ -1839,8 +1804,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ jmp(&suspend);
__ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
__ RecordGeneratorContinuation();
- __ jmp(&resume);
+ __ pop(ebx);
+ __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::RETURN)));
+ __ j(not_equal, &resume);
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
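+ // Protocol sketch, per EmitGeneratorResume below: the resume code pushes
+ // Smi(resume_mode) before (re)entering the continuation, so the pop into
+ // ebx above retrieves it. For JSGeneratorObject::RETURN we box the input
+ // value into an iterator result with done == true and unwind; any other
+ // mode jumps to the ordinary resume path.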
__ bind(&suspend);
VisitForAccumulatorValue(expr->generator_object());
@@ -1859,7 +1832,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
- __ pop(result_register());
+ PopOperand(result_register());
EmitReturnSequence();
__ bind(&resume);
@@ -1868,126 +1841,15 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
}
case Yield::kFinal: {
- VisitForAccumulatorValue(expr->generator_object());
- __ mov(FieldOperand(result_register(),
- JSGeneratorObject::kContinuationOffset),
- Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
// Pop value from top-of-stack slot, box result into result register.
+ OperandStackDepthDecrement(1);
EmitCreateIteratorResult(true);
- EmitUnwindBeforeReturn();
- EmitReturnSequence();
+ EmitUnwindAndReturn();
break;
}
- case Yield::kDelegating: {
- VisitForStackValue(expr->generator_object());
-
- // Initial stack layout is as follows:
- // [sp + 1 * kPointerSize] iter
- // [sp + 0 * kPointerSize] g
-
- Label l_catch, l_try, l_suspend, l_continuation, l_resume;
- Label l_next, l_call, l_loop;
- Register load_receiver = LoadDescriptor::ReceiverRegister();
- Register load_name = LoadDescriptor::NameRegister();
-
- // Initial send value is undefined.
- __ mov(eax, isolate()->factory()->undefined_value());
- __ jmp(&l_next);
-
- // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
- __ bind(&l_catch);
- __ mov(load_name, isolate()->factory()->throw_string()); // "throw"
- __ push(load_name); // "throw"
- __ push(Operand(esp, 2 * kPointerSize)); // iter
- __ push(eax); // exception
- __ jmp(&l_call);
-
- // try { received = %yield result }
- // Shuffle the received result above a try handler and yield it without
- // re-boxing.
- __ bind(&l_try);
- __ pop(eax); // result
- int handler_index = NewHandlerTableEntry();
- EnterTryBlock(handler_index, &l_catch);
- const int try_block_size = TryCatch::kElementCount * kPointerSize;
- __ push(eax); // result
-
- __ jmp(&l_suspend);
- __ bind(&l_continuation);
- __ RecordGeneratorContinuation();
- __ jmp(&l_resume);
-
- __ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + try_block_size;
- __ mov(eax, Operand(esp, generator_object_depth));
- __ push(eax); // g
- __ push(Immediate(Smi::FromInt(handler_index))); // handler-index
- DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
- __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
- Immediate(Smi::FromInt(l_continuation.pos())));
- __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
- __ mov(ecx, esi);
- __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
- kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
- __ mov(context_register(),
- Operand(ebp, StandardFrameConstants::kContextOffset));
- __ pop(eax); // result
- EmitReturnSequence();
- __ bind(&l_resume); // received in eax
- ExitTryBlock(handler_index);
-
- // receiver = iter; f = iter.next; arg = received;
- __ bind(&l_next);
-
- __ mov(load_name, isolate()->factory()->next_string());
- __ push(load_name); // "next"
- __ push(Operand(esp, 2 * kPointerSize)); // iter
- __ push(eax); // received
-
- // result = receiver[f](arg);
- __ bind(&l_call);
- __ mov(load_receiver, Operand(esp, kPointerSize));
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), SLOPPY).code();
- CallIC(ic, TypeFeedbackId::None());
- __ mov(edi, eax);
- __ mov(Operand(esp, 2 * kPointerSize), edi);
- SetCallPosition(expr);
- __ Set(eax, 1);
- __ Call(
- isolate()->builtins()->Call(ConvertReceiverMode::kNotNullOrUndefined),
- RelocInfo::CODE_TARGET);
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ Drop(1); // The function is still on the stack; drop it.
-
- // if (!result.done) goto l_try;
- __ bind(&l_loop);
- __ push(eax); // save result
- __ Move(load_receiver, eax); // result
- __ mov(load_name,
- isolate()->factory()->done_string()); // "done"
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->DoneFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF); // result.done in eax
- Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(bool_ic);
- __ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
- __ j(not_equal, &l_try);
-
- // result.value
- __ pop(load_receiver); // result
- __ mov(load_name,
- isolate()->factory()->value_string()); // "value"
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->ValueFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF); // result.value in eax
- context()->DropAndPlug(2, eax); // drop iter and g
- break;
- }
+ case Yield::kDelegating:
+ UNREACHABLE();
}
}
@@ -2001,7 +1863,13 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// ebx will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- __ pop(ebx);
+ PopOperand(ebx);
+
+ // Store input value into generator object.
+ __ mov(FieldOperand(ebx, JSGeneratorObject::kInputOffset), result_register());
+ __ mov(ecx, result_register());
+ __ RecordWriteField(ebx, JSGeneratorObject::kInputOffset, ecx, edx,
+ kDontSaveFPRegs);
// Load suspended function and context.
__ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
@@ -2051,6 +1919,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ add(edx, ecx);
__ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
__ jmp(edx);
__ bind(&slow_resume);
}
@@ -2064,6 +1933,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ push(ecx);
__ jmp(&push_operand_holes);
__ bind(&call_resume);
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
__ push(ebx);
__ push(result_register());
__ Push(Smi::FromInt(resume_mode));
@@ -2075,6 +1945,21 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
context()->Plug(result_register());
}
+void FullCodeGenerator::PushOperand(MemOperand operand) {
+ OperandStackDepthIncrement(1);
+ __ Push(operand);
+}
+
+void FullCodeGenerator::EmitOperandStackDepthCheck() {
+ if (FLAG_debug_code) {
+ int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
+ operand_stack_depth_ * kPointerSize;
+ __ mov(eax, ebp);
+ __ sub(eax, esp);
+ __ cmp(eax, Immediate(expected_diff));
+ __ Assert(equal, kUnexpectedStackDepth);
+ }
+}
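+// For instance, assuming ia32's kPointerSize of 4 and three tracked operands,
+// expected_diff is StandardFrameConstants::kFixedFrameSizeFromFp + 12; the
+// assert then catches any push or pop that bypassed the PushOperand/PopOperand
+// bookkeeping.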
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
@@ -2110,37 +1995,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
__ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
__ mov(LoadDescriptor::SlotRegister(),
Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF, language_mode());
-}
-
-
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object.
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- DCHECK(prop->IsSuperAccess());
-
- __ push(Immediate(key->value()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kLoadFromSuper);
-}
-
-
-void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
- SetExpressionPosition(prop);
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate(), language_mode()).code();
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(prop->PropertyFeedbackSlot())));
- CallIC(ic);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object, key.
- SetExpressionPosition(prop);
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallLoadIC(NOT_INSIDE_TYPEOF);
}
@@ -2151,7 +2006,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// Do combined smi check of the operands. Left operand is on the
// stack. Right operand is in eax.
Label smi_case, done, stub_call;
- __ pop(edx);
+ PopOperand(edx);
__ mov(ecx, eax);
__ or_(eax, edx);
JumpPatchSite patch_site(masm_);
@@ -2159,8 +2014,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2240,24 +2094,14 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- // Constructor is in eax.
- DCHECK(lit != NULL);
- __ push(eax);
-
- // No access check is needed here since the constructor is created by the
- // class literal.
- Register scratch = ebx;
- __ mov(scratch, FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset));
- __ Push(scratch);
-
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
if (property->is_static()) {
- __ push(Operand(esp, kPointerSize)); // constructor
+ PushOperand(Operand(esp, kPointerSize)); // constructor
} else {
- __ push(Operand(esp, 0)); // prototype
+ PushOperand(Operand(esp, 0)); // prototype
}
EmitPropertyKey(property, lit->GetIdForProperty(i));
@@ -2281,31 +2125,28 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE();
case ObjectLiteral::Property::COMPUTED:
- __ CallRuntime(Runtime::kDefineClassMethod);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
+ CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
break;
case ObjectLiteral::Property::GETTER:
- __ push(Immediate(Smi::FromInt(DONT_ENUM)));
- __ CallRuntime(Runtime::kDefineGetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
break;
case ObjectLiteral::Property::SETTER:
- __ push(Immediate(Smi::FromInt(DONT_ENUM)));
- __ CallRuntime(Runtime::kDefineSetterPropertyUnchecked);
+ PushOperand(Smi::FromInt(DONT_ENUM));
+ CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
break;
}
}
-
- // Set both the prototype and constructor to have fast properties, and also
- // freeze them in strong mode.
- __ CallRuntime(Runtime::kFinalizeClassDefinition);
}
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
- __ pop(edx);
- Handle<Code> code =
- CodeFactory::BinaryOpIC(isolate(), op, strength(language_mode())).code();
+ PopOperand(edx);
+ Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -2328,10 +2169,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_PROPERTY: {
- __ push(eax); // Preserve value.
+ PushOperand(eax); // Preserve value.
VisitForAccumulatorValue(prop->obj());
__ Move(StoreDescriptor::ReceiverRegister(), eax);
- __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
EmitLoadStoreICSlot(slot);
@@ -2339,7 +2180,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_SUPER_PROPERTY: {
- __ push(eax);
+ PushOperand(eax);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2356,7 +2197,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_SUPER_PROPERTY: {
- __ push(eax);
+ PushOperand(eax);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
@@ -2376,12 +2217,12 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case KEYED_PROPERTY: {
- __ push(eax); // Preserve value.
+ PushOperand(eax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ Move(StoreDescriptor::NameRegister(), eax);
- __ pop(StoreDescriptor::ReceiverRegister()); // Receiver.
- __ pop(StoreDescriptor::ValueRegister()); // Restore value.
+ PopOperand(StoreDescriptor::ReceiverRegister()); // Receiver.
+ PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
EmitLoadStoreICSlot(slot);
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2461,17 +2302,17 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
(var->mode() == CONST && op == Token::INIT)) {
if (var->IsLookupSlot()) {
// Assignment to var.
- __ push(eax); // Value.
- __ push(esi); // Context.
- __ push(Immediate(var->name()));
- __ push(Immediate(Smi::FromInt(language_mode())));
- __ CallRuntime(Runtime::kStoreLookupSlot);
+ __ Push(Immediate(var->name()));
+ __ Push(eax);
+ __ CallRuntime(is_strict(language_mode())
+ ? Runtime::kStoreLookupSlot_Strict
+ : Runtime::kStoreLookupSlot_Sloppy);
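+ // Note the calling convention assumed here: the language mode is baked
+ // into the runtime function id and the context comes from the context
+ // register, so only (name, value) need to be passed on the stack (the old
+ // code also pushed esi and a language-mode Smi).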
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, ecx);
- if (generate_debug_code_ && var->mode() == LET && op == Token::INIT) {
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
__ mov(edx, location);
__ cmp(edx, isolate()->factory()->the_hole_value());
@@ -2518,7 +2359,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
DCHECK(prop->key()->IsLiteral());
__ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->AssignmentSlot());
CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2534,10 +2375,11 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
- __ push(Immediate(key->value()));
- __ push(eax);
- __ CallRuntime((is_strict(language_mode()) ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy));
+ PushOperand(key->value());
+ PushOperand(eax);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy);
}
@@ -2546,10 +2388,10 @@ void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// eax : value
// stack : receiver ('this'), home_object, key
- __ push(eax);
- __ CallRuntime((is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy));
+ PushOperand(eax);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy);
}
@@ -2559,8 +2401,8 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// esp[0] : key
// esp[kPointerSize] : receiver
- __ pop(StoreDescriptor::NameRegister()); // Key.
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::NameRegister()); // Key.
+ PopOperand(StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(eax));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2592,7 +2434,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ pop(LoadDescriptor::ReceiverRegister()); // Object.
+ PopOperand(LoadDescriptor::ReceiverRegister()); // Object.
__ Move(LoadDescriptor::NameRegister(), result_register()); // Key.
EmitKeyedPropertyLoad(expr);
} else {
@@ -2628,7 +2470,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
- __ push(Immediate(isolate()->factory()->undefined_value()));
+ PushOperand(isolate()->factory()->undefined_value());
convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
@@ -2638,7 +2480,7 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ push(Operand(esp, 0));
+ PushOperand(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
@@ -2660,19 +2502,17 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
- __ push(eax);
- __ push(eax);
- __ push(Operand(esp, kPointerSize * 2));
- __ push(Immediate(key->value()));
- __ push(Immediate(Smi::FromInt(language_mode())));
+ PushOperand(eax);
+ PushOperand(eax);
+ PushOperand(Operand(esp, kPointerSize * 2));
+ PushOperand(key->value());
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadFromSuper);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2700,7 +2540,7 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ push(Operand(esp, 0));
+ PushOperand(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
@@ -2718,19 +2558,17 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
VisitForStackValue(super_ref->home_object());
VisitForAccumulatorValue(super_ref->this_var());
- __ push(eax);
- __ push(eax);
- __ push(Operand(esp, kPointerSize * 2));
+ PushOperand(eax);
+ PushOperand(eax);
+ PushOperand(Operand(esp, kPointerSize * 2));
VisitForStackValue(prop->key());
- __ push(Immediate(Smi::FromInt(language_mode())));
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
- // - language_mode
- __ CallRuntime(Runtime::kLoadKeyedFromSuper);
+ CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
__ mov(Operand(esp, kPointerSize), eax);
@@ -2752,12 +2590,23 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
SetCallPosition(expr);
- Handle<Code> ic = CodeFactory::CallIC(isolate(), arg_count, mode).code();
+ if (expr->tail_call_mode() == TailCallMode::kAllow) {
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceTailCall);
+ }
+ // Update profiling counters before the tail call since we will
+ // not return to this function.
+ EmitProfilingCounterHandlingForReturnSequence(true);
+ }
+ Handle<Code> ic =
+ CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
+ .code();
__ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
CallIC(ic);
+ OperandStackDepthDecrement(arg_count + 1);
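+ // The receiver and the arg_count arguments pushed for this call are
+ // consumed by the CallIC, so the virtual operand depth drops by
+ // arg_count + 1 to stay in sync with esp.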
RecordJSReturnSite(expr);
@@ -2803,11 +2652,10 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in eax) and
// the object holding it (returned in edx).
- __ push(context_register());
- __ push(Immediate(callee->name()));
- __ CallRuntime(Runtime::kLoadLookupSlot);
- __ push(eax); // Function.
- __ push(edx); // Receiver.
+ __ Push(callee->name());
+ __ CallRuntime(Runtime::kLoadLookupSlotForCall);
+ PushOperand(eax); // Function.
+ PushOperand(edx); // Receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the function
@@ -2826,7 +2674,7 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
} else {
VisitForStackValue(callee);
// refEnv.WithBaseObject()
- __ push(Immediate(isolate()->factory()->undefined_value()));
+ PushOperand(isolate()->factory()->undefined_value());
}
}
@@ -2858,7 +2706,10 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
SetCallPosition(expr);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ Set(eax, arg_count);
- __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ expr->tail_call_mode()),
+ RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2899,6 +2750,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
CallConstructStub stub(isolate());
__ call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2917,7 +2769,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ AssertFunction(result_register());
__ mov(result_register(),
FieldOperand(result_register(), HeapObject::kMapOffset));
- __ Push(FieldOperand(result_register(), Map::kPrototypeOffset));
+ PushOperand(FieldOperand(result_register(), Map::kPrototypeOffset));
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -2939,6 +2791,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ mov(edi, Operand(esp, arg_count * kPointerSize));
__ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
RecordJSReturnSite(expr);
@@ -2991,77 +2844,6 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, SIMD128_VALUE_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, FIRST_FUNCTION_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(above_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
- __ CheckMap(eax, map, if_false, DO_SMI_CHECK);
- // Check if the exponent half is 0x80000000. Comparing against 1 and
- // checking for overflow is the shortest possible encoding.
- __ cmp(FieldOperand(eax, HeapNumber::kExponentOffset), Immediate(0x1));
- __ j(no_overflow, if_false);
- __ cmp(FieldOperand(eax, HeapNumber::kMantissaOffset), Immediate(0x0));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3150,68 +2932,6 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- // Load the two objects into registers and perform the comparison.
- VisitForStackValue(args->at(0));
- VisitForAccumulatorValue(args->at(1));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ pop(ebx);
- __ cmp(eax, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- // ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in eax.
- VisitForAccumulatorValue(args->at(0));
- __ mov(edx, eax);
- __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
- ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
- __ CallStub(&stub);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
- DCHECK(expr->arguments()->length() == 0);
-
- Label exit;
- // Get the number of formal parameters.
- __ Move(eax, Immediate(Smi::FromInt(info_->scope()->num_parameters())));
-
- // Check if the calling frame is an arguments adaptor frame.
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
- Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &exit);
-
- // Arguments adaptor case: Read the arguments length from the
- // adaptor frame.
- __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ bind(&exit);
- __ AssertSmi(eax);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3280,28 +3000,6 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsDate(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = nullptr;
- Label* if_false = nullptr;
- Label* fall_through = nullptr;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_DATE_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
@@ -3314,8 +3012,8 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ pop(value);
- __ pop(index);
+ PopOperand(value);
+ PopOperand(index);
if (FLAG_debug_code) {
__ test(value, Immediate(kSmiTagMask));
@@ -3349,8 +3047,8 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(2)); // string
- __ pop(value);
- __ pop(index);
+ PopOperand(value);
+ PopOperand(index);
if (FLAG_debug_code) {
__ test(value, Immediate(kSmiTagMask));
@@ -3371,35 +3069,6 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
-
- VisitForStackValue(args->at(0)); // Load the object.
- VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(ebx); // eax = value. ebx = object.
-
- Label done;
- // If the object is a smi, return the value.
- __ JumpIfSmi(ebx, &done, Label::kNear);
-
- // If the object is not a value type, return the value.
- __ CmpObjectType(ebx, JS_VALUE_TYPE, ecx);
- __ j(not_equal, &done, Label::kNear);
-
- // Store the value.
- __ mov(FieldOperand(ebx, JSValue::kValueOffset), eax);
-
- // Update the write barrier. Save the value as it will be
- // overwritten by the write barrier code and is needed afterward.
- __ mov(edx, eax);
- __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
-
- __ bind(&done);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3417,27 +3086,6 @@ void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitToName(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(1, args->length());
-
- // Load the argument into eax and convert it.
- VisitForAccumulatorValue(args->at(0));
-
- // Convert the object to a name.
- Label convert, done_convert;
- __ JumpIfSmi(eax, &convert, Label::kNear);
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- __ CmpObjectType(eax, LAST_NAME_TYPE, ecx);
- __ j(below_equal, &done_convert, Label::kNear);
- __ bind(&convert);
- __ Push(eax);
- __ CallRuntime(Runtime::kToName);
- __ bind(&done_convert);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3468,7 +3116,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Register index = eax;
Register result = edx;
- __ pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3515,7 +3163,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
Register scratch = edx;
Register result = eax;
- __ pop(object);
+ PopOperand(object);
Label need_conversion;
Label index_out_of_range;
@@ -3565,6 +3213,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
// Call the target.
__ mov(eax, Immediate(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(argc + 1);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
@@ -3621,275 +3270,6 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
- Label bailout, done, one_char_separator, long_separator,
- non_trivial_array, not_size_one_array, loop,
- loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
-
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 2);
- // We will leave the separator on the stack until the end of the function.
- VisitForStackValue(args->at(1));
- // Load this to eax (= array)
- VisitForAccumulatorValue(args->at(0));
- // All aliases of the same register have disjoint lifetimes.
- Register array = eax;
- Register elements = no_reg; // Will be eax.
-
- Register index = edx;
-
- Register string_length = ecx;
-
- Register string = esi;
-
- Register scratch = ebx;
-
- Register array_length = edi;
- Register result_pos = no_reg; // Will be edi.
-
- // Separator operand is already pushed.
- Operand separator_operand = Operand(esp, 2 * kPointerSize);
- Operand result_operand = Operand(esp, 1 * kPointerSize);
- Operand array_length_operand = Operand(esp, 0);
- __ sub(esp, Immediate(2 * kPointerSize));
- __ cld();
- // Check that the array is a JSArray
- __ JumpIfSmi(array, &bailout);
- __ CmpObjectType(array, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &bailout);
-
- // Check that the array has fast elements.
- __ CheckFastElements(scratch, &bailout);
-
- // If the array has length zero, return the empty string.
- __ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length);
- __ j(not_zero, &non_trivial_array);
- __ mov(result_operand, isolate()->factory()->empty_string());
- __ jmp(&done);
-
- // Save the array length.
- __ bind(&non_trivial_array);
- __ mov(array_length_operand, array_length);
-
- // Save the FixedArray containing array's elements.
- // End of array's live range.
- elements = array;
- __ mov(elements, FieldOperand(array, JSArray::kElementsOffset));
- array = no_reg;
-
-
- // Check that all array elements are sequential one-byte strings, and
- // accumulate the sum of their lengths, as a smi-encoded value.
- __ Move(index, Immediate(0));
- __ Move(string_length, Immediate(0));
- // Loop condition: while (index < length).
- // Live loop registers: index, array_length, string,
- // scratch, string_length, elements.
- if (generate_debug_code_) {
- __ cmp(index, array_length);
- __ Assert(less, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
- }
- __ bind(&loop);
- __ mov(string, FieldOperand(elements,
- index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ JumpIfSmi(string, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
- __ add(string_length,
- FieldOperand(string, SeqOneByteString::kLengthOffset));
- __ j(overflow, &bailout);
- __ add(index, Immediate(1));
- __ cmp(index, array_length);
- __ j(less, &loop);
-
- // If array_length is 1, return elements[0], a string.
- __ cmp(array_length, 1);
- __ j(not_equal, &not_size_one_array);
- __ mov(scratch, FieldOperand(elements, FixedArray::kHeaderSize));
- __ mov(result_operand, scratch);
- __ jmp(&done);
-
- __ bind(&not_size_one_array);
-
- // End of array_length live range.
- result_pos = array_length;
- array_length = no_reg;
-
- // Live registers:
- // string_length: Sum of string lengths, as a smi.
- // elements: FixedArray of strings.
-
- // Check that the separator is a flat one-byte string.
- __ mov(string, separator_operand);
- __ JumpIfSmi(string, &bailout);
- __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
- __ j(not_equal, &bailout);
-
- // Add (separator length times array_length) - separator length
- // to string_length.
- __ mov(scratch, separator_operand);
- __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset));
- __ sub(string_length, scratch); // May be negative, temporarily.
- __ imul(scratch, array_length_operand);
- __ j(overflow, &bailout);
- __ add(string_length, scratch);
- __ j(overflow, &bailout);
-
- __ shr(string_length, 1);
-
- // Bailout for large object allocations.
- __ cmp(string_length, Page::kMaxRegularHeapObjectSize);
- __ j(greater, &bailout);
-
- // Live registers and stack values:
- // string_length
- // elements
- __ AllocateOneByteString(result_pos, string_length, scratch, index, string,
- &bailout);
- __ mov(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
-
-
- __ mov(string, separator_operand);
- __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- __ j(equal, &one_char_separator);
- __ j(greater, &long_separator);
-
-
- // Empty separator case
- __ mov(index, Immediate(0));
- __ jmp(&loop_1_condition);
- // Loop condition: while (index < length).
- __ bind(&loop_1);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
- // elements: the FixedArray of strings we are joining.
-
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
- __ bind(&loop_1_condition);
- __ cmp(index, array_length_operand);
- __ j(less, &loop_1); // End while (index < length).
- __ jmp(&done);
-
-
-
- // One-character separator case
- __ bind(&one_char_separator);
- // Replace separator with its one-byte character value.
- __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ mov_b(separator_operand, scratch);
-
- __ Move(index, Immediate(0));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_2_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_2);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator character to the result.
- __ mov_b(scratch, separator_operand);
- __ mov_b(Operand(result_pos, 0), scratch);
- __ inc(result_pos);
-
- __ bind(&loop_2_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_2); // End while (index < length).
- __ jmp(&done);
-
-
- // Long separator case (separator is more than one character).
- __ bind(&long_separator);
-
- __ Move(index, Immediate(0));
- // Jump into the loop after the code that copies the separator, so the first
- // element is not preceded by a separator
- __ jmp(&loop_3_entry);
- // Loop condition: while (index < length).
- __ bind(&loop_3);
- // Each iteration of the loop concatenates one string to the result.
- // Live values in registers:
- // index: which element of the elements array we are adding to the result.
- // result_pos: the position to which we are currently copying characters.
-
- // Copy the separator to the result.
- __ mov(string, separator_operand);
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
-
- __ bind(&loop_3_entry);
- // Get string = array[index].
- __ mov(string, FieldOperand(elements, index,
- times_pointer_size,
- FixedArray::kHeaderSize));
- __ mov(string_length,
- FieldOperand(string, String::kLengthOffset));
- __ shr(string_length, 1);
- __ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ add(index, Immediate(1));
-
- __ cmp(index, array_length_operand);
- __ j(less, &loop_3); // End while (index < length).
- __ jmp(&done);
-
-
- __ bind(&bailout);
- __ mov(result_operand, isolate()->factory()->undefined_value());
- __ bind(&done);
- __ mov(eax, result_operand);
- // Drop temp values from the stack, and restore context register.
- __ add(esp, Immediate(3 * kPointerSize));
-
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
@@ -3922,7 +3302,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ jmp(&done, Label::kNear);
__ bind(&runtime);
- __ CallRuntime(Runtime::kCreateIterResultObject);
+ CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
__ bind(&done);
context()->Plug(eax);
@@ -3931,7 +3311,7 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push undefined as receiver.
- __ push(Immediate(isolate()->factory()->undefined_value()));
+ PushOperand(isolate()->factory()->undefined_value());
__ LoadGlobalFunction(expr->context_index(), eax);
}
@@ -3946,6 +3326,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
__ Set(eax, arg_count);
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
+ OperandStackDepthDecrement(arg_count + 1);
}
@@ -3958,7 +3339,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
EmitLoadJSRuntimeFunction(expr);
// Push the target function under the receiver.
- __ push(Operand(esp, 0));
+ PushOperand(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
// Push the arguments ("left-to-right").
@@ -3993,6 +3374,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the C runtime function.
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
+ OperandStackDepthDecrement(arg_count);
context()->Plug(eax);
}
}
@@ -4010,9 +3392,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ CallRuntimeWithOperands(is_strict(language_mode())
+ ? Runtime::kDeleteProperty_Strict
+ : Runtime::kDeleteProperty_Sloppy);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -4034,8 +3416,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- __ push(context_register());
- __ push(Immediate(var->name()));
+ __ Push(var->name());
__ CallRuntime(Runtime::kDeleteLookupSlot);
context()->Plug(eax);
}
@@ -4080,6 +3461,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
&materialize_false,
&materialize_true,
&materialize_true);
+ if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
if (context()->IsAccumulatorValue()) {
@@ -4135,7 +3517,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
- __ push(Immediate(Smi::FromInt(0)));
+ PushOperand(Smi::FromInt(0));
}
switch (assign_type) {
case NAMED_PROPERTY: {
@@ -4150,9 +3532,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- __ push(result_register());
- __ push(MemOperand(esp, kPointerSize));
- __ push(result_register());
+ PushOperand(result_register());
+ PushOperand(MemOperand(esp, kPointerSize));
+ PushOperand(result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
@@ -4162,10 +3544,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
- __ push(result_register());
- __ push(MemOperand(esp, 2 * kPointerSize));
- __ push(MemOperand(esp, 2 * kPointerSize));
- __ push(result_register());
+ PushOperand(result_register());
+ PushOperand(MemOperand(esp, 2 * kPointerSize));
+ PushOperand(MemOperand(esp, 2 * kPointerSize));
+ PushOperand(result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
@@ -4255,7 +3637,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(eax);
+ PushOperand(eax);
break;
case NAMED_PROPERTY:
__ mov(Operand(esp, kPointerSize), eax);
@@ -4279,8 +3661,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ bind(&stub_call);
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
- Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
- strength(language_mode())).code();
+ Handle<Code> code =
+ CodeFactory::BinaryOpIC(isolate(), expr->binary_op()).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4315,7 +3697,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(StoreDescriptor::NameRegister(),
prop->key()->AsLiteral()->value());
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(expr->CountSlot());
CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -4351,8 +3733,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ pop(StoreDescriptor::NameRegister());
- __ pop(StoreDescriptor::ReceiverRegister());
+ PopOperand(StoreDescriptor::NameRegister());
+ PopOperand(StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
EmitLoadStoreICSlot(expr->CountSlot());
@@ -4407,8 +3789,8 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ cmp(eax, isolate()->factory()->false_value());
Split(equal, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->undefined_string())) {
- __ cmp(eax, isolate()->factory()->undefined_value());
- __ j(equal, if_true);
+ __ cmp(eax, isolate()->factory()->null_value());
+ __ j(equal, if_false);
__ JumpIfSmi(eax, if_false);
// Check for undetectable objects => true.
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
@@ -4473,7 +3855,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ CallRuntime(Runtime::kHasProperty);
+ CallRuntimeWithOperands(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
@@ -4481,7 +3863,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
- __ Pop(edx);
+ PopOperand(edx);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
@@ -4493,7 +3875,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = CompareIC::ComputeCondition(op);
- __ pop(edx);
+ PopOperand(edx);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
@@ -4507,8 +3889,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&slow_case);
}
- Handle<Code> ic = CodeFactory::CompareIC(
- isolate(), op, strength(language_mode())).code();
+ Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
@@ -4588,15 +3969,15 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// as their closure, not the anonymous closure containing the global
// code.
__ mov(eax, NativeContextOperand());
- __ push(ContextOperand(eax, Context::CLOSURE_INDEX));
+ PushOperand(ContextOperand(eax, Context::CLOSURE_INDEX));
} else if (closure_scope->is_eval_scope()) {
// Contexts nested inside eval code have the same closure as the context
// calling eval, not the anonymous closure containing the eval code.
// Fetch it from the context.
- __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
+ PushOperand(ContextOperand(esi, Context::CLOSURE_INDEX));
} else {
DCHECK(closure_scope->is_function_scope());
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ PushOperand(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
}
@@ -4605,23 +3986,11 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// Non-local control flow support.
void FullCodeGenerator::EnterFinallyBlock() {
- // Cook return address on top of stack (smi encoded Code* delta)
- DCHECK(!result_register().is(edx));
- __ pop(edx);
- __ sub(edx, Immediate(masm_->CodeObject()));
- STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ SmiTag(edx);
- __ push(edx);
-
- // Store result register while executing finally block.
- __ push(result_register());
-
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(edx, Operand::StaticVariable(pending_message_obj));
- __ push(edx);
+ PushOperand(edx);
ClearPendingMessage();
}
@@ -4630,19 +3999,10 @@ void FullCodeGenerator::EnterFinallyBlock() {
void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(edx));
// Restore pending message from stack.
- __ pop(edx);
+ PopOperand(edx);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(Operand::StaticVariable(pending_message_obj), edx);
-
- // Restore result register from stack.
- __ pop(result_register());
-
- // Uncook return address.
- __ pop(edx);
- __ SmiUntag(edx);
- __ add(edx, Immediate(masm_->CodeObject()));
- __ jmp(edx);
}
@@ -4661,6 +4021,32 @@ void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
Immediate(SmiFromSlot(slot)));
}
+void FullCodeGenerator::DeferredCommands::EmitCommands() {
+ DCHECK(!result_register().is(edx));
+ __ Pop(result_register()); // Restore the accumulator.
+ __ Pop(edx); // Get the token.
+ for (DeferredCommand cmd : commands_) {
+ Label skip;
+ __ cmp(edx, Immediate(Smi::FromInt(cmd.token)));
+ __ j(not_equal, &skip);
+ switch (cmd.command) {
+ case kReturn:
+ codegen_->EmitUnwindAndReturn();
+ break;
+ case kThrow:
+ __ Push(result_register());
+ __ CallRuntime(Runtime::kReThrow);
+ break;
+ case kContinue:
+ codegen_->EmitContinue(cmd.target);
+ break;
+ case kBreak:
+ codegen_->EmitBreak(cmd.target);
+ break;
+ }
+ __ bind(&skip);
+ }
+}
#undef __
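[Editor's sketch] The DeferredCommands::EmitCommands body added above is a small token dispatch: each site that defers a command (break, continue, return, rethrow) through a finally block records a smi token, and the join point compares the token popped off the stack against every recorded command. A minimal standalone restatement of the pattern in plain C++ (the Command and DeferredCommand types here are illustrative, not the V8 ones):

    #include <vector>

    enum class Command { kReturn, kThrow, kContinue, kBreak };

    struct DeferredCommand {
      int token;        // unique id pushed by the site that deferred the command
      Command command;  // what to replay once the finally block has run
    };

    // Replay whichever deferred command matches the popped token; non-matching
    // entries are skipped, mirroring the cmp/j(not_equal, &skip) pairs above.
    void EmitCommands(const std::vector<DeferredCommand>& commands, int token) {
      for (const DeferredCommand& cmd : commands) {
        if (cmd.token != token) continue;
        switch (cmd.command) {
          case Command::kReturn:   /* unwind frame and return */   break;
          case Command::kThrow:    /* rethrow the accumulator */   break;
          case Command::kContinue: /* jump to the loop target */   break;
          case Command::kBreak:    /* jump past the loop target */ break;
        }
      }
    }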
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 4a7292547f..edd52b0ceb 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -817,8 +817,6 @@ void GlobalHandles::InvokeSecondPassPhantomCallbacks(
while (callbacks->length() != 0) {
auto callback = callbacks->RemoveLast();
DCHECK(callback.node() == nullptr);
- // No second pass callback required.
- if (callback.callback() == nullptr) continue;
// Fire second pass callback
callback.Invoke(isolate);
}
@@ -924,6 +922,7 @@ void GlobalHandles::UpdateListOfNewSpaceNodes() {
int GlobalHandles::DispatchPendingPhantomCallbacks(
bool synchronous_second_pass) {
int freed_nodes = 0;
+ List<PendingPhantomCallback> second_pass_callbacks;
{
// The initial pass callbacks must simply clear the nodes.
for (auto i = pending_phantom_callbacks_.begin();
@@ -932,24 +931,25 @@ int GlobalHandles::DispatchPendingPhantomCallbacks(
// Skip callbacks that have already been processed once.
if (callback->node() == nullptr) continue;
callback->Invoke(isolate());
+ if (callback->callback()) second_pass_callbacks.Add(*callback);
freed_nodes++;
}
}
- if (pending_phantom_callbacks_.length() > 0) {
+ pending_phantom_callbacks_.Clear();
+ if (second_pass_callbacks.length() > 0) {
if (FLAG_optimize_for_size || FLAG_predictable || synchronous_second_pass) {
isolate()->heap()->CallGCPrologueCallbacks(
GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
- InvokeSecondPassPhantomCallbacks(&pending_phantom_callbacks_, isolate());
+ InvokeSecondPassPhantomCallbacks(&second_pass_callbacks, isolate());
isolate()->heap()->CallGCEpilogueCallbacks(
GCType::kGCTypeProcessWeakCallbacks, kNoGCCallbackFlags);
} else {
auto task = new PendingPhantomCallbacksSecondPassTask(
- &pending_phantom_callbacks_, isolate());
+ &second_pass_callbacks, isolate());
V8::GetCurrentPlatform()->CallOnForegroundThread(
reinterpret_cast<v8::Isolate*>(isolate()), task);
}
}
- pending_phantom_callbacks_.Clear();
return freed_nodes;
}
@@ -984,7 +984,7 @@ int GlobalHandles::PostGarbageCollectionProcessing(
int freed_nodes = 0;
bool synchronous_second_pass =
(gc_callback_flags &
- (kGCCallbackFlagForced |
+ (kGCCallbackFlagForced | kGCCallbackFlagCollectAllAvailableGarbage |
kGCCallbackFlagSynchronousPhantomCallbackProcessing)) != 0;
freed_nodes += DispatchPendingPhantomCallbacks(synchronous_second_pass);
if (initial_post_gc_processing_count != post_gc_processing_count_) {
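[Editor's sketch] For embedders, the reorganized dispatch above preserves the two-pass weak-callback contract: the first pass runs while the GC is still processing and may only reset handles; freeing external resources belongs in an explicitly requested second pass, which this change now collects into a local list before dispatch. A sketch of the embedder side (the Wrapper type and the registration line are hypothetical; the v8::WeakCallbackInfo API is real):

    #include <v8.h>

    struct Wrapper {
      v8::Global<v8::Object> handle;
      char* external_data;
    };

    // Second pass: the safe place to release external resources.
    void SecondPass(const v8::WeakCallbackInfo<Wrapper>& info) {
      delete[] info.GetParameter()->external_data;
      delete info.GetParameter();
    }

    // First pass: must reset the handle; heavier cleanup is deferred.
    void FirstPass(const v8::WeakCallbackInfo<Wrapper>& info) {
      info.GetParameter()->handle.Reset();
      info.SetSecondPassCallback(SecondPass);
    }

    // Registration (hypothetical call site):
    // wrapper->handle.SetWeak(wrapper, FirstPass, v8::WeakCallbackType::kParameter);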
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 67bdb63b86..be401a62ec 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -529,7 +529,7 @@ enum VisitMode {
};
// Flag indicating whether code is built into the VM (one of the natives files).
-enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
+enum NativesFlag { NOT_NATIVES_CODE, EXTENSION_CODE, NATIVES_CODE };
// JavaScript defines two kinds of 'nil'.
enum NilValue { kNullValue, kUndefinedValue };
@@ -754,6 +754,45 @@ inline std::ostream& operator<<(std::ostream& os, ConvertReceiverMode mode) {
return os;
}
+// Defines whether tail call optimization is allowed.
+enum class TailCallMode : unsigned { kAllow, kDisallow };
+
+inline size_t hash_value(TailCallMode mode) { return bit_cast<unsigned>(mode); }
+
+inline std::ostream& operator<<(std::ostream& os, TailCallMode mode) {
+ switch (mode) {
+ case TailCallMode::kAllow:
+ return os << "ALLOW_TAIL_CALLS";
+ case TailCallMode::kDisallow:
+ return os << "DISALLOW_TAIL_CALLS";
+ }
+ UNREACHABLE();
+ return os;
+}
+
+// Defines specifics about arguments object or rest parameter creation.
+enum class CreateArgumentsType : uint8_t {
+ kMappedArguments,
+ kUnmappedArguments,
+ kRestParameter
+};
+
+inline size_t hash_value(CreateArgumentsType type) {
+ return bit_cast<uint8_t>(type);
+}
+
+inline std::ostream& operator<<(std::ostream& os, CreateArgumentsType type) {
+ switch (type) {
+ case CreateArgumentsType::kMappedArguments:
+ return os << "MAPPED_ARGUMENTS";
+ case CreateArgumentsType::kUnmappedArguments:
+ return os << "UNMAPPED_ARGUMENTS";
+ case CreateArgumentsType::kRestParameter:
+ return os << "REST_PARAMETER";
+ }
+ UNREACHABLE();
+ return os;
+}
// Used to specify if a macro instruction must perform a smi check on tagged
// values.
@@ -934,43 +973,37 @@ enum MinusZeroMode {
enum Signedness { kSigned, kUnsigned };
-
enum FunctionKind {
kNormalFunction = 0,
kArrowFunction = 1 << 0,
kGeneratorFunction = 1 << 1,
kConciseMethod = 1 << 2,
kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
- kAccessorFunction = 1 << 3,
- kDefaultConstructor = 1 << 4,
- kSubclassConstructor = 1 << 5,
- kBaseConstructor = 1 << 6,
- kInObjectLiteral = 1 << 7,
+ kDefaultConstructor = 1 << 3,
+ kSubclassConstructor = 1 << 4,
+ kBaseConstructor = 1 << 5,
+ kGetterFunction = 1 << 6,
+ kSetterFunction = 1 << 7,
+ kAccessorFunction = kGetterFunction | kSetterFunction,
kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor,
kClassConstructor =
kBaseConstructor | kSubclassConstructor | kDefaultConstructor,
- kConciseMethodInObjectLiteral = kConciseMethod | kInObjectLiteral,
- kConciseGeneratorMethodInObjectLiteral =
- kConciseGeneratorMethod | kInObjectLiteral,
- kAccessorFunctionInObjectLiteral = kAccessorFunction | kInObjectLiteral,
};
-
inline bool IsValidFunctionKind(FunctionKind kind) {
return kind == FunctionKind::kNormalFunction ||
kind == FunctionKind::kArrowFunction ||
kind == FunctionKind::kGeneratorFunction ||
kind == FunctionKind::kConciseMethod ||
kind == FunctionKind::kConciseGeneratorMethod ||
+ kind == FunctionKind::kGetterFunction ||
+ kind == FunctionKind::kSetterFunction ||
kind == FunctionKind::kAccessorFunction ||
kind == FunctionKind::kDefaultBaseConstructor ||
kind == FunctionKind::kDefaultSubclassConstructor ||
kind == FunctionKind::kBaseConstructor ||
- kind == FunctionKind::kSubclassConstructor ||
- kind == FunctionKind::kConciseMethodInObjectLiteral ||
- kind == FunctionKind::kConciseGeneratorMethodInObjectLiteral ||
- kind == FunctionKind::kAccessorFunctionInObjectLiteral;
+ kind == FunctionKind::kSubclassConstructor;
}
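[Editor's sketch] Because kGetterFunction and kSetterFunction now occupy distinct bits and kAccessorFunction is simply their union, the predicates below reduce to single mask tests. A compilable restatement of just the flag layout (values copied from the enum above):

    #include <cassert>

    enum FunctionKind {
      kNormalFunction = 0,
      kGetterFunction = 1 << 6,
      kSetterFunction = 1 << 7,
      kAccessorFunction = kGetterFunction | kSetterFunction,
    };

    int main() {
      assert(kGetterFunction & kAccessorFunction);    // getters are accessors
      assert(kSetterFunction & kAccessorFunction);    // setters are accessors
      assert(!(kNormalFunction & kAccessorFunction)); // plain functions are not
      return 0;
    }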
@@ -991,6 +1024,15 @@ inline bool IsConciseMethod(FunctionKind kind) {
return kind & FunctionKind::kConciseMethod;
}
+inline bool IsGetterFunction(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind & FunctionKind::kGetterFunction;
+}
+
+inline bool IsSetterFunction(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind & FunctionKind::kSetterFunction;
+}
inline bool IsAccessorFunction(FunctionKind kind) {
DCHECK(IsValidFunctionKind(kind));
@@ -1024,24 +1066,21 @@ inline bool IsClassConstructor(FunctionKind kind) {
inline bool IsConstructable(FunctionKind kind, LanguageMode mode) {
if (IsAccessorFunction(kind)) return false;
- if (IsConciseMethod(kind) && !IsGeneratorFunction(kind)) return false;
+ if (IsConciseMethod(kind)) return false;
if (IsArrowFunction(kind)) return false;
+ if (IsGeneratorFunction(kind)) return false;
if (is_strong(mode)) return IsClassConstructor(kind);
return true;
}
-inline bool IsInObjectLiteral(FunctionKind kind) {
- DCHECK(IsValidFunctionKind(kind));
- return kind & FunctionKind::kInObjectLiteral;
+inline uint32_t ObjectHash(Address address) {
+ // All objects are at least pointer aligned, so we can remove the trailing
+ // zeros.
+ return static_cast<uint32_t>(bit_cast<uintptr_t>(address) >>
+ kPointerSizeLog2);
}
-
-inline FunctionKind WithObjectLiteralBit(FunctionKind kind) {
- kind = static_cast<FunctionKind>(kind | FunctionKind::kInObjectLiteral);
- DCHECK(IsValidFunctionKind(kind));
- return kind;
-}
} // namespace internal
} // namespace v8
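[Editor's sketch] The new ObjectHash helper relies on pointer alignment: the low kPointerSizeLog2 bits of any object address are always zero, so shifting them off discards no information while mapping adjacent objects to adjacent hash values. A standalone restatement (kPointerSizeLog2 assumed to be 3, i.e. a 64-bit target):

    #include <cassert>
    #include <cstdint>

    inline uint32_t ObjectHash(uintptr_t address) {
      const unsigned kPointerSizeLog2 = 3;  // log2(sizeof(void*)) on 64-bit
      return static_cast<uint32_t>(address >> kPointerSizeLog2);
    }

    int main() {
      // Two pointer-aligned neighbours hash to consecutive values instead of
      // values 8 apart, so small hash tables are filled densely.
      assert(ObjectHash(0x1008) == ObjectHash(0x1000) + 1);
      return 0;
    }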
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
new file mode 100644
index 0000000000..4a772ebbac
--- /dev/null
+++ b/deps/v8/src/heap-symbols.h
@@ -0,0 +1,203 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SYMBOLS_H_
+#define V8_HEAP_SYMBOLS_H_
+
+#define INTERNALIZED_STRING_LIST(V) \
+ V(anonymous_string, "anonymous") \
+ V(apply_string, "apply") \
+ V(assign_string, "assign") \
+ V(arguments_string, "arguments") \
+ V(Arguments_string, "Arguments") \
+ V(Array_string, "Array") \
+ V(bind_string, "bind") \
+ V(bool16x8_string, "bool16x8") \
+ V(Bool16x8_string, "Bool16x8") \
+ V(bool32x4_string, "bool32x4") \
+ V(Bool32x4_string, "Bool32x4") \
+ V(bool8x16_string, "bool8x16") \
+ V(Bool8x16_string, "Bool8x16") \
+ V(boolean_string, "boolean") \
+ V(Boolean_string, "Boolean") \
+ V(bound__string, "bound ") \
+ V(byte_length_string, "byteLength") \
+ V(byte_offset_string, "byteOffset") \
+ V(call_string, "call") \
+ V(callee_string, "callee") \
+ V(caller_string, "caller") \
+ V(cell_value_string, "%cell_value") \
+ V(char_at_string, "CharAt") \
+ V(closure_string, "(closure)") \
+ V(compare_ic_string, "==") \
+ V(configurable_string, "configurable") \
+ V(constructor_string, "constructor") \
+ V(construct_string, "construct") \
+ V(create_string, "create") \
+ V(Date_string, "Date") \
+ V(default_string, "default") \
+ V(defineProperty_string, "defineProperty") \
+ V(deleteProperty_string, "deleteProperty") \
+ V(display_name_string, "displayName") \
+ V(done_string, "done") \
+ V(dot_result_string, ".result") \
+ V(dot_string, ".") \
+ V(entries_string, "entries") \
+ V(enumerable_string, "enumerable") \
+ V(enumerate_string, "enumerate") \
+ V(Error_string, "Error") \
+ V(eval_string, "eval") \
+ V(false_string, "false") \
+ V(float32x4_string, "float32x4") \
+ V(Float32x4_string, "Float32x4") \
+ V(for_api_string, "for_api") \
+ V(for_string, "for") \
+ V(function_string, "function") \
+ V(Function_string, "Function") \
+ V(Generator_string, "Generator") \
+ V(getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
+ V(getOwnPropertyDescriptors_string, "getOwnPropertyDescriptors") \
+ V(getPrototypeOf_string, "getPrototypeOf") \
+ V(get_string, "get") \
+ V(global_string, "global") \
+ V(has_string, "has") \
+ V(illegal_access_string, "illegal access") \
+ V(illegal_argument_string, "illegal argument") \
+ V(index_string, "index") \
+ V(infinity_string, "Infinity") \
+ V(input_string, "input") \
+ V(int16x8_string, "int16x8") \
+ V(Int16x8_string, "Int16x8") \
+ V(int32x4_string, "int32x4") \
+ V(Int32x4_string, "Int32x4") \
+ V(int8x16_string, "int8x16") \
+ V(Int8x16_string, "Int8x16") \
+ V(isExtensible_string, "isExtensible") \
+ V(isView_string, "isView") \
+ V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
+ V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
+ V(last_index_string, "lastIndex") \
+ V(length_string, "length") \
+ V(Map_string, "Map") \
+ V(minus_infinity_string, "-Infinity") \
+ V(minus_zero_string, "-0") \
+ V(name_string, "name") \
+ V(nan_string, "NaN") \
+ V(next_string, "next") \
+ V(null_string, "null") \
+ V(null_to_string, "[object Null]") \
+ V(number_string, "number") \
+ V(Number_string, "Number") \
+ V(object_string, "object") \
+ V(Object_string, "Object") \
+ V(ownKeys_string, "ownKeys") \
+ V(preventExtensions_string, "preventExtensions") \
+ V(private_api_string, "private_api") \
+ V(Promise_string, "Promise") \
+ V(proto_string, "__proto__") \
+ V(prototype_string, "prototype") \
+ V(Proxy_string, "Proxy") \
+ V(query_colon_string, "(?:)") \
+ V(RegExp_string, "RegExp") \
+ V(setPrototypeOf_string, "setPrototypeOf") \
+ V(set_string, "set") \
+ V(Set_string, "Set") \
+ V(source_mapping_url_string, "source_mapping_url") \
+ V(source_string, "source") \
+ V(source_url_string, "source_url") \
+ V(stack_string, "stack") \
+ V(strict_compare_ic_string, "===") \
+ V(string_string, "string") \
+ V(String_string, "String") \
+ V(symbol_string, "symbol") \
+ V(Symbol_string, "Symbol") \
+ V(this_string, "this") \
+ V(throw_string, "throw") \
+ V(toJSON_string, "toJSON") \
+ V(toString_string, "toString") \
+ V(true_string, "true") \
+ V(uint16x8_string, "uint16x8") \
+ V(Uint16x8_string, "Uint16x8") \
+ V(uint32x4_string, "uint32x4") \
+ V(Uint32x4_string, "Uint32x4") \
+ V(uint8x16_string, "uint8x16") \
+ V(Uint8x16_string, "Uint8x16") \
+ V(undefined_string, "undefined") \
+ V(undefined_to_string, "[object Undefined]") \
+ V(valueOf_string, "valueOf") \
+ V(values_string, "values") \
+ V(value_string, "value") \
+ V(WeakMap_string, "WeakMap") \
+ V(WeakSet_string, "WeakSet") \
+ V(writable_string, "writable")
+
+#define PRIVATE_SYMBOL_LIST(V) \
+ V(array_iteration_kind_symbol) \
+ V(array_iterator_next_symbol) \
+ V(array_iterator_object_symbol) \
+ V(call_site_function_symbol) \
+ V(call_site_position_symbol) \
+ V(call_site_receiver_symbol) \
+ V(call_site_strict_symbol) \
+ V(class_end_position_symbol) \
+ V(class_start_position_symbol) \
+ V(detailed_stack_trace_symbol) \
+ V(elements_transition_symbol) \
+ V(error_end_pos_symbol) \
+ V(error_script_symbol) \
+ V(error_start_pos_symbol) \
+ V(formatted_stack_trace_symbol) \
+ V(frozen_symbol) \
+ V(hash_code_symbol) \
+ V(hidden_properties_symbol) \
+ V(home_object_symbol) \
+ V(internal_error_symbol) \
+ V(intl_impl_object_symbol) \
+ V(intl_initialized_marker_symbol) \
+ V(intl_pattern_symbol) \
+ V(intl_resolved_symbol) \
+ V(megamorphic_symbol) \
+ V(native_context_index_symbol) \
+ V(nonexistent_symbol) \
+ V(nonextensible_symbol) \
+ V(normal_ic_symbol) \
+ V(not_mapped_symbol) \
+ V(observed_symbol) \
+ V(premonomorphic_symbol) \
+ V(promise_combined_deferred_symbol) \
+ V(promise_debug_marker_symbol) \
+ V(promise_has_handler_symbol) \
+ V(promise_on_resolve_symbol) \
+ V(promise_on_reject_symbol) \
+ V(promise_raw_symbol) \
+ V(promise_status_symbol) \
+ V(promise_value_symbol) \
+ V(sealed_symbol) \
+ V(stack_trace_symbol) \
+ V(strict_function_transition_symbol) \
+ V(string_iterator_iterated_string_symbol) \
+ V(string_iterator_next_index_symbol) \
+ V(strong_function_transition_symbol) \
+ V(uninitialized_symbol)
+
+#define PUBLIC_SYMBOL_LIST(V) \
+ V(iterator_symbol, Symbol.iterator) \
+ V(match_symbol, Symbol.match) \
+ V(replace_symbol, Symbol.replace) \
+ V(search_symbol, Symbol.search) \
+ V(species_symbol, Symbol.species) \
+ V(split_symbol, Symbol.split) \
+ V(to_primitive_symbol, Symbol.toPrimitive) \
+ V(unscopables_symbol, Symbol.unscopables)
+
+// Well-Known Symbols are "Public" symbols, which have a bit set that causes
+// them to produce an undefined value when a load results in a failed access
+// check. Because this behaviour is not yet properly specified, it only
+// applies to a subset of spec-defined Well-Known Symbols.
+#define WELL_KNOWN_SYMBOL_LIST(V) \
+ V(has_instance_symbol, Symbol.hasInstance) \
+ V(is_concat_spreadable_symbol, Symbol.isConcatSpreadable) \
+ V(to_string_tag_symbol, Symbol.toStringTag)
+
+#endif // V8_HEAP_SYMBOLS_H_
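[Editor's sketch] These lists are X-macros: each consumer supplies its own definition of V to stamp out one declaration, initializer, or case per entry, so the list itself stays the single source of truth. A hedged sketch of a hypothetical consumer (assumes the list macro and a String type are in scope):

    // Declare one accessor per internalized string by redefining V for the
    // duration of the expansion, then clean up the helper macro.
    #define DECLARE_STRING_ACCESSOR(name, contents) String* name();
    INTERNALIZED_STRING_LIST(DECLARE_STRING_ACCESSOR)
    #undef DECLARE_STRING_ACCESSOR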
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index bbe3c6b936..6e389c1cbf 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -77,6 +77,7 @@ void ArrayBufferTracker::Unregister(JSArrayBuffer* buffer) {
void ArrayBufferTracker::MarkLive(JSArrayBuffer* buffer) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
void* data = buffer->backing_store();
// ArrayBuffer might be in the middle of being constructed.
@@ -123,6 +124,8 @@ void ArrayBufferTracker::PrepareDiscoveryInNewSpace() {
void ArrayBufferTracker::Promote(JSArrayBuffer* buffer) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+
if (buffer->is_external()) return;
void* data = buffer->backing_store();
if (!data) return;
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index 7ba22fb573..6130003d15 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -7,6 +7,7 @@
#include <map>
+#include "src/base/platform/mutex.h"
#include "src/globals.h"
namespace v8 {
@@ -47,6 +48,7 @@ class ArrayBufferTracker {
void Promote(JSArrayBuffer* buffer);
private:
+ base::Mutex mutex_;
Heap* heap_;
// |live_array_buffers_| maps externally allocated memory used as backing
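[Editor's sketch] MarkLive and Promote can now be reached from concurrent GC work, hence the new mutex_ member and the LockGuard at each entry point. The idiom is plain scoped locking; a simplified standalone equivalent using the standard library instead of V8's base wrappers:

    #include <mutex>

    class TrackerSketch {
     public:
      void MarkLive() {
        // Scoped lock: released automatically on every exit path,
        // including the early returns in the real MarkLive/Promote.
        std::lock_guard<std::mutex> guard(mutex_);
        // ... mutate the shared tracking maps ...
      }

     private:
      std::mutex mutex_;
    };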
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index a723b3bdae..57e6cc4c93 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -359,10 +359,6 @@ bool Heap::InNewSpace(Object* object) {
return result;
}
-
-bool Heap::InNewSpace(Address address) { return new_space_.Contains(address); }
-
-
bool Heap::InFromSpace(Object* object) {
return new_space_.FromSpaceContains(object);
}
@@ -372,14 +368,15 @@ bool Heap::InToSpace(Object* object) {
return new_space_.ToSpaceContains(object);
}
+bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }
-bool Heap::InOldSpace(Address address) { return old_space_->Contains(address); }
-
-
-bool Heap::InOldSpace(Object* object) {
- return InOldSpace(reinterpret_cast<Address>(object));
+bool Heap::InNewSpaceSlow(Address address) {
+ return new_space_.ContainsSlow(address);
}
+bool Heap::InOldSpaceSlow(Address address) {
+ return old_space_->ContainsSlow(address);
+}
bool Heap::OldGenerationAllocationLimitReached() {
if (!incremental_marking()->IsStopped()) return false;
@@ -394,18 +391,13 @@ bool Heap::ShouldBePromoted(Address old_address, int object_size) {
(!page->ContainsLimit(age_mark) || old_address < age_mark);
}
-
-void Heap::RecordWrite(Address address, int offset) {
- if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
-}
-
-
-void Heap::RecordWrites(Address address, int start, int len) {
- if (!InNewSpace(address)) {
- for (int i = 0; i < len; i++) {
- store_buffer_.Mark(address + start + i * kPointerSize);
- }
+void Heap::RecordWrite(Object* object, int offset, Object* o) {
+ if (!InNewSpace(o) || !object->IsHeapObject() || InNewSpace(object)) {
+ return;
}
+ Page* page = Page::FromAddress(reinterpret_cast<Address>(object));
+ Address slot = HeapObject::cast(object)->address() + offset;
+ RememberedSet<OLD_TO_NEW>::Insert(page, slot);
}
@@ -467,7 +459,7 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
}
}
-
+template <Heap::FindMementoMode mode>
AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
// Check if there is potentially a memento behind the object. If
// the last word of the memento is on another page we return
@@ -476,61 +468,77 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
Address memento_address = object_address + object->Size();
Address last_memento_word_address = memento_address + kPointerSize;
if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
- return NULL;
+ return nullptr;
}
-
HeapObject* candidate = HeapObject::FromAddress(memento_address);
Map* candidate_map = candidate->map();
// This fast check may peek at an uninitialized word. However, the slow check
// below (memento_address == top) ensures that this is safe. Mark the word as
// initialized to silence MemorySanitizer warnings.
MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
- if (candidate_map != allocation_memento_map()) return NULL;
-
- // Either the object is the last object in the new space, or there is another
- // object of at least word size (the header map word) following it, so
- // suffices to compare ptr and top here. Note that technically we do not have
- // to compare with the current top pointer of the from space page during GC,
- // since we always install filler objects above the top pointer of a from
- // space page when performing a garbage collection. However, always performing
- // the test makes it possible to have a single, unified version of
- // FindAllocationMemento that is used both by the GC and the mutator.
- Address top = NewSpaceTop();
- DCHECK(memento_address == top ||
- memento_address + HeapObject::kHeaderSize <= top ||
- !NewSpacePage::OnSamePage(memento_address, top - 1));
- if (memento_address == top) return NULL;
-
- AllocationMemento* memento = AllocationMemento::cast(candidate);
- if (!memento->IsValid()) return NULL;
- return memento;
+ if (candidate_map != allocation_memento_map()) {
+ return nullptr;
+ }
+ AllocationMemento* memento_candidate = AllocationMemento::cast(candidate);
+
+ // Depending on what the memento is used for, we might need to perform
+ // additional checks.
+ Address top;
+ switch (mode) {
+ case Heap::kForGC:
+ return memento_candidate;
+ case Heap::kForRuntime:
+ if (memento_candidate == nullptr) return nullptr;
+ // Either the object is the last object in the new space, or there is
+ // another object of at least word size (the header map word) following
+      // it, so it suffices to compare ptr and top here.
+ top = NewSpaceTop();
+ DCHECK(memento_address == top ||
+ memento_address + HeapObject::kHeaderSize <= top ||
+ !NewSpacePage::OnSamePage(memento_address, top - 1));
+ if ((memento_address != top) && memento_candidate->IsValid()) {
+ return memento_candidate;
+ }
+ return nullptr;
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return nullptr;
}
-
+template <Heap::UpdateAllocationSiteMode mode>
void Heap::UpdateAllocationSite(HeapObject* object,
HashMap* pretenuring_feedback) {
DCHECK(InFromSpace(object));
if (!FLAG_allocation_site_pretenuring ||
!AllocationSite::CanTrack(object->map()->instance_type()))
return;
- AllocationMemento* memento = FindAllocationMemento(object);
- if (memento == nullptr) return;
-
- AllocationSite* key = memento->GetAllocationSite();
- DCHECK(!key->IsZombie());
-
- if (pretenuring_feedback == global_pretenuring_feedback_) {
+ AllocationMemento* memento_candidate = FindAllocationMemento<kForGC>(object);
+ if (memento_candidate == nullptr) return;
+
+ if (mode == kGlobal) {
+ DCHECK_EQ(pretenuring_feedback, global_pretenuring_feedback_);
+    // Entering global pretenuring feedback is only done in the scavenger, where
+ // we are allowed to actually touch the allocation site.
+ if (!memento_candidate->IsValid()) return;
+ AllocationSite* site = memento_candidate->GetAllocationSite();
+ DCHECK(!site->IsZombie());
// For inserting in the global pretenuring storage we need to first
// increment the memento found count on the allocation site.
- if (key->IncrementMementoFoundCount()) {
- global_pretenuring_feedback_->LookupOrInsert(
- key, static_cast<uint32_t>(bit_cast<uintptr_t>(key)));
+ if (site->IncrementMementoFoundCount()) {
+ global_pretenuring_feedback_->LookupOrInsert(site,
+ ObjectHash(site->address()));
}
} else {
- // Any other pretenuring storage than the global one is used as a cache,
- // where the count is later on merge in the allocation site.
- HashMap::Entry* e = pretenuring_feedback->LookupOrInsert(
- key, static_cast<uint32_t>(bit_cast<uintptr_t>(key)));
+ DCHECK_EQ(mode, kCached);
+ DCHECK_NE(pretenuring_feedback, global_pretenuring_feedback_);
+ // Entering cached feedback is used in the parallel case. We are not allowed
+    // to dereference the allocation site and instead have to postpone all
+    // checks until actually merging the data.
+ Address key = memento_candidate->GetAllocationSiteUnchecked();
+ HashMap::Entry* e =
+ pretenuring_feedback->LookupOrInsert(key, ObjectHash(key));
DCHECK(e != nullptr);
(*bit_cast<intptr_t*>(&e->value))++;
}
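[Editor's sketch] Both helpers are now compile-time specialized: FindAllocationMemento<kForGC> skips the allocation-top validation that only the mutator path needs, while UpdateAllocationSite<kGlobal>/<kCached> separates the scavenger (which may touch allocation sites directly) from parallel collectors (which must defer all checks to the merge step). Hypothetical call sites, assuming the template arguments shown above:

    // GC path: the collector controls the pages it walks, so the memento
    // behind the object can be read without validating against the top pointer.
    AllocationMemento* gc_memento =
        heap->FindAllocationMemento<Heap::kForGC>(object);

    // Runtime path: the mutator may race with allocation, so the memento must
    // additionally lie below NewSpaceTop() and pass IsValid().
    AllocationMemento* rt_memento =
        heap->FindAllocationMemento<Heap::kForRuntime>(object);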
@@ -614,9 +622,18 @@ void Heap::ExternalStringTable::ShrinkNewStrings(int position) {
#endif
}
+// static
+int DescriptorLookupCache::Hash(Object* source, Name* name) {
+ DCHECK(name->IsUniqueName());
+ // Uses only lower 32 bits if pointers are larger.
+ uint32_t source_hash =
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
+ kPointerSizeLog2;
+ uint32_t name_hash = name->hash_field();
+ return (source_hash ^ name_hash) % kLength;
+}
int DescriptorLookupCache::Lookup(Map* source, Name* name) {
- if (!name->IsUniqueName()) return kAbsent;
int index = Hash(source, name);
Key& key = keys_[index];
if ((key.source == source) && (key.name == name)) return results_[index];
@@ -626,13 +643,11 @@ int DescriptorLookupCache::Lookup(Map* source, Name* name) {
void DescriptorLookupCache::Update(Map* source, Name* name, int result) {
DCHECK(result != kAbsent);
- if (name->IsUniqueName()) {
- int index = Hash(source, name);
- Key& key = keys_[index];
- key.source = source;
- key.name = name;
- results_[index] = result;
- }
+ int index = Hash(source, name);
+ Key& key = keys_[index];
+ key.source = source;
+ key.name = name;
+ results_[index] = result;
}
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 84b3c79b3e..ad6c451cbe 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -27,6 +27,7 @@
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
@@ -37,6 +38,7 @@
#include "src/snapshot/natives.h"
#include "src/snapshot/serialize.h"
#include "src/snapshot/snapshot.h"
+#include "src/tracing/trace-event.h"
#include "src/type-feedback-vector.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -53,10 +55,10 @@ struct Heap::StrongRootsList {
StrongRootsList* next;
};
-class IdleScavengeObserver : public InlineAllocationObserver {
+class IdleScavengeObserver : public AllocationObserver {
public:
IdleScavengeObserver(Heap& heap, intptr_t step_size)
- : InlineAllocationObserver(step_size), heap_(heap) {}
+ : AllocationObserver(step_size), heap_(heap) {}
void Step(int bytes_allocated, Address, size_t) override {
heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
@@ -77,7 +79,6 @@ Heap::Heap()
reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
max_semi_space_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(Page::kPageSize),
- target_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
initial_old_generation_size_(max_old_generation_size_ /
kInitalOldGenerationLimitFactor),
@@ -94,7 +95,6 @@ Heap::Heap()
contexts_disposed_(0),
number_of_disposed_maps_(0),
global_ic_age_(0),
- scan_on_scavenge_pages_(0),
new_space_(this),
old_space_(NULL),
code_space_(NULL),
@@ -114,7 +114,6 @@ Heap::Heap()
old_gen_exhausted_(false),
optimize_for_memory_usage_(false),
inline_allocation_disabled_(false),
- store_buffer_rebuilder_(store_buffer()),
total_regexp_code_generated_(0),
tracer_(nullptr),
high_survival_rate_period_length_(0),
@@ -454,8 +453,6 @@ void Heap::GarbageCollectionPrologue() {
ReportStatisticsBeforeGC();
#endif // DEBUG
- store_buffer()->GCPrologue();
-
if (isolate()->concurrent_osr_enabled()) {
isolate()->optimizing_compile_dispatcher()->AgeBufferedOsrJobs();
}
@@ -467,6 +464,7 @@ void Heap::GarbageCollectionPrologue() {
}
CheckNewSpaceExpansionCriteria();
UpdateNewSpaceAllocationCounter();
+ store_buffer()->MoveEntriesToRememberedSet();
}
@@ -519,17 +517,19 @@ void Heap::MergeAllocationSitePretenuringFeedback(
if (map_word.IsForwardingAddress()) {
site = AllocationSite::cast(map_word.ToForwardingAddress());
}
- DCHECK(site->IsAllocationSite());
+
+ // We have not validated the allocation site yet, since we have not
+ // dereferenced the site during collecting information.
+ // This is an inlined check of AllocationMemento::IsValid.
+ if (!site->IsAllocationSite() || site->IsZombie()) continue;
+
int value =
static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
DCHECK_GT(value, 0);
- {
- // TODO(mlippautz): For parallel processing we need synchronization here.
- if (site->IncrementMementoFoundCount(value)) {
- global_pretenuring_feedback_->LookupOrInsert(
- site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
- }
+ if (site->IncrementMementoFoundCount(value)) {
+ global_pretenuring_feedback_->LookupOrInsert(site,
+ ObjectHash(site->address()));
}
}
}
@@ -567,22 +567,24 @@ void Heap::ProcessPretenuringFeedback() {
bool maximum_size_scavenge = MaximumSizeScavenge();
for (HashMap::Entry* e = global_pretenuring_feedback_->Start();
e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
+ allocation_sites++;
site = reinterpret_cast<AllocationSite*>(e->key);
int found_count = site->memento_found_count();
- // The fact that we have an entry in the storage means that we've found
- // the site at least once.
- DCHECK_GT(found_count, 0);
- DCHECK(site->IsAllocationSite());
- allocation_sites++;
- active_allocation_sites++;
- allocation_mementos_found += found_count;
- if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
- trigger_deoptimization = true;
- }
- if (site->GetPretenureMode() == TENURED) {
- tenure_decisions++;
- } else {
- dont_tenure_decisions++;
+ // An entry in the storage does not imply that the count is > 0 because
+ // allocation sites might have been reset due to too many objects dying
+ // in old space.
+ if (found_count > 0) {
+ DCHECK(site->IsAllocationSite());
+ active_allocation_sites++;
+ allocation_mementos_found += found_count;
+ if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
+ trigger_deoptimization = true;
+ }
+ if (site->GetPretenureMode() == TENURED) {
+ tenure_decisions++;
+ } else {
+ dont_tenure_decisions++;
+ }
}
}
@@ -639,8 +641,6 @@ void Heap::DeoptMarkedAllocationSites() {
void Heap::GarbageCollectionEpilogue() {
- store_buffer()->GCEpilogue();
-
// In release mode, we only zap the from space under heap verification.
if (Heap::ShouldZapGarbage()) {
ZapFromSpace();
@@ -769,8 +769,7 @@ void Heap::PreprocessStackTraces() {
if (!maybe_code->IsCode()) break;
Code* code = Code::cast(maybe_code);
int offset = Smi::cast(elements->get(j + 3))->value();
- Address pc = code->address() + offset;
- int pos = code->SourcePosition(pc);
+ int pos = code->SourcePosition(offset);
elements->set(j + 2, Smi::FromInt(pos));
}
}
@@ -819,6 +818,7 @@ void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
HistogramTimerScope incremental_marking_scope(
isolate()->counters()->gc_incremental_marking_finalize());
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
{
GCCallbacksScope scope(this);
@@ -860,7 +860,6 @@ HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
}
}
-
void Heap::CollectAllGarbage(int flags, const char* gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// Since we are ignoring the return value, the exact choice of space does
@@ -896,7 +895,7 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
const int kMinNumberOfAttempts = 2;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
- v8::kGCCallbackFlagForced) &&
+ v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
attempt + 1 >= kMinNumberOfAttempts) {
break;
}
@@ -1008,7 +1007,9 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
GarbageCollectionPrologue();
{
- HistogramTimerScope histogram_timer_scope(GCTypeTimer(collector));
+ HistogramTimer* gc_type_timer = GCTypeTimer(collector);
+ HistogramTimerScope histogram_timer_scope(gc_type_timer);
+ TRACE_EVENT0("v8", gc_type_timer->name());
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, gc_callback_flags);
@@ -1042,7 +1043,8 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
}
if (collector == MARK_COMPACTOR &&
- (gc_callback_flags & kGCCallbackFlagForced) != 0) {
+ (gc_callback_flags & (kGCCallbackFlagForced |
+ kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
isolate()->CountUsage(v8::Isolate::kForcedGC);
}
@@ -1062,9 +1064,9 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
tracer()->ResetSurvivalEvents();
old_generation_size_configured_ = false;
MemoryReducer::Event event;
- event.type = MemoryReducer::kContextDisposed;
+ event.type = MemoryReducer::kPossibleGarbage;
event.time_ms = MonotonicallyIncreasingTimeInMs();
- memory_reducer_->NotifyContextDisposed(event);
+ memory_reducer_->NotifyPossibleGarbage(event);
}
if (isolate()->concurrent_recompilation_enabled()) {
// Flush the queued recompilation tasks.
@@ -1103,10 +1105,8 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
if (!InNewSpace(array)) {
for (int i = 0; i < len; i++) {
- // TODO(hpayer): check store buffer for entries
- if (InNewSpace(dst_objects[i])) {
- RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
- }
+ RecordWrite(array, array->OffsetOfElementAt(dst_index + i),
+ dst_objects[i]);
}
}
incremental_marking()->RecordWrites(array);
@@ -1420,7 +1420,7 @@ void Heap::CallGCEpilogueCallbacks(GCType gc_type,
void Heap::MarkCompact() {
- PauseInlineAllocationObserversScope pause_observers(new_space());
+ PauseAllocationObserversScope pause_observers(this);
gc_state_ = MARK_COMPACT;
LOG(isolate_, ResourceEvent("markcompact", "begin"));
@@ -1552,12 +1552,6 @@ static bool IsUnmodifiedHeapObject(Object** p) {
}
-void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
- StoreBufferEvent event) {
- heap->store_buffer_rebuilder_.Callback(page, event);
-}
-
-
void PromotionQueue::Initialize() {
// The last to-space page may be used for promotion queue. On promotion
// conflict, we use the emergency stack.
@@ -1627,7 +1621,7 @@ void Heap::Scavenge() {
// Bump-pointer allocations done during scavenge are not real allocations.
// Pause the inline allocation steps.
- PauseInlineAllocationObserversScope pause_observers(new_space());
+ PauseAllocationObserversScope pause_observers(this);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
@@ -1638,9 +1632,6 @@ void Heap::Scavenge() {
// Implements Cheney's copying algorithm
LOG(isolate_, ResourceEvent("scavenge", "begin"));
- // Clear descriptor cache.
- isolate_->descriptor_lookup_cache()->Clear();
-
// Used for updating survived_since_last_expansion_ at function end.
intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
@@ -1690,9 +1681,8 @@ void Heap::Scavenge() {
// Copy objects reachable from the old generation.
GCTracer::Scope gc_scope(tracer(),
GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
- StoreBufferRebuildScope scope(this, store_buffer(),
- &ScavengeStoreBufferCallback);
- store_buffer()->IteratePointersToNewSpace(&Scavenger::ScavengeObject);
+ RememberedSet<OLD_TO_NEW>::IterateWithWrapper(this,
+ Scavenger::ScavengeObject);
}
{
@@ -1946,8 +1936,6 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// Promote and process all the to-be-promoted objects.
{
- StoreBufferRebuildScope scope(this, store_buffer(),
- &ScavengeStoreBufferCallback);
while (!promotion_queue()->is_empty()) {
HeapObject* target;
int size;
@@ -2099,6 +2087,7 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
+ isolate()->counters()->maps_created()->Increment();
result->set_map_no_write_barrier(meta_map());
Map* map = Map::cast(result);
map->set_instance_type(instance_type);
@@ -2271,6 +2260,7 @@ bool Heap::CreateInitialMaps() {
if (FLAG_unbox_double_fields) {
null_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
}
+ null_map()->set_is_undetectable();
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
@@ -2281,6 +2271,7 @@ bool Heap::CreateInitialMaps() {
undefined_map()->set_prototype(null_value());
undefined_map()->set_constructor_or_backpointer(null_value());
+ undefined_map()->set_is_undetectable();
null_map()->set_prototype(null_value());
null_map()->set_constructor_or_backpointer(null_value());
@@ -2415,14 +2406,6 @@ bool Heap::CreateInitialMaps() {
ByteArray* byte_array;
if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
set_empty_byte_array(byte_array);
-
- BytecodeArray* bytecode_array = nullptr;
- AllocationResult allocation =
- AllocateBytecodeArray(0, nullptr, 0, 0, empty_fixed_array());
- if (!allocation.To(&bytecode_array)) {
- return false;
- }
- set_empty_bytecode_array(bytecode_array);
}
#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
@@ -2664,7 +2647,7 @@ void Heap::CreateInitialObjects() {
set_arguments_marker(
*factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
handle(Smi::FromInt(-4), isolate()), "undefined",
- Oddball::kArgumentMarker));
+ Oddball::kArgumentsMarker));
set_no_interceptor_result_sentinel(*factory->NewOddball(
factory->no_interceptor_result_sentinel_map(),
@@ -2685,17 +2668,6 @@ void Heap::CreateInitialObjects() {
roots_[constant_string_table[i].index] = *str;
}
- // The {hidden_string} is special because it is an empty string, but does not
- // match any string (even the {empty_string}) when looked up in properties.
- // Allocate the hidden string which is used to identify the hidden properties
- // in JSObjects. The hash code has a special value so that it will not match
- // the empty string when searching for the property. It cannot be part of the
- // loop above because it needs to be allocated manually with the special
- // hash code in place. The hash code for the hidden_string is zero to ensure
- // that it will always be at the first entry in property descriptors.
- set_hidden_string(*factory->NewOneByteInternalizedString(
- OneByteVector("", 0), String::kEmptyStringHash));
-
// Create the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
@@ -2724,6 +2696,14 @@ void Heap::CreateInitialObjects() {
#undef SYMBOL_INIT
}
+ // The {hidden_properties_symbol} is special because it is the only name with
+  // hash code zero. This ensures that it will always be the first entry when
+ // sorted by hash code in descriptor arrays. It is used to identify the hidden
+ // properties in JSObjects.
+ // kIsNotArrayIndexMask is a computed hash with value zero.
+ Symbol::cast(roots_[khidden_properties_symbolRootIndex])
+ ->set_hash_field(Name::kIsNotArrayIndexMask);
+
{
HandleScope scope(isolate());
#define SYMBOL_INIT(name, description) \
@@ -2872,15 +2852,14 @@ void Heap::CreateInitialObjects() {
cell->set_value(the_hole_value());
set_empty_property_cell(*cell);
+ Handle<PropertyCell> species_cell = factory->NewPropertyCell();
+ species_cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
+ set_species_protector(*species_cell);
+
set_weak_stack_trace_list(Smi::FromInt(0));
set_noscript_shared_function_infos(Smi::FromInt(0));
- // Will be filled in by Interpreter::Initialize().
- set_interpreter_table(
- *interpreter::Interpreter::CreateUninitializedInterpreterTable(
- isolate()));
-
// Initialize keyed lookup cache.
isolate_->keyed_lookup_cache()->Clear();
@@ -3055,7 +3034,10 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
instance->set_length(length);
instance->set_frame_size(frame_size);
instance->set_parameter_count(parameter_count);
+ instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
instance->set_constant_pool(constant_pool);
+ instance->set_handler_table(empty_fixed_array());
+ instance->set_source_position_table(empty_fixed_array());
CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
return result;
@@ -3098,7 +3080,7 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
// (3) the page was already concurrently swept. This case is an optimization
// for concurrent sweeping. The WasSwept predicate for concurrently swept
// pages is set after sweeping all pages.
- return !InOldSpace(address) || page->WasSwept() || page->SweepingCompleted();
+ return !InOldSpace(object) || page->SweepingDone();
}
@@ -3133,6 +3115,10 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
DCHECK(!lo_space()->Contains(object));
DCHECK(object->map() != fixed_cow_array_map());
+  // Ensure that no handle-scope has more than one pointer to the same
+ // backing-store.
+ SLOW_DCHECK(CountHandlesForObject(object) <= 1);
+
STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
@@ -3161,6 +3147,11 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Maintain consistency of live bytes during incremental marking
Marking::TransferMark(this, object->address(), new_start);
+ if (mark_compact_collector()->sweeping_in_progress()) {
+ // Array trimming during sweeping can add invalid slots in free list.
+ ClearRecordedSlotRange(object, former_start,
+ HeapObject::RawField(new_object, 0));
+ }
AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
// Notify the heap profiler of change in object layout.
@@ -3210,7 +3201,8 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
}
// Calculate location of new array end.
- Address new_end = object->address() + object->Size() - bytes_to_trim;
+ Address old_end = object->address() + object->Size();
+ Address new_end = old_end - bytes_to_trim;
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
@@ -3220,6 +3212,11 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
// of the object changed significantly.
if (!lo_space()->Contains(object)) {
CreateFillerObjectAt(new_end, bytes_to_trim);
+ if (mark_compact_collector()->sweeping_in_progress()) {
+      // Array trimming during sweeping can add invalid slots to the free list.
+ ClearRecordedSlotRange(object, reinterpret_cast<Object**>(new_end),
+ reinterpret_cast<Object**>(old_end));
+ }
}
// Initialize header of the trimmed array. We are storing the new length
@@ -3366,6 +3363,25 @@ AllocationResult Heap::CopyCode(Code* code) {
return new_code;
}
+AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
+ int size = BytecodeArray::SizeFor(bytecode_array->length());
+ HeapObject* result = nullptr;
+ {
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ if (!allocation.To(&result)) return allocation;
+ }
+
+ result->set_map_no_write_barrier(bytecode_array_map());
+ BytecodeArray* copy = BytecodeArray::cast(result);
+ copy->set_length(bytecode_array->length());
+ copy->set_frame_size(bytecode_array->frame_size());
+ copy->set_parameter_count(bytecode_array->parameter_count());
+ copy->set_constant_pool(bytecode_array->constant_pool());
+ copy->set_handler_table(bytecode_array->handler_table());
+ copy->set_source_position_table(bytecode_array->source_position_table());
+ bytecode_array->CopyBytecodesTo(copy);
+ return copy;
+}
AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
// Allocate ByteArray before the Code object, so that we do not risk
@@ -3470,7 +3486,6 @@ void Heap::InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset) {
if (start_offset == map->instance_size()) return;
DCHECK_LT(start_offset, map->instance_size());
- Object* filler;
// We cannot always fill with one_pointer_filler_map because objects
// created from API functions expect their internal fields to be initialized
// with undefined_value.
@@ -3480,15 +3495,17 @@ void Heap::InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset) {
// In case of Array subclassing the |map| could already be transitioned
// to different elements kind from the initial map on which we track slack.
- Map* initial_map = map->FindRootMap();
- if (initial_map->IsInobjectSlackTrackingInProgress()) {
- // We might want to shrink the object later.
- filler = Heap::one_pointer_filler_map();
+ bool in_progress = map->IsInobjectSlackTrackingInProgress();
+ Object* filler;
+ if (in_progress) {
+ filler = one_pointer_filler_map();
} else {
- filler = Heap::undefined_value();
+ filler = undefined_value();
}
obj->InitializeBody(map, start_offset, Heap::undefined_value(), filler);
- initial_map->InobjectSlackTrackingStep();
+ if (in_progress) {
+ map->FindRootMap()->InobjectSlackTrackingStep();
+ }
}
@@ -3513,7 +3530,8 @@ AllocationResult Heap::AllocateJSObjectFromMap(
// Initialize the JSObject.
InitializeJSObjectFromMap(js_obj, properties, map);
- DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements());
+ DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
+ js_obj->HasFastStringWrapperElements());
return js_obj;
}
@@ -3804,18 +3822,41 @@ AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
if (!allocation.To(&obj)) return allocation;
}
+
obj->set_map_no_write_barrier(fixed_array_map());
FixedArray* result = FixedArray::cast(obj);
result->set_length(new_len);
// Copy the content.
DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+ WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
MemsetPointer(result->data_start() + old_len, undefined_value(), grow_by);
return result;
}
+AllocationResult Heap::CopyFixedArrayUpTo(FixedArray* src, int new_len,
+ PretenureFlag pretenure) {
+ if (new_len == 0) return empty_fixed_array();
+
+ DCHECK_LE(new_len, src->length());
+
+ HeapObject* obj = nullptr;
+ {
+ AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
+ if (!allocation.To(&obj)) return allocation;
+ }
+ obj->set_map_no_write_barrier(fixed_array_map());
+
+ FixedArray* result = FixedArray::cast(obj);
+ result->set_length(new_len);
+
+ // Copy the content.
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < new_len; i++) result->set(i, src->get(i), mode);
+ return result;
+}
AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
int len = src->length();
@@ -3824,13 +3865,12 @@ AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
if (!allocation.To(&obj)) return allocation;
}
+ obj->set_map_no_write_barrier(map);
if (InNewSpace(obj)) {
- obj->set_map_no_write_barrier(map);
CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
FixedArray::SizeFor(len) - kPointerSize);
return obj;
}
- obj->set_map_no_write_barrier(map);
FixedArray* result = FixedArray::cast(obj);
result->set_length(len);
@@ -4097,6 +4137,20 @@ bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
return committed - used > used + kSlack;
}
+void Heap::SetOptimizeForMemoryUsage() {
+ // Activate memory reducer when switching to background if
+  // - there has been no mark-compact since the start, and
+  // - the committed memory can potentially be reduced.
+  // 2 pages each for the old, code, and map spaces + 1 page for new space.
+ const int kMinCommittedMemory = 7 * Page::kPageSize;
+ if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory) {
+ MemoryReducer::Event event;
+ event.type = MemoryReducer::kPossibleGarbage;
+ event.time_ms = MonotonicallyIncreasingTimeInMs();
+ memory_reducer_->NotifyPossibleGarbage(event);
+ }
+ optimize_for_memory_usage_ = true;
+}
void Heap::ReduceNewSpaceSize() {
// TODO(ulan): Unify this constant with the similar constant in
@@ -4189,6 +4243,7 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
case DO_FULL_GC: {
DCHECK(contexts_disposed_ > 0);
HistogramTimerScope scope(isolate_->counters()->gc_context());
+ TRACE_EVENT0("v8", "V8.GCContext");
CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
break;
}
@@ -4274,6 +4329,7 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
static_cast<double>(base::Time::kMillisecondsPerSecond);
HistogramTimerScope idle_notification_scope(
isolate_->counters()->gc_idle_notification());
+ TRACE_EVENT0("v8", "V8.GCIdleNotification");
double start_ms = MonotonicallyIncreasingTimeInMs();
double idle_time_in_ms = deadline_in_ms - start_ms;
@@ -4354,38 +4410,65 @@ void Heap::ReportHeapStatistics(const char* title) {
#endif // DEBUG
-bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
-
-
-bool Heap::Contains(Address addr) {
- if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
+bool Heap::Contains(HeapObject* value) {
+ if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
+ return false;
+ }
return HasBeenSetUp() &&
- (new_space_.ToSpaceContains(addr) || old_space_->Contains(addr) ||
- code_space_->Contains(addr) || map_space_->Contains(addr) ||
- lo_space_->SlowContains(addr));
+ (new_space_.ToSpaceContains(value) || old_space_->Contains(value) ||
+ code_space_->Contains(value) || map_space_->Contains(value) ||
+ lo_space_->Contains(value));
}
+bool Heap::ContainsSlow(Address addr) {
+ if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) {
+ return false;
+ }
+ return HasBeenSetUp() &&
+ (new_space_.ToSpaceContainsSlow(addr) ||
+ old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
+ map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr));
+}
bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
- return InSpace(value->address(), space);
-}
+ if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
+ return false;
+ }
+ if (!HasBeenSetUp()) return false;
+ switch (space) {
+ case NEW_SPACE:
+ return new_space_.ToSpaceContains(value);
+ case OLD_SPACE:
+ return old_space_->Contains(value);
+ case CODE_SPACE:
+ return code_space_->Contains(value);
+ case MAP_SPACE:
+ return map_space_->Contains(value);
+ case LO_SPACE:
+ return lo_space_->Contains(value);
+ }
+ UNREACHABLE();
+ return false;
+}
-bool Heap::InSpace(Address addr, AllocationSpace space) {
- if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
+bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
+ if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) {
+ return false;
+ }
if (!HasBeenSetUp()) return false;
switch (space) {
case NEW_SPACE:
- return new_space_.ToSpaceContains(addr);
+ return new_space_.ToSpaceContainsSlow(addr);
case OLD_SPACE:
- return old_space_->Contains(addr);
+ return old_space_->ContainsSlow(addr);
case CODE_SPACE:
- return code_space_->Contains(addr);
+ return code_space_->ContainsSlow(addr);
case MAP_SPACE:
- return map_space_->Contains(addr);
+ return map_space_->ContainsSlow(addr);
case LO_SPACE:
- return lo_space_->SlowContains(addr);
+ return lo_space_->ContainsSlow(addr);
}
UNREACHABLE();
return false;
@@ -4429,8 +4512,6 @@ void Heap::Verify() {
CHECK(HasBeenSetUp());
HandleScope scope(isolate());
- store_buffer()->Verify();
-
if (mark_compact_collector()->sweeping_in_progress()) {
// We have to wait here for the sweeper threads to have an iterable heap.
mark_compact_collector()->EnsureSweepingCompleted();
@@ -4478,14 +4559,11 @@ void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
Address end, bool record_slots,
ObjectSlotCallback callback) {
Address slot_address = start;
+ Page* page = Page::FromAddress(start);
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
Object* target = *slot;
- // If the store buffer becomes overfull we mark pages as being exempt from
- // the store buffer. These pages are scanned to find pointers that point
- // to the new space. In that case we may hit newly promoted objects and
- // fix the pointers before the promotion queue gets to them. Thus the 'if'.
if (target->IsHeapObject()) {
if (Heap::InFromSpace(target)) {
callback(reinterpret_cast<HeapObject**>(slot),
@@ -4494,8 +4572,7 @@ void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
if (InNewSpace(new_target)) {
SLOW_DCHECK(Heap::InToSpace(new_target));
SLOW_DCHECK(new_target->IsHeapObject());
- store_buffer_.EnterDirectlyIntoStoreBuffer(
- reinterpret_cast<Address>(slot));
+ RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
}
SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
} else if (record_slots &&
@@ -4590,10 +4667,6 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Relocatable::Iterate(isolate_, v);
v->Synchronize(VisitorSynchronization::kRelocatable);
- if (isolate_->deoptimizer_data() != NULL) {
- isolate_->deoptimizer_data()->Iterate(v);
- }
- v->Synchronize(VisitorSynchronization::kDebug);
isolate_->compilation_cache()->Iterate(v);
v->Synchronize(VisitorSynchronization::kCompilationCache);
@@ -4607,8 +4680,10 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// on scavenge collections.
if (mode != VISIT_ALL_IN_SCAVENGE) {
isolate_->builtins()->IterateBuiltins(v);
+ v->Synchronize(VisitorSynchronization::kBuiltins);
+ isolate_->interpreter()->IterateDispatchTable(v);
+ v->Synchronize(VisitorSynchronization::kDispatchTable);
}
- v->Synchronize(VisitorSynchronization::kBuiltins);
// Iterate over global handles.
switch (mode) {
@@ -4746,31 +4821,6 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
- if (FLAG_target_semi_space_size > 0) {
- int target_semispace_size = FLAG_target_semi_space_size * MB;
- if (target_semispace_size < initial_semispace_size_) {
- target_semispace_size_ = initial_semispace_size_;
- if (FLAG_trace_gc) {
- PrintIsolate(isolate_,
- "Target semi-space size cannot be less than the minimum "
- "semi-space size of %d MB\n",
- initial_semispace_size_ / MB);
- }
- } else if (target_semispace_size > max_semi_space_size_) {
- target_semispace_size_ = max_semi_space_size_;
- if (FLAG_trace_gc) {
- PrintIsolate(isolate_,
- "Target semi-space size cannot be less than the maximum "
- "semi-space size of %d MB\n",
- max_semi_space_size_ / MB);
- }
- } else {
- target_semispace_size_ = ROUND_UP(target_semispace_size, Page::kPageSize);
- }
- }
-
- target_semispace_size_ = Max(initial_semispace_size_, target_semispace_size_);
-
if (FLAG_semi_space_growth_factor < 2) {
FLAG_semi_space_growth_factor = 2;
}
@@ -5167,7 +5217,7 @@ bool Heap::SetUp() {
idle_scavenge_observer_ = new IdleScavengeObserver(
*this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
- new_space()->AddInlineAllocationObserver(idle_scavenge_observer_);
+ new_space()->AddAllocationObserver(idle_scavenge_observer_);
return true;
}
@@ -5267,7 +5317,7 @@ void Heap::TearDown() {
PrintAlloctionsHash();
}
- new_space()->RemoveInlineAllocationObserver(idle_scavenge_observer_);
+ new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
delete idle_scavenge_observer_;
idle_scavenge_observer_ = nullptr;
@@ -5476,6 +5526,32 @@ void Heap::PrintHandles() {
#endif
+#ifdef ENABLE_SLOW_DCHECKS
+
+class CountHandleVisitor : public ObjectVisitor {
+ public:
+ explicit CountHandleVisitor(Object* object) : object_(object) {}
+
+ void VisitPointers(Object** start, Object** end) override {
+ for (Object** p = start; p < end; p++) {
+ if (object_ == reinterpret_cast<Object*>(*p)) count_++;
+ }
+ }
+
+ int count() { return count_; }
+
+ private:
+ Object* object_;
+ int count_ = 0;
+};
+
+int Heap::CountHandlesForObject(Object* object) {
+ CountHandleVisitor v(object);
+ isolate_->handle_scope_implementer()->Iterate(&v);
+ return v.count();
+}
+#endif
+
class CheckHandleCountVisitor : public ObjectVisitor {
public:
CheckHandleCountVisitor() : handle_count_(0) {}
@@ -5496,6 +5572,27 @@ void Heap::CheckHandleCount() {
isolate_->handle_scope_implementer()->Iterate(&v);
}
+void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
+ if (!InNewSpace(object)) {
+ store_buffer()->MoveEntriesToRememberedSet();
+ Address slot_addr = reinterpret_cast<Address>(slot);
+ Page* page = Page::FromAddress(slot_addr);
+ DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+ RememberedSet<OLD_TO_NEW>::Remove(page, slot_addr);
+ }
+}
+
+void Heap::ClearRecordedSlotRange(HeapObject* object, Object** start,
+ Object** end) {
+ if (!InNewSpace(object)) {
+ store_buffer()->MoveEntriesToRememberedSet();
+ Address start_addr = reinterpret_cast<Address>(start);
+ Address end_addr = reinterpret_cast<Address>(end);
+ Page* page = Page::FromAddress(start_addr);
+ DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, start_addr, end_addr);
+ }
+}
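// A sketch (hypothetical helper, not part of this patch) of how the trimming
// paths above are expected to pair a filler with slot clearing, so that a
// concurrent sweeper never walks stale OLD_TO_NEW entries behind the new
// object end:
static void TrimAndClear(Heap* heap, FixedArrayBase* object, Address new_end,
                         Address old_end, int bytes_to_trim) {
  heap->CreateFillerObjectAt(new_end, bytes_to_trim);
  if (heap->mark_compact_collector()->sweeping_in_progress()) {
    // Drop recorded slots covered by the filler before the sweeper sees them.
    heap->ClearRecordedSlotRange(object, reinterpret_cast<Object**>(new_end),
                                 reinterpret_cast<Object**>(old_end));
  }
}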
Space* AllSpaces::next() {
switch (counter_++) {
@@ -6099,19 +6196,6 @@ void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
}
-void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
- if (chunks_queued_for_free_ == NULL) return;
- MemoryChunk* next;
- MemoryChunk* chunk;
- for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
- next = chunk->next_chunk();
- chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
- }
- store_buffer()->Compact();
- store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
-}
-
-
void Heap::FreeQueuedChunks() {
if (chunks_queued_for_free_ != NULL) {
if (FLAG_concurrent_sweeping) {
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index af9d0a6235..4a76777ecd 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -14,6 +14,7 @@
#include "src/assert-scope.h"
#include "src/atomic-utils.h"
#include "src/globals.h"
+#include "src/heap-symbols.h"
// TODO(mstarzinger): Two more includes to kill!
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
@@ -36,7 +37,6 @@ namespace internal {
V(Oddball, true_value, TrueValue) \
V(Oddball, false_value, FalseValue) \
V(String, empty_string, empty_string) \
- V(String, hidden_string, hidden_string) \
V(Oddball, uninitialized_value, UninitializedValue) \
V(Map, cell_map, CellMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
@@ -187,11 +187,9 @@ namespace internal {
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(Object, weak_stack_trace_list, WeakStackTraceList) \
V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
- V(FixedArray, interpreter_table, InterpreterTable) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
V(WeakCell, empty_weak_cell, EmptyWeakCell) \
- V(BytecodeArray, empty_bytecode_array, EmptyBytecodeArray)
-
+ V(PropertyCell, species_protector, SpeciesProtector)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
@@ -209,196 +207,6 @@ namespace internal {
SMI_ROOT_LIST(V) \
V(StringTable, string_table, StringTable)
-#define INTERNALIZED_STRING_LIST(V) \
- V(anonymous_string, "anonymous") \
- V(apply_string, "apply") \
- V(assign_string, "assign") \
- V(arguments_string, "arguments") \
- V(Arguments_string, "Arguments") \
- V(Array_string, "Array") \
- V(bind_string, "bind") \
- V(bool16x8_string, "bool16x8") \
- V(Bool16x8_string, "Bool16x8") \
- V(bool32x4_string, "bool32x4") \
- V(Bool32x4_string, "Bool32x4") \
- V(bool8x16_string, "bool8x16") \
- V(Bool8x16_string, "Bool8x16") \
- V(boolean_string, "boolean") \
- V(Boolean_string, "Boolean") \
- V(bound__string, "bound ") \
- V(byte_length_string, "byteLength") \
- V(byte_offset_string, "byteOffset") \
- V(call_string, "call") \
- V(callee_string, "callee") \
- V(caller_string, "caller") \
- V(cell_value_string, "%cell_value") \
- V(char_at_string, "CharAt") \
- V(closure_string, "(closure)") \
- V(compare_ic_string, "==") \
- V(configurable_string, "configurable") \
- V(constructor_string, "constructor") \
- V(construct_string, "construct") \
- V(create_string, "create") \
- V(Date_string, "Date") \
- V(default_string, "default") \
- V(defineProperty_string, "defineProperty") \
- V(deleteProperty_string, "deleteProperty") \
- V(display_name_string, "displayName") \
- V(done_string, "done") \
- V(dot_result_string, ".result") \
- V(dot_string, ".") \
- V(enumerable_string, "enumerable") \
- V(enumerate_string, "enumerate") \
- V(Error_string, "Error") \
- V(eval_string, "eval") \
- V(false_string, "false") \
- V(float32x4_string, "float32x4") \
- V(Float32x4_string, "Float32x4") \
- V(for_api_string, "for_api") \
- V(for_string, "for") \
- V(function_string, "function") \
- V(Function_string, "Function") \
- V(Generator_string, "Generator") \
- V(getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
- V(getPrototypeOf_string, "getPrototypeOf") \
- V(get_string, "get") \
- V(global_string, "global") \
- V(has_string, "has") \
- V(illegal_access_string, "illegal access") \
- V(illegal_argument_string, "illegal argument") \
- V(index_string, "index") \
- V(infinity_string, "Infinity") \
- V(input_string, "input") \
- V(int16x8_string, "int16x8") \
- V(Int16x8_string, "Int16x8") \
- V(int32x4_string, "int32x4") \
- V(Int32x4_string, "Int32x4") \
- V(int8x16_string, "int8x16") \
- V(Int8x16_string, "Int8x16") \
- V(isExtensible_string, "isExtensible") \
- V(isView_string, "isView") \
- V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
- V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
- V(last_index_string, "lastIndex") \
- V(length_string, "length") \
- V(Map_string, "Map") \
- V(minus_infinity_string, "-Infinity") \
- V(minus_zero_string, "-0") \
- V(name_string, "name") \
- V(nan_string, "NaN") \
- V(next_string, "next") \
- V(null_string, "null") \
- V(null_to_string, "[object Null]") \
- V(number_string, "number") \
- V(Number_string, "Number") \
- V(object_string, "object") \
- V(Object_string, "Object") \
- V(ownKeys_string, "ownKeys") \
- V(preventExtensions_string, "preventExtensions") \
- V(private_api_string, "private_api") \
- V(Promise_string, "Promise") \
- V(proto_string, "__proto__") \
- V(prototype_string, "prototype") \
- V(Proxy_string, "Proxy") \
- V(query_colon_string, "(?:)") \
- V(RegExp_string, "RegExp") \
- V(setPrototypeOf_string, "setPrototypeOf") \
- V(set_string, "set") \
- V(Set_string, "Set") \
- V(source_mapping_url_string, "source_mapping_url") \
- V(source_string, "source") \
- V(source_url_string, "source_url") \
- V(stack_string, "stack") \
- V(strict_compare_ic_string, "===") \
- V(string_string, "string") \
- V(String_string, "String") \
- V(symbol_string, "symbol") \
- V(Symbol_string, "Symbol") \
- V(this_string, "this") \
- V(throw_string, "throw") \
- V(toJSON_string, "toJSON") \
- V(toString_string, "toString") \
- V(true_string, "true") \
- V(uint16x8_string, "uint16x8") \
- V(Uint16x8_string, "Uint16x8") \
- V(uint32x4_string, "uint32x4") \
- V(Uint32x4_string, "Uint32x4") \
- V(uint8x16_string, "uint8x16") \
- V(Uint8x16_string, "Uint8x16") \
- V(undefined_string, "undefined") \
- V(undefined_to_string, "[object Undefined]") \
- V(valueOf_string, "valueOf") \
- V(value_string, "value") \
- V(WeakMap_string, "WeakMap") \
- V(WeakSet_string, "WeakSet") \
- V(writable_string, "writable")
-
-#define PRIVATE_SYMBOL_LIST(V) \
- V(array_iteration_kind_symbol) \
- V(array_iterator_next_symbol) \
- V(array_iterator_object_symbol) \
- V(call_site_function_symbol) \
- V(call_site_position_symbol) \
- V(call_site_receiver_symbol) \
- V(call_site_strict_symbol) \
- V(class_end_position_symbol) \
- V(class_start_position_symbol) \
- V(detailed_stack_trace_symbol) \
- V(elements_transition_symbol) \
- V(error_end_pos_symbol) \
- V(error_script_symbol) \
- V(error_start_pos_symbol) \
- V(formatted_stack_trace_symbol) \
- V(frozen_symbol) \
- V(hash_code_symbol) \
- V(home_object_symbol) \
- V(internal_error_symbol) \
- V(intl_impl_object_symbol) \
- V(intl_initialized_marker_symbol) \
- V(intl_pattern_symbol) \
- V(intl_resolved_symbol) \
- V(megamorphic_symbol) \
- V(native_context_index_symbol) \
- V(nonexistent_symbol) \
- V(nonextensible_symbol) \
- V(normal_ic_symbol) \
- V(not_mapped_symbol) \
- V(observed_symbol) \
- V(premonomorphic_symbol) \
- V(promise_combined_deferred_symbol) \
- V(promise_debug_marker_symbol) \
- V(promise_has_handler_symbol) \
- V(promise_on_resolve_symbol) \
- V(promise_on_reject_symbol) \
- V(promise_raw_symbol) \
- V(promise_status_symbol) \
- V(promise_value_symbol) \
- V(sealed_symbol) \
- V(stack_trace_symbol) \
- V(strict_function_transition_symbol) \
- V(string_iterator_iterated_string_symbol) \
- V(string_iterator_next_index_symbol) \
- V(strong_function_transition_symbol) \
- V(uninitialized_symbol)
-
-#define PUBLIC_SYMBOL_LIST(V) \
- V(has_instance_symbol, Symbol.hasInstance) \
- V(iterator_symbol, Symbol.iterator) \
- V(match_symbol, Symbol.match) \
- V(replace_symbol, Symbol.replace) \
- V(search_symbol, Symbol.search) \
- V(species_symbol, Symbol.species) \
- V(split_symbol, Symbol.split) \
- V(to_primitive_symbol, Symbol.toPrimitive) \
- V(unscopables_symbol, Symbol.unscopables)
-
-// Well-Known Symbols are "Public" symbols, which have a bit set which causes
-// them to produce an undefined value when a load results in a failed access
-// check. Because this behaviour is not specified properly as of yet, it only
-// applies to a subset of spec-defined Well-Known Symbols.
-#define WELL_KNOWN_SYMBOL_LIST(V) \
- V(is_concat_spreadable_symbol, Symbol.isConcatSpreadable) \
- V(to_string_tag_symbol, Symbol.toStringTag)
// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not complete and has omissions.
@@ -443,7 +251,6 @@ namespace internal {
V(OrderedHashTableMap) \
V(EmptyFixedArray) \
V(EmptyByteArray) \
- V(EmptyBytecodeArray) \
V(EmptyDescriptorArray) \
V(ArgumentsMarker) \
V(SymbolMap) \
@@ -468,6 +275,7 @@ namespace internal {
PRIVATE_SYMBOL_LIST(V)
// Forward declarations.
+class AllocationObserver;
class ArrayBufferTracker;
class GCIdleTimeAction;
class GCIdleTimeHandler;
@@ -483,6 +291,7 @@ class Scavenger;
class ScavengeJob;
class WeakObjectRetainer;
+typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
@@ -630,15 +439,17 @@ class Heap {
kSmiRootsStart = kStringTableRootIndex + 1
};
+ enum FindMementoMode { kForRuntime, kForGC };
+
+ enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
+
// Indicates whether live bytes adjustment is triggered
// - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER),
// - or from within GC (CONCURRENT_TO_SWEEPER),
// - or mutator code (CONCURRENT_TO_SWEEPER).
enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };
- enum PretenuringFeedbackInsertionMode { kCached, kGlobal };
-
- enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
+ enum UpdateAllocationSiteMode { kGlobal, kCached };
// Taking this lock prevents the GC from entering a phase that relocates
// object references.
@@ -709,20 +520,6 @@ class Heap {
static const double kMaxHeapGrowingFactorIdle;
static const double kTargetMutatorUtilization;
- // Sloppy mode arguments object size.
- static const int kSloppyArgumentsObjectSize =
- JSObject::kHeaderSize + 2 * kPointerSize;
-
- // Strict mode arguments has no callee so it is smaller.
- static const int kStrictArgumentsObjectSize =
- JSObject::kHeaderSize + 1 * kPointerSize;
-
- // Indicies for direct access into argument objects.
- static const int kArgumentsLengthIndex = 0;
-
- // callee is only valid in sloppy mode.
- static const int kArgumentsCalleeIndex = 1;
-
static const int kNoGCFlags = 0;
static const int kReduceMemoryFootprintMask = 1;
static const int kAbortIncrementalMarkingMask = 2;
@@ -860,20 +657,6 @@ class Heap {
// Notify the heap that a context has been disposed.
int NotifyContextDisposed(bool dependant_context);
- inline void increment_scan_on_scavenge_pages() {
- scan_on_scavenge_pages_++;
- if (FLAG_gc_verbose) {
- PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
- }
- }
-
- inline void decrement_scan_on_scavenge_pages() {
- scan_on_scavenge_pages_--;
- if (FLAG_gc_verbose) {
- PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
- }
- }
-
void set_native_contexts_list(Object* object) {
native_contexts_list_ = object;
}
@@ -927,6 +710,7 @@ class Heap {
// If an object has an AllocationMemento trailing it, return it, otherwise
// return NULL;
+ template <FindMementoMode mode>
inline AllocationMemento* FindAllocationMemento(HeapObject* object);
// Returns false if not able to reserve.
@@ -972,7 +756,6 @@ class Heap {
inline bool OldGenerationAllocationLimitReached();
void QueueMemoryChunkForFree(MemoryChunk* chunk);
- void FilterStoreBufferEntriesOnAboutToBeFreedPages();
void FreeQueuedChunks(MemoryChunk* list_head);
void FreeQueuedChunks();
void WaitUntilUnmappingOfFreeChunksCompleted();
@@ -1039,7 +822,7 @@ class Heap {
bool HasHighFragmentation(intptr_t used, intptr_t committed);
void SetOptimizeForLatency() { optimize_for_memory_usage_ = false; }
- void SetOptimizeForMemoryUsage() { optimize_for_memory_usage_ = true; }
+ void SetOptimizeForMemoryUsage();
bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
// ===========================================================================
@@ -1074,7 +857,6 @@ class Heap {
// address with the mask will result in the start address of the new space
// for all addresses in either semispace.
Address NewSpaceStart() { return new_space_.start(); }
- uintptr_t NewSpaceMask() { return new_space_.mask(); }
Address NewSpaceTop() { return new_space_.top(); }
NewSpace* new_space() { return &new_space_; }
@@ -1270,16 +1052,16 @@ class Heap {
// Store buffer API. =========================================================
// ===========================================================================
- // Write barrier support for address[offset] = o.
- INLINE(void RecordWrite(Address address, int offset));
-
- // Write barrier support for address[start : start + len[ = o.
- INLINE(void RecordWrites(Address address, int start, int len));
+ // Write barrier support for object[offset] = o;
+ inline void RecordWrite(Object* object, int offset, Object* o);
Address* store_buffer_top_address() {
return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
}
+ void ClearRecordedSlot(HeapObject* object, Object** slot);
+ void ClearRecordedSlotRange(HeapObject* object, Object** start, Object** end);
+
// ===========================================================================
// Incremental marking API. ==================================================
// ===========================================================================
@@ -1318,25 +1100,27 @@ class Heap {
// Returns whether the object resides in new space.
inline bool InNewSpace(Object* object);
- inline bool InNewSpace(Address address);
- inline bool InNewSpacePage(Address address);
inline bool InFromSpace(Object* object);
inline bool InToSpace(Object* object);
// Returns whether the object resides in old space.
- inline bool InOldSpace(Address address);
inline bool InOldSpace(Object* object);
// Checks whether an address/object in the heap (including auxiliary
// area and unused area).
- bool Contains(Address addr);
bool Contains(HeapObject* value);
// Checks whether an address/object in a space.
// Currently used by tests, serialization and heap verification only.
- bool InSpace(Address addr, AllocationSpace space);
bool InSpace(HeapObject* value, AllocationSpace space);
+ // Slow methods that can be used for verification as they can also be used
+ // with off-heap Addresses.
+ bool ContainsSlow(Address addr);
+ bool InSpaceSlow(Address addr, AllocationSpace space);
+ inline bool InNewSpaceSlow(Address address);
+ inline bool InOldSpaceSlow(Address address);
+
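  // A verification sketch (an assumption, not a call site from this patch):
  // because the Slow variants take raw addresses, they can back checks on
  // pointers that need not reference a live object, e.g.
  //   SLOW_DCHECK(heap->ContainsSlow(addr));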
// ===========================================================================
// Object statistics tracking. ===============================================
// ===========================================================================
@@ -1371,7 +1155,6 @@ class Heap {
int MaxSemiSpaceSize() { return max_semi_space_size_; }
int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
int InitialSemiSpaceSize() { return initial_semispace_size_; }
- int TargetSemiSpaceSize() { return target_semispace_size_; }
intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
intptr_t MaxExecutableSize() { return max_executable_size_; }
@@ -1408,13 +1191,13 @@ class Heap {
void UpdateSurvivalStatistics(int start_new_space_size);
- inline void IncrementPromotedObjectsSize(int object_size) {
+ inline void IncrementPromotedObjectsSize(intptr_t object_size) {
DCHECK_GE(object_size, 0);
promoted_objects_size_ += object_size;
}
inline intptr_t promoted_objects_size() { return promoted_objects_size_; }
- inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
+ inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) {
DCHECK_GE(object_size, 0);
semi_space_copied_object_size_ += object_size;
}
@@ -1432,8 +1215,8 @@ class Heap {
inline void IncrementNodesPromoted() { nodes_promoted_++; }
- inline void IncrementYoungSurvivorsCounter(int survived) {
- DCHECK(survived >= 0);
+ inline void IncrementYoungSurvivorsCounter(intptr_t survived) {
+ DCHECK_GE(survived, 0);
survived_last_scavenge_ = survived;
survived_since_last_expansion_ += survived;
}
@@ -1547,6 +1330,7 @@ class Heap {
// the corresponding allocation site is immediately updated and an entry
  // in the hash map is created. Otherwise the entry (including the count
  // value) is cached on the local pretenuring feedback.
+ template <UpdateAllocationSiteMode mode>
inline void UpdateAllocationSite(HeapObject* object,
HashMap* pretenuring_feedback);
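  // For example (see the mark-compact changes below), parallel evacuation
  // uses UpdateAllocationSite<Heap::kCached>(object, local_feedback) so each
  // task accumulates feedback in its own thread-local map instead of writing
  // to the global one.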
@@ -1580,6 +1364,9 @@ class Heap {
void ReportHeapStatistics(const char* title);
void ReportCodeStatistics(const char* title);
#endif
+#ifdef ENABLE_SLOW_DCHECKS
+ int CountHandlesForObject(Object* object);
+#endif
private:
class PretenuringScope;
@@ -1684,9 +1471,6 @@ class Heap {
static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
Heap* heap, Object** pointer);
- static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
- StoreBufferEvent event);
-
// Selects the proper allocation space based on the pretenuring decision.
static AllocationSpace SelectSpace(PretenureFlag pretenure) {
return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
@@ -2007,6 +1791,9 @@ class Heap {
MUST_USE_RESULT AllocationResult CopyCode(Code* code);
+ MUST_USE_RESULT AllocationResult
+ CopyBytecodeArray(BytecodeArray* bytecode_array);
+
// Allocates a fixed array initialized with undefined values
MUST_USE_RESULT AllocationResult
AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
@@ -2084,6 +1871,11 @@ class Heap {
MUST_USE_RESULT AllocationResult
CopyFixedArrayAndGrow(FixedArray* src, int grow_by, PretenureFlag pretenure);
+  // Make a copy of the first new_len elements of src and return the copy.
+ MUST_USE_RESULT AllocationResult CopyFixedArrayUpTo(FixedArray* src,
+ int new_len,
+ PretenureFlag pretenure);
+
// Make a copy of src, set the map, and return the copy.
MUST_USE_RESULT AllocationResult
CopyFixedArrayWithMap(FixedArray* src, Map* map);
@@ -2182,10 +1974,10 @@ class Heap {
// For keeping track of how much data has survived
// scavenge since last new space expansion.
- int survived_since_last_expansion_;
+ intptr_t survived_since_last_expansion_;
// ... and since the last scavenge.
- int survived_last_scavenge_;
+ intptr_t survived_last_scavenge_;
// This is not the depth of nested AlwaysAllocateScope's but rather a single
// count, as scopes can be acquired from multiple tasks (read: threads).
@@ -2201,8 +1993,6 @@ class Heap {
int global_ic_age_;
- int scan_on_scavenge_pages_;
-
NewSpace new_space_;
OldSpace* old_space_;
OldSpace* code_space_;
@@ -2270,8 +2060,6 @@ class Heap {
Object* encountered_transition_arrays_;
- StoreBufferRebuilder store_buffer_rebuilder_;
-
List<GCCallbackPair> gc_epilogue_callbacks_;
List<GCCallbackPair> gc_prologue_callbacks_;
@@ -2339,7 +2127,7 @@ class Heap {
ScavengeJob* scavenge_job_;
- InlineAllocationObserver* idle_scavenge_observer_;
+ AllocationObserver* idle_scavenge_observer_;
  // These two counters are monotonically increasing and never reset.
size_t full_codegen_bytes_generated_;
@@ -2696,16 +2484,7 @@ class DescriptorLookupCache {
}
}
- static int Hash(Object* source, Name* name) {
- // Uses only lower 32 bits if pointers are larger.
- uint32_t source_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
- kPointerSizeLog2;
- uint32_t name_hash =
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >>
- kPointerSizeLog2;
- return (source_hash ^ name_hash) % kLength;
- }
+ static inline int Hash(Object* source, Name* name);
static const int kLength = 64;
struct Key {
@@ -2790,6 +2569,61 @@ class PathTracer : public ObjectVisitor {
DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
#endif // DEBUG
+
+// -----------------------------------------------------------------------------
+// Allows observation of allocations.
+class AllocationObserver {
+ public:
+ explicit AllocationObserver(intptr_t step_size)
+ : step_size_(step_size), bytes_to_next_step_(step_size) {
+ DCHECK(step_size >= kPointerSize);
+ }
+ virtual ~AllocationObserver() {}
+
+  // Called each time the observed space does an allocation step. This may
+  // happen more frequently than the step_size we are monitoring (e.g. when
+  // there are multiple observers, or when a page or space boundary is
+  // encountered).
+ void AllocationStep(int bytes_allocated, Address soon_object, size_t size) {
+ bytes_to_next_step_ -= bytes_allocated;
+ if (bytes_to_next_step_ <= 0) {
+ Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
+ size);
+ step_size_ = GetNextStepSize();
+ bytes_to_next_step_ = step_size_;
+ }
+ }
+
+ protected:
+ intptr_t step_size() const { return step_size_; }
+ intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
+
+  // Pure virtual method implemented by subclasses that gets called when at
+  // least step_size bytes have been allocated. soon_object is the address just
+  // allocated (but not yet initialized). size is the size of the object as
+  // requested (i.e. without the alignment fillers). Some complexities to be
+  // aware of:
+  // 1) soon_object will be nullptr in cases where we end up observing an
+  //    allocation that happens to be a filler space (e.g. page boundaries).
+ // 2) size is the requested size at the time of allocation. Right-trimming
+ // may change the object size dynamically.
+ // 3) soon_object may actually be the first object in an allocation-folding
+  //    group. In such a case size is the size of the group rather than that
+  //    of the first object.
+ virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
+
+ // Subclasses can override this method to make step size dynamic.
+ virtual intptr_t GetNextStepSize() { return step_size_; }
+
+ intptr_t step_size_;
+ intptr_t bytes_to_next_step_;
+
+ private:
+ friend class LargeObjectSpace;
+ friend class NewSpace;
+ friend class PagedSpace;
+ DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
+};
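// A minimal sketch (not part of this patch) of a concrete observer, in the
// same pattern as the Observer in incremental-marking.h below: it simply
// counts how often its space crossed another 64 KB of allocation.
class SamplingObserver : public AllocationObserver {
 public:
  SamplingObserver() : AllocationObserver(64 * KB) {}
  void Step(int bytes_allocated, Address soon_object, size_t size) override {
    // soon_object may be nullptr when the step was triggered by a filler
    // (e.g. at a page boundary), so it must not be dereferenced blindly.
    steps_++;
  }
  int steps_ = 0;
};
// Such an observer would be attached with
// new_space()->AddAllocationObserver(&observer), mirroring the idle scavenge
// observer registration in heap.cc above.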
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 579df28b08..ce6f6ee69b 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -12,6 +12,7 @@
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
+#include "src/tracing/trace-event.h"
#include "src/v8.h"
namespace v8 {
@@ -23,7 +24,6 @@ IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
IncrementalMarking::DO_NOT_FORCE_COMPLETION);
}
-
IncrementalMarking::IncrementalMarking(Heap* heap)
: heap_(heap),
observer_(*this, kAllocatedThreshold),
@@ -46,7 +46,6 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
incremental_marking_finalization_rounds_(0),
request_type_(COMPLETE_MARKING) {}
-
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
HeapObject* value_heap_obj = HeapObject::cast(value);
MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
@@ -91,6 +90,16 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
marking->RecordWrite(obj, slot, *slot);
}
+// static
+void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host,
+ Object** slot,
+ Isolate* isolate) {
+ DCHECK(host->IsJSFunction());
+ IncrementalMarking* marking = isolate->heap()->incremental_marking();
+ Code* value = Code::cast(
+ Code::GetObjectFromEntryAddress(reinterpret_cast<Address>(slot)));
+ marking->RecordWriteOfCodeEntry(host, slot, value);
+}
void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
HeapObject* value) {
@@ -128,8 +137,7 @@ void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
Object* value) {
if (BaseRecordWrite(obj, value)) {
// Object is not going to be rescanned. We need to record the slot.
- heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
- Code::cast(value));
+ heap_->mark_compact_collector()->RecordRelocSlot(rinfo, value);
}
}
@@ -366,7 +374,6 @@ void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
} else {
chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
}
- chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}
@@ -437,7 +444,16 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
+#ifndef DEBUG
+ static const intptr_t kActivationThreshold = 8 * MB;
+#else
+ // TODO(gc) consider setting this to some low level so that some
+ // debug tests run with incremental marking and some without.
+ static const intptr_t kActivationThreshold = 0;
+#endif
+ // Don't switch on for very small heaps.
return CanBeActivated() &&
+ heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold &&
heap_->HeapIsFullEnoughToStartIncrementalMarking(
heap_->old_generation_allocation_limit());
}
@@ -447,21 +463,12 @@ bool IncrementalMarking::WasActivated() { return was_activated_; }
bool IncrementalMarking::CanBeActivated() {
-#ifndef DEBUG
- static const intptr_t kActivationThreshold = 8 * MB;
-#else
- // TODO(gc) consider setting this to some low level so that some
- // debug tests run with incremental marking and some without.
- static const intptr_t kActivationThreshold = 0;
-#endif
// Only start incremental marking in a safe state: 1) when incremental
// marking is turned on, 2) when we are currently not in a GC, and
// 3) when we are currently not serializing or deserializing the heap.
- // Don't switch on for very small heaps.
return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
heap_->deserialization_complete() &&
- !heap_->isolate()->serializer_enabled() &&
- heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
+ !heap_->isolate()->serializer_enabled();
}
@@ -528,6 +535,7 @@ void IncrementalMarking::Start(const char* reason) {
HistogramTimerScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking_start());
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
ResetStepCounters();
was_activated_ = true;
@@ -541,7 +549,7 @@ void IncrementalMarking::Start(const char* reason) {
state_ = SWEEPING;
}
- heap_->new_space()->AddInlineAllocationObserver(&observer_);
+ heap_->new_space()->AddAllocationObserver(&observer_);
incremental_marking_job()->Start(heap_);
}
@@ -787,8 +795,14 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
HeapObject* obj = array[current];
DCHECK(obj->IsHeapObject());
current = ((current + 1) & mask);
- if (heap_->InNewSpace(obj)) {
+ // Only pointers to from space have to be updated.
+ if (heap_->InFromSpace(obj)) {
MapWord map_word = obj->map_word();
+ // There may be objects on the marking deque that do not exist anymore,
+      // e.g. left-trimmed objects or objects from the root set (frames).
+      // If these objects are dead at scavenging time, their marking deque
+ // entries will not point to forwarding addresses. Hence, we can discard
+ // them.
if (map_word.IsForwardingAddress()) {
HeapObject* dest = map_word.ToForwardingAddress();
array[new_top] = dest;
@@ -944,23 +958,13 @@ void IncrementalMarking::Stop() {
PrintF("[IncrementalMarking] Stopping.\n");
}
- heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
+ heap_->new_space()->RemoveAllocationObserver(&observer_);
IncrementalMarking::set_should_hurry(false);
ResetStepCounters();
if (IsMarking()) {
PatchIncrementalMarkingRecordWriteStubs(heap_,
RecordWriteStub::STORE_BUFFER_ONLY);
DeactivateIncrementalWriteBarrier();
-
- if (is_compacting_) {
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- Page* p = Page::FromAddress(obj->address());
- if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- p->ClearFlag(Page::RESCAN_ON_EVACUATION);
- }
- }
- }
}
heap_->isolate()->stack_guard()->ClearGC();
state_ = STOPPED;
@@ -970,17 +974,7 @@ void IncrementalMarking::Stop() {
void IncrementalMarking::Finalize() {
Hurry();
- state_ = STOPPED;
- is_compacting_ = false;
-
- heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
- IncrementalMarking::set_should_hurry(false);
- ResetStepCounters();
- PatchIncrementalMarkingRecordWriteStubs(heap_,
- RecordWriteStub::STORE_BUFFER_ONLY);
- DeactivateIncrementalWriteBarrier();
- DCHECK(heap_->mark_compact_collector()->marking_deque()->IsEmpty());
- heap_->isolate()->stack_guard()->ClearGC();
+ Stop();
}
@@ -1164,6 +1158,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
{
HistogramTimerScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
double start = heap_->MonotonicallyIncreasingTimeInMs();
// The marking speed is driven either by the allocation rate or by the rate
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index be630213ac..387dd0c74a 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -7,6 +7,7 @@
#include "src/cancelable-task.h"
#include "src/execution.h"
+#include "src/heap/heap.h"
#include "src/heap/incremental-marking-job.h"
#include "src/heap/spaces.h"
#include "src/objects.h"
@@ -153,6 +154,9 @@ class IncrementalMarking {
static void RecordWriteFromCode(HeapObject* obj, Object** slot,
Isolate* isolate);
+ static void RecordWriteOfCodeEntryFromCode(JSFunction* host, Object** slot,
+ Isolate* isolate);
+
// Record a slot for compaction. Returns false for objects that are
// guaranteed to be rescanned or not guaranteed to survive.
//
@@ -215,10 +219,10 @@ class IncrementalMarking {
}
private:
- class Observer : public InlineAllocationObserver {
+ class Observer : public AllocationObserver {
public:
Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
- : InlineAllocationObserver(step_size),
+ : AllocationObserver(step_size),
incremental_marking_(incremental_marking) {}
void Step(int bytes_allocated, Address, size_t) override {
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index a59d36bfa1..f117acee9b 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -12,6 +12,17 @@
namespace v8 {
namespace internal {
+inline std::vector<Page*>& MarkCompactCollector::sweeping_list(Space* space) {
+ if (space == heap()->old_space()) {
+ return sweeping_list_old_space_;
+ } else if (space == heap()->code_space()) {
+ return sweeping_list_code_space_;
+ }
+ DCHECK_EQ(space, heap()->map_space());
+ return sweeping_list_map_space_;
+}
+
+
void MarkCompactCollector::PushBlack(HeapObject* obj) {
DCHECK(Marking::IsBlack(Marking::MarkBitFrom(obj)));
if (marking_deque_.Push(obj)) {
@@ -83,7 +94,7 @@ void MarkCompactCollector::ForceRecordSlot(HeapObject* object, Object** slot,
void CodeFlusher::AddCandidate(SharedFunctionInfo* shared_info) {
- if (GetNextCandidate(shared_info) == NULL) {
+ if (GetNextCandidate(shared_info) == nullptr) {
SetNextCandidate(shared_info, shared_function_info_candidates_head_);
shared_function_info_candidates_head_ = shared_info;
}
@@ -92,7 +103,7 @@ void CodeFlusher::AddCandidate(SharedFunctionInfo* shared_info) {
void CodeFlusher::AddCandidate(JSFunction* function) {
DCHECK(function->code() == function->shared()->code());
- if (GetNextCandidate(function)->IsUndefined()) {
+ if (function->next_function_link()->IsUndefined()) {
SetNextCandidate(function, jsfunction_candidates_head_);
jsfunction_candidates_head_ = function;
}
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 65bfdd92d8..646e63402a 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -19,13 +19,14 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
-#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/objects-visiting.h"
#include "src/heap/slots-buffer.h"
#include "src/heap/spaces-inl.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
+#include "src/utils-inl.h"
#include "src/v8.h"
namespace v8 {
@@ -314,15 +315,13 @@ void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
- heap_->store_buffer()->ClearInvalidStoreBufferEntries();
+ RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
}
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
- int number_of_pages = evacuation_candidates_.length();
- for (int i = 0; i < number_of_pages; i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
}
}
@@ -345,7 +344,7 @@ static void VerifyValidSlotsBufferEntries(Heap* heap, PagedSpace* space) {
void MarkCompactCollector::VerifyValidStoreAndSlotsBufferEntries() {
- heap()->store_buffer()->VerifyValidStoreBufferEntries();
+ RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
VerifyValidSlotsBufferEntries(heap(), heap()->old_space());
VerifyValidSlotsBufferEntries(heap(), heap()->code_space());
@@ -478,45 +477,32 @@ void MarkCompactCollector::ClearMarkbits() {
}
-class MarkCompactCollector::CompactionTask : public CancelableTask {
- public:
- explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
- : CancelableTask(heap->isolate()), spaces_(spaces) {}
-
- virtual ~CompactionTask() {}
-
- private:
- // v8::internal::CancelableTask overrides.
- void RunInternal() override {
- MarkCompactCollector* mark_compact =
- isolate()->heap()->mark_compact_collector();
- SlotsBuffer* evacuation_slots_buffer = nullptr;
- mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
- mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
- mark_compact->pending_compaction_tasks_semaphore_.Signal();
- }
-
- CompactionSpaceCollection* spaces_;
-
- DISALLOW_COPY_AND_ASSIGN(CompactionTask);
-};
-
-
class MarkCompactCollector::SweeperTask : public v8::Task {
public:
- SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
+ SweeperTask(Heap* heap, AllocationSpace space_to_start)
+ : heap_(heap), space_to_start_(space_to_start) {}
virtual ~SweeperTask() {}
private:
// v8::Task overrides.
void Run() override {
- heap_->mark_compact_collector()->SweepInParallel(space_, 0);
+ DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
+ DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
+ const int offset = space_to_start_ - FIRST_PAGED_SPACE;
+ const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+ for (int i = 0; i < num_spaces; i++) {
+ const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
+ DCHECK_GE(space_id, FIRST_PAGED_SPACE);
+ DCHECK_LE(space_id, LAST_PAGED_SPACE);
+ heap_->mark_compact_collector()->SweepInParallel(
+ heap_->paged_space(space_id), 0);
+ }
heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
}
Heap* heap_;
- PagedSpace* space_;
+ AllocationSpace space_to_start_;
DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};
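// The modular rotation above lets the three sweeper tasks start in different
// spaces while each still visits every paged space. For example, assuming the
// usual ordering OLD_SPACE < CODE_SPACE < MAP_SPACE, a task started with
// space_to_start_ == CODE_SPACE gets offset 1 and sweeps in the order
// CODE_SPACE, MAP_SPACE, OLD_SPACE.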
@@ -527,22 +513,19 @@ void MarkCompactCollector::StartSweeperThreads() {
DCHECK(free_list_code_space_.get()->IsEmpty());
DCHECK(free_list_map_space_.get()->IsEmpty());
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new SweeperTask(heap(), heap()->old_space()),
- v8::Platform::kShortRunningTask);
+ new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new SweeperTask(heap(), heap()->code_space()),
- v8::Platform::kShortRunningTask);
+ new SweeperTask(heap(), CODE_SPACE), v8::Platform::kShortRunningTask);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new SweeperTask(heap(), heap()->map_space()),
- v8::Platform::kShortRunningTask);
+ new SweeperTask(heap(), MAP_SPACE), v8::Platform::kShortRunningTask);
}
void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
- if (!page->SweepingCompleted()) {
+ if (!page->SweepingDone()) {
SweepInParallel(page, owner);
- if (!page->SweepingCompleted()) {
+ if (!page->SweepingDone()) {
// We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page. Wait for the sweeper
// thread to be done with this page.
@@ -721,14 +704,14 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
continue;
}
// Invariant: Evacuation candidates are just created when marking is
- // started. At the end of a GC all evacuation candidates are cleared and
- // their slot buffers are released.
+ // started. This means that sweeping has finished. Furthermore, at the end
+ // of a GC all evacuation candidates are cleared and their slot buffers are
+ // released.
CHECK(!p->IsEvacuationCandidate());
- CHECK(p->slots_buffer() == NULL);
+ CHECK(p->slots_buffer() == nullptr);
+ CHECK(p->SweepingDone());
DCHECK(p->area_size() == area_size);
- int live_bytes =
- p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes();
- pages.push_back(std::make_pair(live_bytes, p));
+ pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
}
int candidate_count = 0;
@@ -831,9 +814,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
void MarkCompactCollector::AbortCompaction() {
if (compacting_) {
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
p->ClearEvacuationCandidate();
p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
@@ -1224,9 +1205,6 @@ class MarkCompactMarkingVisitor
}
private:
- template <int id>
- static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
-
// Code flushing support.
static const int kRegExpCodeThreshold = 5;
@@ -1551,8 +1529,13 @@ class MarkCompactCollector::HeapObjectVisitor {
class MarkCompactCollector::EvacuateVisitorBase
: public MarkCompactCollector::HeapObjectVisitor {
public:
- EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
- : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
+ EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
+ SlotsBuffer** evacuation_slots_buffer,
+ LocalStoreBuffer* local_store_buffer)
+ : heap_(heap),
+ evacuation_slots_buffer_(evacuation_slots_buffer),
+ compaction_spaces_(compaction_spaces),
+ local_store_buffer_(local_store_buffer) {}
bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
HeapObject** target_object) {
@@ -1562,7 +1545,7 @@ class MarkCompactCollector::EvacuateVisitorBase
if (allocation.To(target_object)) {
heap_->mark_compact_collector()->MigrateObject(
*target_object, object, size, target_space->identity(),
- evacuation_slots_buffer_);
+ evacuation_slots_buffer_, local_store_buffer_);
return true;
}
return false;
@@ -1571,6 +1554,8 @@ class MarkCompactCollector::EvacuateVisitorBase
protected:
Heap* heap_;
SlotsBuffer** evacuation_slots_buffer_;
+ CompactionSpaceCollection* compaction_spaces_;
+ LocalStoreBuffer* local_store_buffer_;
};
@@ -1581,9 +1566,12 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
static const intptr_t kMaxLabObjectSize = 256;
explicit EvacuateNewSpaceVisitor(Heap* heap,
+ CompactionSpaceCollection* compaction_spaces,
SlotsBuffer** evacuation_slots_buffer,
+ LocalStoreBuffer* local_store_buffer,
HashMap* local_pretenuring_feedback)
- : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+ : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
+ local_store_buffer),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
space_to_allocate_(NEW_SPACE),
promoted_size_(0),
@@ -1591,11 +1579,13 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
local_pretenuring_feedback_(local_pretenuring_feedback) {}
bool Visit(HeapObject* object) override {
- heap_->UpdateAllocationSite(object, local_pretenuring_feedback_);
+ heap_->UpdateAllocationSite<Heap::kCached>(object,
+ local_pretenuring_feedback_);
int size = object->Size();
HeapObject* target_object = nullptr;
if (heap_->ShouldBePromoted(object->address(), size) &&
- TryEvacuateObject(heap_->old_space(), object, &target_object)) {
+ TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
+ &target_object)) {
// If we end up needing more special cases, we should factor this out.
if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
heap_->array_buffer_tracker()->Promote(
@@ -1608,7 +1598,8 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
AllocationSpace space = AllocateTargetObject(object, &target);
heap_->mark_compact_collector()->MigrateObject(
HeapObject::cast(target), object, size, space,
- (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
+ (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_,
+ (space == NEW_SPACE) ? nullptr : local_store_buffer_);
if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
}
@@ -1681,7 +1672,8 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
inline AllocationResult AllocateInOldSpace(int size_in_bytes,
AllocationAlignment alignment) {
AllocationResult allocation =
- heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
+ compaction_spaces_->Get(OLD_SPACE)->AllocateRaw(size_in_bytes,
+ alignment);
if (allocation.IsRetry()) {
FatalProcessOutOfMemory(
"MarkCompactCollector: semi-space copy, fallback in old gen\n");
@@ -1727,9 +1719,10 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
public:
EvacuateOldSpaceVisitor(Heap* heap,
CompactionSpaceCollection* compaction_spaces,
- SlotsBuffer** evacuation_slots_buffer)
- : EvacuateVisitorBase(heap, evacuation_slots_buffer),
- compaction_spaces_(compaction_spaces) {}
+ SlotsBuffer** evacuation_slots_buffer,
+ LocalStoreBuffer* local_store_buffer)
+ : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
+ local_store_buffer) {}
bool Visit(HeapObject* object) override {
CompactionSpace* target_space = compaction_spaces_->Get(
@@ -1741,9 +1734,6 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
}
return false;
}
-
- private:
- CompactionSpaceCollection* compaction_spaces_;
};
@@ -2551,16 +2541,17 @@ void MarkCompactCollector::AbortTransitionArrays() {
heap()->set_encountered_transition_arrays(Smi::FromInt(0));
}
-
void MarkCompactCollector::RecordMigratedSlot(
- Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
+ Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer,
+ LocalStoreBuffer* local_store_buffer) {
// When parallel compaction is in progress, store and slots buffer entries
// require synchronization.
if (heap_->InNewSpace(value)) {
if (compaction_in_progress_) {
- heap_->store_buffer()->MarkSynchronized(slot);
+ local_store_buffer->Record(slot);
} else {
- heap_->store_buffer()->Mark(slot);
+ Page* page = Page::FromAddress(slot);
+ RememberedSet<OLD_TO_NEW>::Insert(page, slot);
}
} else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
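A minimal standalone sketch of the dispatch above, with simplified stand-in types (LocalBuffer and SharedRememberedSet are illustrative, not V8's classes): during parallel compaction each task records into its own unsynchronized buffer, which is merged back on the main thread later; outside compaction the slot goes straight into the page-local remembered set.

    #include <cstdint>
    #include <vector>

    using Address = uintptr_t;

    // Thread-local: no synchronization needed while compacting in parallel.
    struct LocalBuffer {
      std::vector<Address> slots;
      void Record(Address slot) { slots.push_back(slot); }
    };

    // Shared per-page set: touched only from the main thread in this path.
    struct SharedRememberedSet {
      std::vector<Address> slots;
      void Insert(Address slot) { slots.push_back(slot); }
    };

    void RecordSlot(bool compaction_in_progress, Address slot,
                    LocalBuffer* local, SharedRememberedSet* shared) {
      if (compaction_in_progress) {
        local->Record(slot);  // merged back sequentially during finalization
      } else {
        shared->Insert(slot);
      }
    }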
@@ -2640,19 +2631,23 @@ void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
class RecordMigratedSlotVisitor final : public ObjectVisitor {
public:
RecordMigratedSlotVisitor(MarkCompactCollector* collector,
- SlotsBuffer** evacuation_slots_buffer)
+ SlotsBuffer** evacuation_slots_buffer,
+ LocalStoreBuffer* local_store_buffer)
: collector_(collector),
- evacuation_slots_buffer_(evacuation_slots_buffer) {}
+ evacuation_slots_buffer_(evacuation_slots_buffer),
+ local_store_buffer_(local_store_buffer) {}
V8_INLINE void VisitPointer(Object** p) override {
collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
- evacuation_slots_buffer_);
+ evacuation_slots_buffer_,
+ local_store_buffer_);
}
V8_INLINE void VisitPointers(Object** start, Object** end) override {
while (start < end) {
collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
- evacuation_slots_buffer_);
+ evacuation_slots_buffer_,
+ local_store_buffer_);
++start;
}
}
@@ -2668,6 +2663,7 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
private:
MarkCompactCollector* collector_;
SlotsBuffer** evacuation_slots_buffer_;
+ LocalStoreBuffer* local_store_buffer_;
};
@@ -2685,9 +2681,10 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
// pointer iteration. This is an issue if the store buffer overflows and we
// have to scan the entire old space, including dead objects, looking for
// pointers to new space.
-void MarkCompactCollector::MigrateObject(
- HeapObject* dst, HeapObject* src, int size, AllocationSpace dest,
- SlotsBuffer** evacuation_slots_buffer) {
+void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
+ int size, AllocationSpace dest,
+ SlotsBuffer** evacuation_slots_buffer,
+ LocalStoreBuffer* local_store_buffer) {
Address dst_addr = dst->address();
Address src_addr = src->address();
DCHECK(heap()->AllowedToBeMigrated(src, dest));
@@ -2698,7 +2695,8 @@ void MarkCompactCollector::MigrateObject(
DCHECK(IsAligned(size, kPointerSize));
heap()->MoveBlock(dst->address(), src->address(), size);
- RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer);
+ RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer,
+ local_store_buffer);
dst->IterateBody(&visitor);
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
@@ -2884,11 +2882,12 @@ void MarkCompactCollector::UpdateSlotsRecordedIn(SlotsBuffer* buffer) {
static void UpdatePointer(HeapObject** address, HeapObject* object) {
MapWord map_word = object->map_word();
- // The store buffer can still contain stale pointers in dead large objects.
- // Ignore these pointers here.
+ // Since we only filter invalid slots in old space, the store buffer can
+ // still contain stale pointers in the large object and map spaces. Ignore
+ // these pointers here.
DCHECK(map_word.IsForwardingAddress() ||
- object->GetHeap()->lo_space()->FindPage(
- reinterpret_cast<Address>(address)) != NULL);
+ !object->GetHeap()->old_space()->Contains(
+ reinterpret_cast<Address>(address)));
if (map_word.IsForwardingAddress()) {
// Update the corresponding slot.
*address = map_word.ToForwardingAddress();
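The fixup itself reduces to a forwarding-pointer check; a self-contained sketch (HeapObjectStub is a stand-in, not V8's HeapObject/MapWord):

    // A slot is rewired only if its target actually moved; stale entries
    // for unmoved (possibly dead) objects are deliberately left alone.
    struct HeapObjectStub {
      HeapObjectStub* forwarding = nullptr;  // non-null once relocated
    };

    void UpdatePointerSketch(HeapObjectStub** slot) {
      HeapObjectStub* object = *slot;
      if (object->forwarding != nullptr) {
        *slot = object->forwarding;
      }
    }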
@@ -3060,54 +3059,18 @@ void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
void MarkCompactCollector::EvacuateNewSpacePrologue() {
- // There are soft limits in the allocation code, designed trigger a mark
- // sweep collection by failing allocations. But since we are already in
- // a mark-sweep allocation, there is no sense in trying to trigger one.
- AlwaysAllocateScope scope(isolate());
-
NewSpace* new_space = heap()->new_space();
-
- // Store allocation range before flipping semispaces.
- Address from_bottom = new_space->bottom();
- Address from_top = new_space->top();
-
- // Flip the semispaces. After flipping, to space is empty, from space has
- // live objects.
- new_space->Flip();
- new_space->ResetAllocationInfo();
-
- newspace_evacuation_candidates_.Clear();
- NewSpacePageIterator it(from_bottom, from_top);
+ NewSpacePageIterator it(new_space->bottom(), new_space->top());
+ // Append the list of new space pages to be processed.
while (it.has_next()) {
newspace_evacuation_candidates_.Add(it.next());
}
+ new_space->Flip();
+ new_space->ResetAllocationInfo();
}
-
-HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() {
- HashMap* local_pretenuring_feedback = new HashMap(
- HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity);
- EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_,
- local_pretenuring_feedback);
- // First pass: traverse all objects in inactive semispace, remove marks,
- // migrate live objects and write forwarding addresses. This stage puts
- // new entries in the store buffer and may cause some pages to be marked
- // scan-on-scavenge.
- for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) {
- NewSpacePage* p =
- reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]);
- bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
- USE(ok);
- DCHECK(ok);
- }
- heap_->IncrementPromotedObjectsSize(
- static_cast<int>(new_space_visitor.promoted_size()));
- heap_->IncrementSemiSpaceCopiedObjectSize(
- static_cast<int>(new_space_visitor.semispace_copied_size()));
- heap_->IncrementYoungSurvivorsCounter(
- static_cast<int>(new_space_visitor.promoted_size()) +
- static_cast<int>(new_space_visitor.semispace_copied_size()));
- return local_pretenuring_feedback;
+void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
+ newspace_evacuation_candidates_.Rewind(0);
}
@@ -3117,8 +3080,168 @@ void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
evacuation_slots_buffers_.Add(evacuation_slots_buffer);
}
+class MarkCompactCollector::Evacuator : public Malloced {
+ public:
+ Evacuator(MarkCompactCollector* collector,
+ const List<Page*>& evacuation_candidates,
+ const List<NewSpacePage*>& newspace_evacuation_candidates)
+ : collector_(collector),
+ evacuation_candidates_(evacuation_candidates),
+ newspace_evacuation_candidates_(newspace_evacuation_candidates),
+ compaction_spaces_(collector->heap()),
+ local_slots_buffer_(nullptr),
+ local_store_buffer_(collector->heap()),
+ local_pretenuring_feedback_(HashMap::PointersMatch,
+ kInitialLocalPretenuringFeedbackCapacity),
+ new_space_visitor_(collector->heap(), &compaction_spaces_,
+ &local_slots_buffer_, &local_store_buffer_,
+ &local_pretenuring_feedback_),
+ old_space_visitor_(collector->heap(), &compaction_spaces_,
+ &local_slots_buffer_, &local_store_buffer_),
+ duration_(0.0),
+ bytes_compacted_(0),
+ task_id_(0) {}
+
+ // Evacuate the configured set of pages in parallel.
+ inline void EvacuatePages();
+
+ // Merge back locally cached info sequentially. Note that this method needs
+ // to be called from the main thread.
+ inline void Finalize();
+
+ CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
+
+ uint32_t task_id() { return task_id_; }
+ void set_task_id(uint32_t id) { task_id_ = id; }
+
+ private:
+ static const int kInitialLocalPretenuringFeedbackCapacity = 256;
+
+ Heap* heap() { return collector_->heap(); }
+
+ void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
+ duration_ += duration;
+ bytes_compacted_ += bytes_compacted;
+ }
+
+ inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
+
+ MarkCompactCollector* collector_;
+
+ // Pages to process.
+ const List<Page*>& evacuation_candidates_;
+ const List<NewSpacePage*>& newspace_evacuation_candidates_;
+
+ // Locally cached collector data.
+ CompactionSpaceCollection compaction_spaces_;
+ SlotsBuffer* local_slots_buffer_;
+ LocalStoreBuffer local_store_buffer_;
+ HashMap local_pretenuring_feedback_;
+
+ // Visitors for the corresponding spaces.
+ EvacuateNewSpaceVisitor new_space_visitor_;
+ EvacuateOldSpaceVisitor old_space_visitor_;
+
+ // Bookkeeping info.
+ double duration_;
+ intptr_t bytes_compacted_;
+
+ // Task id, if this evacuator is executed on a background task instead of
+ // the main thread. Can be used to try to abort the task currently
+ // scheduled to evacuate pages.
+ uint32_t task_id_;
+};
+
+bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
+ MemoryChunk* p, HeapObjectVisitor* visitor) {
+ bool success = true;
+ if (p->parallel_compaction_state().TrySetValue(
+ MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
+ if (p->IsEvacuationCandidate() || p->InNewSpace()) {
+ DCHECK_EQ(p->parallel_compaction_state().Value(),
+ MemoryChunk::kCompactingInProgress);
+ int saved_live_bytes = p->LiveBytes();
+ double evacuation_time;
+ {
+ AlwaysAllocateScope always_allocate(heap()->isolate());
+ TimedScope timed_scope(&evacuation_time);
+ success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
+ }
+ if (success) {
+ ReportCompactionProgress(evacuation_time, saved_live_bytes);
+ p->parallel_compaction_state().SetValue(
+ MemoryChunk::kCompactingFinalize);
+ } else {
+ p->parallel_compaction_state().SetValue(
+ MemoryChunk::kCompactingAborted);
+ }
+ } else {
+ // There could be popular pages in the list of evacuation candidates
+ // which we do not compact.
+ p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+ }
+ }
+ return success;
+}
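The TrySetValue call above is a compare-and-swap that lets exactly one evacuator claim a page; a stand-in sketch using std::atomic (the state names mirror the enum, the atomic type itself is assumed):

    #include <atomic>

    enum CompactingState { kCompactingDone, kCompactingInProgress };

    // Returns true iff this thread won the race to process the page.
    bool TryClaimPage(std::atomic<CompactingState>* state) {
      CompactingState expected = kCompactingDone;
      return state->compare_exchange_strong(expected, kCompactingInProgress);
    }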
+
+void MarkCompactCollector::Evacuator::EvacuatePages() {
+ for (NewSpacePage* p : newspace_evacuation_candidates_) {
+ DCHECK(p->InNewSpace());
+ DCHECK_EQ(p->concurrent_sweeping_state().Value(),
+ NewSpacePage::kSweepingDone);
+ bool success = EvacuateSinglePage(p, &new_space_visitor_);
+ DCHECK(success);
+ USE(success);
+ }
+ for (Page* p : evacuation_candidates_) {
+ DCHECK(p->IsEvacuationCandidate() ||
+ p->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION));
+ DCHECK_EQ(p->concurrent_sweeping_state().Value(), Page::kSweepingDone);
+ EvacuateSinglePage(p, &old_space_visitor_);
+ }
+}
+
+void MarkCompactCollector::Evacuator::Finalize() {
+ heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
+ heap()->code_space()->MergeCompactionSpace(
+ compaction_spaces_.Get(CODE_SPACE));
+ heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
+ heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
+ heap()->IncrementSemiSpaceCopiedObjectSize(
+ new_space_visitor_.semispace_copied_size());
+ heap()->IncrementYoungSurvivorsCounter(
+ new_space_visitor_.promoted_size() +
+ new_space_visitor_.semispace_copied_size());
+ heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
+ local_store_buffer_.Process(heap()->store_buffer());
+ collector_->AddEvacuationSlotsBufferSynchronized(local_slots_buffer_);
+}
+
+class MarkCompactCollector::CompactionTask : public CancelableTask {
+ public:
+ explicit CompactionTask(Heap* heap, Evacuator* evacuator)
+ : CancelableTask(heap->isolate()), heap_(heap), evacuator_(evacuator) {
+ evacuator->set_task_id(id());
+ }
+
+ virtual ~CompactionTask() {}
+
+ private:
+ // v8::internal::CancelableTask overrides.
+ void RunInternal() override {
+ evacuator_->EvacuatePages();
+ heap_->mark_compact_collector()
+ ->pending_compaction_tasks_semaphore_.Signal();
+ }
+
+ Heap* heap_;
+ Evacuator* evacuator_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompactionTask);
+};
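Taken together, Evacuator and CompactionTask implement a fork-join pattern: background tasks and the main thread evacuate pages concurrently, then all locally cached state is merged sequentially. A minimal sketch, with std::thread standing in for the platform's background-task runner and the semaphore wait:

    #include <thread>
    #include <vector>

    struct EvacuatorStub {
      void EvacuatePages() { /* claim and process pages, fill local buffers */ }
      void Finalize() { /* merge local buffers back into shared heap state */ }
    };

    void EvacuateInParallel(int num_tasks) {
      std::vector<EvacuatorStub> evacuators(num_tasks);
      std::vector<std::thread> workers;
      for (int i = 1; i < num_tasks; i++)  // background tasks
        workers.emplace_back([&, i] { evacuators[i].EvacuatePages(); });
      evacuators[0].EvacuatePages();       // the main thread contributes too
      for (auto& w : workers) w.join();    // stands in for the semaphore wait
      for (auto& e : evacuators) e.Finalize();  // sequential, main thread only
    }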
-int MarkCompactCollector::NumberOfParallelCompactionTasks() {
+int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
+ intptr_t live_bytes) {
if (!FLAG_parallel_compaction) return 1;
// Compute the number of needed tasks based on a target compaction time, the
// profiled compaction speed and marked live memory.
@@ -3126,83 +3249,75 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks() {
// The number of parallel compaction tasks is limited by:
// - #evacuation pages
// - (#cores - 1)
- // - a hard limit
const double kTargetCompactionTimeInMs = 1;
- const int kMaxCompactionTasks = 8;
+ const int kNumSweepingTasks = 3;
intptr_t compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
- if (compaction_speed == 0) return 1;
- intptr_t live_bytes = 0;
- for (Page* page : evacuation_candidates_) {
- live_bytes += page->LiveBytes();
+ const int available_cores =
+ Max(1, base::SysInfo::NumberOfProcessors() - kNumSweepingTasks - 1);
+ int tasks;
+ if (compaction_speed > 0) {
+ tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
+ compaction_speed / kTargetCompactionTimeInMs);
+ } else {
+ tasks = pages;
}
-
- const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1);
- const int tasks =
- 1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed /
- kTargetCompactionTimeInMs);
- const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks);
- const int tasks_capped_cores = Min(cores, tasks_capped_pages);
- const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores);
- return tasks_capped_hard;
+ const int tasks_capped_pages = Min(pages, tasks);
+ return Min(available_cores, tasks_capped_pages);
}
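The heuristic above, restated as a pure function with the same constants (num_processors stands in for base::SysInfo::NumberOfProcessors()):

    #include <algorithm>
    #include <cstdint>

    int NumCompactionTasks(int pages, int64_t live_bytes,
                           int64_t compaction_speed,  // bytes/ms, 0 = unknown
                           int num_processors) {
      const double kTargetCompactionTimeInMs = 1;
      const int kNumSweepingTasks = 3;
      const int available_cores =
          std::max(1, num_processors - kNumSweepingTasks - 1);
      const int tasks =
          compaction_speed > 0
              ? 1 + static_cast<int>(live_bytes / compaction_speed /
                                     kTargetCompactionTimeInMs)
              : pages;  // no profile yet: one task per page, capped below
      return std::min(available_cores, std::min(pages, tasks));
    }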
void MarkCompactCollector::EvacuatePagesInParallel() {
- const int num_pages = evacuation_candidates_.length();
- if (num_pages == 0) return;
+ int num_pages = 0;
+ intptr_t live_bytes = 0;
+ for (Page* page : evacuation_candidates_) {
+ num_pages++;
+ live_bytes += page->LiveBytes();
+ }
+ for (NewSpacePage* page : newspace_evacuation_candidates_) {
+ num_pages++;
+ live_bytes += page->LiveBytes();
+ }
+ DCHECK_GE(num_pages, 1);
// Used for trace summary.
- intptr_t live_bytes = 0;
intptr_t compaction_speed = 0;
if (FLAG_trace_fragmentation) {
- for (Page* page : evacuation_candidates_) {
- live_bytes += page->LiveBytes();
- }
compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
}
- const int num_tasks = NumberOfParallelCompactionTasks();
+
+ const int num_tasks = NumberOfParallelCompactionTasks(num_pages, live_bytes);
// Set up compaction spaces.
- CompactionSpaceCollection** compaction_spaces_for_tasks =
- new CompactionSpaceCollection*[num_tasks];
+ Evacuator** evacuators = new Evacuator*[num_tasks];
for (int i = 0; i < num_tasks; i++) {
- compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
+ evacuators[i] = new Evacuator(this, evacuation_candidates_,
+ newspace_evacuation_candidates_);
}
- heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
- num_tasks);
- heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
- num_tasks);
-
- uint32_t* task_ids = new uint32_t[num_tasks - 1];
// Kick off parallel tasks.
- StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks);
+ StartParallelCompaction(evacuators, num_tasks);
// Wait for unfinished and not-yet-started tasks.
- WaitUntilCompactionCompleted(task_ids, num_tasks - 1);
- delete[] task_ids;
+ WaitUntilCompactionCompleted(&evacuators[1], num_tasks - 1);
- double compaction_duration = 0.0;
- intptr_t compacted_memory = 0;
- // Merge back memory (compacted and unused) from compaction spaces.
+ // Finalize local evacuators by merging back all locally cached data.
for (int i = 0; i < num_tasks; i++) {
- heap()->old_space()->MergeCompactionSpace(
- compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
- heap()->code_space()->MergeCompactionSpace(
- compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
- compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted();
- compaction_duration += compaction_spaces_for_tasks[i]->duration();
- delete compaction_spaces_for_tasks[i];
- }
- delete[] compaction_spaces_for_tasks;
- heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory);
-
- // Finalize sequentially.
+ evacuators[i]->Finalize();
+ delete evacuators[i];
+ }
+ delete[] evacuators;
+
+ // Finalize pages sequentially.
+ for (NewSpacePage* p : newspace_evacuation_candidates_) {
+ DCHECK_EQ(p->parallel_compaction_state().Value(),
+ MemoryChunk::kCompactingFinalize);
+ p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+ }
+
int abandoned_pages = 0;
- for (int i = 0; i < num_pages; i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
switch (p->parallel_compaction_state().Value()) {
case MemoryChunk::ParallelCompactingState::kCompactingAborted:
// We have partially compacted the page, i.e., some objects may have
@@ -3222,12 +3337,11 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
// entries of such pages are filtered before rescanning.
DCHECK(p->IsEvacuationCandidate());
p->SetFlag(Page::COMPACTION_WAS_ABORTED);
- p->set_scan_on_scavenge(true);
abandoned_pages++;
break;
case MemoryChunk::kCompactingFinalize:
DCHECK(p->IsEvacuationCandidate());
- p->SetWasSwept();
+ DCHECK(p->SweepingDone());
p->Unlink();
break;
case MemoryChunk::kCompactingDone:
@@ -3235,7 +3349,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
break;
default:
- // We should not observe kCompactingInProgress, or kCompactingDone.
+ // MemoryChunk::kCompactingInProgress.
UNREACHABLE();
}
p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
@@ -3252,31 +3366,28 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
}
-
-void MarkCompactCollector::StartParallelCompaction(
- CompactionSpaceCollection** compaction_spaces, uint32_t* task_ids,
- int len) {
+void MarkCompactCollector::StartParallelCompaction(Evacuator** evacuators,
+ int len) {
compaction_in_progress_ = true;
for (int i = 1; i < len; i++) {
- CompactionTask* task = new CompactionTask(heap(), compaction_spaces[i]);
- task_ids[i - 1] = task->id();
+ CompactionTask* task = new CompactionTask(heap(), evacuators[i]);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
task, v8::Platform::kShortRunningTask);
}
- // Contribute in main thread.
- EvacuatePages(compaction_spaces[0], &migration_slots_buffer_);
+ // Contribute on main thread.
+ evacuators[0]->EvacuatePages();
}
-
-void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
+void MarkCompactCollector::WaitUntilCompactionCompleted(Evacuator** evacuators,
int len) {
// Try to cancel compaction tasks that have not been run (as they might be
// stuck in a worker queue). Tasks that cannot be canceled have either
// already completed or are still running, hence we need to wait for their
// semaphore signal.
for (int i = 0; i < len; i++) {
- if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) {
+ if (!heap()->isolate()->cancelable_task_manager()->TryAbort(
+ evacuators[i]->task_id())) {
pending_compaction_tasks_semaphore_.Wait();
}
}
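The cancel-or-wait protocol relies on each uncancelable task signalling exactly once. A self-contained sketch, with a condition-variable semaphore standing in for V8's base::Semaphore:

    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    class Semaphore {
     public:
      void Signal() {
        std::lock_guard<std::mutex> lock(mutex_);
        count_++;
        cv_.notify_one();
      }
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        count_--;
      }

     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_ = 0;
    };

    // try_abort(id) returns true iff the task was still queued and is now
    // cancelled; running or finished tasks will Signal() exactly once each.
    void WaitForTasks(bool (*try_abort)(uint32_t), const uint32_t* ids,
                      int len, Semaphore* sem) {
      for (int i = 0; i < len; i++) {
        if (!try_abort(ids[i])) sem->Wait();
      }
    }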
@@ -3284,45 +3395,6 @@ void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
}
-void MarkCompactCollector::EvacuatePages(
- CompactionSpaceCollection* compaction_spaces,
- SlotsBuffer** evacuation_slots_buffer) {
- EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
- evacuation_slots_buffer);
- for (int i = 0; i < evacuation_candidates_.length(); i++) {
- Page* p = evacuation_candidates_[i];
- DCHECK(p->IsEvacuationCandidate() ||
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
- DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
- MemoryChunk::kSweepingDone);
- if (p->parallel_compaction_state().TrySetValue(
- MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
- if (p->IsEvacuationCandidate()) {
- DCHECK_EQ(p->parallel_compaction_state().Value(),
- MemoryChunk::kCompactingInProgress);
- double start = heap()->MonotonicallyIncreasingTimeInMs();
- intptr_t live_bytes = p->LiveBytes();
- AlwaysAllocateScope always_allocate(isolate());
- if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
- p->ResetLiveBytes();
- p->parallel_compaction_state().SetValue(
- MemoryChunk::kCompactingFinalize);
- compaction_spaces->ReportCompactionProgress(
- heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
- } else {
- p->parallel_compaction_state().SetValue(
- MemoryChunk::kCompactingAborted);
- }
- } else {
- // There could be popular pages in the list of evacuation candidates
- // which we do compact.
- p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
- }
- }
- }
-}
-
-
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
public:
virtual Object* RetainAs(Object* object) {
@@ -3369,7 +3441,7 @@ template <SweepingMode sweeping_mode,
FreeSpaceTreatmentMode free_space_mode>
static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
ObjectVisitor* v) {
- DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
+ DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
space->identity() == CODE_SPACE);
DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
@@ -3432,14 +3504,7 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
freed_bytes = Free<parallelism>(space, free_list, free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
}
-
- if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
- // When concurrent sweeping is active, the page will be marked after
- // sweeping by the main thread.
- p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingFinalize);
- } else {
- p->SetWasSwept();
- }
+ p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
}
@@ -3472,9 +3537,7 @@ void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
Address end_slot) {
// Remove entries by replacing them with an old-space slot containing a smi
// that is located in an unmovable page.
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
if (p->IsEvacuationCandidate()) {
@@ -3512,6 +3575,10 @@ bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
page->markbits()->ClearRange(
page->AddressToMarkbitIndex(page->area_start()),
page->AddressToMarkbitIndex(object->address()));
+ if (page->old_to_new_slots() != nullptr) {
+ page->old_to_new_slots()->RemoveRange(
+ 0, static_cast<int>(object->address() - page->address()));
+ }
RecomputeLiveBytes(page);
}
return false;
@@ -3554,10 +3621,10 @@ void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
void MarkCompactCollector::SweepAbortedPages() {
// Second pass on aborted pages.
- for (int i = 0; i < evacuation_candidates_.length(); i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
+ p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
switch (space->identity()) {
case OLD_SPACE:
@@ -3586,30 +3653,25 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
Heap::RelocationLock relocation_lock(heap());
- HashMap* local_pretenuring_feedback = nullptr;
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
EvacuationScope evacuation_scope(this);
- EvacuateNewSpacePrologue();
- local_pretenuring_feedback = EvacuateNewSpaceInParallel();
- heap_->new_space()->set_age_mark(heap_->new_space()->top());
- }
- {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_CANDIDATES);
- EvacuationScope evacuation_scope(this);
+ EvacuateNewSpacePrologue();
EvacuatePagesInParallel();
- }
-
- {
- heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback);
- delete local_pretenuring_feedback;
+ EvacuateNewSpaceEpilogue();
+ heap()->new_space()->set_age_mark(heap()->new_space()->top());
}
UpdatePointersAfterEvacuation();
+ // Give pages that are queued to be freed back to the OS. Note that filtering
+ // slots only handles old space (for unboxed doubles), and thus map space can
+ // still contain stale pointers. We free the chunks only after the pointer
+ // updates so that the page headers are still accessible.
+ heap()->FreeQueuedChunks();
+
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
@@ -3677,18 +3739,14 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
// Update roots.
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
- StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
- &Heap::ScavengeStoreBufferCallback);
- heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
+ RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap_, UpdatePointer);
}
- int npages = evacuation_candidates_.length();
{
GCTracer::Scope gc_scope(
heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
@@ -3720,6 +3778,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+ p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
switch (space->identity()) {
case OLD_SPACE:
@@ -3761,51 +3820,38 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
-void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
- if (!p->IsEvacuationCandidate()) continue;
- p->Unlink();
- PagedSpace* space = static_cast<PagedSpace*>(p->owner());
- p->InsertAfter(space->LastPage());
- }
-}
-
-
void MarkCompactCollector::ReleaseEvacuationCandidates() {
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
if (!p->IsEvacuationCandidate()) continue;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->Free(p->area_start(), p->area_size());
- p->set_scan_on_scavenge(false);
p->ResetLiveBytes();
- CHECK(p->WasSwept());
- space->ReleasePage(p);
+ CHECK(p->SweepingDone());
+ space->ReleasePage(p, true);
}
evacuation_candidates_.Rewind(0);
compacting_ = false;
- heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages();
heap()->FreeQueuedChunks();
}
int MarkCompactCollector::SweepInParallel(PagedSpace* space,
- int required_freed_bytes) {
+ int required_freed_bytes,
+ int max_pages) {
int max_freed = 0;
int max_freed_overall = 0;
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
+ int page_count = 0;
+ for (Page* p : sweeping_list(space)) {
max_freed = SweepInParallel(p, space);
DCHECK(max_freed >= 0);
if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
return max_freed;
}
max_freed_overall = Max(max_freed, max_freed_overall);
- if (p == space->end_of_unswept_pages()) break;
+ page_count++;
+ if (max_pages > 0 && page_count >= max_pages) {
+ break;
+ }
}
return max_freed_overall;
}
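The loop above as a generic policy function: sweep pages in list order until one page yields a large-enough contiguous free block, or until the optional page budget is exhausted (Page and sweep are placeholders):

    #include <algorithm>
    #include <vector>

    // sweep(p) returns the largest contiguous freed block on page p, in bytes.
    template <typename Page, typename SweepFn>
    int SweepUntil(const std::vector<Page*>& sweeping_list, SweepFn sweep,
                   int required_freed_bytes, int max_pages) {
      int max_freed_overall = 0;
      int page_count = 0;
      for (Page* p : sweeping_list) {
        const int max_freed = sweep(p);
        if (required_freed_bytes > 0 && max_freed >= required_freed_bytes)
          return max_freed;  // a block big enough for the caller exists now
        max_freed_overall = std::max(max_freed, max_freed_overall);
        if (max_pages > 0 && ++page_count >= max_pages) break;
      }
      return max_freed_overall;
    }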
@@ -3813,14 +3859,13 @@ int MarkCompactCollector::SweepInParallel(PagedSpace* space,
int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
int max_freed = 0;
- if (page->TryLock()) {
+ if (page->mutex()->TryLock()) {
// If this page was already swept in the meantime, we can return here.
- if (page->parallel_sweeping_state().Value() !=
- MemoryChunk::kSweepingPending) {
+ if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
page->mutex()->Unlock();
return 0;
}
- page->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingInProgress);
+ page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
FreeList* free_list;
FreeList private_free_list(space);
if (space->identity() == OLD_SPACE) {
@@ -3840,6 +3885,7 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
}
free_list->Concatenate(&private_free_list);
+ page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
page->mutex()->Unlock();
}
return max_freed;
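A self-contained sketch of the try-lock pattern above, with std::mutex standing in for the page mutex; the state check under the lock is what makes double-sweeping impossible:

    #include <mutex>

    enum SweepingState { kSweepingPending, kSweepingInProgress, kSweepingDone };

    struct PageStub {
      std::mutex mutex;
      SweepingState state = kSweepingPending;
    };

    // Returns the freed bytes, or 0 if another thread owns or has already
    // swept the page.
    int TrySweepPage(PageStub* page) {
      if (!page->mutex.try_lock()) return 0;
      if (page->state != kSweepingPending) {  // swept in the meantime
        page->mutex.unlock();
        return 0;
      }
      page->state = kSweepingInProgress;
      int freed = 0;  // ... sweep dead objects into a private free list ...
      page->state = kSweepingDone;
      page->mutex.unlock();
      return freed;
    }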
@@ -3849,22 +3895,14 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
space->ClearStats();
- // We defensively initialize end_of_unswept_pages_ here with the first page
- // of the pages list.
- space->set_end_of_unswept_pages(space->FirstPage());
-
PageIterator it(space);
- int pages_swept = 0;
+ int will_be_swept = 0;
bool unused_page_present = false;
- bool parallel_sweeping_active = false;
while (it.has_next()) {
Page* p = it.next();
- DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
-
- // Clear sweeping flags indicating that marking bits are still intact.
- p->ClearWasSwept();
+ DCHECK(p->SweepingDone());
if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
p->IsEvacuationCandidate()) {
@@ -3878,6 +3916,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// that this adds unusable memory into the free list that is later on
// (in the free list) dropped again. Since we only use the flag for
// testing this is fine.
+ p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
continue;
@@ -3889,45 +3928,25 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
if (FLAG_gc_verbose) {
PrintIsolate(isolate(), "sweeping: released page: %p", p);
}
- space->ReleasePage(p);
+ space->ReleasePage(p, false);
continue;
}
unused_page_present = true;
}
- if (!parallel_sweeping_active) {
- if (FLAG_gc_verbose) {
- PrintIsolate(isolate(), "sweeping: %p", p);
- }
- if (space->identity() == CODE_SPACE) {
- if (FLAG_zap_code_space) {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- ZAP_FREE_SPACE>(space, NULL, p, NULL);
- } else {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
- }
- } else {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
- }
- pages_swept++;
- parallel_sweeping_active = true;
- } else {
- if (FLAG_gc_verbose) {
- PrintIsolate(isolate(), "sweeping: initialized for parallel: %p", p);
- }
- p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
- int to_sweep = p->area_size() - p->LiveBytes();
- space->accounting_stats_.ShrinkSpace(to_sweep);
- }
- space->set_end_of_unswept_pages(p);
+ p->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
+ sweeping_list(space).push_back(p);
+ int to_sweep = p->area_size() - p->LiveBytes();
+ space->accounting_stats_.ShrinkSpace(to_sweep);
+ will_be_swept++;
}
if (FLAG_gc_verbose) {
- PrintIsolate(isolate(), "sweeping: space=%s pages_swept=%d",
- AllocationSpaceName(space->identity()), pages_swept);
+ PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
+ AllocationSpaceName(space->identity()), will_be_swept);
}
+ std::sort(sweeping_list(space).begin(), sweeping_list(space).end(),
+ [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
}
@@ -3942,8 +3961,6 @@ void MarkCompactCollector::SweepSpaces() {
state_ = SWEEP_SPACES;
#endif
- MoveEvacuationCandidatesToEndOfPagesList();
-
{
sweeping_in_progress_ = true;
{
@@ -3969,10 +3986,6 @@ void MarkCompactCollector::SweepSpaces() {
// Deallocate unmarked large objects.
heap_->lo_space()->FreeUnmarkedObjects();
- // Give pages that are queued to be freed back to the OS. Invalid store
- // buffer entries are already filter out. We can just release the memory.
- heap()->FreeQueuedChunks();
-
if (FLAG_print_cumulative_gc_stat) {
heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
start_time);
@@ -3980,24 +3993,10 @@ void MarkCompactCollector::SweepSpaces() {
}
-void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
- if (p->parallel_sweeping_state().Value() ==
- MemoryChunk::kSweepingFinalize) {
- p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingDone);
- p->SetWasSwept();
- }
- DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
- }
-}
-
-
void MarkCompactCollector::ParallelSweepSpacesComplete() {
- ParallelSweepSpaceComplete(heap()->old_space());
- ParallelSweepSpaceComplete(heap()->code_space());
- ParallelSweepSpaceComplete(heap()->map_space());
+ sweeping_list(heap()->old_space()).clear();
+ sweeping_list(heap()->code_space()).clear();
+ sweeping_list(heap()->map_space()).clear();
}
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index cfb2d9d270..cc5449f977 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -7,6 +7,7 @@
#include "src/base/bits.h"
#include "src/heap/spaces.h"
+#include "src/heap/store-buffer.h"
namespace v8 {
namespace internal {
@@ -406,7 +407,8 @@ class MarkCompactCollector {
void MigrateObject(HeapObject* dst, HeapObject* src, int size,
AllocationSpace to_old_space,
- SlotsBuffer** evacuation_slots_buffer);
+ SlotsBuffer** evacuation_slots_buffer,
+ LocalStoreBuffer* local_store_buffer);
void InvalidateCode(Code* code);
@@ -421,7 +423,8 @@ class MarkCompactCollector {
// required_freed_bytes was freed. If required_freed_bytes was set to zero
// then the whole given space is swept. It returns the size of the maximum
// continuous freed memory chunk.
- int SweepInParallel(PagedSpace* space, int required_freed_bytes);
+ int SweepInParallel(PagedSpace* space, int required_freed_bytes,
+ int max_pages = 0);
// Sweeps a given page concurrently to the sweeper threads. It returns the
// size of the maximum continuous freed memory chunk.
@@ -508,10 +511,11 @@ class MarkCompactCollector {
class EvacuateNewSpaceVisitor;
class EvacuateOldSpaceVisitor;
class EvacuateVisitorBase;
+ class Evacuator;
class HeapObjectVisitor;
class SweeperTask;
- static const int kInitialLocalPretenuringFeedbackCapacity = 256;
+ typedef std::vector<Page*> SweepingList;
explicit MarkCompactCollector(Heap* heap);
@@ -693,31 +697,26 @@ class MarkCompactCollector {
// evacuation.
//
+ inline SweepingList& sweeping_list(Space* space);
+
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
// regions to each space's free list.
void SweepSpaces();
void EvacuateNewSpacePrologue();
-
- // Returns local pretenuring feedback.
- HashMap* EvacuateNewSpaceInParallel();
+ void EvacuateNewSpaceEpilogue();
void AddEvacuationSlotsBufferSynchronized(
SlotsBuffer* evacuation_slots_buffer);
- void EvacuatePages(CompactionSpaceCollection* compaction_spaces,
- SlotsBuffer** evacuation_slots_buffer);
-
void EvacuatePagesInParallel();
// The number of parallel compaction tasks, including the main thread.
- int NumberOfParallelCompactionTasks();
-
+ int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
- void StartParallelCompaction(CompactionSpaceCollection** compaction_spaces,
- uint32_t* task_ids, int len);
- void WaitUntilCompactionCompleted(uint32_t* task_ids, int len);
+ void StartParallelCompaction(Evacuator** evacuators, int len);
+ void WaitUntilCompactionCompleted(Evacuator** evacuators, int len);
void EvacuateNewSpaceAndCandidates();
@@ -736,10 +735,6 @@ class MarkCompactCollector {
void ReleaseEvacuationCandidates();
- // Moves the pages of the evacuation_candidates_ list to the end of their
- // corresponding space pages list.
- void MoveEvacuationCandidatesToEndOfPagesList();
-
// Starts sweeping of a space by contributing on the main thread and setting
// up other pages for sweeping.
void StartSweepSpace(PagedSpace* space);
@@ -748,11 +743,10 @@ class MarkCompactCollector {
// swept in parallel.
void ParallelSweepSpacesComplete();
- void ParallelSweepSpaceComplete(PagedSpace* space);
-
// Updates store buffer and slot buffer for a pointer in a migrating object.
void RecordMigratedSlot(Object* value, Address slot,
- SlotsBuffer** evacuation_slots_buffer);
+ SlotsBuffer** evacuation_slots_buffer,
+ LocalStoreBuffer* local_store_buffer);
// Adds the code entry slot to the slots buffer.
void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
@@ -778,8 +772,7 @@ class MarkCompactCollector {
bool have_code_to_deoptimize_;
List<Page*> evacuation_candidates_;
-
- List<MemoryChunk*> newspace_evacuation_candidates_;
+ List<NewSpacePage*> newspace_evacuation_candidates_;
// The evacuation_slots_buffers_ are used by the compaction threads.
// When a compaction task finishes, it uses
@@ -793,6 +786,10 @@ class MarkCompactCollector {
base::SmartPointer<FreeList> free_list_code_space_;
base::SmartPointer<FreeList> free_list_map_space_;
+ SweepingList sweeping_list_old_space_;
+ SweepingList sweeping_list_code_space_;
+ SweepingList sweeping_list_map_space_;
+
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
bool compacting_;
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index ee1009134b..f53730785a 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -47,7 +47,7 @@ void MemoryReducer::TimerTask::RunInternal() {
event.should_start_incremental_gc = is_idle || optimize_for_memory;
event.can_start_incremental_gc =
heap->incremental_marking()->IsStopped() &&
- heap->incremental_marking()->CanBeActivated();
+ (heap->incremental_marking()->CanBeActivated() || optimize_for_memory);
memory_reducer_->NotifyTimer(event);
}
@@ -118,9 +118,8 @@ void MemoryReducer::NotifyMarkCompact(const Event& event) {
}
}
-
-void MemoryReducer::NotifyContextDisposed(const Event& event) {
- DCHECK_EQ(kContextDisposed, event.type);
+void MemoryReducer::NotifyPossibleGarbage(const Event& event) {
+ DCHECK_EQ(kPossibleGarbage, event.type);
Action old_action = state_.action;
state_ = Step(state_, event);
if (old_action != kWait && state_.action == kWait) {
@@ -147,14 +146,14 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
if (event.type == kTimer) {
return state;
} else {
- DCHECK(event.type == kContextDisposed || event.type == kMarkCompact);
+ DCHECK(event.type == kPossibleGarbage || event.type == kMarkCompact);
return State(
kWait, 0, event.time_ms + kLongDelayMs,
event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms);
}
case kWait:
switch (event.type) {
- case kContextDisposed:
+ case kPossibleGarbage:
return state;
case kTimer:
if (state.started_gcs >= kMaxNumberOfGCs) {
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
index 9213613c07..0fe53e5fea 100644
--- a/deps/v8/src/heap/memory-reducer.h
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -96,7 +96,7 @@ class MemoryReducer {
double last_gc_time_ms;
};
- enum EventType { kTimer, kMarkCompact, kContextDisposed };
+ enum EventType { kTimer, kMarkCompact, kPossibleGarbage };
struct Event {
EventType type;
@@ -113,7 +113,7 @@ class MemoryReducer {
js_calls_sample_time_ms_(0.0) {}
// Callbacks.
void NotifyMarkCompact(const Event& event);
- void NotifyContextDisposed(const Event& event);
+ void NotifyPossibleGarbage(const Event& event);
void NotifyBackgroundIdleNotification(const Event& event);
// The step function that computes the next state from the current state and
// the incoming event.
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index a29ba4b08c..d71c879a73 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -115,7 +115,7 @@ int StaticNewSpaceVisitor<StaticVisitor>::VisitBytecodeArray(
VisitPointers(
map->GetHeap(), object,
HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
- HeapObject::RawField(object, BytecodeArray::kHeaderSize));
+ HeapObject::RawField(object, BytecodeArray::kFrameSizeOffset));
return reinterpret_cast<BytecodeArray*>(object)->BytecodeArraySize();
}
@@ -531,7 +531,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
StaticVisitor::VisitPointers(
map->GetHeap(), object,
HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
- HeapObject::RawField(object, BytecodeArray::kHeaderSize));
+ HeapObject::RawField(object, BytecodeArray::kFrameSizeOffset));
}
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 315c897bec..0003a0702d 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -118,7 +118,6 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_MAP_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
- case JS_ITERATOR_RESULT_TYPE:
case JS_PROMISE_TYPE:
case JS_BOUND_FUNCTION_TYPE:
return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
diff --git a/deps/v8/src/heap/remembered-set.cc b/deps/v8/src/heap/remembered-set.cc
new file mode 100644
index 0000000000..d9d5914273
--- /dev/null
+++ b/deps/v8/src/heap/remembered-set.cc
@@ -0,0 +1,69 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/remembered-set.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/slot-set.h"
+#include "src/heap/spaces.h"
+#include "src/heap/store-buffer.h"
+
+namespace v8 {
+namespace internal {
+
+template <PointerDirection direction>
+void RememberedSet<direction>::ClearInvalidSlots(Heap* heap) {
+ STATIC_ASSERT(direction == OLD_TO_NEW);
+ PageIterator it(heap->old_space());
+ MemoryChunk* chunk;
+ while (it.has_next()) {
+ chunk = it.next();
+ SlotSet* slots = GetSlotSet(chunk);
+ if (slots != nullptr) {
+ slots->Iterate([heap](Address addr) {
+ Object** slot = reinterpret_cast<Object**>(addr);
+ return IsValidSlot(heap, slot) ? SlotSet::KEEP_SLOT
+ : SlotSet::REMOVE_SLOT;
+ });
+ }
+ }
+}
+
+template <PointerDirection direction>
+void RememberedSet<direction>::VerifyValidSlots(Heap* heap) {
+ STATIC_ASSERT(direction == OLD_TO_NEW);
+ Iterate(heap, [heap](Address addr) {
+ Object** slot = reinterpret_cast<Object**>(addr);
+ Object* object = *slot;
+ if (Page::FromAddress(addr)->owner() != nullptr &&
+ Page::FromAddress(addr)->owner()->identity() == OLD_SPACE) {
+ CHECK(IsValidSlot(heap, slot));
+ heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
+ reinterpret_cast<Address>(slot), HeapObject::cast(object));
+ }
+ return SlotSet::KEEP_SLOT;
+ });
+}
+
+template <PointerDirection direction>
+bool RememberedSet<direction>::IsValidSlot(Heap* heap, Object** slot) {
+ STATIC_ASSERT(direction == OLD_TO_NEW);
+ Object* object = *slot;
+ if (!heap->InNewSpace(object)) {
+ return false;
+ }
+ HeapObject* heap_object = HeapObject::cast(object);
+ // If the target object is not black, the source slot must be part
+ // of a non-black (dead) object.
+ return Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
+ heap->mark_compact_collector()->IsSlotInLiveObject(
+ reinterpret_cast<Address>(slot));
+}
+
+template void RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(Heap* heap);
+template void RememberedSet<OLD_TO_NEW>::VerifyValidSlots(Heap* heap);
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
new file mode 100644
index 0000000000..351d76edb8
--- /dev/null
+++ b/deps/v8/src/heap/remembered-set.h
@@ -0,0 +1,157 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REMEMBERED_SET_H
+#define V8_REMEMBERED_SET_H
+
+#include "src/heap/heap.h"
+#include "src/heap/slot-set.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+enum PointerDirection { OLD_TO_OLD, OLD_TO_NEW };
+
+template <PointerDirection direction>
+class RememberedSet {
+ public:
+ // Given a page and a slot in that page, this function adds the slot to the
+ // remembered set.
+ static void Insert(Page* page, Address slot_addr) {
+ DCHECK(page->Contains(slot_addr));
+ SlotSet* slot_set = GetSlotSet(page);
+ if (slot_set == nullptr) {
+ slot_set = AllocateSlotSet(page);
+ }
+ uintptr_t offset = slot_addr - page->address();
+ slot_set[offset / Page::kPageSize].Insert(offset % Page::kPageSize);
+ }
+
+ // Given a page and a slot in that page, this function removes the slot from
+ // the remembered set.
+ // If the slot was never added, then the function does nothing.
+ static void Remove(Page* page, Address slot_addr) {
+ DCHECK(page->Contains(slot_addr));
+ SlotSet* slot_set = GetSlotSet(page);
+ if (slot_set != nullptr) {
+ uintptr_t offset = slot_addr - page->address();
+ slot_set[offset / Page::kPageSize].Remove(offset % Page::kPageSize);
+ }
+ }
+
+ // Given a page and a range of slots in that page, this function removes the
+ // slots from the remembered set.
+ static void RemoveRange(Page* page, Address start, Address end) {
+ SlotSet* slot_set = GetSlotSet(page);
+ if (slot_set != nullptr) {
+ uintptr_t start_offset = start - page->address();
+ uintptr_t end_offset = end - page->address();
+ DCHECK_LT(start_offset, end_offset);
+ DCHECK_LE(end_offset, static_cast<uintptr_t>(Page::kPageSize));
+ slot_set->RemoveRange(static_cast<uint32_t>(start_offset),
+ static_cast<uint32_t>(end_offset));
+ }
+ }
+
+ // Iterates and filters the remembered set with the given callback.
+ // The callback should take (Address slot) and return SlotSet::CallbackResult.
+ template <typename Callback>
+ static void Iterate(Heap* heap, Callback callback) {
+ PointerChunkIterator it(heap);
+ MemoryChunk* chunk;
+ while ((chunk = it.next()) != nullptr) {
+ SlotSet* slots = GetSlotSet(chunk);
+ if (slots != nullptr) {
+ size_t pages = (chunk->size() + Page::kPageSize - 1) / Page::kPageSize;
+ int new_count = 0;
+ for (size_t page = 0; page < pages; page++) {
+ new_count += slots[page].Iterate(callback);
+ }
+ if (new_count == 0) {
+ ReleaseSlotSet(chunk);
+ }
+ }
+ }
+ }
+
+ // Iterates and filters the remembered set with the given callback.
+ // The callback should take (HeapObject** slot, HeapObject* target) and
+ // update the slot.
+ // A special wrapper takes care of filtering the slots based on their values.
+ // For the OLD_TO_NEW case: slots that do not point to the ToSpace after
+ // callback invocation will be removed from the set.
+ template <typename Callback>
+ static void IterateWithWrapper(Heap* heap, Callback callback) {
+ Iterate(heap, [heap, callback](Address addr) {
+ return Wrapper(heap, addr, callback);
+ });
+ }
+
+ // Eliminates all stale slots from the remembered set, i.e.
+ // slots that are not part of live objects anymore. This method must be
+ // called after marking, when the whole transitive closure is known and
+ // must be called before sweeping when mark bits are still intact.
+ static void ClearInvalidSlots(Heap* heap);
+
+ static void VerifyValidSlots(Heap* heap);
+
+ private:
+ static SlotSet* GetSlotSet(MemoryChunk* chunk) {
+ if (direction == OLD_TO_OLD) {
+ return chunk->old_to_old_slots();
+ } else {
+ return chunk->old_to_new_slots();
+ }
+ }
+
+ static void ReleaseSlotSet(MemoryChunk* chunk) {
+ if (direction == OLD_TO_OLD) {
+ chunk->ReleaseOldToOldSlots();
+ } else {
+ chunk->ReleaseOldToNewSlots();
+ }
+ }
+
+ static SlotSet* AllocateSlotSet(MemoryChunk* chunk) {
+ if (direction == OLD_TO_OLD) {
+ chunk->AllocateOldToOldSlots();
+ return chunk->old_to_old_slots();
+ } else {
+ chunk->AllocateOldToNewSlots();
+ return chunk->old_to_new_slots();
+ }
+ }
+
+ template <typename Callback>
+ static SlotSet::CallbackResult Wrapper(Heap* heap, Address slot_address,
+ Callback slot_callback) {
+ STATIC_ASSERT(direction == OLD_TO_NEW);
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ Object* object = *slot;
+ if (heap->InFromSpace(object)) {
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+ DCHECK(heap_object->IsHeapObject());
+ slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+ object = *slot;
+ // If the object was in from space before the callback and is in to
+ // space after it, the object is still live. Unfortunately, we do not
+ // know whether the slot itself is still live; it could be located in
+ // a just-freed free-space object.
+ if (heap->InToSpace(object)) {
+ return SlotSet::KEEP_SLOT;
+ }
+ } else {
+ DCHECK(!heap->InNewSpace(object));
+ }
+ return SlotSet::REMOVE_SLOT;
+ }
+
+ static bool IsValidSlot(Heap* heap, Object** slot);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REMEMBERED_SET_H
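A hypothetical call site for the interface above (shapes only, not runnable in isolation): an old-to-new slot is recorded at write time and revisited after evacuation, where the wrapper drops any slot that no longer points into to-space.

    void Example(Heap* heap, Address slot_addr) {
      Page* page = Page::FromAddress(slot_addr);
      RememberedSet<OLD_TO_NEW>::Insert(page, slot_addr);
      // ... evacuation moves new-space objects ...
      RememberedSet<OLD_TO_NEW>::IterateWithWrapper(
          heap, [](HeapObject** slot, HeapObject* object) {
            // A real callback relocates `object` (or follows its forwarding
            // pointer) and writes the new address back into *slot.
          });
    }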
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
index cd35c7d7e3..b8fd1c8292 100644
--- a/deps/v8/src/heap/scavenger-inl.h
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -28,7 +28,7 @@ void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
return;
}
- object->GetHeap()->UpdateAllocationSite(
+ object->GetHeap()->UpdateAllocationSite<Heap::kGlobal>(
object, object->GetHeap()->global_pretenuring_feedback_);
// AllocationMementos are unrooted and shouldn't survive a scavenge
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
new file mode 100644
index 0000000000..6144706f71
--- /dev/null
+++ b/deps/v8/src/heap/slot-set.h
@@ -0,0 +1,219 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SLOT_SET_H
+#define V8_SLOT_SET_H
+
+#include "src/allocation.h"
+#include "src/base/bits.h"
+
+namespace v8 {
+namespace internal {
+
+// Data structure for maintaining a set of slots in a standard (non-large)
+// page. The base address of the page must be set with SetPageStart before any
+// operation.
+// The data structure assumes that the slots are pointer size aligned and
+// splits the valid slot offset range into kBuckets buckets.
+// Each bucket is a bitmap with a bit corresponding to a single slot offset.
+class SlotSet : public Malloced {
+ public:
+ enum CallbackResult { KEEP_SLOT, REMOVE_SLOT };
+
+ SlotSet() {
+ for (int i = 0; i < kBuckets; i++) {
+ bucket[i] = nullptr;
+ }
+ }
+
+ ~SlotSet() {
+ for (int i = 0; i < kBuckets; i++) {
+ ReleaseBucket(i);
+ }
+ }
+
+ void SetPageStart(Address page_start) { page_start_ = page_start; }
+
+ // The slot offset specifies a slot at address page_start_ + slot_offset.
+ void Insert(int slot_offset) {
+ int bucket_index, cell_index, bit_index;
+ SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
+ if (bucket[bucket_index] == nullptr) {
+ bucket[bucket_index] = AllocateBucket();
+ }
+ bucket[bucket_index][cell_index] |= 1u << bit_index;
+ }
+
+ // The slot offset specifies a slot at address page_start_ + slot_offset.
+ void Remove(int slot_offset) {
+ int bucket_index, cell_index, bit_index;
+ SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
+ if (bucket[bucket_index] != nullptr) {
+ uint32_t cell = bucket[bucket_index][cell_index];
+ if (cell) {
+ uint32_t bit_mask = 1u << bit_index;
+ if (cell & bit_mask) {
+ bucket[bucket_index][cell_index] ^= bit_mask;
+ }
+ }
+ }
+ }
+
+ // The slot offsets specify a range of slots at addresses:
+ // [page_start_ + start_offset ... page_start_ + end_offset).
+ void RemoveRange(int start_offset, int end_offset) {
+ DCHECK_LE(start_offset, end_offset);
+ int start_bucket, start_cell, start_bit;
+ SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
+ int end_bucket, end_cell, end_bit;
+ SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
+ uint32_t start_mask = (1u << start_bit) - 1;
+ uint32_t end_mask = ~((1u << end_bit) - 1);
+ if (start_bucket == end_bucket && start_cell == end_cell) {
+ MaskCell(start_bucket, start_cell, start_mask | end_mask);
+ return;
+ }
+ int current_bucket = start_bucket;
+ int current_cell = start_cell;
+ MaskCell(current_bucket, current_cell, start_mask);
+ current_cell++;
+ if (current_bucket < end_bucket) {
+ if (bucket[current_bucket] != nullptr) {
+ while (current_cell < kCellsPerBucket) {
+ bucket[current_bucket][current_cell] = 0;
+ current_cell++;
+ }
+ }
+ // The rest of the current bucket is cleared.
+ // Move on to the next bucket.
+ current_bucket++;
+ current_cell = 0;
+ }
+ DCHECK(current_bucket == end_bucket ||
+ (current_bucket < end_bucket && current_cell == 0));
+ while (current_bucket < end_bucket) {
+ ReleaseBucket(current_bucket);
+ current_bucket++;
+ }
+ // All buckets between start_bucket and end_bucket are cleared.
+ DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
+ if (current_bucket == kBuckets || bucket[current_bucket] == nullptr) {
+ return;
+ }
+ while (current_cell < end_cell) {
+ bucket[current_bucket][current_cell] = 0;
+ current_cell++;
+ }
+ // All cells between start_cell and end_cell are cleared.
+ DCHECK(current_bucket == end_bucket && current_cell == end_cell);
+ MaskCell(end_bucket, end_cell, end_mask);
+ }
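+ // Worked example (64-bit, kPointerSize == 8, so a bucket spans
+ // 1024 slots): RemoveRange(8, 16384) maps to start indices
+ // (bucket 0, cell 0, bit 1) and end indices (bucket 2, cell 0, bit 0).
+ // Cell 0 of bucket 0 is masked down to bit 0, cells 1..31 of bucket 0
+ // are zeroed, bucket 1 is released wholesale, and the end mask keeps
+ // every bit of bucket 2 (nothing to clear there).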
+
+ // The slot offset specifies a slot at address page_start_ + slot_offset.
+ bool Lookup(int slot_offset) {
+ int bucket_index, cell_index, bit_index;
+ SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
+ if (bucket[bucket_index] != nullptr) {
+ uint32_t cell = bucket[bucket_index][cell_index];
+ return (cell & (1u << bit_index)) != 0;
+ }
+ return false;
+ }
+
+ // Iterate over all slots in the set and for each slot invoke the callback.
+ // If the callback returns REMOVE_SLOT then the slot is removed from the set.
+ // Returns the new number of slots.
+ //
+ // Sample usage:
+ // Iterate([](Address slot_address) {
+ // if (good(slot_address)) return KEEP_SLOT;
+ // else return REMOVE_SLOT;
+ // });
+ template <typename Callback>
+ int Iterate(Callback callback) {
+ int new_count = 0;
+ for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
+ if (bucket[bucket_index] != nullptr) {
+ int in_bucket_count = 0;
+ uint32_t* current_bucket = bucket[bucket_index];
+ int cell_offset = bucket_index * kBitsPerBucket;
+ for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
+ if (current_bucket[i]) {
+ uint32_t cell = current_bucket[i];
+ uint32_t old_cell = cell;
+ uint32_t new_cell = cell;
+ while (cell) {
+ int bit_offset = base::bits::CountTrailingZeros32(cell);
+ uint32_t bit_mask = 1u << bit_offset;
+ uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
+ if (callback(page_start_ + slot) == KEEP_SLOT) {
+ ++in_bucket_count;
+ } else {
+ new_cell ^= bit_mask;
+ }
+ cell ^= bit_mask;
+ }
+ if (old_cell != new_cell) {
+ current_bucket[i] = new_cell;
+ }
+ }
+ }
+ if (in_bucket_count == 0) {
+ ReleaseBucket(bucket_index);
+ }
+ new_count += in_bucket_count;
+ }
+ }
+ return new_count;
+ }
+
+ private:
+ static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
+ static const int kCellsPerBucket = 32;
+ static const int kCellsPerBucketLog2 = 5;
+ static const int kBitsPerCell = 32;
+ static const int kBitsPerCellLog2 = 5;
+ static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
+ static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
+ static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
+
+ uint32_t* AllocateBucket() {
+ uint32_t* result = NewArray<uint32_t>(kCellsPerBucket);
+ for (int i = 0; i < kCellsPerBucket; i++) {
+ result[i] = 0;
+ }
+ return result;
+ }
+
+ void ReleaseBucket(int bucket_index) {
+ DeleteArray<uint32_t>(bucket[bucket_index]);
+ bucket[bucket_index] = nullptr;
+ }
+
+ void MaskCell(int bucket_index, int cell_index, uint32_t mask) {
+ uint32_t* cells = bucket[bucket_index];
+ if (cells != nullptr && cells[cell_index] != 0) {
+ cells[cell_index] &= mask;
+ }
+ }
+
+ // Converts the slot offset into bucket/cell/bit index.
+ void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
+ int* bit_index) {
+ DCHECK_EQ(slot_offset % kPointerSize, 0);
+ int slot = slot_offset >> kPointerSizeLog2;
+ DCHECK(slot >= 0 && slot <= kMaxSlots);
+ *bucket_index = slot >> kBitsPerBucketLog2;
+ *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
+ *bit_index = slot & (kBitsPerCell - 1);
+ }
+
+ uint32_t* bucket[kBuckets];
+ Address page_start_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SLOT_SET_H
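The SlotSet above maps every pointer-aligned offset on a page to one bit: 32-bit cells are grouped into lazily allocated buckets of 32 cells, so each bucket covers 1024 slots. A minimal standalone sketch of the same bucket/cell/bit decomposition, assuming 8-byte pointers (kPointerSizeLog2 == 3, an assumption for a 64-bit target) and the constants defined above:

#include <cassert>

// Constants mirroring slot-set.h; kPointerSizeLog2 == 3 is an assumption.
constexpr int kPointerSizeLog2 = 3;
constexpr int kCellsPerBucket = 32;
constexpr int kBitsPerCell = 32;
constexpr int kBitsPerCellLog2 = 5;
constexpr int kBitsPerBucketLog2 = 10;  // log2(32 cells * 32 bits)

// Same arithmetic as SlotSet::SlotToIndices.
void SlotToIndices(int slot_offset, int* bucket, int* cell, int* bit) {
  assert(slot_offset % (1 << kPointerSizeLog2) == 0);
  int slot = slot_offset >> kPointerSizeLog2;
  *bucket = slot >> kBitsPerBucketLog2;
  *cell = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
  *bit = slot & (kBitsPerCell - 1);
}

int main() {
  int bucket, cell, bit;
  // Byte offset 8264 -> slot index 1033 -> bucket 1, cell 0, bit 9.
  SlotToIndices(8264, &bucket, &cell, &bit);
  assert(bucket == 1 && cell == 0 && bit == 9);
  return 0;
}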
diff --git a/deps/v8/src/heap/slots-buffer.cc b/deps/v8/src/heap/slots-buffer.cc
index 3f145e6e2e..5a3db281fd 100644
--- a/deps/v8/src/heap/slots-buffer.cc
+++ b/deps/v8/src/heap/slots-buffer.cc
@@ -56,9 +56,12 @@ void SlotsBuffer::RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer) {
// - point to a heap object in new space
// - are not within a live heap object on a valid pointer slot
// - point to a heap object not on an evacuation candidate
- if (!object->IsHeapObject() || heap->InNewSpace(object) ||
+ // TODO(mlippautz): Move InNewSpace check above IsSlotInLiveObject once
+ // we filter out unboxed double slots eagerly.
+ if (!object->IsHeapObject() ||
!heap->mark_compact_collector()->IsSlotInLiveObject(
reinterpret_cast<Address>(slot)) ||
+ heap->InNewSpace(object) ||
!Page::FromAddress(reinterpret_cast<Address>(object))
->IsEvacuationCandidate()) {
// TODO(hpayer): Instead of replacing slots with kRemovedEntry we
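Written out as a positive predicate (the De Morgan inversion of the removal condition above), a slot is kept only if all four checks hold, with the liveness check now deliberately ahead of the new-space check. The free function below is illustrative only, not part of the patch, but the calls are the ones from this file:

// Illustrative restatement of the invalid-slot filter above.
bool IsValidSlot(Heap* heap, Object** slot, Object* object) {
  return object->IsHeapObject() &&
         heap->mark_compact_collector()->IsSlotInLiveObject(
             reinterpret_cast<Address>(slot)) &&
         !heap->InNewSpace(object) &&
         Page::FromAddress(reinterpret_cast<Address>(object))
             ->IsEvacuationCandidate();
}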
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 3023fbf51e..515a202769 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -178,6 +178,52 @@ void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
#endif
+// -----------------------------------------------------------------------------
+// SemiSpace
+
+bool SemiSpace::Contains(HeapObject* o) {
+ return id_ == kToSpace
+ ? MemoryChunk::FromAddress(o->address())->InToSpace()
+ : MemoryChunk::FromAddress(o->address())->InFromSpace();
+}
+
+bool SemiSpace::Contains(Object* o) {
+ return o->IsHeapObject() && Contains(HeapObject::cast(o));
+}
+
+bool SemiSpace::ContainsSlow(Address a) {
+ NewSpacePageIterator it(this);
+ while (it.has_next()) {
+ if (it.next() == MemoryChunk::FromAddress(a)) return true;
+ }
+ return false;
+}
+
+// --------------------------------------------------------------------------
+// NewSpace
+
+bool NewSpace::Contains(HeapObject* o) {
+ return MemoryChunk::FromAddress(o->address())->InNewSpace();
+}
+
+bool NewSpace::Contains(Object* o) {
+ return o->IsHeapObject() && Contains(HeapObject::cast(o));
+}
+
+bool NewSpace::ContainsSlow(Address a) {
+ return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
+}
+
+bool NewSpace::ToSpaceContainsSlow(Address a) {
+ return to_space_.ContainsSlow(a);
+}
+
+bool NewSpace::FromSpaceContainsSlow(Address a) {
+ return from_space_.ContainsSlow(a);
+}
+
+bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
+bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
// --------------------------------------------------------------------------
// AllocationResult
@@ -205,6 +251,36 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
return page;
}
+void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
+ MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
+}
+
+void MemoryChunk::ResetLiveBytes() {
+ if (FLAG_trace_live_bytes) {
+ PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n", this,
+ live_byte_count_);
+ }
+ live_byte_count_ = 0;
+}
+
+void MemoryChunk::IncrementLiveBytes(int by) {
+ if (FLAG_trace_live_bytes) {
+ PrintIsolate(heap()->isolate(),
+ "live-bytes: update page=%p delta=%d %d->%d\n", this, by,
+ live_byte_count_, live_byte_count_ + by);
+ }
+ live_byte_count_ += by;
+ DCHECK_GE(live_byte_count_, 0);
+ DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
+}
+
+void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->SweepingDone()) {
+ static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
+ }
+ chunk->IncrementLiveBytes(by);
+}
bool PagedSpace::Contains(Address addr) {
Page* p = Page::FromAddress(addr);
@@ -212,39 +288,24 @@ bool PagedSpace::Contains(Address addr) {
return p->owner() == this;
}
-
-bool PagedSpace::Contains(HeapObject* o) { return Contains(o->address()); }
-
-
-void MemoryChunk::set_scan_on_scavenge(bool scan) {
- if (scan) {
- if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
- SetFlag(SCAN_ON_SCAVENGE);
- } else {
- if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
- ClearFlag(SCAN_ON_SCAVENGE);
- }
- heap_->incremental_marking()->SetOldSpacePageFlags(this);
+bool PagedSpace::Contains(Object* o) {
+ if (!o->IsHeapObject()) return false;
+ Page* p = Page::FromAddress(HeapObject::cast(o)->address());
+ if (!p->is_valid()) return false;
+ return p->owner() == this;
}
-
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
- MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
- OffsetFrom(addr) & ~Page::kPageAlignmentMask);
- if (maybe->owner() != NULL) return maybe;
- LargeObjectIterator iterator(heap->lo_space());
- for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
- // Fixed arrays are the only pointer-containing objects in large object
- // space.
- if (o->IsFixedArray()) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
- if (chunk->Contains(addr)) {
- return chunk;
- }
- }
+ MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
+ uintptr_t offset = addr - chunk->address();
+ if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
+ chunk = heap->lo_space()->FindPage(addr);
}
- UNREACHABLE();
- return NULL;
+ return chunk;
+}
+
+Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
+ return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
}
@@ -425,12 +486,18 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
- return alignment == kDoubleAligned
- ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
- : AllocateRawUnaligned(size_in_bytes);
+ AllocationResult result =
+ alignment == kDoubleAligned
+ ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
+ : AllocateRawUnaligned(size_in_bytes);
#else
- return AllocateRawUnaligned(size_in_bytes);
+ AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
+ HeapObject* heap_obj = nullptr;
+ if (!result.IsRetry() && result.To(&heap_obj)) {
+ AllocationStep(heap_obj->address(), size_in_bytes);
+ }
+ return result;
}
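After this change every successful paged-space allocation flows through Space::AllocationStep (added in spaces.cc below), which fans the event out to registered observers. A self-contained sketch of that pattern follows; it mirrors the names in this diff but is not the real v8::internal::AllocationObserver declaration, whose signature is inferred here from the call sites:

#include <cstddef>
#include <cstdint>
#include <vector>

using Address = uint8_t*;  // stand-in for v8's Address typedef

struct AllocationObserver {
  virtual ~AllocationObserver() = default;
  // Signature inferred from the call sites in this diff.
  virtual void AllocationStep(int bytes_allocated, Address soon_object,
                              size_t size) = 0;
};

struct Space {
  std::vector<AllocationObserver*> observers;
  bool allocation_observers_paused = false;

  // Mirrors Space::AllocationStep in spaces.cc below.
  void AllocationStep(Address soon_object, int size) {
    if (allocation_observers_paused) return;
    for (AllocationObserver* o : observers)
      o->AllocationStep(size, soon_object, size);
  }
};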
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 90d252abb5..6b98fc1d0e 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -7,6 +7,7 @@
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/heap/slot-set.h"
#include "src/heap/slots-buffer.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
@@ -35,7 +36,7 @@ HeapObjectIterator::HeapObjectIterator(Page* page) {
owner == page->heap()->code_space());
Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
page->area_end(), kOnePageOnly);
- DCHECK(page->WasSwept() || page->SweepingCompleted());
+ DCHECK(page->SweepingDone());
}
@@ -66,10 +67,24 @@ bool HeapObjectIterator::AdvanceToNextPage() {
cur_page);
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
- DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
+ DCHECK(cur_page->SweepingDone());
return true;
}
+PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
+ : heap_(heap) {
+ AllSpaces spaces(heap_);
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ space->PauseAllocationObservers();
+ }
+}
+
+PauseAllocationObserversScope::~PauseAllocationObserversScope() {
+ AllSpaces spaces(heap_);
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ space->ResumeAllocationObservers();
+ }
+}
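PauseAllocationObserversScope is a plain RAII guard: the constructor pauses observers on every space and the destructor resumes them, so a pause cannot leak past an early return. A hypothetical caller (the surrounding method and its allocation work are placeholders, not code from this patch):

// Hypothetical usage; SomeGCOperation/DoInternalAllocations are placeholders.
void Heap::SomeGCOperation() {
  PauseAllocationObserversScope pause(this);
  DoInternalAllocations();  // observers see none of these allocations
}  // destructor of |pause| resumes observers on all spaces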
// -----------------------------------------------------------------------------
// CodeRange
@@ -427,8 +442,7 @@ NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
MemoryChunk* chunk =
MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
- area_end, NOT_EXECUTABLE, semi_space);
- chunk->initialize_scan_on_scavenge(true);
+ area_end, NOT_EXECUTABLE, semi_space, nullptr);
bool in_to_space = (semi_space->id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
: MemoryChunk::IN_FROM_SPACE);
@@ -449,10 +463,10 @@ void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
SetFlags(0, ~0);
}
-
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
- Executability executable, Space* owner) {
+ Executability executable, Space* owner,
+ base::VirtualMemory* reservation) {
MemoryChunk* chunk = FromAddress(base);
DCHECK(base == chunk->address());
@@ -464,23 +478,20 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->flags_ = 0;
chunk->set_owner(owner);
chunk->InitializeReservedMemory();
- chunk->slots_buffer_ = NULL;
- chunk->skip_list_ = NULL;
+ chunk->slots_buffer_ = nullptr;
+ chunk->old_to_new_slots_ = nullptr;
+ chunk->old_to_old_slots_ = nullptr;
+ chunk->skip_list_ = nullptr;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
- chunk->parallel_sweeping_state().SetValue(kSweepingDone);
+ chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
chunk->parallel_compaction_state().SetValue(kCompactingDone);
- chunk->mutex_ = NULL;
- chunk->available_in_small_free_list_ = 0;
- chunk->available_in_medium_free_list_ = 0;
- chunk->available_in_large_free_list_ = 0;
- chunk->available_in_huge_free_list_ = 0;
- chunk->non_available_small_blocks_ = 0;
+ chunk->mutex_ = nullptr;
+ chunk->available_in_free_list_ = 0;
+ chunk->wasted_memory_ = 0;
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
- chunk->initialize_scan_on_scavenge(false);
- chunk->SetFlag(WAS_SWEPT);
chunk->set_next_chunk(nullptr);
chunk->set_prev_chunk(nullptr);
@@ -491,6 +502,10 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->SetFlag(IS_EXECUTABLE);
}
+ if (reservation != nullptr) {
+ chunk->reservation_.TakeControl(reservation);
+ }
+
return chunk;
}
@@ -692,19 +707,14 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
}
- MemoryChunk* result = MemoryChunk::Initialize(
- heap, base, chunk_size, area_start, area_end, executable, owner);
- result->set_reserved_memory(&reservation);
- return result;
+ return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
+ executable, owner, &reservation);
}
void Page::ResetFreeListStatistics() {
- non_available_small_blocks_ = 0;
- available_in_small_free_list_ = 0;
- available_in_medium_free_list_ = 0;
- available_in_large_free_list_ = 0;
- available_in_huge_free_list_ = 0;
+ wasted_memory_ = 0;
+ available_in_free_list_ = 0;
}
@@ -921,21 +931,46 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
// -----------------------------------------------------------------------------
// MemoryChunk implementation
-void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
- static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
- }
- chunk->IncrementLiveBytes(by);
-}
-
-
void MemoryChunk::ReleaseAllocatedMemory() {
delete slots_buffer_;
+ slots_buffer_ = nullptr;
delete skip_list_;
+ skip_list_ = nullptr;
delete mutex_;
+ mutex_ = nullptr;
+ ReleaseOldToNewSlots();
+ ReleaseOldToOldSlots();
+}
+
+static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
+ size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
+ DCHECK(pages > 0);
+ SlotSet* slot_set = new SlotSet[pages];
+ for (size_t i = 0; i < pages; i++) {
+ slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
+ }
+ return slot_set;
+}
+
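AllocateSlotSet rounds the chunk size up to whole pages, so a large-object chunk spanning several pages gets one SlotSet per covered page, each anchored at its own page start. A toy check of the rounding; the 1 MB page size is an assumption, not taken from this diff:

#include <cassert>
#include <cstddef>

constexpr size_t kPageSize = size_t{1} << 20;  // assumption: 1 MB pages

// Same round-up as in AllocateSlotSet above.
size_t PagesCoveredBy(size_t chunk_size) {
  return (chunk_size + kPageSize - 1) / kPageSize;
}

int main() {
  assert(PagesCoveredBy(kPageSize) == 1);          // a regular page
  assert(PagesCoveredBy(3 * kPageSize + 1) == 4);  // spills into a 4th page
  return 0;
}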
+void MemoryChunk::AllocateOldToNewSlots() {
+ DCHECK(nullptr == old_to_new_slots_);
+ old_to_new_slots_ = AllocateSlotSet(size_, address());
+}
+
+void MemoryChunk::ReleaseOldToNewSlots() {
+ delete[] old_to_new_slots_;
+ old_to_new_slots_ = nullptr;
+}
+
+void MemoryChunk::AllocateOldToOldSlots() {
+ DCHECK(nullptr == old_to_old_slots_);
+ old_to_old_slots_ = AllocateSlotSet(size_, address());
}
+void MemoryChunk::ReleaseOldToOldSlots() {
+ delete[] old_to_old_slots_;
+ old_to_old_slots_ = nullptr;
+}
// -----------------------------------------------------------------------------
// PagedSpace implementation
@@ -949,12 +984,18 @@ STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
ObjectSpace::kObjectSpaceMapSpace);
+void Space::AllocationStep(Address soon_object, int size) {
+ if (!allocation_observers_paused_) {
+ for (int i = 0; i < allocation_observers_->length(); ++i) {
+ AllocationObserver* o = (*allocation_observers_)[i];
+ o->AllocationStep(size, soon_object, size);
+ }
+ }
+}
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
- : Space(heap, space, executable),
- free_list_(this),
- end_of_unswept_pages_(NULL) {
+ : Space(heap, space, executable), free_list_(this) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
@@ -987,52 +1028,6 @@ void PagedSpace::AddMemory(Address start, intptr_t size) {
}
-FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
- FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
- if (free_space != nullptr) {
- accounting_stats_.DecreaseCapacity(free_space->size());
- }
- return free_space;
-}
-
-
-void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
- int num, intptr_t limit) {
- DCHECK_GT(num, 0);
- DCHECK(other != nullptr);
-
- if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
-
- EmptyAllocationInfo();
-
- bool memory_available = true;
- bool spaces_need_memory = true;
- FreeSpace* node = nullptr;
- CompactionSpace* current_space = nullptr;
- // Iterate over spaces and memory as long as we have memory and there are
- // spaces in need of some.
- while (memory_available && spaces_need_memory) {
- spaces_need_memory = false;
- // Round-robin over all spaces.
- for (int i = 0; i < num; i++) {
- current_space = other[i]->Get(identity());
- if (current_space->free_list()->Available() < limit) {
- // Space has not reached its limit. Try to get some memory.
- spaces_need_memory = true;
- node = TryRemoveMemory(limit - current_space->free_list()->Available());
- if (node != nullptr) {
- CHECK(current_space->identity() == identity());
- current_space->AddMemory(node->address(), node->size());
- } else {
- memory_available = false;
- break;
- }
- }
- }
- }
-}
-
-
void PagedSpace::RefillFreeList() {
MarkCompactCollector* collector = heap()->mark_compact_collector();
FreeList* free_list = nullptr;
@@ -1075,7 +1070,6 @@ void CompactionSpace::RefillFreeList() {
}
}
-
void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
DCHECK(identity() == other->identity());
// Destroy the linear allocation space of {other}. This is needed to
@@ -1109,8 +1103,6 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
DCHECK(other->top() == nullptr);
DCHECK(other->limit() == nullptr);
- DCHECK(other->end_of_unswept_pages_ == nullptr);
-
AccountCommitted(other->CommittedMemory());
// Move over pages.
@@ -1136,8 +1128,7 @@ size_t PagedSpace::CommittedPhysicalMemory() {
return size;
}
-
-bool PagedSpace::ContainsSafe(Address addr) {
+bool PagedSpace::ContainsSlow(Address addr) {
Page* p = Page::FromAddress(addr);
PageIterator iterator(this);
while (iterator.has_next()) {
@@ -1229,21 +1220,16 @@ void PagedSpace::IncreaseCapacity(int size) {
}
-void PagedSpace::ReleasePage(Page* page) {
+void PagedSpace::ReleasePage(Page* page, bool evict_free_list_items) {
DCHECK(page->LiveBytes() == 0);
DCHECK(AreaSize() == page->area_size());
- if (page->WasSwept()) {
+ if (evict_free_list_items) {
intptr_t size = free_list_.EvictFreeListItems(page);
accounting_stats_.AllocateBytes(size);
DCHECK_EQ(AreaSize(), static_cast<int>(size));
}
- if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
- heap()->decrement_scan_on_scavenge_pages();
- page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
- }
-
DCHECK(!free_list_.ContainsPageFreeListItems(page));
if (Page::FromAllocationTop(allocation_info_.top()) == page) {
@@ -1279,7 +1265,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
if (page == Page::FromAllocationTop(allocation_info_.top())) {
allocation_pointer_found_in_space = true;
}
- CHECK(page->WasSwept());
+ CHECK(page->SweepingDone());
HeapObjectIterator it(page);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
@@ -1327,8 +1313,6 @@ bool NewSpace::SetUp(int reserved_semispace_capacity,
// this chunk must be a power of two and it must be aligned to its size.
int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
- int target_semispace_capacity = heap()->TargetSemiSpaceSize();
-
size_t size = 2 * reserved_semispace_capacity;
Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
size, size, &reservation_);
@@ -1357,19 +1341,15 @@ bool NewSpace::SetUp(int reserved_semispace_capacity,
DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
to_space_.SetUp(chunk_base_, initial_semispace_capacity,
- target_semispace_capacity, maximum_semispace_capacity);
+ maximum_semispace_capacity);
from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
- initial_semispace_capacity, target_semispace_capacity,
- maximum_semispace_capacity);
+ initial_semispace_capacity, maximum_semispace_capacity);
if (!to_space_.Commit()) {
return false;
}
DCHECK(!from_space_.is_committed()); // No need to use memory yet.
start_ = chunk_base_;
- address_mask_ = ~(2 * reserved_semispace_capacity - 1);
- object_mask_ = address_mask_ | kHeapObjectTagMask;
- object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
ResetAllocationInfo();
@@ -1416,7 +1396,7 @@ void NewSpace::Grow() {
if (!from_space_.GrowTo(new_capacity)) {
// If we managed to grow to-space but couldn't grow from-space,
// attempt to shrink to-space.
- if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
+ if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
CHECK(false);
@@ -1427,36 +1407,6 @@ void NewSpace::Grow() {
}
-bool NewSpace::GrowOnePage() {
- if (TotalCapacity() == MaximumCapacity()) return false;
- int new_capacity = static_cast<int>(TotalCapacity()) + Page::kPageSize;
- if (to_space_.GrowTo(new_capacity)) {
- // Only grow from space if we managed to grow to-space and the from space
- // is actually committed.
- if (from_space_.is_committed()) {
- if (!from_space_.GrowTo(new_capacity)) {
- // If we managed to grow to-space but couldn't grow from-space,
- // attempt to shrink to-space.
- if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
- // We are in an inconsistent state because we could not
- // commit/uncommit memory from new space.
- CHECK(false);
- }
- return false;
- }
- } else {
- if (!from_space_.SetTotalCapacity(new_capacity)) {
- // Can't really happen, but better safe than sorry.
- CHECK(false);
- }
- }
- DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
- return true;
- }
- return false;
-}
-
-
void NewSpace::Shrink() {
int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
@@ -1467,7 +1417,7 @@ void NewSpace::Shrink() {
if (!from_space_.ShrinkTo(rounded_new_capacity)) {
// If we managed to shrink to-space but couldn't shrink from
// space, attempt to grow to-space again.
- if (!to_space_.GrowTo(from_space_.TotalCapacity())) {
+ if (!to_space_.GrowTo(from_space_.current_capacity())) {
// We are in an inconsistent state because we could not
// commit/uncommit memory from new space.
CHECK(false);
@@ -1547,8 +1497,7 @@ void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
Address high = to_space_.page_high();
Address new_top = allocation_info_.top() + size_in_bytes;
allocation_info_.set_limit(Min(new_top, high));
- } else if (inline_allocation_observers_paused_ ||
- top_on_previous_step_ == 0) {
+ } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
// Normal limit is the end of the current page.
allocation_info_.set_limit(to_space_.page_high());
} else {
@@ -1564,29 +1513,10 @@ void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
bool NewSpace::AddFreshPage() {
Address top = allocation_info_.top();
- if (NewSpacePage::IsAtStart(top)) {
- // The current page is already empty. Don't try to make another.
-
- // We should only get here if someone asks to allocate more
- // than what can be stored in a single page.
- // TODO(gc): Change the limit on new-space allocation to prevent this
- // from happening (all such allocations should go directly to LOSpace).
- return false;
- }
+ DCHECK(!NewSpacePage::IsAtStart(top));
if (!to_space_.AdvancePage()) {
- // Check if we reached the target capacity yet. If not, try to commit a page
- // and continue.
- if ((to_space_.TotalCapacity() < to_space_.TargetCapacity()) &&
- GrowOnePage()) {
- if (!to_space_.AdvancePage()) {
- // It doesn't make sense that we managed to commit a page, but can't use
- // it.
- CHECK(false);
- }
- } else {
- // Failed to get a new page in to-space.
- return false;
- }
+ // No more pages left to advance.
+ return false;
}
// Clear remainder of current page.
@@ -1648,9 +1578,9 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
void NewSpace::StartNextInlineAllocationStep() {
- if (!inline_allocation_observers_paused_) {
+ if (!allocation_observers_paused_) {
top_on_previous_step_ =
- inline_allocation_observers_.length() ? allocation_info_.top() : 0;
+ allocation_observers_->length() ? allocation_info_.top() : 0;
UpdateInlineAllocationLimit(0);
}
}
@@ -1658,44 +1588,36 @@ void NewSpace::StartNextInlineAllocationStep() {
intptr_t NewSpace::GetNextInlineAllocationStepSize() {
intptr_t next_step = 0;
- for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
- InlineAllocationObserver* o = inline_allocation_observers_[i];
+ for (int i = 0; i < allocation_observers_->length(); ++i) {
+ AllocationObserver* o = (*allocation_observers_)[i];
next_step = next_step ? Min(next_step, o->bytes_to_next_step())
: o->bytes_to_next_step();
}
- DCHECK(inline_allocation_observers_.length() == 0 || next_step != 0);
+ DCHECK(allocation_observers_->length() == 0 || next_step != 0);
return next_step;
}
-
-void NewSpace::AddInlineAllocationObserver(InlineAllocationObserver* observer) {
- inline_allocation_observers_.Add(observer);
+void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
+ Space::AddAllocationObserver(observer);
StartNextInlineAllocationStep();
}
-
-void NewSpace::RemoveInlineAllocationObserver(
- InlineAllocationObserver* observer) {
- bool removed = inline_allocation_observers_.RemoveElement(observer);
- // Only used in assertion. Suppress unused variable warning.
- static_cast<void>(removed);
- DCHECK(removed);
+void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
+ Space::RemoveAllocationObserver(observer);
StartNextInlineAllocationStep();
}
-
-void NewSpace::PauseInlineAllocationObservers() {
+void NewSpace::PauseAllocationObservers() {
// Do a step to account for memory allocated so far.
InlineAllocationStep(top(), top(), nullptr, 0);
- inline_allocation_observers_paused_ = true;
+ Space::PauseAllocationObservers();
top_on_previous_step_ = 0;
UpdateInlineAllocationLimit(0);
}
-
-void NewSpace::ResumeInlineAllocationObservers() {
+void NewSpace::ResumeAllocationObservers() {
DCHECK(top_on_previous_step_ == 0);
- inline_allocation_observers_paused_ = false;
+ Space::ResumeAllocationObservers();
StartNextInlineAllocationStep();
}
@@ -1704,9 +1626,9 @@ void NewSpace::InlineAllocationStep(Address top, Address new_top,
Address soon_object, size_t size) {
if (top_on_previous_step_) {
int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
- for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
- inline_allocation_observers_[i]->InlineAllocationStep(bytes_allocated,
- soon_object, size);
+ for (int i = 0; i < allocation_observers_->length(); ++i) {
+ (*allocation_observers_)[i]->AllocationStep(bytes_allocated, soon_object,
+ size);
}
top_on_previous_step_ = new_top;
}
@@ -1771,68 +1693,56 @@ void NewSpace::Verify() {
// -----------------------------------------------------------------------------
// SemiSpace implementation
-void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity,
+void SemiSpace::SetUp(Address start, int initial_capacity,
int maximum_capacity) {
- // Creates a space in the young generation. The constructor does not
- // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
- // memory of size 'capacity' when set up, and does not grow or shrink
- // otherwise. In the mark-compact collector, the memory region of the from
- // space is used as the marking stack. It requires contiguous memory
- // addresses.
- DCHECK(maximum_capacity >= Page::kPageSize);
- DCHECK(initial_capacity <= target_capacity);
- DCHECK(target_capacity <= maximum_capacity);
- initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
- total_capacity_ = initial_capacity;
- target_capacity_ = RoundDown(target_capacity, Page::kPageSize);
- maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
+ DCHECK_GE(maximum_capacity, Page::kPageSize);
+ minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
+ current_capacity_ = minimum_capacity_;
+ maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
committed_ = false;
start_ = start;
- address_mask_ = ~(maximum_capacity - 1);
- object_mask_ = address_mask_ | kHeapObjectTagMask;
- object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
age_mark_ = start_ + NewSpacePage::kObjectStartOffset;
}
void SemiSpace::TearDown() {
- start_ = NULL;
- total_capacity_ = 0;
+ start_ = nullptr;
+ current_capacity_ = 0;
}
bool SemiSpace::Commit() {
DCHECK(!is_committed());
- int pages = total_capacity_ / Page::kPageSize;
if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start_, total_capacity_, executable())) {
+ start_, current_capacity_, executable())) {
return false;
}
- AccountCommitted(total_capacity_);
+ AccountCommitted(current_capacity_);
NewSpacePage* current = anchor();
- for (int i = 0; i < pages; i++) {
+ const int num_pages = current_capacity_ / Page::kPageSize;
+ for (int i = 0; i < num_pages; i++) {
NewSpacePage* new_page =
NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
new_page->InsertAfter(current);
current = new_page;
}
+ Reset();
- SetCapacity(total_capacity_);
+ set_current_capacity(current_capacity_);
committed_ = true;
- Reset();
return true;
}
bool SemiSpace::Uncommit() {
DCHECK(is_committed());
- Address start = start_ + maximum_total_capacity_ - total_capacity_;
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
- total_capacity_)) {
+ Address start = start_ + maximum_capacity_ - current_capacity_;
+ if (!heap()->isolate()->memory_allocator()->UncommitBlock(
+ start, current_capacity_)) {
return false;
}
- AccountUncommitted(total_capacity_);
+ AccountUncommitted(current_capacity_);
anchor()->set_next_page(anchor());
anchor()->set_prev_page(anchor());
@@ -1857,23 +1767,23 @@ bool SemiSpace::GrowTo(int new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
}
- DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
- DCHECK(new_capacity <= maximum_total_capacity_);
- DCHECK(new_capacity > total_capacity_);
- int pages_before = total_capacity_ / Page::kPageSize;
+ DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+ DCHECK_LE(new_capacity, maximum_capacity_);
+ DCHECK_GT(new_capacity, current_capacity_);
+ int pages_before = current_capacity_ / Page::kPageSize;
int pages_after = new_capacity / Page::kPageSize;
- size_t delta = new_capacity - total_capacity_;
+ size_t delta = new_capacity - current_capacity_;
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
if (!heap()->isolate()->memory_allocator()->CommitBlock(
- start_ + total_capacity_, delta, executable())) {
+ start_ + current_capacity_, delta, executable())) {
return false;
}
AccountCommitted(static_cast<intptr_t>(delta));
- SetCapacity(new_capacity);
+ set_current_capacity(new_capacity);
NewSpacePage* last_page = anchor()->prev_page();
- DCHECK(last_page != anchor());
+ DCHECK_NE(last_page, anchor());
for (int i = pages_before; i < pages_after; i++) {
Address page_address = start_ + i * Page::kPageSize;
NewSpacePage* new_page =
@@ -1890,11 +1800,11 @@ bool SemiSpace::GrowTo(int new_capacity) {
bool SemiSpace::ShrinkTo(int new_capacity) {
- DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
- DCHECK(new_capacity >= initial_total_capacity_);
- DCHECK(new_capacity < total_capacity_);
+ DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
+ DCHECK_GE(new_capacity, minimum_capacity_);
+ DCHECK_LT(new_capacity, current_capacity_);
if (is_committed()) {
- size_t delta = total_capacity_ - new_capacity;
+ size_t delta = current_capacity_ - new_capacity;
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
@@ -1911,37 +1821,23 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
}
- SetCapacity(new_capacity);
+ set_current_capacity(new_capacity);
return true;
}
-
-bool SemiSpace::SetTotalCapacity(int new_capacity) {
- CHECK(!is_committed());
- if (new_capacity >= initial_total_capacity_ &&
- new_capacity <= maximum_total_capacity_) {
- total_capacity_ = new_capacity;
- return true;
- }
- return false;
-}
-
-
-void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
+void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
anchor_.set_owner(this);
- // Fixup back-pointers to anchor. Address of anchor changes
- // when we swap.
+ // Fixup back-pointers to anchor. Address of anchor changes when we swap.
anchor_.prev_page()->set_next_page(&anchor_);
anchor_.next_page()->set_prev_page(&anchor_);
- bool becomes_to_space = (id_ == kFromSpace);
- id_ = becomes_to_space ? kToSpace : kFromSpace;
- NewSpacePage* page = anchor_.next_page();
- while (page != &anchor_) {
+ NewSpacePageIterator it(this);
+ while (it.has_next()) {
+ NewSpacePage* page = it.next();
page->set_owner(this);
page->SetFlags(flags, mask);
- if (becomes_to_space) {
+ if (id_ == kToSpace) {
page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
page->SetFlag(MemoryChunk::IN_TO_SPACE);
page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
@@ -1950,48 +1846,42 @@ void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
page->SetFlag(MemoryChunk::IN_FROM_SPACE);
page->ClearFlag(MemoryChunk::IN_TO_SPACE);
}
- DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
- page = page->next_page();
}
}
void SemiSpace::Reset() {
- DCHECK(anchor_.next_page() != &anchor_);
+ DCHECK_NE(anchor_.next_page(), &anchor_);
current_page_ = anchor_.next_page();
}
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
// We won't be swapping semispaces without data in them.
- DCHECK(from->anchor_.next_page() != &from->anchor_);
- DCHECK(to->anchor_.next_page() != &to->anchor_);
+ DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
+ DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
- // Swap bits.
- SemiSpace tmp = *from;
- *from = *to;
- *to = tmp;
+ intptr_t saved_to_space_flags = to->current_page()->GetFlags();
- // Fixup back-pointers to the page list anchor now that its address
- // has changed.
- // Swap to/from-space bits on pages.
- // Copy GC flags from old active space (from-space) to new (to-space).
- intptr_t flags = from->current_page()->GetFlags();
- to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
+ // We swap all properties but id_.
+ std::swap(from->current_capacity_, to->current_capacity_);
+ std::swap(from->maximum_capacity_, to->maximum_capacity_);
+ std::swap(from->minimum_capacity_, to->minimum_capacity_);
+ std::swap(from->start_, to->start_);
+ std::swap(from->age_mark_, to->age_mark_);
+ std::swap(from->committed_, to->committed_);
+ std::swap(from->anchor_, to->anchor_);
+ std::swap(from->current_page_, to->current_page_);
- from->FlipPages(0, 0);
-}
-
-
-void SemiSpace::SetCapacity(int new_capacity) {
- total_capacity_ = new_capacity;
+ to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask);
+ from->FixPagesFlags(0, 0);
}
void SemiSpace::set_age_mark(Address mark) {
- DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
+ DCHECK_EQ(NewSpacePage::FromLimit(mark)->semi_space(), this);
age_mark_ = mark;
// Mark all pages up to the one containing mark.
NewSpacePageIterator it(space_start(), mark);
@@ -2011,7 +1901,7 @@ void SemiSpace::Verify() {
NewSpacePage* page = anchor_.next_page();
CHECK(anchor_.semi_space() == this);
while (page != &anchor_) {
- CHECK(page->semi_space() == this);
+ CHECK_EQ(page->semi_space(), this);
CHECK(page->InNewSpace());
CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
: MemoryChunk::IN_TO_SPACE));
@@ -2030,8 +1920,7 @@ void SemiSpace::Verify() {
// TODO(gc): Check that the live_bytes_count_ field matches the
// black marking on the page (if we make it match in new-space).
}
- CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
- CHECK(page->prev_page()->next_page() == page);
+ CHECK_EQ(page->prev_page()->next_page(), page);
page = page->next_page();
}
}
@@ -2048,7 +1937,7 @@ void SemiSpace::AssertValidRange(Address start, Address end) {
// or end address is on a later page in the linked list of
// semi-space pages.
if (page == end_page) {
- CHECK(start <= end);
+ CHECK_LE(start, end);
} else {
while (page != end_page) {
page = page->next_page();
@@ -2313,8 +2202,7 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
}
prev_node = cur_node;
}
- DCHECK_EQ(p->available_in_free_list(type_), sum);
- p->add_available_in_free_list(type_, -sum);
+ p->add_available_in_free_list(-sum);
available_ -= sum;
return sum;
}
@@ -2337,7 +2225,7 @@ FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
Page* page = Page::FromAddress(node->address());
while ((node != nullptr) && !page->CanAllocate()) {
available_ -= node->size();
- page->add_available_in_free_list(type_, -(node->Size()));
+ page->add_available_in_free_list(-(node->Size()));
node = node->next();
}
@@ -2392,7 +2280,7 @@ FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
}
// For evacuation candidates we continue.
if (!page_for_node->CanAllocate()) {
- page_for_node->add_available_in_free_list(type_, -size);
+ page_for_node->add_available_in_free_list(-size);
continue;
}
// Otherwise we have a large enough node and can return.
@@ -2429,14 +2317,10 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
}
}
-
-FreeList::FreeList(PagedSpace* owner)
- : owner_(owner),
- wasted_bytes_(0),
- small_list_(this, kSmall),
- medium_list_(this, kMedium),
- large_list_(this, kLarge),
- huge_list_(this, kHuge) {
+FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ category_[i].Initialize(this, static_cast<FreeListCategoryType>(i));
+ }
Reset();
}
@@ -2456,10 +2340,10 @@ intptr_t FreeList::Concatenate(FreeList* other) {
wasted_bytes_ += wasted_bytes;
other->wasted_bytes_ = 0;
- usable_bytes += small_list_.Concatenate(other->GetFreeListCategory(kSmall));
- usable_bytes += medium_list_.Concatenate(other->GetFreeListCategory(kMedium));
- usable_bytes += large_list_.Concatenate(other->GetFreeListCategory(kLarge));
- usable_bytes += huge_list_.Concatenate(other->GetFreeListCategory(kHuge));
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ usable_bytes += category_[i].Concatenate(
+ other->GetFreeListCategory(static_cast<FreeListCategoryType>(i)));
+ }
if (!other->owner()->is_local()) other->mutex()->Unlock();
if (!owner()->is_local()) mutex_.Unlock();
@@ -2468,10 +2352,9 @@ intptr_t FreeList::Concatenate(FreeList* other) {
void FreeList::Reset() {
- small_list_.Reset();
- medium_list_.Reset();
- large_list_.Reset();
- huge_list_.Reset();
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ category_[i].Reset();
+ }
ResetStats();
}
@@ -2485,7 +2368,7 @@ int FreeList::Free(Address start, int size_in_bytes) {
// Early return to drop too-small blocks on the floor.
if (size_in_bytes <= kSmallListMin) {
- page->add_non_available_small_blocks(size_in_bytes);
+ page->add_wasted_memory(size_in_bytes);
wasted_bytes_ += size_in_bytes;
return size_in_bytes;
}
@@ -2493,19 +2376,9 @@ int FreeList::Free(Address start, int size_in_bytes) {
FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
// Insert other blocks at the head of a free list of the appropriate
// magnitude.
- if (size_in_bytes <= kSmallListMax) {
- small_list_.Free(free_space, size_in_bytes);
- page->add_available_in_small_free_list(size_in_bytes);
- } else if (size_in_bytes <= kMediumListMax) {
- medium_list_.Free(free_space, size_in_bytes);
- page->add_available_in_medium_free_list(size_in_bytes);
- } else if (size_in_bytes <= kLargeListMax) {
- large_list_.Free(free_space, size_in_bytes);
- page->add_available_in_large_free_list(size_in_bytes);
- } else {
- huge_list_.Free(free_space, size_in_bytes);
- page->add_available_in_huge_free_list(size_in_bytes);
- }
+ FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
+ category_[type].Free(free_space, size_in_bytes);
+ page->add_available_in_free_list(size_in_bytes);
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return 0;
@@ -2516,7 +2389,7 @@ FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
if (node != nullptr) {
Page::FromAddress(node->address())
- ->add_available_in_free_list(category, -(*node_size));
+ ->add_available_in_free_list(-(*node_size));
DCHECK(IsVeryLong() || Available() == SumFreeLists());
}
return node;
@@ -2527,50 +2400,37 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
FreeSpace* node = nullptr;
Page* page = nullptr;
- if (size_in_bytes <= kSmallAllocationMax) {
- node = FindNodeIn(kSmall, node_size);
- if (node != nullptr) return node;
- }
-
- if (size_in_bytes <= kMediumAllocationMax) {
- node = FindNodeIn(kMedium, node_size);
- if (node != nullptr) return node;
- }
-
- if (size_in_bytes <= kLargeAllocationMax) {
- node = FindNodeIn(kLarge, node_size);
+ // First try the allocation fast path: try to allocate the minimum element
+ // size of a free list category. This operation is constant time.
+ FreeListCategoryType type =
+ SelectFastAllocationFreeListCategoryType(size_in_bytes);
+ for (int i = type; i < kHuge; i++) {
+ node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
if (node != nullptr) return node;
}
- node = huge_list_.SearchForNodeInList(size_in_bytes, node_size);
+ // Next search the huge list for free list nodes. This takes linear time in
+ // the number of huge elements.
+ node = category_[kHuge].SearchForNodeInList(size_in_bytes, node_size);
if (node != nullptr) {
page = Page::FromAddress(node->address());
- page->add_available_in_large_free_list(-(*node_size));
+ page->add_available_in_free_list(-(*node_size));
DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
- if (size_in_bytes <= kSmallListMax) {
- node = small_list_.PickNodeFromList(size_in_bytes, node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_small_free_list(-(*node_size));
- }
- } else if (size_in_bytes <= kMediumListMax) {
- node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_medium_free_list(-(*node_size));
- }
- } else if (size_in_bytes <= kLargeListMax) {
- node = large_list_.PickNodeFromList(size_in_bytes, node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_large_free_list(-(*node_size));
- }
+ // We need a huge block of memory, but we didn't find anything in the huge
+ // list.
+ if (type == kHuge) return nullptr;
+
+ // Now search the best fitting free list for a node that has at least the
+ // requested size. This takes linear time in the number of elements.
+ type = SelectFreeListCategoryType(size_in_bytes);
+ node = category_[type].PickNodeFromList(size_in_bytes, node_size);
+ if (node != nullptr) {
+ DCHECK(size_in_bytes <= *node_size);
+ page = Page::FromAddress(node->address());
+ page->add_available_in_free_list(-(*node_size));
}
DCHECK(IsVeryLong() || Available() == SumFreeLists());
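SelectFreeListCategoryType is declared in spaces.h and is not part of this hunk; judging from the kSmallListMax/kMediumListMax/kLargeListMax branches removed above, it plausibly reduces to the ladder sketched here. The threshold names come from the removed code; treating this as the actual implementation is an assumption:

// Sketch mirroring the removed if/else ladder in FreeList::Free/FindNodeFor;
// not the actual SelectFreeListCategoryType from spaces.h.
FreeListCategoryType SelectFreeListCategoryType(int size_in_bytes) {
  if (size_in_bytes <= kSmallListMax) return kSmall;
  if (size_in_bytes <= kMediumListMax) return kMedium;
  if (size_in_bytes <= kLargeListMax) return kLarge;
  return kHuge;
}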
@@ -2633,6 +2493,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
int new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == nullptr) return nullptr;
+ owner_->AllocationStep(new_node->address(), size_in_bytes);
int bytes_left = new_node_size - size_in_bytes;
DCHECK(bytes_left >= 0);
@@ -2683,29 +2544,30 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
intptr_t FreeList::EvictFreeListItems(Page* p) {
- intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
+ intptr_t sum = category_[kHuge].EvictFreeListItemsInList(p);
if (sum < p->area_size()) {
- sum += small_list_.EvictFreeListItemsInList(p) +
- medium_list_.EvictFreeListItemsInList(p) +
- large_list_.EvictFreeListItemsInList(p);
+ for (int i = kFirstCategory; i <= kLarge; i++) {
+ sum += category_[i].EvictFreeListItemsInList(p);
+ }
}
return sum;
}
bool FreeList::ContainsPageFreeListItems(Page* p) {
- return huge_list_.EvictFreeListItemsInList(p) ||
- small_list_.EvictFreeListItemsInList(p) ||
- medium_list_.EvictFreeListItemsInList(p) ||
- large_list_.EvictFreeListItemsInList(p);
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ if (category_[i].EvictFreeListItemsInList(p)) {
+ return true;
+ }
+ }
+ return false;
}
void FreeList::RepairLists(Heap* heap) {
- small_list_.RepairFreeList(heap);
- medium_list_.RepairFreeList(heap);
- large_list_.RepairFreeList(heap);
- huge_list_.RepairFreeList(heap);
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ category_[i].RepairFreeList(heap);
+ }
}
@@ -2740,8 +2602,12 @@ bool FreeListCategory::IsVeryLong() {
bool FreeList::IsVeryLong() {
- return small_list_.IsVeryLong() || medium_list_.IsVeryLong() ||
- large_list_.IsVeryLong() || huge_list_.IsVeryLong();
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ if (category_[i].IsVeryLong()) {
+ return true;
+ }
+ }
+ return false;
}
@@ -2749,10 +2615,10 @@ bool FreeList::IsVeryLong() {
// on the free list, so it should not be called if FreeListLength returns
// kVeryLongFreeList.
intptr_t FreeList::SumFreeLists() {
- intptr_t sum = small_list_.SumFreeList();
- sum += medium_list_.SumFreeList();
- sum += large_list_.SumFreeList();
- sum += huge_list_.SumFreeList();
+ intptr_t sum = 0;
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ sum += category_[i].SumFreeList();
+ }
return sum;
}
#endif
@@ -2791,7 +2657,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
PageIterator iterator(this);
while (iterator.has_next()) {
Page* page = iterator.next();
- int size = static_cast<int>(page->non_available_small_blocks());
+ int size = static_cast<int>(page->wasted_memory());
if (size == 0) continue;
Address address = page->OffsetToAddress(Page::kPageSize - size);
heap()->CreateFillerObjectAt(address, size);
@@ -2837,6 +2703,8 @@ HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+ const int kMaxPagesToSweep = 1;
+
// Allocation in this space has failed.
MarkCompactCollector* collector = heap()->mark_compact_collector();
@@ -2851,10 +2719,13 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
if (object != NULL) return object;
// If sweeping is still in progress try to sweep pages on the main thread.
- collector->SweepInParallel(heap()->paged_space(identity()), size_in_bytes);
+ int max_freed = collector->SweepInParallel(heap()->paged_space(identity()),
+ size_in_bytes, kMaxPagesToSweep);
RefillFreeList();
- object = free_list_.Allocate(size_in_bytes);
- if (object != nullptr) return object;
+ if (max_freed >= size_in_bytes) {
+ object = free_list_.Allocate(size_in_bytes);
+ if (object != nullptr) return object;
+ }
}
// Free list allocation failed and there is no next page. Fail if we have
@@ -3139,6 +3010,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
}
heap()->incremental_marking()->OldSpaceStep(object_size);
+ AllocationStep(object->address(), object_size);
return object;
}
@@ -3252,11 +3124,6 @@ bool LargeObjectSpace::Contains(HeapObject* object) {
}
-bool LargeObjectSpace::Contains(Address address) {
- return FindPage(address) != NULL;
-}
-
-
#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index a8102cabc7..c0d399f94c 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -19,8 +19,20 @@
namespace v8 {
namespace internal {
+class AllocationInfo;
+class AllocationObserver;
+class CompactionSpace;
class CompactionSpaceCollection;
+class FreeList;
class Isolate;
+class MemoryAllocator;
+class MemoryChunk;
+class PagedSpace;
+class SemiSpace;
+class SkipList;
+class SlotsBuffer;
+class SlotSet;
+class Space;
// -----------------------------------------------------------------------------
// Heap structures:
@@ -96,13 +108,6 @@ class Isolate;
#define DCHECK_MAP_PAGE_INDEX(index) \
DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
-class AllocationInfo;
-class CompactionSpace;
-class FreeList;
-class MemoryAllocator;
-class MemoryChunk;
-class PagedSpace;
-class Space;
class MarkBit {
public:
@@ -284,9 +289,6 @@ class Bitmap {
};
-class SkipList;
-class SlotsBuffer;
-
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
@@ -295,10 +297,8 @@ class MemoryChunk {
public:
enum MemoryChunkFlags {
IS_EXECUTABLE,
- ABOUT_TO_BE_FREED,
POINTERS_TO_HERE_ARE_INTERESTING,
POINTERS_FROM_HERE_ARE_INTERESTING,
- SCAN_ON_SCAVENGE,
IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
NEW_SPACE_BELOW_AGE_MARK,
@@ -307,10 +307,6 @@ class MemoryChunk {
NEVER_EVACUATE, // May contain immortal immutables.
POPULAR_PAGE, // Slots buffer of this page overflowed on the previous GC.
- // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
- // otherwise marking bits are still intact.
- WAS_SWEPT,
-
// Large objects can have a progress bar in their page header. These object
// are scanned in increments and will be kept black while being scanned.
// Even if the mutator writes to them they will be kept black and a white
@@ -323,7 +319,7 @@ class MemoryChunk {
// candidates selection cycle.
FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
- // This flag is inteded to be used for testing.
+ // This flag is intended to be used for testing.
NEVER_ALLOCATE_ON_PAGE,
// The memory chunk is already logically freed, however the actual freeing
@@ -352,16 +348,14 @@ class MemoryChunk {
};
// |kSweepingDone|: The page state when sweeping is complete or sweeping must
- // not be performed on that page.
- // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will
- // not touch the page memory anymore.
- // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
+ // not be performed on that page. Sweeper threads that are done with their
+ // work will set this value and not touch the page anymore.
// |kSweepingPending|: This page is ready for parallel sweeping.
- enum ParallelSweepingState {
+ // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
+ enum ConcurrentSweepingState {
kSweepingDone,
- kSweepingFinalize,
+ kSweepingPending,
kSweepingInProgress,
- kSweepingPending
};
// Every n write barrier invocations we go to runtime even though
@@ -396,31 +390,32 @@ class MemoryChunk {
+ 2 * kPointerSize // base::VirtualMemory reservation_
+ kPointerSize // Address owner_
+ kPointerSize // Heap* heap_
- + kIntSize; // int store_buffer_counter_
+ + kIntSize; // int progress_bar_
static const size_t kSlotsBufferOffset =
kLiveBytesOffset + kIntSize; // int live_byte_count_
static const size_t kWriteBarrierCounterOffset =
kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_;
+ + kPointerSize // SlotSet* old_to_new_slots_;
+ + kPointerSize // SlotSet* old_to_old_slots_;
+ kPointerSize; // SkipList* skip_list_;
static const size_t kMinHeaderSize =
kWriteBarrierCounterOffset +
kIntptrSize // intptr_t write_barrier_counter_
- + kIntSize // int progress_bar_
+ kPointerSize // AtomicValue high_water_mark_
+ kPointerSize // base::Mutex* mutex_
+ kPointerSize // base::AtomicWord parallel_sweeping_
+ kPointerSize // AtomicValue parallel_compaction_
- + 5 * kPointerSize // AtomicNumber free-list statistics
+ + 2 * kPointerSize // AtomicNumber free-list statistics
+ kPointerSize // AtomicValue next_chunk_
+ kPointerSize; // AtomicValue prev_chunk_
   // We add some more space to the computed header size to account for missing
   // alignment requirements in our computation.
// Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
- static const size_t kHeaderSize = kMinHeaderSize + kIntSize;
+ static const size_t kHeaderSize = kMinHeaderSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -435,30 +430,16 @@ class MemoryChunk {
static const int kFlagsOffset = kPointerSize;
- static void IncrementLiveBytesFromMutator(HeapObject* object, int by);
+ static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
+ static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
}
- static const MemoryChunk* FromAddress(const byte* a) {
- return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
- ~kAlignmentMask);
- }
-
- static void IncrementLiveBytesFromGC(HeapObject* object, int by) {
- MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
- }
-
- // Only works for addresses in pointer spaces, not data or code spaces.
static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
- static inline uint32_t FastAddressToMarkbitIndex(Address addr) {
- const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
- return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
- }
-
static inline void UpdateHighWaterMark(Address mark) {
if (mark == nullptr) return;
// Need to subtract one from the mark because when a chunk is full the
@@ -477,144 +458,38 @@ class MemoryChunk {
bool is_valid() { return address() != NULL; }
- MemoryChunk* next_chunk() { return next_chunk_.Value(); }
-
- MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
-
- void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
-
- void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
-
- Space* owner() const {
- if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
- kPageHeaderTag) {
- return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
- kPageHeaderTag);
- } else {
- return NULL;
- }
- }
-
- void set_owner(Space* space) {
- DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
- owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
- DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
- kPageHeaderTag);
- }
-
- base::VirtualMemory* reserved_memory() { return &reservation_; }
-
- void set_reserved_memory(base::VirtualMemory* reservation) {
- DCHECK_NOT_NULL(reservation);
- reservation_.TakeControl(reservation);
- }
-
- bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
- void initialize_scan_on_scavenge(bool scan) {
- if (scan) {
- SetFlag(SCAN_ON_SCAVENGE);
- } else {
- ClearFlag(SCAN_ON_SCAVENGE);
- }
- }
- inline void set_scan_on_scavenge(bool scan);
-
- int store_buffer_counter() { return store_buffer_counter_; }
- void set_store_buffer_counter(int counter) {
- store_buffer_counter_ = counter;
- }
+ base::Mutex* mutex() { return mutex_; }
bool Contains(Address addr) {
return addr >= area_start() && addr < area_end();
}
- // Checks whether addr can be a limit of addresses in this page.
- // It's a limit if it's in the page, or if it's just after the
- // last byte of the page.
+ // Checks whether |addr| can be a limit of addresses in this page. It's a
+ // limit if it's in the page, or if it's just after the last byte of the page.
bool ContainsLimit(Address addr) {
return addr >= area_start() && addr <= area_end();
}
- void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
-
- void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
-
- void SetFlagTo(int flag, bool value) {
- if (value) {
- SetFlag(flag);
- } else {
- ClearFlag(flag);
- }
- }
-
- bool IsFlagSet(int flag) {
- return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
- }
-
- // Set or clear multiple flags at a time. The flags in the mask
- // are set to the value in "flags", the rest retain the current value
- // in flags_.
- void SetFlags(intptr_t flags, intptr_t mask) {
- flags_ = (flags_ & ~mask) | (flags & mask);
- }
-
- // Return all current flags.
- intptr_t GetFlags() { return flags_; }
-
- AtomicValue<ParallelSweepingState>& parallel_sweeping_state() {
- return parallel_sweeping_;
+ AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
+ return concurrent_sweeping_;
}
AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
return parallel_compaction_;
}
- bool TryLock() { return mutex_->TryLock(); }
-
- base::Mutex* mutex() { return mutex_; }
-
- // WaitUntilSweepingCompleted only works when concurrent sweeping is in
- // progress. In particular, when we know that right before this call a
- // sweeper thread was sweeping this page.
- void WaitUntilSweepingCompleted() {
- mutex_->Lock();
- mutex_->Unlock();
- DCHECK(SweepingCompleted());
- }
-
- bool SweepingCompleted() {
- return parallel_sweeping_state().Value() <= kSweepingFinalize;
- }
-
- // Manage live byte count (count of bytes known to be live,
- // because they are marked black).
- void ResetLiveBytes() {
- if (FLAG_gc_verbose) {
- PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
- live_byte_count_);
- }
- live_byte_count_ = 0;
- }
-
- void IncrementLiveBytes(int by) {
- if (FLAG_gc_verbose) {
- printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
- live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
- live_byte_count_ + by);
- }
- live_byte_count_ += by;
- DCHECK_GE(live_byte_count_, 0);
- DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
- }
+ // Manage live byte count, i.e., count of bytes in black objects.
+ inline void ResetLiveBytes();
+ inline void IncrementLiveBytes(int by);
int LiveBytes() {
- DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
+ DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
return live_byte_count_;
}
void SetLiveBytes(int live_bytes) {
DCHECK_GE(live_bytes, 0);
- DCHECK_LE(static_cast<unsigned>(live_bytes), size_);
+ DCHECK_LE(static_cast<size_t>(live_bytes), size_);
live_byte_count_ = live_bytes;
}
@@ -626,6 +501,35 @@ class MemoryChunk {
write_barrier_counter_ = counter;
}
+ size_t size() const { return size_; }
+
+ inline Heap* heap() const { return heap_; }
+
+ inline SkipList* skip_list() { return skip_list_; }
+
+ inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
+
+ inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
+
+ inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
+
+ inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
+ inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
+
+ void AllocateOldToNewSlots();
+ void ReleaseOldToNewSlots();
+ void AllocateOldToOldSlots();
+ void ReleaseOldToOldSlots();
+
+ Address area_start() { return area_start_; }
+ Address area_end() { return area_end_; }
+ int area_size() { return static_cast<int>(area_end() - area_start()); }
+
+ bool CommitArea(size_t requested);
+
+ // Approximate amount of physical memory committed for this chunk.
+ size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
+
int progress_bar() {
DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
return progress_bar_;
@@ -643,35 +547,10 @@ class MemoryChunk {
}
}
- size_t size() const { return size_; }
-
- void set_size(size_t size) { size_ = size; }
-
- void SetArea(Address area_start, Address area_end) {
- area_start_ = area_start;
- area_end_ = area_end;
- }
-
- Executability executable() {
- return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
- }
-
- bool InNewSpace() {
- return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
- }
-
- bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
-
- bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
-
- // Markbits support
-
inline Bitmap* markbits() {
return Bitmap::FromAddress(address() + kHeaderSize);
}
- void PrintMarkbits() { markbits()->Print(); }
-
inline uint32_t AddressToMarkbitIndex(Address addr) {
return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
}
@@ -680,10 +559,24 @@ class MemoryChunk {
return this->address() + (index << kPointerSizeLog2);
}
- void InsertAfter(MemoryChunk* other);
- void Unlink();
+ void PrintMarkbits() { markbits()->Print(); }
- inline Heap* heap() const { return heap_; }
+ void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
+
+ void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
+
+ bool IsFlagSet(int flag) {
+ return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
+ }
+
+ // Set or clear multiple flags at a time. The flags in the mask are set to
+ // the value in "flags"; the rest retain the current value in |flags_|.
+ void SetFlags(intptr_t flags, intptr_t mask) {
+ flags_ = (flags_ & ~mask) | (flags & mask);
+ }
+
+ // Return all current flags.
+ intptr_t GetFlags() { return flags_; }
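
As an aside for readers of this hunk: the masked update in SetFlags() is plain bit arithmetic and can be exercised in isolation. The following standalone sketch mirrors the expression above on a bare uintptr_t (ApplyFlags is a hypothetical name, not part of the patch):

#include <cassert>
#include <cstdint>

// Mirrors the SetFlags() expression above: bits selected by |mask| take
// their value from |flags|; all other bits keep their current value.
uintptr_t ApplyFlags(uintptr_t current, uintptr_t flags, uintptr_t mask) {
  return (current & ~mask) | (flags & mask);
}

int main() {
  uintptr_t flags = 0xA;                // bits 1 and 3 set
  flags = ApplyFlags(flags, 0x1, 0x3);  // set bit 0, clear bit 1
  assert(flags == 0x9);                 // bits 2 and 3 are untouched
  return 0;
}
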
bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
@@ -698,44 +591,73 @@ class MemoryChunk {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
}
+ void MarkEvacuationCandidate() {
+ DCHECK(!IsFlagSet(NEVER_EVACUATE));
+ DCHECK_NULL(slots_buffer_);
+ SetFlag(EVACUATION_CANDIDATE);
+ }
+
+ void ClearEvacuationCandidate() {
+ DCHECK(slots_buffer_ == NULL);
+ ClearFlag(EVACUATION_CANDIDATE);
+ }
+
bool ShouldSkipEvacuationSlotRecording() {
return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
}
- inline SkipList* skip_list() { return skip_list_; }
+ Executability executable() {
+ return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+ }
- inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
+ bool InNewSpace() {
+ return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
+ }
- inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
+ bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
- inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
+ bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
- void MarkEvacuationCandidate() {
- DCHECK(!IsFlagSet(NEVER_EVACUATE));
- DCHECK(slots_buffer_ == NULL);
- SetFlag(EVACUATION_CANDIDATE);
- }
+ MemoryChunk* next_chunk() { return next_chunk_.Value(); }
- void ClearEvacuationCandidate() {
- DCHECK(slots_buffer_ == NULL);
- ClearFlag(EVACUATION_CANDIDATE);
+ MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
+
+ void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
+
+ void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
+
+ Space* owner() const {
+ if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
+ kPageHeaderTag) {
+ return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
+ kPageHeaderTag);
+ } else {
+ return nullptr;
+ }
}
- Address area_start() { return area_start_; }
- Address area_end() { return area_end_; }
- int area_size() { return static_cast<int>(area_end() - area_start()); }
- bool CommitArea(size_t requested);
+ void set_owner(Space* space) {
+ DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
+ owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
+ DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
+ kPageHeaderTag);
+ }
- // Approximate amount of physical memory committed for this chunk.
- size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
+ bool HasPageHeader() { return owner() != nullptr; }
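
The tagged owner_ field above round-trips through set_owner()/owner() as follows. This standalone sketch uses stand-in values for kPageHeaderTag and kPageHeaderTagMask (their real definitions live elsewhere in spaces.h), so only the tag/untag mechanics are illustrative:

#include <cassert>
#include <cstdint>

// Stand-ins for kPageHeaderTag/kPageHeaderTagMask; the real values are
// defined elsewhere in spaces.h.
const intptr_t kPageHeaderTag = 3;
const intptr_t kPageHeaderTagMask = 3;

// As in set_owner(): alignment keeps the low bits free for the tag.
intptr_t TagOwner(intptr_t space) {
  assert((space & kPageHeaderTagMask) == 0);
  return space + kPageHeaderTag;
}

// As in owner(): recover the pointer, or 0 when the tag is absent.
intptr_t UntagOwner(intptr_t owner) {
  if ((owner & kPageHeaderTagMask) == kPageHeaderTag)
    return owner - kPageHeaderTag;
  return 0;  // the nullptr branch above
}

int main() {
  intptr_t space = 0x1000;  // any pointer value with the low bits clear
  assert(UntagOwner(TagOwner(space)) == space);
  assert(UntagOwner(space) == 0);  // untagged values are rejected
  return 0;
}
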
- // Should be called when memory chunk is about to be freed.
- void ReleaseAllocatedMemory();
+ void InsertAfter(MemoryChunk* other);
+ void Unlink();
protected:
static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
Address area_start, Address area_end,
- Executability executable, Space* owner);
+ Executability executable, Space* owner,
+ base::VirtualMemory* reservation);
+
+ // Should be called when memory chunk is about to be freed.
+ void ReleaseAllocatedMemory();
+
+ base::VirtualMemory* reserved_memory() { return &reservation_; }
size_t size_;
intptr_t flags_;
@@ -746,36 +668,45 @@ class MemoryChunk {
// If the chunk needs to remember its memory reservation, it is stored here.
base::VirtualMemory reservation_;
+
// The identity of the owning space. This is tagged as a failure pointer, but
// no failure can be in an object, so this can be distinguished from any entry
// in a fixed array.
Address owner_;
+
Heap* heap_;
- // Used by the store buffer to keep track of which pages to mark scan-on-
- // scavenge.
- int store_buffer_counter_;
+
+ // Used by the incremental marker to keep track of the scanning progress in
+ // large objects that have a progress bar and are scanned in increments.
+ int progress_bar_;
+
// Count of bytes marked black on page.
int live_byte_count_;
+
SlotsBuffer* slots_buffer_;
+
+ // A single slot set for small pages (of size kPageSize) or an array of slot
+ // sets for large pages. In the latter case, the number of entries in the
+ // array is ceil(size() / kPageSize).
+ SlotSet* old_to_new_slots_;
+ SlotSet* old_to_old_slots_;
+
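
The ceil(size() / kPageSize) bucket count mentioned in the comment above is the usual rounded-up integer division. A minimal sketch, with a stand-in page-size constant:

#include <cassert>
#include <cstddef>

const size_t kPageSize = 1u << 20;  // stand-in for Page::kPageSize (1MB)

// One SlotSet bucket per page: ceil(size / kPageSize) via rounded-up division.
size_t SlotSetBuckets(size_t size) {
  return (size + kPageSize - 1) / kPageSize;
}

int main() {
  assert(SlotSetBuckets(kPageSize) == 1);          // small page: one slot set
  assert(SlotSetBuckets(3 * kPageSize + 1) == 4);  // large page: one per page
  return 0;
}
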
SkipList* skip_list_;
+
intptr_t write_barrier_counter_;
- // Used by the incremental marker to keep track of the scanning progress in
- // large objects that have a progress bar and are scanned in increments.
- int progress_bar_;
+
  // Assuming the initial allocation on a page is sequential, this counts the
  // highest number of bytes ever allocated on the page.
AtomicValue<intptr_t> high_water_mark_;
base::Mutex* mutex_;
- AtomicValue<ParallelSweepingState> parallel_sweeping_;
+
+ AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
AtomicValue<ParallelCompactingState> parallel_compaction_;
// PagedSpace free-list statistics.
- AtomicNumber<intptr_t> available_in_small_free_list_;
- AtomicNumber<intptr_t> available_in_medium_free_list_;
- AtomicNumber<intptr_t> available_in_large_free_list_;
- AtomicNumber<intptr_t> available_in_huge_free_list_;
- AtomicNumber<intptr_t> non_available_small_blocks_;
+ AtomicNumber<intptr_t> available_in_free_list_;
+ AtomicNumber<intptr_t> wasted_memory_;
// next_chunk_ holds a pointer of type MemoryChunk
AtomicValue<MemoryChunk*> next_chunk_;
@@ -789,9 +720,16 @@ class MemoryChunk {
friend class MemoryChunkValidator;
};
+enum FreeListCategoryType {
+ kSmall,
+ kMedium,
+ kLarge,
+ kHuge,
-enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };
-
+ kFirstCategory = kSmall,
+ kLastCategory = kHuge,
+ kNumberOfCategories = kLastCategory + 1
+};
// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
@@ -809,6 +747,9 @@ class Page : public MemoryChunk {
return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
}
+ // Only works for addresses in pointer spaces, not code space.
+ inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
+
  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
@@ -873,17 +814,24 @@ class Page : public MemoryChunk {
void InitializeAsAnchor(PagedSpace* owner);
- bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
- void SetWasSwept() { SetFlag(WAS_SWEPT); }
- void ClearWasSwept() { ClearFlag(WAS_SWEPT); }
+ // WaitUntilSweepingCompleted only works when concurrent sweeping is in
+ // progress, i.e., when we know that right before this call a sweeper thread
+ // was sweeping this page.
+ void WaitUntilSweepingCompleted() {
+ mutex_->Lock();
+ mutex_->Unlock();
+ DCHECK(SweepingDone());
+ }
+
+ bool SweepingDone() {
+ return concurrent_sweeping_state().Value() == kSweepingDone;
+ }
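
The Lock()/Unlock() pair in WaitUntilSweepingCompleted() works because a concurrent sweeper holds the page mutex for the duration of the sweep, so acquiring and immediately releasing it blocks exactly until sweeping is done. A minimal standalone sketch of that idiom using std::mutex (the spin on an atomic flag stands in for "sweeping is known to be in progress"):

#include <atomic>
#include <cassert>
#include <mutex>
#include <thread>

int main() {
  std::mutex page_mutex;
  std::atomic<bool> sweeping_started{false};
  bool swept = false;

  std::thread sweeper([&] {
    std::lock_guard<std::mutex> guard(page_mutex);  // held for the whole sweep
    sweeping_started = true;
    swept = true;  // the sweep itself
  });

  while (!sweeping_started) {  // caller knows sweeping is in progress
  }

  // WaitUntilSweepingCompleted() in miniature: blocks until the sweeper's
  // guard releases the mutex, i.e., until sweeping is done.
  page_mutex.lock();
  page_mutex.unlock();
  assert(swept);  // mirrors DCHECK(SweepingDone())

  sweeper.join();
  return 0;
}
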
void ResetFreeListStatistics();
int LiveBytesFromFreeList() {
- return static_cast<int>(
- area_size() - non_available_small_blocks() -
- available_in_small_free_list() - available_in_medium_free_list() -
- available_in_large_free_list() - available_in_huge_free_list());
+ return static_cast<int>(area_size() - wasted_memory() -
+ available_in_free_list());
}
#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
@@ -891,50 +839,11 @@ class Page : public MemoryChunk {
void set_##name(type name) { name##_.SetValue(name); } \
void add_##name(type name) { name##_.Increment(name); }
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
- FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, wasted_memory)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_free_list)
#undef FRAGMENTATION_STATS_ACCESSORS
- void add_available_in_free_list(FreeListCategoryType type, intptr_t bytes) {
- switch (type) {
- case kSmall:
- add_available_in_small_free_list(bytes);
- break;
- case kMedium:
- add_available_in_medium_free_list(bytes);
- break;
- case kLarge:
- add_available_in_large_free_list(bytes);
- break;
- case kHuge:
- add_available_in_huge_free_list(bytes);
- break;
- default:
- UNREACHABLE();
- }
- }
-
- intptr_t available_in_free_list(FreeListCategoryType type) {
- switch (type) {
- case kSmall:
- return available_in_small_free_list();
- case kMedium:
- return available_in_medium_free_list();
- case kLarge:
- return available_in_large_free_list();
- case kHuge:
- return available_in_huge_free_list();
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return 0;
- }
-
#ifdef DEBUG
void Print();
#endif // DEBUG
@@ -965,7 +874,9 @@ class LargePage : public MemoryChunk {
class Space : public Malloced {
public:
Space(Heap* heap, AllocationSpace id, Executability executable)
- : heap_(heap),
+ : allocation_observers_(new List<AllocationObserver*>()),
+ allocation_observers_paused_(false),
+ heap_(heap),
id_(id),
executable_(executable),
committed_(0),
@@ -981,6 +892,26 @@ class Space : public Malloced {
// Identity used in error reporting.
AllocationSpace identity() { return id_; }
+ virtual void AddAllocationObserver(AllocationObserver* observer) {
+ allocation_observers_->Add(observer);
+ }
+
+ virtual void RemoveAllocationObserver(AllocationObserver* observer) {
+ bool removed = allocation_observers_->RemoveElement(observer);
+ USE(removed);
+ DCHECK(removed);
+ }
+
+ virtual void PauseAllocationObservers() {
+ allocation_observers_paused_ = true;
+ }
+
+ virtual void ResumeAllocationObservers() {
+ allocation_observers_paused_ = false;
+ }
+
+ void AllocationStep(Address soon_object, int size);
+
// Return the total amount committed memory for this space, i.e., allocatable
// memory and page headers.
virtual intptr_t CommittedMemory() { return committed_; }
@@ -1027,6 +958,9 @@ class Space : public Malloced {
DCHECK_GE(committed_, 0);
}
+ v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
+ bool allocation_observers_paused_;
+
private:
Heap* heap_;
AllocationSpace id_;
@@ -1628,12 +1562,12 @@ class AllocationStats BASE_EMBEDDED {
// A free list category maintains a linked list of free memory blocks.
class FreeListCategory {
public:
- explicit FreeListCategory(FreeList* owner, FreeListCategoryType type)
- : type_(type),
- top_(nullptr),
- end_(nullptr),
- available_(0),
- owner_(owner) {}
+ FreeListCategory() : top_(nullptr), end_(nullptr), available_(0) {}
+
+ void Initialize(FreeList* owner, FreeListCategoryType type) {
+ owner_ = owner;
+ type_ = type;
+ }
// Concatenates {category} into {this}.
//
@@ -1763,8 +1697,11 @@ class FreeList {
// Return the number of bytes available on the free list.
intptr_t Available() {
- return small_list_.available() + medium_list_.available() +
- large_list_.available() + huge_list_.available();
+ intptr_t available = 0;
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ available += category_[i].available();
+ }
+ return available;
}
// The method tries to find a {FreeSpace} node of at least {size_in_bytes}
@@ -1776,8 +1713,10 @@ class FreeList {
MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
bool IsEmpty() {
- return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
- large_list_.IsEmpty() && huge_list_.IsEmpty();
+ for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+ if (!category_[i].IsEmpty()) return false;
+ }
+ return true;
}
// Used after booting the VM.
@@ -1813,29 +1752,36 @@ class FreeList {
FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
- switch (category) {
- case kSmall:
- return &small_list_;
- case kMedium:
- return &medium_list_;
- case kLarge:
- return &large_list_;
- case kHuge:
- return &huge_list_;
- default:
- UNREACHABLE();
+ return &category_[category];
+ }
+
+ FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
+ if (size_in_bytes <= kSmallListMax) {
+ return kSmall;
+ } else if (size_in_bytes <= kMediumListMax) {
+ return kMedium;
+ } else if (size_in_bytes <= kLargeListMax) {
+ return kLarge;
}
- UNREACHABLE();
- return nullptr;
+ return kHuge;
+ }
+
+ FreeListCategoryType SelectFastAllocationFreeListCategoryType(
+ size_t size_in_bytes) {
+ if (size_in_bytes <= kSmallAllocationMax) {
+ return kSmall;
+ } else if (size_in_bytes <= kMediumAllocationMax) {
+ return kMedium;
+ } else if (size_in_bytes <= kLargeAllocationMax) {
+ return kLarge;
+ }
+ return kHuge;
}
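
For illustration, the size-class selection above can be exercised standalone. The threshold values below (kSmallListMax and friends) are stand-ins; the real constants are defined elsewhere in FreeList:

#include <cassert>
#include <cstddef>

enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };

// Stand-in thresholds; the real kSmallListMax/kMediumListMax/kLargeListMax
// are constants of FreeList defined elsewhere in this header.
const size_t kSmallListMax = 0x1f8;
const size_t kMediumListMax = 0x7f8;
const size_t kLargeListMax = 0x1ff8;

FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
  if (size_in_bytes <= kSmallListMax) return kSmall;
  if (size_in_bytes <= kMediumListMax) return kMedium;
  if (size_in_bytes <= kLargeListMax) return kLarge;
  return kHuge;
}

int main() {
  assert(SelectFreeListCategoryType(16) == kSmall);
  assert(SelectFreeListCategoryType(kSmallListMax + 1) == kMedium);
  assert(SelectFreeListCategoryType(kLargeListMax + 1) == kHuge);
  return 0;
}
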
PagedSpace* owner_;
base::Mutex mutex_;
intptr_t wasted_bytes_;
- FreeListCategory small_list_;
- FreeListCategory medium_list_;
- FreeListCategory large_list_;
- FreeListCategory huge_list_;
+ FreeListCategory category_[kNumberOfCategories];
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
@@ -1959,10 +1905,8 @@ class PagedSpace : public Space {
// Checks whether an object/address is in this space.
inline bool Contains(Address a);
- inline bool Contains(HeapObject* o);
- // Unlike Contains() methods it is safe to call this one even for addresses
- // of unmapped memory.
- bool ContainsSafe(Address addr);
+ inline bool Contains(Object* o);
+ bool ContainsSlow(Address addr);
// Given an address occupied by a live object, return that object if it is
// in this space, or a Smi if it is not. The implementation iterates over
@@ -2085,7 +2029,7 @@ class PagedSpace : public Space {
void IncreaseCapacity(int size);
// Releases an unused page and shrinks the space.
- void ReleasePage(Page* page);
+ void ReleasePage(Page* page, bool evict_free_list_items);
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }
@@ -2112,23 +2056,12 @@ class PagedSpace : public Space {
static void ResetCodeStatistics(Isolate* isolate);
#endif
- // Evacuation candidates are swept by evacuator. Needs to return a valid
- // result before _and_ after evacuation has finished.
- static bool ShouldBeSweptBySweeperThreads(Page* p) {
- return !p->IsEvacuationCandidate() &&
- !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
- }
-
  // This function tries to steal size_in_bytes of memory from the sweeper
  // threads' free-lists. If it does not succeed in stealing enough memory, it
  // will wait for the sweeper threads to finish sweeping.
// It returns true when sweeping is completed and false otherwise.
bool EnsureSweeperProgress(intptr_t size_in_bytes);
- void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; }
-
- Page* end_of_unswept_pages() { return end_of_unswept_pages_; }
-
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
@@ -2148,9 +2081,6 @@ class PagedSpace : public Space {
// e.g., removes its bump pointer area and resets statistics.
void MergeCompactionSpace(CompactionSpace* other);
- void DivideUponCompactionSpaces(CompactionSpaceCollection** other, int num,
- intptr_t limit = kCompactionMemoryWanted);
-
// Refills the free list from the corresponding free list filled by the
// sweeper.
virtual void RefillFreeList();
@@ -2158,8 +2088,6 @@ class PagedSpace : public Space {
protected:
void AddMemory(Address start, intptr_t size);
- FreeSpace* TryRemoveMemory(intptr_t size_in_bytes);
-
void MoveOverFreeMemory(PagedSpace* other);
// PagedSpaces that should be included in snapshots have different, i.e.,
@@ -2212,11 +2140,6 @@ class PagedSpace : public Space {
// Normal allocation information.
AllocationInfo allocation_info_;
- // The sweeper threads iterate over the list of pointer and data space pages
- // and sweep these pages concurrently. They will stop sweeping after the
- // end_of_unswept_pages_ page.
- Page* end_of_unswept_pages_;
-
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
@@ -2266,17 +2189,13 @@ class HistogramInfo : public NumberAndSizeInfo {
enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
-class SemiSpace;
-
-
class NewSpacePage : public MemoryChunk {
public:
// GC related flags copied from from-space to to-space when
// flipping semispaces.
static const intptr_t kCopyOnFlipFlagsMask =
(1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
- (1 << MemoryChunk::SCAN_ON_SCAVENGE);
+ (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
static const int kAreaSize = Page::kAllocatableMemory;
@@ -2349,31 +2268,39 @@ class NewSpacePage : public MemoryChunk {
// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
-// A semispace is a contiguous chunk of memory holding page-like memory
-// chunks. The mark-compact collector uses the memory of the first page in
-// the from space as a marking stack when tracing live objects.
-
+// A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
+// The mark-compact collector uses the memory of the first page in the from
+// space as a marking stack when tracing live objects.
class SemiSpace : public Space {
public:
- // Constructor.
+ static void Swap(SemiSpace* from, SemiSpace* to);
+
SemiSpace(Heap* heap, SemiSpaceId semispace)
: Space(heap, NEW_SPACE, NOT_EXECUTABLE),
- start_(NULL),
- age_mark_(NULL),
+ current_capacity_(0),
+ maximum_capacity_(0),
+ minimum_capacity_(0),
+ start_(nullptr),
+ age_mark_(nullptr),
+ committed_(false),
id_(semispace),
anchor_(this),
- current_page_(NULL) {}
+ current_page_(nullptr) {}
+
+ inline bool Contains(HeapObject* o);
+ inline bool Contains(Object* o);
+ inline bool ContainsSlow(Address a);
- // Sets up the semispace using the given chunk.
- void SetUp(Address start, int initial_capacity, int target_capacity,
- int maximum_capacity);
+ // Creates a space in the young generation. The constructor does not
+ // allocate memory from the OS.
+ void SetUp(Address start, int initial_capacity, int maximum_capacity);
// Tear down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
void TearDown();
// True if the space has been set up but not torn down.
- bool HasBeenSetUp() { return start_ != NULL; }
+ bool HasBeenSetUp() { return start_ != nullptr; }
// Grow the semispace to the new capacity. The new capacity
// requested must be larger than the current capacity and less than
@@ -2385,12 +2312,9 @@ class SemiSpace : public Space {
// semispace and less than the current capacity.
bool ShrinkTo(int new_capacity);
- // Sets the total capacity. Only possible when the space is not committed.
- bool SetTotalCapacity(int new_capacity);
-
// Returns the start address of the first page of the space.
Address space_start() {
- DCHECK(anchor_.next_page() != &anchor_);
+ DCHECK_NE(anchor_.next_page(), &anchor_);
return anchor_.next_page()->area_start();
}
@@ -2417,18 +2341,26 @@ class SemiSpace : public Space {
Address age_mark() { return age_mark_; }
void set_age_mark(Address mark);
- // True if the address is in the address range of this semispace (not
- // necessarily below the allocation pointer).
- bool Contains(Address a) {
- return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
- reinterpret_cast<uintptr_t>(start_);
- }
+ bool is_committed() { return committed_; }
+ bool Commit();
+ bool Uncommit();
- // True if the object is a heap object in the address range of this
- // semispace (not necessarily below the allocation pointer).
- bool Contains(Object* o) {
- return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
- }
+ NewSpacePage* first_page() { return anchor_.next_page(); }
+ NewSpacePage* current_page() { return current_page_; }
+
+ // Returns the current total capacity of the semispace.
+ int current_capacity() { return current_capacity_; }
+
+ // Returns the maximum total capacity of the semispace.
+ int maximum_capacity() { return maximum_capacity_; }
+
+ // Returns the initial capacity of the semispace.
+ int minimum_capacity() { return minimum_capacity_; }
+
+ SemiSpaceId id() { return id_; }
+
+ // Approximate amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory() override;
  // If we don't have these here, then SemiSpace will be abstract. However,
  // they should never be called:
@@ -2445,18 +2377,6 @@ class SemiSpace : public Space {
return 0;
}
-
- bool is_committed() { return committed_; }
- bool Commit();
- bool Uncommit();
-
- NewSpacePage* first_page() { return anchor_.next_page(); }
- NewSpacePage* current_page() { return current_page_; }
-
-#ifdef VERIFY_HEAP
- virtual void Verify();
-#endif
-
#ifdef DEBUG
void Print() override;
  // Validate a range of addresses in a SemiSpace.
@@ -2468,51 +2388,34 @@ class SemiSpace : public Space {
inline static void AssertValidRange(Address from, Address to) {}
#endif
- // Returns the current total capacity of the semispace.
- int TotalCapacity() { return total_capacity_; }
-
- // Returns the target for total capacity of the semispace.
- int TargetCapacity() { return target_capacity_; }
-
- // Returns the maximum total capacity of the semispace.
- int MaximumTotalCapacity() { return maximum_total_capacity_; }
-
- // Returns the initial capacity of the semispace.
- int InitialTotalCapacity() { return initial_total_capacity_; }
-
- SemiSpaceId id() { return id_; }
+#ifdef VERIFY_HEAP
+ virtual void Verify();
+#endif
- static void Swap(SemiSpace* from, SemiSpace* to);
+ private:
+ NewSpacePage* anchor() { return &anchor_; }
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory() override;
+ void set_current_capacity(int new_capacity) {
+ current_capacity_ = new_capacity;
+ }
- private:
- // Flips the semispace between being from-space and to-space.
// Copies the flags into the masked positions on all pages in the space.
- void FlipPages(intptr_t flags, intptr_t flag_mask);
+ void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
- // Updates Capacity and MaximumCommitted based on new capacity.
- void SetCapacity(int new_capacity);
+ // The currently committed space capacity.
+ int current_capacity_;
- NewSpacePage* anchor() { return &anchor_; }
+ // The maximum capacity that can be used by this space.
+ int maximum_capacity_;
- // The current and maximum total capacity of the space.
- int total_capacity_;
- int target_capacity_;
- int maximum_total_capacity_;
- int initial_total_capacity_;
+ // The minimum capacity for the space. A space cannot shrink below this size.
+ int minimum_capacity_;
// The start address of the space.
Address start_;
// Used to govern object promotion during mark-compact collection.
Address age_mark_;
- // Masks and comparison values to test for containment in this semispace.
- uintptr_t address_mask_;
- uintptr_t object_mask_;
- uintptr_t object_expected_;
-
bool committed_;
SemiSpaceId id_;
@@ -2576,54 +2479,6 @@ class NewSpacePageIterator BASE_EMBEDDED {
NewSpacePage* last_page_;
};
-// -----------------------------------------------------------------------------
-// Allows observation of inline allocation in the new space.
-class InlineAllocationObserver {
- public:
- explicit InlineAllocationObserver(intptr_t step_size)
- : step_size_(step_size), bytes_to_next_step_(step_size) {
- DCHECK(step_size >= kPointerSize);
- }
- virtual ~InlineAllocationObserver() {}
-
- private:
- intptr_t step_size() const { return step_size_; }
- intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
-
- // Pure virtual method provided by the subclasses that gets called when at
- // least step_size bytes have been allocated. soon_object is the address just
- // allocated (but not yet initialized.) size is the size of the object as
- // requested (i.e. w/o the alignment fillers). Some complexities to be aware
- // of:
- // 1) soon_object will be nullptr in cases where we end up observing an
- // allocation that happens to be a filler space (e.g. page boundaries.)
- // 2) size is the requested size at the time of allocation. Right-trimming
- // may change the object size dynamically.
- // 3) soon_object may actually be the first object in an allocation-folding
- // group. In such a case size is the size of the group rather than the
- // first object.
- virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
-
- // Called each time the new space does an inline allocation step. This may be
- // more frequently than the step_size we are monitoring (e.g. when there are
- // multiple observers, or when page or space boundary is encountered.)
- void InlineAllocationStep(int bytes_allocated, Address soon_object,
- size_t size) {
- bytes_to_next_step_ -= bytes_allocated;
- if (bytes_to_next_step_ <= 0) {
- Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
- size);
- bytes_to_next_step_ = step_size_;
- }
- }
-
- intptr_t step_size_;
- intptr_t bytes_to_next_step_;
-
- friend class NewSpace;
-
- DISALLOW_COPY_AND_ASSIGN(InlineAllocationObserver);
-};
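
The stepping protocol of the class removed above survives in its replacement, AllocationObserver (declared elsewhere in this patch, alongside the Space::AddAllocationObserver hooks added earlier in this file). A standalone mock that reproduces the removed InlineAllocationStep() accounting; Observer and CountingObserver are hypothetical names, not V8 classes:

#include <cassert>
#include <cstddef>
#include <cstdint>

typedef uint8_t* Address;

class Observer {
 public:
  explicit Observer(intptr_t step_size)
      : step_size_(step_size), bytes_to_next_step_(step_size) {}
  virtual ~Observer() {}

  // Reproduces InlineAllocationStep(): Step() fires once at least
  // step_size bytes have been allocated since the last notification.
  void AllocationStep(int bytes_allocated, Address soon_object, size_t size) {
    bytes_to_next_step_ -= bytes_allocated;
    if (bytes_to_next_step_ <= 0) {
      Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
           size);
      bytes_to_next_step_ = step_size_;
    }
  }

 protected:
  virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;

 private:
  intptr_t step_size_;
  intptr_t bytes_to_next_step_;
};

class CountingObserver : public Observer {
 public:
  explicit CountingObserver(intptr_t step_size)
      : Observer(step_size), steps_(0) {}
  int steps() const { return steps_; }

 protected:
  void Step(int, Address, size_t) override { steps_++; }

 private:
  int steps_;
};

int main() {
  CountingObserver observer(256);
  observer.AllocationStep(100, nullptr, 100);  // below threshold: no Step()
  assert(observer.steps() == 0);
  observer.AllocationStep(200, nullptr, 200);  // crosses 256: Step() fires
  assert(observer.steps() == 1);
  return 0;
}
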
// -----------------------------------------------------------------------------
// The young generation space.
@@ -2639,8 +2494,11 @@ class NewSpace : public Space {
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
reservation_(),
- top_on_previous_step_(0),
- inline_allocation_observers_paused_(false) {}
+ top_on_previous_step_(0) {}
+
+ inline bool Contains(HeapObject* o);
+ inline bool ContainsSlow(Address a);
+ inline bool Contains(Object* o);
// Sets up the new space using the given chunk.
bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
@@ -2661,24 +2519,9 @@ class NewSpace : public Space {
// their maximum capacity.
void Grow();
- // Grow the capacity of the semispaces by one page.
- bool GrowOnePage();
-
// Shrink the capacity of the semispaces.
void Shrink();
- // True if the address or object lies in the address range of either
- // semispace (not necessarily below the allocation pointer).
- bool Contains(Address a) {
- return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
- reinterpret_cast<uintptr_t>(start_);
- }
-
- bool Contains(Object* o) {
- Address a = reinterpret_cast<Address>(o);
- return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
- }
-
// Return the allocated bytes in the active semispace.
intptr_t Size() override {
return pages_used_ * NewSpacePage::kAreaSize +
@@ -2692,16 +2535,16 @@ class NewSpace : public Space {
// Return the allocatable capacity of a semispace.
intptr_t Capacity() {
- SLOW_DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
- return (to_space_.TotalCapacity() / Page::kPageSize) *
+ SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
+ return (to_space_.current_capacity() / Page::kPageSize) *
NewSpacePage::kAreaSize;
}
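
A small worked example of the arithmetic in Capacity(): allocatable capacity is the committed page count times the per-page allocatable area. The constants below are stand-ins for Page::kPageSize and NewSpacePage::kAreaSize:

#include <cassert>
#include <cstddef>

int main() {
  // Stand-ins for Page::kPageSize and NewSpacePage::kAreaSize (the page size
  // minus the per-page header).
  const size_t kPageSize = 1u << 20;
  const size_t kAreaSize = kPageSize - (1u << 13);

  size_t current_capacity = 8 * kPageSize;  // eight committed semispace pages
  // As in Capacity() above: allocatable bytes exclude each page's header.
  size_t capacity = (current_capacity / kPageSize) * kAreaSize;
  assert(capacity == 8 * kAreaSize);
  return 0;
}
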
// Return the current size of a semispace, allocatable and non-allocatable
// memory.
intptr_t TotalCapacity() {
- DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
- return to_space_.TotalCapacity();
+ DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
+ return to_space_.current_capacity();
}
// Committed memory for NewSpace is the committed memory of both semi-spaces
@@ -2742,18 +2585,16 @@ class NewSpace : public Space {
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
- DCHECK(to_space_.MaximumTotalCapacity() ==
- from_space_.MaximumTotalCapacity());
- return to_space_.MaximumTotalCapacity();
+ DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
+ return to_space_.maximum_capacity();
}
bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
// Returns the initial capacity of a semispace.
int InitialTotalCapacity() {
- DCHECK(to_space_.InitialTotalCapacity() ==
- from_space_.InitialTotalCapacity());
- return to_space_.InitialTotalCapacity();
+ DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
+ return to_space_.minimum_capacity();
}
// Return the address of the allocation pointer in the active semispace.
@@ -2779,18 +2620,6 @@ class NewSpace : public Space {
// The start address of the space and a bit mask. Anding an address in the
// new space with the mask will result in the start address.
Address start() { return start_; }
- uintptr_t mask() { return address_mask_; }
-
- INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
- DCHECK(Contains(addr));
- DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) ||
- IsAligned(OffsetFrom(addr) - 1, kPointerSize));
- return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
- }
-
- INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
- return reinterpret_cast<Address>(index << kPointerSizeLog2);
- }
// The allocation top and limit address.
Address* allocation_top_address() { return allocation_info_.top_address(); }
@@ -2815,21 +2644,25 @@ class NewSpace : public Space {
// Reset the allocation pointer to the beginning of the active semispace.
void ResetAllocationInfo();
+ // When inline allocation stepping is active, either because of incremental
+ // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
+ // inline allocation every once in a while. This is done by setting
+ // allocation_info_.limit to be lower than the actual limit and increasing
+ // it in steps to guarantee that the observers are notified periodically.
void UpdateInlineAllocationLimit(int size_in_bytes);
+ void DisableInlineAllocationSteps() {
+ top_on_previous_step_ = 0;
+ UpdateInlineAllocationLimit(0);
+ }
+
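
A sketch of the limit-lowering scheme described in the comment above, reduced to plain integers: the published limit is kept at most one observer step beyond the allocation top, so bump-pointer allocation periodically falls into the slow path where observers are notified. All names here are illustrative, not the real NewSpace fields:

#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
  intptr_t top = 0;              // current allocation pointer
  intptr_t actual_limit = 4096;  // true end of the current page
  intptr_t step = 256;           // smallest observer step size

  // The published limit sits at most |step| bytes past |top|, so inline
  // allocation "fails" early and enters the slow path, where observers are
  // notified and the limit is advanced again.
  intptr_t published_limit = std::min(actual_limit, top + step);
  assert(published_limit == 256);

  top = published_limit;  // allocation proceeded up to the lowered limit
  published_limit = std::min(actual_limit, top + step);
  assert(published_limit == 512);
  return 0;
}
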
// Allows observation of inline allocation. The observer->Step() method gets
// called after every step_size bytes have been allocated (approximately).
  // This works by lowering the allocation limit and raising it again after
  // each step.
- void AddInlineAllocationObserver(InlineAllocationObserver* observer);
+ void AddAllocationObserver(AllocationObserver* observer) override;
- // Removes a previously installed observer.
- void RemoveInlineAllocationObserver(InlineAllocationObserver* observer);
-
- void DisableInlineAllocationSteps() {
- top_on_previous_step_ = 0;
- UpdateInlineAllocationLimit(0);
- }
+ void RemoveAllocationObserver(AllocationObserver* observer) override;
// Get the extent of the inactive semispace (for use as a marking stack,
// or to zap it). Notice: space-addresses are not necessarily on the
@@ -2843,18 +2676,10 @@ class NewSpace : public Space {
Address ToSpaceStart() { return to_space_.space_start(); }
Address ToSpaceEnd() { return to_space_.space_end(); }
- inline bool ToSpaceContains(Address address) {
- return to_space_.Contains(address);
- }
- inline bool FromSpaceContains(Address address) {
- return from_space_.Contains(address);
- }
-
- // True if the object is a heap object in the address range of the
- // respective semispace (not necessarily below the allocation pointer of the
- // semispace).
- inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
- inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
+ inline bool ToSpaceContainsSlow(Address a);
+ inline bool FromSpaceContainsSlow(Address a);
+ inline bool ToSpaceContains(Object* o);
+ inline bool FromSpaceContains(Object* o);
// Try to switch the active semispace to a new, empty, page.
// Returns false if this isn't possible or reasonable (i.e., there
@@ -2901,6 +2726,9 @@ class NewSpace : public Space {
SemiSpace* active_space() { return &to_space_; }
+ void PauseAllocationObservers() override;
+ void ResumeAllocationObservers() override;
+
private:
// Update allocation info to match the current to-space page.
void UpdateAllocationInfo();
@@ -2918,22 +2746,12 @@ class NewSpace : public Space {
// Start address and bit mask for containment testing.
Address start_;
- uintptr_t address_mask_;
- uintptr_t object_mask_;
- uintptr_t object_expected_;
// Allocation pointer and limit for normal allocation and allocation during
// mark-compact collection.
AllocationInfo allocation_info_;
- // When inline allocation stepping is active, either because of incremental
- // marking or because of idle scavenge, we 'interrupt' inline allocation every
- // once in a while. This is done by setting allocation_info_.limit to be lower
- // than the actual limit and and increasing it in steps to guarantee that the
- // observers are notified periodically.
- List<InlineAllocationObserver*> inline_allocation_observers_;
Address top_on_previous_step_;
- bool inline_allocation_observers_paused_;
HistogramInfo* allocated_histogram_;
HistogramInfo* promoted_histogram_;
@@ -2950,26 +2768,18 @@ class NewSpace : public Space {
size_t size);
intptr_t GetNextInlineAllocationStepSize();
void StartNextInlineAllocationStep();
- void PauseInlineAllocationObservers();
- void ResumeInlineAllocationObservers();
- friend class PauseInlineAllocationObserversScope;
friend class SemiSpaceIterator;
};
-class PauseInlineAllocationObserversScope {
+class PauseAllocationObserversScope {
public:
- explicit PauseInlineAllocationObserversScope(NewSpace* new_space)
- : new_space_(new_space) {
- new_space_->PauseInlineAllocationObservers();
- }
- ~PauseInlineAllocationObserversScope() {
- new_space_->ResumeInlineAllocationObservers();
- }
+ explicit PauseAllocationObserversScope(Heap* heap);
+ ~PauseAllocationObserversScope();
private:
- NewSpace* new_space_;
- DISALLOW_COPY_AND_ASSIGN(PauseInlineAllocationObserversScope);
+ Heap* heap_;
+ DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
};
// -----------------------------------------------------------------------------
@@ -2980,12 +2790,6 @@ class CompactionSpace : public PagedSpace {
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
: PagedSpace(heap, id, executable) {}
- // Adds external memory starting at {start} of {size_in_bytes} to the space.
- void AddExternalMemory(Address start, int size_in_bytes) {
- IncreaseCapacity(size_in_bytes);
- Free(start, size_in_bytes);
- }
-
bool is_local() override { return true; }
void RefillFreeList() override;
@@ -3004,9 +2808,7 @@ class CompactionSpaceCollection : public Malloced {
public:
explicit CompactionSpaceCollection(Heap* heap)
: old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
- code_space_(heap, CODE_SPACE, Executability::EXECUTABLE),
- duration_(0.0),
- bytes_compacted_(0) {}
+ code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
CompactionSpace* Get(AllocationSpace space) {
switch (space) {
@@ -3021,21 +2823,9 @@ class CompactionSpaceCollection : public Malloced {
return nullptr;
}
- void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
- duration_ += duration;
- bytes_compacted_ += bytes_compacted;
- }
-
- double duration() const { return duration_; }
- intptr_t bytes_compacted() const { return bytes_compacted_; }
-
private:
CompactionSpace old_space_;
CompactionSpace code_space_;
-
- // Book keeping.
- double duration_;
- intptr_t bytes_compacted_;
};
@@ -3153,7 +2943,9 @@ class LargeObjectSpace : public Space {
// Checks whether a heap object is in this space; O(1).
bool Contains(HeapObject* obj);
- bool Contains(Address address);
+ // Checks whether an address is in the object area in this space. Iterates
+ // all objects in the space. May be slow.
+ bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
// Checks whether the space is empty.
bool IsEmpty() { return first_page_ == NULL; }
@@ -3169,9 +2961,6 @@ class LargeObjectSpace : public Space {
void ReportStatistics();
void CollectCodeStatistics();
#endif
- // Checks whether an address is in the object area in this space. It
- // iterates all objects in the space. May be slow.
- bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
private:
// The head of the linked list of large object chunks.
diff --git a/deps/v8/src/heap/store-buffer-inl.h b/deps/v8/src/heap/store-buffer-inl.h
index e11ad87087..920ec3411d 100644
--- a/deps/v8/src/heap/store-buffer-inl.h
+++ b/deps/v8/src/heap/store-buffer-inl.h
@@ -6,48 +6,30 @@
#define V8_STORE_BUFFER_INL_H_
#include "src/heap/heap.h"
+#include "src/heap/remembered-set.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
namespace v8 {
namespace internal {
-void StoreBuffer::Mark(Address addr) {
- DCHECK(!heap_->code_space()->Contains(addr));
- Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
- *top++ = addr;
- heap_->set_store_buffer_top(reinterpret_cast<Smi*>(top));
- if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
- DCHECK(top == limit_);
- Compact();
- } else {
- DCHECK(top < limit_);
- }
-}
-
-
-inline void StoreBuffer::MarkSynchronized(Address addr) {
- base::LockGuard<base::Mutex> lock_guard(&mutex_);
- Mark(addr);
+void LocalStoreBuffer::Record(Address addr) {
+ if (top_->is_full()) top_ = new Node(top_);
+ top_->buffer[top_->count++] = addr;
}
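
LocalStoreBuffer::Record() appends into a chunked list of fixed-size nodes, prepending a fresh node on overflow. The Node type itself is defined in store-buffer.h and is not shown in this diff, so the layout below (a fixed array, a count, and a link to the previous node) is an assumption made to keep the sketch self-contained:

#include <cassert>
#include <cstdint>

typedef uint8_t* Address;

// Assumed Node layout: a fixed-size slot array plus a link to the
// previously filled node.
struct Node {
  static const int kCapacity = 1024;
  explicit Node(Node* next_node) : next(next_node), count(0) {}
  bool is_full() const { return count == kCapacity; }

  Node* next;
  int count;
  Address buffer[kCapacity];
};

// Mirrors LocalStoreBuffer::Record() above.
void Record(Node** top, Address addr) {
  if ((*top)->is_full()) *top = new Node(*top);  // grow by prepending
  (*top)->buffer[(*top)->count++] = addr;
}

int main() {
  Node* top = new Node(nullptr);
  for (int i = 0; i < Node::kCapacity + 1; i++) {
    Record(&top, reinterpret_cast<Address>(static_cast<uintptr_t>(i)));
  }
  assert(top->next != nullptr);  // the overflow prepended a second node
  assert(top->count == 1);
  while (top != nullptr) {  // free the chain
    Node* next = top->next;
    delete top;
    top = next;
  }
  return 0;
}
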
-
-void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
- if (store_buffer_rebuilding_enabled_) {
- SLOW_DCHECK(!heap_->code_space()->Contains(addr) &&
- !heap_->new_space()->Contains(addr));
- Address* top = old_top_;
- *top++ = addr;
- old_top_ = top;
- old_buffer_is_sorted_ = false;
- old_buffer_is_filtered_ = false;
- if (top >= old_limit_) {
- DCHECK(callback_ != NULL);
- (*callback_)(heap_, MemoryChunk::FromAnyPointerAddress(heap_, addr),
- kStoreBufferFullEvent);
+void LocalStoreBuffer::Process(StoreBuffer* store_buffer) {
+ Node* current = top_;
+ while (current != nullptr) {
+ for (int i = 0; i < current->count; i++) {
+ Address slot = current->buffer[i];
+ Page* page = Page::FromAnyPointerAddress(heap_, slot);
+ RememberedSet<OLD_TO_NEW>::Insert(page, slot);
}
+ current = current->next;
}
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index a8a1e5bbf1..21f375b195 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -17,24 +17,7 @@ namespace v8 {
namespace internal {
StoreBuffer::StoreBuffer(Heap* heap)
- : heap_(heap),
- start_(NULL),
- limit_(NULL),
- old_start_(NULL),
- old_limit_(NULL),
- old_top_(NULL),
- old_reserved_limit_(NULL),
- old_buffer_is_sorted_(false),
- old_buffer_is_filtered_(false),
- during_gc_(false),
- store_buffer_rebuilding_enabled_(false),
- callback_(NULL),
- may_move_store_buffer_entries_(true),
- virtual_memory_(NULL),
- hash_set_1_(NULL),
- hash_set_2_(NULL),
- hash_sets_are_empty_(true) {}
-
+ : heap_(heap), start_(nullptr), limit_(nullptr), virtual_memory_(nullptr) {}
void StoreBuffer::SetUp() {
// Allocate 3x the buffer size, so that we can start the new store buffer
@@ -47,31 +30,6 @@ void StoreBuffer::SetUp() {
reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
limit_ = start_ + (kStoreBufferSize / kPointerSize);
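
The 3x reservation plus RoundUp() to a 2x-buffer-size boundary guarantees that a fully aligned buffer of one buffer-size fits inside the reservation wherever the OS places it. A standalone check of that invariant, with a stand-in kStoreBufferSize:

#include <cassert>
#include <cstdint>

// RoundUp as used above: the next multiple of |alignment| at or above |x|
// (alignment must be a power of two).
uintptr_t RoundUp(uintptr_t x, uintptr_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  const uintptr_t kStoreBufferSize = 1u << 16;  // stand-in value
  uintptr_t reservation = 0x12345000;           // wherever the OS put us
  uintptr_t start = RoundUp(reservation, kStoreBufferSize * 2);

  assert(start % (kStoreBufferSize * 2) == 0);  // 2x-aligned start
  assert(start >= reservation);                 // inside the reservation...
  assert(start + kStoreBufferSize <=            // ...with room for the buffer
         reservation + 3 * kStoreBufferSize);
  return 0;
}
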
- // Reserve space for the larger old buffer.
- old_virtual_memory_ =
- new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
- old_top_ = old_start_ =
- reinterpret_cast<Address*>(old_virtual_memory_->address());
- // Don't know the alignment requirements of the OS, but it is certainly not
- // less than 0xfff.
- CHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
- CHECK(kStoreBufferSize >= base::OS::CommitPageSize());
- // Initial size of the old buffer is as big as the buffer for new pointers.
- // This means even if we later fail to enlarge the old buffer due to OOM from
- // the OS, we will still be able to empty the new pointer buffer into the old
- // buffer.
- int initial_length = static_cast<int>(kStoreBufferSize / kPointerSize);
- CHECK(initial_length > 0);
- CHECK(initial_length <= kOldStoreBufferLength);
- old_limit_ = old_start_ + initial_length;
- old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
-
- if (!old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
- (old_limit_ - old_start_) * kPointerSize,
- false)) {
- V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
- }
-
DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
Address* vm_limit = reinterpret_cast<Address*>(
@@ -90,533 +48,31 @@ void StoreBuffer::SetUp() {
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
-
- hash_set_1_ = new uintptr_t[kHashSetLength];
- hash_set_2_ = new uintptr_t[kHashSetLength];
- hash_sets_are_empty_ = false;
-
- ClearFilteringHashSets();
}
void StoreBuffer::TearDown() {
delete virtual_memory_;
- delete old_virtual_memory_;
- delete[] hash_set_1_;
- delete[] hash_set_2_;
- old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
start_ = limit_ = NULL;
heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
}
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
- isolate->heap()->store_buffer()->Compact();
+ isolate->heap()->store_buffer()->MoveEntriesToRememberedSet();
isolate->counters()->store_buffer_overflows()->Increment();
}
-
-bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
- return old_limit_ - old_top_ >= space_needed;
-}
-
-
-void StoreBuffer::EnsureSpace(intptr_t space_needed) {
- while (old_limit_ - old_top_ < space_needed &&
- old_limit_ < old_reserved_limit_) {
- size_t grow = old_limit_ - old_start_; // Double size.
- if (old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
- grow * kPointerSize, false)) {
- old_limit_ += grow;
- } else {
- break;
- }
- }
-
- if (SpaceAvailable(space_needed)) return;
-
- if (old_buffer_is_filtered_) return;
- DCHECK(may_move_store_buffer_entries_);
- Compact();
-
- old_buffer_is_filtered_ = true;
- bool page_has_scan_on_scavenge_flag = false;
-
- PointerChunkIterator it(heap_);
- MemoryChunk* chunk;
- while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) {
- page_has_scan_on_scavenge_flag = true;
- break;
- }
- }
-
- if (page_has_scan_on_scavenge_flag) {
- Filter(MemoryChunk::SCAN_ON_SCAVENGE);
- }
-
- if (SpaceAvailable(space_needed)) return;
-
- // Sample 1 entry in 97 and filter out the pages where we estimate that more
- // than 1 in 8 pointers are to new space.
- static const int kSampleFinenesses = 5;
- static const struct Samples {
- int prime_sample_step;
- int threshold;
- } samples[kSampleFinenesses] = {
- {97, ((Page::kPageSize / kPointerSize) / 97) / 8},
- {23, ((Page::kPageSize / kPointerSize) / 23) / 16},
- {7, ((Page::kPageSize / kPointerSize) / 7) / 32},
- {3, ((Page::kPageSize / kPointerSize) / 3) / 256},
- {1, 0}};
- for (int i = 0; i < kSampleFinenesses; i++) {
- ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
- // As a last resort we mark all pages as being exempt from the store buffer.
- DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
- if (SpaceAvailable(space_needed)) return;
- }
- UNREACHABLE();
-}
-
-
-// Sample the store buffer to see if some pages are taking up a lot of space
-// in the store buffer.
-void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
- PointerChunkIterator it(heap_);
- MemoryChunk* chunk;
- while ((chunk = it.next()) != NULL) {
- chunk->set_store_buffer_counter(0);
- }
- bool created_new_scan_on_scavenge_pages = false;
- MemoryChunk* previous_chunk = NULL;
- for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
- Address addr = *p;
- MemoryChunk* containing_chunk = NULL;
- if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
- containing_chunk = previous_chunk;
- } else {
- containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
- }
- int old_counter = containing_chunk->store_buffer_counter();
- if (old_counter >= threshold) {
- containing_chunk->set_scan_on_scavenge(true);
- created_new_scan_on_scavenge_pages = true;
- }
- containing_chunk->set_store_buffer_counter(old_counter + 1);
- previous_chunk = containing_chunk;
- }
- if (created_new_scan_on_scavenge_pages) {
- Filter(MemoryChunk::SCAN_ON_SCAVENGE);
- heap_->isolate()->CountUsage(
- v8::Isolate::UseCounterFeature::kStoreBufferOverflow);
- }
- old_buffer_is_filtered_ = true;
-}
-
-
-void StoreBuffer::Filter(int flag) {
- Address* new_top = old_start_;
- MemoryChunk* previous_chunk = NULL;
- for (Address* p = old_start_; p < old_top_; p++) {
- Address addr = *p;
- MemoryChunk* containing_chunk = NULL;
- if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
- containing_chunk = previous_chunk;
- } else {
- containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
- previous_chunk = containing_chunk;
- }
- if (!containing_chunk->IsFlagSet(flag)) {
- *new_top++ = addr;
- }
- }
- old_top_ = new_top;
-
- // Filtering hash sets are inconsistent with the store buffer after this
- // operation.
- ClearFilteringHashSets();
-}
-
-
-bool StoreBuffer::PrepareForIteration() {
- Compact();
- PointerChunkIterator it(heap_);
- MemoryChunk* chunk;
- bool page_has_scan_on_scavenge_flag = false;
- while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) {
- page_has_scan_on_scavenge_flag = true;
- break;
- }
- }
-
- if (page_has_scan_on_scavenge_flag) {
- Filter(MemoryChunk::SCAN_ON_SCAVENGE);
- }
-
- // Filtering hash sets are inconsistent with the store buffer after
- // iteration.
- ClearFilteringHashSets();
-
- return page_has_scan_on_scavenge_flag;
-}
-
-
-void StoreBuffer::ClearFilteringHashSets() {
- if (!hash_sets_are_empty_) {
- memset(reinterpret_cast<void*>(hash_set_1_), 0,
- sizeof(uintptr_t) * kHashSetLength);
- memset(reinterpret_cast<void*>(hash_set_2_), 0,
- sizeof(uintptr_t) * kHashSetLength);
- hash_sets_are_empty_ = true;
- }
-}
-
-
-void StoreBuffer::GCPrologue() {
- ClearFilteringHashSets();
- during_gc_ = true;
-}
-
-
-#ifdef VERIFY_HEAP
-void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
- LargeObjectIterator it(space);
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- if (object->IsFixedArray()) {
- Address slot_address = object->address();
- Address end = object->address() + object->Size();
-
- while (slot_address < end) {
- HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
- // When we are not in GC the Heap::InNewSpace() predicate
- // checks that pointers which satisfy predicate point into
- // the active semispace.
- Object* object = *slot;
- heap_->InNewSpace(object);
- slot_address += kPointerSize;
- }
- }
- }
-}
-#endif
-
-
-void StoreBuffer::Verify() {
-#ifdef VERIFY_HEAP
- VerifyPointers(heap_->lo_space());
-#endif
-}
-
-
-void StoreBuffer::GCEpilogue() {
- during_gc_ = false;
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- Verify();
- }
-#endif
-}
-
-
-void StoreBuffer::ProcessOldToNewSlot(Address slot_address,
- ObjectSlotCallback slot_callback) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- Object* object = *slot;
-
- // If the object is not in from space, it must be a duplicate store buffer
- // entry and the slot was already updated.
- if (heap_->InFromSpace(object)) {
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
- DCHECK(heap_object->IsHeapObject());
- slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
- object = *slot;
- // If the object was in from space before and is after executing the
- // callback in to space, the object is still live.
- // Unfortunately, we do not know about the slot. It could be in a
- // just freed free space object.
- if (heap_->InToSpace(object)) {
- EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
- }
- }
-}
-
-
-void StoreBuffer::FindPointersToNewSpaceInRegion(
- Address start, Address end, ObjectSlotCallback slot_callback) {
- for (Address slot_address = start; slot_address < end;
- slot_address += kPointerSize) {
- ProcessOldToNewSlot(slot_address, slot_callback);
- }
-}
-
-
-void StoreBuffer::IteratePointersInStoreBuffer(
- ObjectSlotCallback slot_callback) {
- Address* limit = old_top_;
- old_top_ = old_start_;
- {
- DontMoveStoreBufferEntriesScope scope(this);
- for (Address* current = old_start_; current < limit; current++) {
-#ifdef DEBUG
- Address* saved_top = old_top_;
-#endif
- ProcessOldToNewSlot(*current, slot_callback);
- DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
- }
- }
-}
-
-
-void StoreBuffer::ClearInvalidStoreBufferEntries() {
- Compact();
- Address* new_top = old_start_;
- for (Address* current = old_start_; current < old_top_; current++) {
- Address addr = *current;
- Object** slot = reinterpret_cast<Object**>(addr);
- Object* object = *slot;
- if (heap_->InNewSpace(object) && object->IsHeapObject()) {
- // If the target object is not black, the source slot must be part
- // of a non-black (dead) object.
- HeapObject* heap_object = HeapObject::cast(object);
- if (Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
- heap_->mark_compact_collector()->IsSlotInLiveObject(addr)) {
- *new_top++ = addr;
- }
- }
- }
- old_top_ = new_top;
- ClearFilteringHashSets();
-
- // Don't scan on scavenge dead large objects.
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- if (chunk->scan_on_scavenge() &&
- Marking::IsWhite(Marking::MarkBitFrom(object))) {
- chunk->set_scan_on_scavenge(false);
- }
- }
-}
-
-
-void StoreBuffer::VerifyValidStoreBufferEntries() {
- for (Address* current = old_start_; current < old_top_; current++) {
- Object** slot = reinterpret_cast<Object**>(*current);
- Object* object = *slot;
- CHECK(object->IsHeapObject());
- CHECK(heap_->InNewSpace(object));
- heap_->mark_compact_collector()->VerifyIsSlotInLiveObject(
- reinterpret_cast<Address>(slot), HeapObject::cast(object));
- }
-}
-
-
-class FindPointersToNewSpaceVisitor final : public ObjectVisitor {
- public:
- FindPointersToNewSpaceVisitor(StoreBuffer* store_buffer,
- ObjectSlotCallback callback)
- : store_buffer_(store_buffer), callback_(callback) {}
-
- V8_INLINE void VisitPointers(Object** start, Object** end) override {
- store_buffer_->FindPointersToNewSpaceInRegion(
- reinterpret_cast<Address>(start), reinterpret_cast<Address>(end),
- callback_);
- }
-
- V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
-
- private:
- StoreBuffer* store_buffer_;
- ObjectSlotCallback callback_;
-};
-
-
-void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
- // We do not sort or remove duplicated entries from the store buffer because
- // we expect that callback will rebuild the store buffer thus removing
- // all duplicates and pointers to old space.
- bool some_pages_to_scan = PrepareForIteration();
-
- // TODO(gc): we want to skip slots on evacuation candidates
- // but we can't simply figure that out from slot address
- // because slot can belong to a large object.
- IteratePointersInStoreBuffer(slot_callback);
-
- // We are done scanning all the pointers that were in the store buffer, but
- // there may be some pages marked scan_on_scavenge that have pointers to new
- // space that are not in the store buffer. We must scan them now. As we
- // scan, the surviving pointers to new space will be added to the store
- // buffer. If there are still a lot of pointers to new space then we will
- // keep the scan_on_scavenge flag on the page and discard the pointers that
- // were added to the store buffer. If there are not many pointers to new
- // space left on the page we will keep the pointers in the store buffer and
- // remove the flag from the page.
- if (some_pages_to_scan) {
- if (callback_ != NULL) {
- (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
- }
- PointerChunkIterator it(heap_);
- MemoryChunk* chunk;
- FindPointersToNewSpaceVisitor visitor(this, slot_callback);
- while ((chunk = it.next()) != NULL) {
- if (chunk->scan_on_scavenge()) {
- chunk->set_scan_on_scavenge(false);
- if (callback_ != NULL) {
- (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
- }
- if (chunk->owner() == heap_->lo_space()) {
- LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
- HeapObject* array = large_page->GetObject();
- DCHECK(array->IsFixedArray());
- Address start = array->address();
- Address end = start + array->Size();
- FindPointersToNewSpaceInRegion(start, end, slot_callback);
- } else {
- Page* page = reinterpret_cast<Page*>(chunk);
- PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
- if (owner == heap_->map_space()) {
- DCHECK(page->WasSwept());
- HeapObjectIterator iterator(page);
- for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
- heap_object = iterator.Next()) {
- // We skip free space objects.
- if (!heap_object->IsFiller()) {
- DCHECK(heap_object->IsMap());
- FindPointersToNewSpaceInRegion(
- heap_object->address() + Map::kPointerFieldsBeginOffset,
- heap_object->address() + Map::kPointerFieldsEndOffset,
- slot_callback);
- }
- }
- } else {
- if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
- // Aborted pages require iterating using mark bits because they
- // don't have an iterable object layout before sweeping (which can
- // only happen later). Note that we can never reach an
- // aborted page through the scavenger.
- DCHECK_EQ(heap_->gc_state(), Heap::MARK_COMPACT);
- heap_->mark_compact_collector()->VisitLiveObjectsBody(page,
- &visitor);
- } else {
- heap_->mark_compact_collector()
- ->SweepOrWaitUntilSweepingCompleted(page);
- HeapObjectIterator iterator(page);
- for (HeapObject* heap_object = iterator.Next();
- heap_object != nullptr; heap_object = iterator.Next()) {
- // We iterate over objects that contain new space pointers only.
- heap_object->IterateBody(&visitor);
- }
- }
- }
- }
- }
- }
- if (callback_ != NULL) {
- (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
- }
- }
-}
-
-
-void StoreBuffer::Compact() {
+void StoreBuffer::MoveEntriesToRememberedSet() {
Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
-
if (top == start_) return;
-
- // There's no check of the limit in the loop below so we check here for
- // the worst case (compaction doesn't eliminate any pointers).
DCHECK(top <= limit_);
heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
- EnsureSpace(top - start_);
- DCHECK(may_move_store_buffer_entries_);
- // Goes through the addresses in the store buffer attempting to remove
- // duplicates. In the interest of speed this is a lossy operation. Some
- // duplicates will remain. We have two hash sets with different hash
- // functions to reduce the number of unnecessary clashes.
- hash_sets_are_empty_ = false; // Hash sets are in use.
for (Address* current = start_; current < top; current++) {
DCHECK(!heap_->code_space()->Contains(*current));
- uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
- // Shift out the last bits including any tags.
- int_addr >>= kPointerSizeLog2;
- // The upper part of an address is basically random because of ASLR and OS
- // non-determinism, so we use only the bits within a page for hashing to
- // make v8's behavior (more) deterministic.
- uintptr_t hash_addr =
- int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
- int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
- (kHashSetLength - 1));
- if (hash_set_1_[hash1] == int_addr) continue;
- uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
- hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
- hash2 &= (kHashSetLength - 1);
- if (hash_set_2_[hash2] == int_addr) continue;
- if (hash_set_1_[hash1] == 0) {
- hash_set_1_[hash1] = int_addr;
- } else if (hash_set_2_[hash2] == 0) {
- hash_set_2_[hash2] = int_addr;
- } else {
- // Rather than slowing down we just throw away some entries. This will
- // cause some duplicates to remain undetected.
- hash_set_1_[hash1] = int_addr;
- hash_set_2_[hash2] = 0;
- }
- old_buffer_is_sorted_ = false;
- old_buffer_is_filtered_ = false;
- *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
- DCHECK(old_top_ <= old_limit_);
- }
- heap_->isolate()->counters()->store_buffer_compactions()->Increment();
-}
-
-
-void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
- if (event == kStoreBufferStartScanningPagesEvent) {
- start_of_current_page_ = NULL;
- current_page_ = NULL;
- } else if (event == kStoreBufferScanningPageEvent) {
- if (current_page_ != NULL) {
-      // If this page already overflowed the store buffer during this
-      // iteration, we should wipe out the entries that have been added for it.
-      if (current_page_->scan_on_scavenge()) {
- store_buffer_->SetTop(start_of_current_page_);
- } else if (store_buffer_->Top() - start_of_current_page_ >=
- (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
-      // Did we find too many pointers in the previous page? The heuristic is
-      // that no page may take more than 1/5 of the remaining slots in the
-      // store buffer.
- current_page_->set_scan_on_scavenge(true);
- store_buffer_->SetTop(start_of_current_page_);
- } else {
- // In this case the page we scanned took a reasonable number of slots in
- // the store buffer. It has now been rehabilitated and is no longer
- // marked scan_on_scavenge.
- DCHECK(!current_page_->scan_on_scavenge());
- }
- }
- start_of_current_page_ = store_buffer_->Top();
- current_page_ = page;
- } else if (event == kStoreBufferFullEvent) {
- // The current page overflowed the store buffer again. Wipe out its entries
- // in the store buffer and mark it scan-on-scavenge again. This may happen
- // several times while scanning.
- if (current_page_ == NULL) {
- // Store Buffer overflowed while scanning promoted objects. These are not
- // in any particular page, though they are likely to be clustered by the
- // allocation routines.
- store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
- } else {
- // Store Buffer overflowed while scanning a particular old space page for
- // pointers to new space.
- DCHECK(current_page_ == page);
- DCHECK(page != NULL);
- current_page_->set_scan_on_scavenge(true);
- DCHECK(start_of_current_page_ != store_buffer_->Top());
- store_buffer_->SetTop(start_of_current_page_);
- }
- } else {
- UNREACHABLE();
+ Address addr = *current;
+ Page* page = Page::FromAnyPointerAddress(heap_, addr);
+ RememberedSet<OLD_TO_NEW>::Insert(page, addr);
}
}
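
Note: the rewritten overflow path above is small enough to restate in plain C++. The sketch below is illustrative only (the wrapper function is hypothetical); `Page::FromAnyPointerAddress` and `RememberedSet<OLD_TO_NEW>::Insert` are the actual entry points the patch uses.

```cpp
// Minimal sketch of the new overflow handling: instead of compacting into an
// "old" buffer with hash-set dedup, every recorded slot is pushed straight
// into the per-page OLD_TO_NEW remembered set.
void MoveEntriesToRememberedSetSketch(Heap* heap, Address* start, Address* top) {
  for (Address* current = start; current < top; current++) {
    Address slot = *current;                              // raw slot address
    Page* page = Page::FromAnyPointerAddress(heap, slot); // works for LO pages too
    RememberedSet<OLD_TO_NEW>::Insert(page, slot);        // per-page slot set
  }
  // The linear buffer is now empty; generated code refills it from start_.
}
```
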
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 9eeb00117b..e7e9c985eb 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -9,213 +9,72 @@
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/globals.h"
+#include "src/heap/slot-set.h"
namespace v8 {
namespace internal {
-class Page;
-class PagedSpace;
-class StoreBuffer;
-
-typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-
-// Used to implement the write barrier by collecting addresses of pointers
-// between spaces.
+// Intermediate buffer that accumulates old-to-new stores from the generated
+// code. On buffer overflow the slots are moved to the remembered set.
class StoreBuffer {
public:
explicit StoreBuffer(Heap* heap);
-
static void StoreBufferOverflow(Isolate* isolate);
-
void SetUp();
void TearDown();
- // This is used to add addresses to the store buffer non-concurrently.
- inline void Mark(Address addr);
-
- // This is used to add addresses to the store buffer when multiple threads
- // may operate on the store buffer.
- inline void MarkSynchronized(Address addr);
-
- // This is used by the heap traversal to enter the addresses into the store
- // buffer that should still be in the store buffer after GC. It enters
- // addresses directly into the old buffer because the GC starts by wiping the
- // old buffer and thereafter only visits each cell once so there is no need
- // to attempt to remove any dupes. During the first part of a GC we
- // are using the store buffer to access the old spaces and at the same time
- // we are rebuilding the store buffer using this function. There is, however
- // no issue of overwriting the buffer we are iterating over, because this
- // stage of the scavenge can only reduce the number of addresses in the store
- // buffer (some objects are promoted so pointers to them do not need to be in
- // the store buffer). The later parts of the GC scan the pages that are
- // exempt from the store buffer and process the promotion queue. These steps
- // can overflow this buffer. We check for this and on overflow we call the
- // callback set up with the StoreBufferRebuildScope object.
- inline void EnterDirectlyIntoStoreBuffer(Address addr);
-
- // Iterates over all pointers that go from old space to new space. It will
- // delete the store buffer as it starts so the callback should reenter
- // surviving old-to-new pointers into the store buffer to rebuild it.
- void IteratePointersToNewSpace(ObjectSlotCallback callback);
-
static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
static const int kStoreBufferSize = kStoreBufferOverflowBit;
static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
- static const int kOldStoreBufferLength = kStoreBufferLength * 16;
- static const int kHashSetLengthLog2 = 12;
- static const int kHashSetLength = 1 << kHashSetLengthLog2;
-
- void Compact();
-
- void GCPrologue();
- void GCEpilogue();
-
- Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
- Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
- Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
- void SetTop(Object*** top) {
- DCHECK(top >= Start());
- DCHECK(top <= Limit());
- old_top_ = reinterpret_cast<Address*>(top);
- }
-
- bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
- bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
-
- void EnsureSpace(intptr_t space_needed);
- void Verify();
-
- bool PrepareForIteration();
- void Filter(int flag);
-
- // Eliminates all stale store buffer entries from the store buffer, i.e.,
- // slots that are not part of live objects anymore. This method must be
- // called after marking, when the whole transitive closure is known and
- // must be called before sweeping when mark bits are still intact.
- void ClearInvalidStoreBufferEntries();
- void VerifyValidStoreBufferEntries();
+ void MoveEntriesToRememberedSet();
private:
Heap* heap_;
- // The store buffer is divided up into a new buffer that is constantly being
- // filled by mutator activity and an old buffer that is filled with the data
- // from the new buffer after compression.
+ // The start and the limit of the buffer that contains store slots
+ // added from the generated code.
Address* start_;
Address* limit_;
- Address* old_start_;
- Address* old_limit_;
- Address* old_top_;
- Address* old_reserved_limit_;
- base::VirtualMemory* old_virtual_memory_;
-
- bool old_buffer_is_sorted_;
- bool old_buffer_is_filtered_;
- bool during_gc_;
- // The garbage collector iterates over many pointers to new space that are not
- // handled by the store buffer. This flag indicates whether the pointers
- // found by the callbacks should be added to the store buffer or not.
- bool store_buffer_rebuilding_enabled_;
- StoreBufferCallback callback_;
- bool may_move_store_buffer_entries_;
-
base::VirtualMemory* virtual_memory_;
-
- // Two hash sets used for filtering.
- // If address is in the hash set then it is guaranteed to be in the
- // old part of the store buffer.
- uintptr_t* hash_set_1_;
- uintptr_t* hash_set_2_;
- bool hash_sets_are_empty_;
-
- // Used for synchronization of concurrent store buffer access.
- base::Mutex mutex_;
-
- void ClearFilteringHashSets();
-
- bool SpaceAvailable(intptr_t space_needed);
- void ExemptPopularPages(int prime_sample_step, int threshold);
-
- void ProcessOldToNewSlot(Address slot_address,
- ObjectSlotCallback slot_callback);
-
- void FindPointersToNewSpaceInRegion(Address start, Address end,
- ObjectSlotCallback slot_callback);
-
- void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
-
-#ifdef VERIFY_HEAP
- void VerifyPointers(LargeObjectSpace* space);
-#endif
-
- friend class DontMoveStoreBufferEntriesScope;
- friend class FindPointersToNewSpaceVisitor;
- friend class StoreBufferRebuildScope;
-};
-
-
-class StoreBufferRebuilder {
- public:
- explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
- : store_buffer_(store_buffer) {}
-
- void Callback(MemoryChunk* page, StoreBufferEvent event);
-
- private:
- StoreBuffer* store_buffer_;
-
- // We record in this variable how full the store buffer was when we started
- // iterating over the current page, finding pointers to new space. If the
- // store buffer overflows again we can exempt the page from the store buffer
- // by rewinding to this point instead of having to search the store buffer.
- Object*** start_of_current_page_;
- // The current page we are scanning in the store buffer iterator.
- MemoryChunk* current_page_;
};
-class StoreBufferRebuildScope {
+class LocalStoreBuffer BASE_EMBEDDED {
public:
- explicit StoreBufferRebuildScope(Heap* heap, StoreBuffer* store_buffer,
- StoreBufferCallback callback)
- : store_buffer_(store_buffer),
- stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
- stored_callback_(store_buffer->callback_) {
- store_buffer_->store_buffer_rebuilding_enabled_ = true;
- store_buffer_->callback_ = callback;
- (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
+ explicit LocalStoreBuffer(Heap* heap)
+ : top_(new Node(nullptr)), heap_(heap) {}
+
+ ~LocalStoreBuffer() {
+ Node* current = top_;
+ while (current != nullptr) {
+ Node* tmp = current->next;
+ delete current;
+ current = tmp;
+ }
}
- ~StoreBufferRebuildScope() {
- store_buffer_->callback_ = stored_callback_;
- store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
- }
+ inline void Record(Address addr);
+ inline void Process(StoreBuffer* store_buffer);
private:
- StoreBuffer* store_buffer_;
- bool stored_state_;
- StoreBufferCallback stored_callback_;
-};
+ static const int kBufferSize = 16 * KB;
+ struct Node : Malloced {
+ explicit Node(Node* next_node) : next(next_node), count(0) {}
-class DontMoveStoreBufferEntriesScope {
- public:
- explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
- : store_buffer_(store_buffer),
- stored_state_(store_buffer->may_move_store_buffer_entries_) {
- store_buffer_->may_move_store_buffer_entries_ = false;
- }
+ inline bool is_full() { return count == kBufferSize; }
- ~DontMoveStoreBufferEntriesScope() {
- store_buffer_->may_move_store_buffer_entries_ = stored_state_;
- }
+ Node* next;
+ Address buffer[kBufferSize];
+ int count;
+ };
- private:
- StoreBuffer* store_buffer_;
- bool stored_state_;
+ Node* top_;
+ Heap* heap_;
};
+
} // namespace internal
} // namespace v8
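
The new `LocalStoreBuffer` declared above chains fixed-size nodes so a GC worker can record slots without synchronizing on the shared buffer. `Record` and `Process` are declared `inline` with bodies outside this excerpt (in `store-buffer-inl.h`); the sketch below shows plausible bodies consistent with the declarations, under that assumption.

```cpp
// Plausible bodies for the inline members (illustrative, not quoted from the
// patch): Record appends locally, Process drains into the remembered set.
void LocalStoreBuffer::Record(Address addr) {
  if (top_->is_full()) top_ = new Node(top_);  // chain a fresh node on overflow
  top_->buffer[top_->count++] = addr;
}

void LocalStoreBuffer::Process(StoreBuffer* store_buffer) {
  for (Node* current = top_; current != nullptr; current = current->next) {
    for (int i = 0; i < current->count; i++) {
      Address addr = current->buffer[i];
      Page* page = Page::FromAnyPointerAddress(heap_, addr);
      RememberedSet<OLD_TO_NEW>::Insert(page, addr);
    }
  }
}
```
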
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index d957872cab..cb6bad8a20 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -203,10 +203,8 @@ void RelocInfo::set_target_cell(Cell* cell,
Assembler::FlushICache(isolate_, pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ cell);
}
}
@@ -268,16 +266,6 @@ void RelocInfo::WipeOut() {
}
-bool RelocInfo::IsPatchedReturnSequence() {
- return *pc_ == kCallOpcode;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- return !Assembler::IsNop(pc());
-}
-
-
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
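
The hunk above replaces the old `RecordWrite(host, NULL, cell)` call, which could not name the written slot, with `RecordWriteIntoCode(host, this, cell)`, which hands the incremental marker the `RelocInfo` itself. A rough contrast, for orientation only:

```cpp
// Before: the slot argument was NULL because a Cell target was assumed never
// to sit on an evacuation candidate, so the write could not be re-pointed.
//   incremental_marking()->RecordWrite(host, NULL, cell);
// After: the RelocInfo identifies the exact slot inside the code object, so
// writes into code are tracked uniformly with ordinary heap slots.
//   incremental_marking()->RecordWriteIntoCode(host, this, cell);
```
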
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index f120a6233e..2ac3088020 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -803,6 +803,11 @@ void Assembler::cmp(Register reg, const Operand& op) {
emit_operand(reg, op);
}
+void Assembler::cmp(const Operand& op, Register reg) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x39);
+ emit_operand(reg, op);
+}
void Assembler::cmp(const Operand& op, const Immediate& imm) {
EnsureSpace ensure_space(this);
@@ -2000,6 +2005,15 @@ void Assembler::cvtsd2si(Register dst, XMMRegister src) {
}
+void Assembler::cvtsi2ss(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF3);
+ EMIT(0x0F);
+ EMIT(0x2A);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
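
Two small assembler additions land in this file: a `cmp(Operand, Register)` overload (opcode 0x39, `CMP r/m32, r32`) and `cvtsi2ss` (encoding F3 0F 2A, scalar int32-to-float32 convert). A usage sketch, assuming an ordinary `MacroAssembler` in scope:

```cpp
// Hypothetical snippet exercising the new overloads; the wrapper function is
// illustrative, not part of the patch.
void EmitExamples(MacroAssembler* masm) {
  // CMP [esp+4], eax -- the reversed-operand form added above (opcode 0x39).
  masm->cmp(Operand(esp, 1 * kPointerSize), eax);
  // CVTSI2SS xmm0, ecx -- int32 -> float32 (F3 0F 2A), mirroring the
  // long-standing cvtsi2sd int32 -> float64 helper.
  masm->cvtsi2ss(xmm0, ecx);
}
```
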
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 0b202529f9..f517c9878e 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -184,6 +184,8 @@ DOUBLE_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
+typedef DoubleRegister Simd128Register;
+
typedef DoubleRegister XMMRegister;
enum Condition {
@@ -676,6 +678,7 @@ class Assembler : public AssemblerBase {
void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
void cmp(Register reg, const Operand& op);
void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
+ void cmp(const Operand& op, Register reg);
void cmp(const Operand& op, const Immediate& imm);
void cmp(const Operand& op, Handle<Object> handle);
@@ -960,6 +963,8 @@ class Assembler : public AssemblerBase {
}
void cvtsd2si(Register dst, XMMRegister src);
+ void cvtsi2ss(XMMRegister dst, Register src) { cvtsi2ss(dst, Operand(src)); }
+ void cvtsi2ss(XMMRegister dst, const Operand& src);
void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
void cvtsi2sd(XMMRegister dst, const Operand& src);
void cvtss2sd(XMMRegister dst, const Operand& src);
@@ -1408,7 +1413,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const SourcePosition position);
+ void RecordDeoptReason(const int reason, int raw_position);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index a2aec74162..c48c74abad 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -60,42 +60,45 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
-
-static void CallRuntimePassFunction(
- MacroAssembler* masm, Runtime::FunctionId function_id) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
+ // -- eax : argument count (preserved for callee)
// -- edx : new target (preserved for callee)
// -- edi : target function (preserved for callee)
// -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push the number of arguments to the callee.
+ __ SmiTag(eax);
+ __ push(eax);
+ // Push a copy of the target function and the new target.
+ __ push(edi);
+ __ push(edx);
+ // Function is also the parameter to the runtime call.
+ __ push(edi);
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the target function and the new target.
- __ push(edi);
- __ push(edx);
- // Function is also the parameter to the runtime call.
- __ push(edi);
-
- __ CallRuntime(function_id, 1);
- // Restore target function and new target.
- __ pop(edx);
- __ pop(edi);
-}
+ __ CallRuntime(function_id, 1);
+ __ mov(ebx, eax);
+ // Restore target function and new target.
+ __ pop(edx);
+ __ pop(edi);
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
+ __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+ __ jmp(ebx);
}
-
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
+ __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+ __ jmp(ebx);
}
-
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
@@ -108,17 +111,16 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool create_implicit_receiver) {
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
@@ -137,148 +139,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(eax);
if (create_implicit_receiver) {
- __ push(edi);
- __ push(edx);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- // Verify that the new target is a JSFunction.
- __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- // edx: new target
- __ mov(eax,
- FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- __ JumpIfSmi(eax, &rt_call);
- // edi: constructor
- // eax: initial map (if proven valid below)
- __ CmpObjectType(eax, MAP_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ cmp(edi, FieldOperand(eax, Map::kConstructorOrBackPointerOffset));
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // edi: constructor
- // eax: initial map
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- // Now allocate the JSObject on the heap.
- // edi: constructor
- // eax: initial map
- __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
- __ shl(edi, kPointerSizeLog2);
-
- __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
-
- Factory* factory = masm->isolate()->factory();
-
- // Allocated the JSObject, now initialize the fields.
- // eax: initial map
- // ebx: JSObject (not HeapObject tagged - the actual address).
- // edi: start of next object
- __ mov(Operand(ebx, JSObject::kMapOffset), eax);
- __ mov(ecx, factory->empty_fixed_array());
- __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
- __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
- __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ or_(ebx, Immediate(kHeapObjectTag));
-
- // Fill all the in-object properties with the appropriate filler.
- // ebx: JSObject (tagged)
- // ecx: First in-object property of JSObject (not tagged)
- __ mov(edx, factory->undefined_value());
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // The code below relies on these assumptions.
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- // Check if slack tracking is enabled.
- __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
- __ shr(esi, Map::ConstructionCounter::kShift);
- __ j(zero, &no_inobject_slack_tracking); // Map::kNoSlackTracking
- __ push(esi); // Save allocation count value.
- // Decrease generous allocation count.
- __ sub(FieldOperand(eax, Map::kBitField3Offset),
- Immediate(1 << Map::ConstructionCounter::kShift));
-
- // Allocate object with a slack.
- __ movzx_b(esi, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
- __ neg(esi);
- __ lea(esi, Operand(edi, esi, times_pointer_size, 0));
- // esi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(ecx, esi);
- __ Assert(less_equal,
- kUnexpectedNumberOfPreAllocatedPropertyFields);
- }
- __ InitializeFieldsWithFiller(ecx, esi, edx);
-
- // To allow truncation fill the remaining fields with one pointer
- // filler map.
- __ mov(edx, factory->one_pointer_filler_map());
- __ InitializeFieldsWithFiller(ecx, edi, edx);
-
- __ pop(esi); // Restore allocation count value before decreasing.
- __ cmp(esi, Map::kSlackTrackingCounterEnd);
- __ j(not_equal, &allocated);
-
- // Push the object to the stack, and then the initial map as
- // an argument to the runtime call.
- __ push(ebx);
- __ push(eax); // initial map
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ pop(ebx);
-
- // Continue with JSObject being successfully allocated
- // ebx: JSObject (tagged)
- __ jmp(&allocated);
-
- __ bind(&no_inobject_slack_tracking);
- }
-
- __ InitializeFieldsWithFiller(ecx, edi, edx);
-
- // Continue with JSObject being successfully allocated
- // ebx: JSObject (tagged)
- __ jmp(&allocated);
- }
-
- // Allocate the new receiver object using the runtime call.
- // edx: new target
- __ bind(&rt_call);
- int offset = kPointerSize;
-
- // Must restore esi (context) and edi (constructor) before calling
- // runtime.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(edi, Operand(esp, offset));
- __ push(edi); // constructor function
- __ push(edx); // new target
- __ CallRuntime(Runtime::kNewObject);
- __ mov(ebx, eax); // store result in ebx
-
- // New object allocated.
- // ebx: newly allocated object
- __ bind(&allocated);
+ // Allocate the new receiver object.
+ __ Push(edi);
+ __ Push(edx);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(ebx, eax);
+ __ Pop(edx);
+ __ Pop(edi);
- // Restore the parameters.
- __ pop(edx); // new.target
- __ pop(edi); // Constructor function.
+ // ----------- S t a t e -------------
+ // -- edi: constructor function
+ // -- ebx: newly allocated object
+ // -- edx: new target
+ // -----------------------------------
// Retrieve smi-tagged arguments count from the stack.
__ mov(eax, Operand(esp, 0));
@@ -331,8 +205,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
if (create_implicit_receiver) {
// If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
+ // of the receiver and use the result.
Label use_receiver, exit;
// If the result is a smi, it is *not* an object in the ECMA sense.
@@ -359,6 +232,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Leave construct frame.
}
+  // ES6 9.2.2, step 13+:
+  // Check that the result is not a Smi; a Smi here means the constructor
+  // result from a derived class is neither undefined nor an Object.
+ if (check_derived_construct) {
+ Label dont_throw;
+ __ JumpIfNotSmi(eax, &dont_throw);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+ }
+ __ bind(&dont_throw);
+ }
+
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ pop(ecx);
@@ -372,17 +258,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, true);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, true);
}
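
The new fourth parameter selects an extra epilogue for derived-class constructors (ES6 9.2.2, step 13): if such a constructor returns a Smi, i.e. something that is neither undefined nor an object, the stub throws instead of silently substituting the receiver. In rough C++ terms (a sketch, not stub code):

```cpp
// Hypothetical restatement of the check_derived_construct epilogue; the real
// stub calls Runtime::kThrowDerivedConstructorReturnedNonObject inside an
// internal frame.
Object* FinishDerivedConstruct(Object* result) {
  if (result->IsSmi()) {
    // A Smi can only mean the derived constructor returned a primitive,
    // which ES6 9.2.2 step 13 forbids.
    return ThrowDerivedConstructorReturnedNonObject();  // illustrative helper
  }
  return result;  // objects (or the original receiver) flow through unchanged
}
```
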
@@ -513,10 +405,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// o ebp: the caller's frame pointer
// o esp: stack pointer (pointing to return address)
//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-ia32.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -528,14 +418,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ push(edi); // Callee's JS function.
__ push(edx); // Callee's new target.
- // Push zero for bytecode array offset.
- __ push(Immediate(0));
-
// Get the bytecode array from the function object and load the pointer to the
// first entry into edi (InterpreterBytecodeRegister).
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+
+ Label load_debug_bytecode_array, bytecode_array_loaded;
+ __ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
+ Immediate(DebugInfo::uninitialized()));
+ __ j(not_equal, &load_debug_bytecode_array);
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+ __ bind(&bytecode_array_loaded);
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -545,6 +438,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Push bytecode array.
+ __ push(kInterpreterBytecodeArrayRegister);
+ // Push zero for bytecode array offset.
+ __ push(Immediate(0));
+
// Allocate the local and temporary register file on the stack.
{
// Load frame size from the BytecodeArray object.
@@ -578,24 +476,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
- // - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Allow simulator stop operations if FLAG_stop_at is set.
// - Code aging of the BytecodeArray object.
- // Perform stack guard check.
- {
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok);
- __ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard);
- __ pop(kInterpreterBytecodeArrayRegister);
- __ bind(&ok);
- }
-
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
@@ -604,10 +487,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
- // Since the dispatch table root might be set after builtins are generated,
- // load directly from the roots table.
- __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
- __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Push dispatch table as a stack located parameter to the bytecode handler.
DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
@@ -625,8 +506,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// and header removal.
__ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(ebx);
- __ nop(); // Ensure that return address still counts as interpreter entry
- // trampoline.
+
+ // Even though the first bytecode handler was called, we will never return.
+ __ Abort(kUnexpectedReturnFromBytecodeHandler);
+
+ // Load debug copy of the bytecode array.
+ __ bind(&load_debug_bytecode_array);
+ Register debug_info = kInterpreterBytecodeArrayRegister;
+ __ mov(debug_info, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
+ __ mov(kInterpreterBytecodeArrayRegister,
+ FieldOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ __ jmp(&bytecode_array_loaded);
}
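
The trampoline now picks between the SharedFunctionInfo's regular bytecode and a debug copy installed by the debugger, and pushes the chosen array into the frame instead of reconstructing it later. Conceptually (accessor names below are illustrative stand-ins for the raw field loads in the stub):

```cpp
// Rough equivalent of the load_debug_bytecode_array path above.
BytecodeArray* SelectBytecodeArray(SharedFunctionInfo* shared) {
  if (shared->debug_info() != DebugInfo::uninitialized()) {
    // The debugger installed a copy with breakpoints patched in; run that.
    return GetDebugBytecodeArray(shared);  // reads DebugInfo::kAbstractCodeIndex
  }
  return BytecodeArray::cast(shared->function_data());
}
```
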
@@ -671,7 +561,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- ebx : the address of the first argument to be pushed. Subsequent
@@ -694,7 +585,9 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
// Call the target.
__ Push(edx); // Re-push return address.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
}
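
The interpreter's push-args builtin now forwards a `TailCallMode` down the call chain, so `Generate_Call`, `Generate_CallBoundFunctionImpl`, and `Generate_CallFunction` (all patched below) can run `PrepareForTailCall` before dispatch. The threading, roughly:

```cpp
// How tail_call_mode flows through the hunks in this file (signatures as
// patched; the chart itself is just orientation):
//   InterpreterPushArgsAndCallImpl(masm, tail_call_mode)
//     -> builtins()->Call(ConvertReceiverMode::kAny, tail_call_mode)
//        -> Generate_Call(masm, mode, tail_call_mode)
//           -> CallFunction(mode, tail_call_mode) / CallBoundFunction(...)
//              -> PrepareForTailCall(masm, eax, ebx, ecx, edx) when kAllow
```
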
@@ -739,33 +632,16 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
}
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
-
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
-
- __ CallRuntime(Runtime::kNotifyDeoptimized);
-
- __ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
- // Tear down internal frame.
- }
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Initialize register file register.
__ mov(kInterpreterRegisterFileRegister, ebp);
__ add(kInterpreterRegisterFileRegister,
Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
// Get the bytecode array pointer from the frame.
- __ mov(ebx, Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
- __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
__ mov(kInterpreterBytecodeArrayRegister,
- FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -782,12 +658,13 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Push dispatch table as a stack located parameter to the bytecode handler -
- // overwrite the state slot (we don't use these for interpreter deopts).
- __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
- __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+ // Push dispatch table as a stack located parameter to the bytecode handler.
+ __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
- __ mov(Operand(esp, kPointerSize), ebx);
+ __ Pop(esi);
+ __ Push(ebx);
+ __ Push(esi);
// Dispatch to the target bytecode.
__ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
@@ -795,8 +672,6 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
__ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
// Get the context from the frame.
- // TODO(rmcilroy): Update interpreter frame to expect current context at the
- // context slot instead of the function context.
__ mov(kContextRegister,
Operand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
@@ -808,6 +683,32 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
}
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ // Tear down internal frame.
+ }
+
+  // Drop the state (not used for interpreter deopts), pop the accumulator
+  // value into the accumulator register, and push the PC back on top of the
+  // stack (to simulate the initial call to the bytecode handler in the
+  // interpreter entry trampoline).
+ __ Pop(ebx);
+ __ Drop(1);
+ __ Pop(kInterpreterAccumulatorRegister);
+ __ Push(ebx);
+
+ // Enter the bytecode dispatch.
+ Generate_EnterBytecodeDispatch(masm);
+}
+
+
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
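
After `Runtime::kNotifyDeoptimized` returns, the helper above massages the stack left by the deoptimizer back into the shape the dispatch loop expects before entering `Generate_EnterBytecodeDispatch`. A slot-by-slot view of the four instructions, for orientation:

```cpp
// Stack on entry (top first):         Effect of Pop/Drop/Pop/Push:
//   [ return pc  ]  -> Pop(ebx)         saved aside
//   [ state      ]  -> Drop(1)          discarded (unused for interpreter deopts)
//   [ accumulator]  -> Pop(eax)         now in kInterpreterAccumulatorRegister
//                      Push(ebx)        return pc restored on top of the stack
```
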
@@ -822,22 +723,30 @@ void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the address of the interpreter entry trampoline as a return address.
+ // This simulates the initial call to bytecode handlers in interpreter entry
+ // trampoline. The return will never actually be taken, but our stack walker
+ // uses this address to determine whether a frame is interpreted.
+ __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline());
+
+ Generate_EnterBytecodeDispatch(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
@@ -1375,6 +1284,122 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ Condition const cc = (kind == MathMaxMinKind::kMin) ? below : above;
+ Heap::RootListIndex const root_index =
+ (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+ : Heap::kMinusInfinityValueRootIndex;
+ XMMRegister const reg = (kind == MathMaxMinKind::kMin) ? xmm1 : xmm0;
+
+ // Load the accumulator with the default return value (either -Infinity or
+ // +Infinity), with the tagged value in edx and the double value in xmm0.
+ __ LoadRoot(edx, root_index);
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ Move(ecx, eax);
+
+ Label done_loop, loop;
+ __ bind(&loop);
+ {
+ // Check if all parameters done.
+ __ test(ecx, ecx);
+ __ j(zero, &done_loop);
+
+ // Load the next parameter tagged value into ebx.
+ __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
+
+    // Load the double value of the parameter into xmm1, converting the
+    // parameter to a number first via the ToNumberStub if necessary.
+ Label convert, convert_smi, convert_number, done_convert;
+ __ bind(&convert);
+ __ JumpIfSmi(ebx, &convert_smi);
+ __ JumpIfRoot(FieldOperand(ebx, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex, &convert_number);
+ {
+ // Parameter is not a Number, use the ToNumberStub to convert it.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(eax);
+ __ SmiTag(ecx);
+ __ Push(eax);
+ __ Push(ecx);
+ __ Push(edx);
+ __ mov(eax, ebx);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(ebx, eax);
+ __ Pop(edx);
+ __ Pop(ecx);
+ __ Pop(eax);
+ {
+ // Restore the double accumulator value (xmm0).
+ Label restore_smi, done_restore;
+ __ JumpIfSmi(edx, &restore_smi, Label::kNear);
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ jmp(&done_restore, Label::kNear);
+ __ bind(&restore_smi);
+ __ SmiUntag(edx);
+ __ Cvtsi2sd(xmm0, edx);
+ __ SmiTag(edx);
+ __ bind(&done_restore);
+ }
+ __ SmiUntag(ecx);
+ __ SmiUntag(eax);
+ }
+ __ jmp(&convert);
+ __ bind(&convert_number);
+ __ movsd(xmm1, FieldOperand(ebx, HeapNumber::kValueOffset));
+ __ jmp(&done_convert, Label::kNear);
+ __ bind(&convert_smi);
+ __ SmiUntag(ebx);
+ __ Cvtsi2sd(xmm1, ebx);
+ __ SmiTag(ebx);
+ __ bind(&done_convert);
+
+ // Perform the actual comparison with the accumulator value on the left hand
+ // side (xmm0) and the next parameter value on the right hand side (xmm1).
+ Label compare_equal, compare_nan, compare_swap, done_compare;
+ __ ucomisd(xmm0, xmm1);
+ __ j(parity_even, &compare_nan, Label::kNear);
+ __ j(cc, &done_compare, Label::kNear);
+ __ j(equal, &compare_equal, Label::kNear);
+
+ // Result is on the right hand side.
+ __ bind(&compare_swap);
+ __ movaps(xmm0, xmm1);
+ __ mov(edx, ebx);
+ __ jmp(&done_compare, Label::kNear);
+
+ // At least one side is NaN, which means that the result will be NaN too.
+ __ bind(&compare_nan);
+ __ LoadRoot(edx, Heap::kNanValueRootIndex);
+ __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ jmp(&done_compare, Label::kNear);
+
+ // Left and right hand side are equal, check for -0 vs. +0.
+ __ bind(&compare_equal);
+ __ movmskpd(edi, reg);
+ __ test(edi, Immediate(1));
+ __ j(not_zero, &compare_swap);
+
+ __ bind(&done_compare);
+ __ dec(ecx);
+ __ jmp(&loop);
+ }
+
+ __ bind(&done_loop);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ __ mov(eax, edx);
+ __ Ret();
+}
+
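
The loop above encodes the two IEEE-754 corner cases that make `Math.max`/`Math.min` non-trivial: unordered compares (NaN, caught via the parity flag after `ucomisd`) and the `-0`/`+0` tie, which compares equal and is disambiguated through the sign bit read by `movmskpd`. A scalar C++ restatement of one max-iteration (a sketch, not stub code):

```cpp
#include <cmath>
#include <limits>

// Scalar model of one Generate_MathMaxMin iteration for kMax (illustrative).
double MaxStep(double acc, double next) {
  if (std::isnan(acc) || std::isnan(next)) {          // parity_even branch
    return std::numeric_limits<double>::quiet_NaN();  // NaN contaminates
  }
  if (acc == next) {                                  // compare_equal branch
    return std::signbit(acc) ? next : acc;            // prefer +0 over -0
  }
  return acc > next ? acc : next;                     // cc = above
}
```
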
+// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
@@ -1472,9 +1497,8 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(ebx); // the first argument
- __ Push(edi); // constructor function
- __ Push(edx); // new target
- __ CallRuntime(Runtime::kNewObject);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
}
__ Ret();
@@ -1606,9 +1630,8 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(ebx); // the first argument
- __ Push(edi); // constructor function
- __ Push(edx); // new target
- __ CallRuntime(Runtime::kNewObject);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
}
__ Ret();
@@ -1724,9 +1747,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Try to create the list from an arguments object.
__ bind(&create_arguments);
- __ mov(ebx,
- FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ mov(ebx, FieldOperand(eax, JSArgumentsObject::kLengthOffset));
__ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
__ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
__ j(not_equal, &create_runtime);
@@ -1808,10 +1829,138 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// | ...
+// | g()'s arg M
+// | ...
+// | g()'s arg 1
+// | g()'s receiver arg
+// | g()'s caller pc
+// ------- g()'s frame: -------
+// | g()'s caller fp <- fp
+// | g()'s context
+// | function pointer: g
+// | -------------------------
+// | ...
+// | ...
+// | f()'s arg N
+// | ...
+// | f()'s arg 1
+// | f()'s receiver arg
+// | f()'s caller pc <- sp
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Comment cmnt(masm, "[ PrepareForTailCall");
+
+ // Prepare for tail call only if the debugger is not active.
+ Label done;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(masm->isolate());
+ __ movzx_b(scratch1, Operand::StaticVariable(debug_is_active));
+ __ cmp(scratch1, Immediate(0));
+ __ j(not_equal, &done, Label::kNear);
+
+ // Drop possible interpreter handler/stub frame.
+ {
+ Label no_interpreter_frame;
+ __ cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+ Immediate(Smi::FromInt(StackFrame::STUB)));
+ __ j(not_equal, &no_interpreter_frame, Label::kNear);
+ __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&no_interpreter_frame);
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+
+ // Drop arguments adaptor frame and load arguments count.
+ __ mov(ebp, scratch2);
+ __ mov(scratch1, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(scratch1);
+ __ jmp(&formal_parameter_count_loaded, Label::kNear);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ mov(scratch1, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(scratch1,
+ FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(
+ scratch1,
+ FieldOperand(scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(scratch1);
+
+ __ bind(&formal_parameter_count_loaded);
+
+ // Calculate the destination address where we will put the return address
+ // after we drop current frame.
+ Register new_sp_reg = scratch2;
+ __ sub(scratch1, args_reg);
+ __ lea(new_sp_reg, Operand(ebp, scratch1, times_pointer_size,
+ StandardFrameConstants::kCallerPCOffset));
+
+ if (FLAG_debug_code) {
+ __ cmp(esp, new_sp_reg);
+ __ Check(below, kStackAccessBelowStackPointer);
+ }
+
+ // Copy receiver and return address as well.
+ Register count_reg = scratch1;
+ __ lea(count_reg, Operand(args_reg, 2));
+
+  // Copy the return address from the caller's frame into the current frame's
+  // return-address slot so it is not trashed, and let the following loop copy
+  // it to the right place.
+ Register tmp_reg = scratch3;
+ __ mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+ __ mov(Operand(esp, 0), tmp_reg);
+
+ // Restore caller's frame pointer now as it could be overwritten by
+ // the copying loop.
+ __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+ Operand src(esp, count_reg, times_pointer_size, 0);
+ Operand dst(new_sp_reg, count_reg, times_pointer_size, 0);
+
+  // Now copy the callee arguments to the caller frame, going backwards to
+  // avoid corrupting them (the source and destination areas could overlap).
+ Label loop, entry;
+ __ jmp(&entry, Label::kNear);
+ __ bind(&loop);
+ __ dec(count_reg);
+ __ mov(tmp_reg, src);
+ __ mov(dst, tmp_reg);
+ __ bind(&entry);
+ __ cmp(count_reg, Immediate(0));
+ __ j(not_equal, &loop, Label::kNear);
+
+ // Leave current frame.
+ __ mov(esp, new_sp_reg);
+
+ __ bind(&done);
+}
+} // namespace
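
The destination stack pointer computed by `PrepareForTailCall` follows directly from the frame diagram above: each surplus caller argument frees one slot, and the saved return-address slot anchors the copy. A hedged restatement of that arithmetic (the helper name and types are illustrative; the constants are the ones the stub uses):

```cpp
// new_sp points at the slot that will hold f()'s caller pc once g()'s frame
// (and any adaptor frame) has been dropped.
Address ComputeNewSP(Address fp, intptr_t caller_param_count,
                     intptr_t callee_arg_count) {
  intptr_t surplus = caller_param_count - callee_arg_count;  // scratch1 - args_reg
  return fp + surplus * kPointerSize +
         StandardFrameConstants::kCallerPCOffset;            // lea target
}
```
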
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode) {
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSFunction)
@@ -1900,6 +2049,12 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- esi : the function context.
// -----------------------------------
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, eax, ebx, ecx, edx);
+ // Reload shared function info.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ }
+
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ SmiUntag(ebx);
@@ -2005,13 +2160,18 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(edi);
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, eax, ebx, ecx, edx);
+ }
+
// Patch the receiver to [[BoundThis]].
__ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
@@ -2029,7 +2189,8 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object).
@@ -2039,14 +2200,24 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ JumpIfSmi(edi, &non_callable);
__ bind(&non_smi);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
+ __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
RelocInfo::CODE_TARGET);
__ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
- __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
+ __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
RelocInfo::CODE_TARGET);
+
+ // Check if target has a [[Call]] internal method.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ j(zero, &non_callable);
+
__ CmpInstanceType(ecx, JS_PROXY_TYPE);
__ j(not_equal, &non_function);
+ // 0. Prepare for tail call if necessary.
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, eax, ebx, ecx, edx);
+ }
+
// 1. Runtime fallback for Proxy [[Call]].
__ PopReturnAddressTo(ecx);
__ Push(edi);
@@ -2061,15 +2232,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
__ bind(&non_function);
- // Check if target has a [[Call]] internal method.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
- __ j(zero, &non_callable, Label::kNear);
// Overwrite the original receiver with the (original) target.
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2387,14 +2555,12 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
// Load the next prototype.
__ bind(&next_prototype);
__ mov(receiver, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ test(FieldOperand(receiver, Map::kBitField3Offset),
+ Immediate(Map::HasHiddenPrototype::kMask));
+ __ j(zero, receiver_check_failed);
+
__ mov(receiver, FieldOperand(receiver, Map::kPrototypeOffset));
- // End if the prototype is null or not hidden.
- __ CompareRoot(receiver, Heap::kNullValueRootIndex);
- __ j(equal, receiver_check_failed);
__ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
- __ test(FieldOperand(scratch0, Map::kBitField3Offset),
- Immediate(Map::IsHiddenPrototype::kMask));
- __ j(zero, receiver_check_failed);
// Iterate.
__ jmp(&prototype_loop_start, Label::kNear);
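
The receiver-check loop now reads the `HasHiddenPrototype` bit out of the map's bit field 3 before dereferencing the prototype, replacing the old order of null-checking the prototype and then peeking at its map. A sketch of the revised walk (accessor names assumed from the flag tested above; `IsTemplateFor` stands in for the loop's compatibility test):

```cpp
// Illustrative loop; map->has_hidden_prototype() models testing
// Map::HasHiddenPrototype::kMask against Map::kBitField3Offset.
bool CompatibleReceiver(Map* map, Object** receiver_out) {
  while (!IsTemplateFor(map)) {
    if (!map->has_hidden_prototype()) {
      return false;                        // receiver_check_failed
    }
    Object* proto = map->prototype();      // safe: hidden implies non-null
    *receiver_out = proto;
    map = HeapObject::cast(proto)->map();
  }
  return true;
}
```
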
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 6e597e2814..2565c45d31 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -613,7 +613,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Cvtsi2sd(double_exponent, exponent);
// Returning or bailing out.
- Counters* counters = isolate()->counters();
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
@@ -624,7 +623,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&done);
__ AllocateHeapNumber(eax, scratch, base, &call_runtime);
__ movsd(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
- __ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
} else {
__ bind(&call_runtime);
@@ -644,7 +642,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ add(esp, Immediate(kDoubleSize));
__ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1);
__ ret(0);
}
}
@@ -730,456 +727,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in edx and the parameter count is in eax.
- DCHECK(edx.is(ArgumentsAccessReadDescriptor::index()));
- DCHECK(eax.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(edx, &slow, Label::kNear);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor, Label::kNear);
-
- // Check index against formal parameters count limit passed in
- // through register eax. Use unsigned comparison to get negative
- // check for free.
- __ cmp(edx, eax);
- __ j(above_equal, &slow, Label::kNear);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebp, eax, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(edx, ecx);
- __ j(above_equal, &slow, Label::kNear);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebx, ecx, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ pop(ebx); // Return address.
- __ push(edx);
- __ push(ebx);
- __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // ecx : number of parameters (tagged)
- // edx : parameters pointer
- // edi : function
- // esp[0] : return address
-
- DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &runtime, Label::kNear);
-
- // Patch the arguments.length and the parameters pointer.
- __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(edx,
- Operand(ebx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
-
- __ bind(&runtime);
- __ pop(eax); // Pop return address.
- __ push(edi); // Push function.
- __ push(edx); // Push parameters pointer.
- __ push(ecx); // Push parameter count.
- __ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // ecx : number of parameters (tagged)
- // edx : parameters pointer
- // edi : function
- // esp[0] : return address
-
- DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame, Label::kNear);
-
- // No adaptor, parameter count = argument count.
- __ mov(ebx, ecx);
- __ push(ecx);
- __ jmp(&try_allocate, Label::kNear);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ mov(ebx, ecx);
- __ push(ecx);
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
-
- // ebx = parameter count (tagged)
- // ecx = argument count (smi-tagged)
- // Compute the mapped parameter count = min(ebx, ecx) in ebx.
- __ cmp(ebx, ecx);
- __ j(less_equal, &try_allocate, Label::kNear);
- __ mov(ebx, ecx);
-
- // Save mapped parameter count and function.
- __ bind(&try_allocate);
- __ push(edi);
- __ push(ebx);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- Label no_parameter_map;
- __ test(ebx, ebx);
- __ j(zero, &no_parameter_map, Label::kNear);
- __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
- __ bind(&no_parameter_map);
-
- // 2. Backing store.
- __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ add(ebx, Immediate(Heap::kSloppyArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
-
- // eax = address of new object(s) (tagged)
- // ecx = argument count (smi-tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[4] = function
- // esp[8] = parameter count (tagged)
- // Get the arguments map from the current native context into edi.
- Label has_mapped_parameters, instantiate;
- __ mov(edi, NativeContextOperand());
- __ mov(ebx, Operand(esp, 0 * kPointerSize));
- __ test(ebx, ebx);
- __ j(not_zero, &has_mapped_parameters, Label::kNear);
- __ mov(
- edi,
- Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX)));
- __ jmp(&instantiate, Label::kNear);
-
- __ bind(&has_mapped_parameters);
- __ mov(edi, Operand(edi, Context::SlotOffset(
- Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
- __ bind(&instantiate);
-
- // eax = address of new object (tagged)
- // ebx = mapped parameter count (tagged)
- // ecx = argument count (smi-tagged)
- // edi = address of arguments map (tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[4] = function
- // esp[8] = parameter count (tagged)
- // Copy the JS object part.
- __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- masm->isolate()->factory()->empty_fixed_array());
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ mov(edi, Operand(esp, 1 * kPointerSize));
- __ AssertNotSmi(edi);
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- edi);
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(ecx);
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- ecx);
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, edi will point there, otherwise to the
- // backing store.
- __ lea(edi, Operand(eax, Heap::kSloppyArgumentsObjectSize));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
-
- // eax = address of new object (tagged)
- // ebx = mapped parameter count (tagged)
- // ecx = argument count (tagged)
- // edx = address of receiver argument
- // edi = address of parameter map or backing store (tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[4] = function
- // esp[8] = parameter count (tagged)
- // Free two registers.
- __ push(edx);
- __ push(eax);
-
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ test(ebx, ebx);
- __ j(zero, &skip_parameter_map);
-
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
- __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
- __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameters thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ push(ecx);
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ add(ebx, Operand(esp, 5 * kPointerSize));
- __ sub(ebx, eax);
- __ mov(ecx, isolate()->factory()->the_hole_value());
- __ mov(edx, edi);
- __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
- // eax = loop variable (tagged)
- // ebx = mapping index (tagged)
- // ecx = the hole value
- // edx = address of parameter map (tagged)
- // edi = address of backing store (tagged)
- // esp[0] = argument count (tagged)
- // esp[4] = address of new object (tagged)
- // esp[8] = address of receiver argument
- // esp[12] = mapped parameter count (tagged)
- // esp[16] = function
- // esp[20] = parameter count (tagged)
- __ jmp(&parameters_test, Label::kNear);
-
- __ bind(&parameters_loop);
- __ sub(eax, Immediate(Smi::FromInt(1)));
- __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
- __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
- __ add(ebx, Immediate(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ test(eax, eax);
- __ j(not_zero, &parameters_loop, Label::kNear);
- __ pop(ecx);
-
- __ bind(&skip_parameter_map);
-
- // ecx = argument count (tagged)
- // edi = address of backing store (tagged)
- // esp[0] = address of new object (tagged)
- // esp[4] = address of receiver argument
- // esp[8] = mapped parameter count (tagged)
- // esp[12] = function
- // esp[16] = parameter count (tagged)
- // Copy arguments header and remaining slots (if there are any).
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
-
- Label arguments_loop, arguments_test;
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
- __ sub(edx, ebx);
- __ jmp(&arguments_test, Label::kNear);
-
- __ bind(&arguments_loop);
- __ sub(edx, Immediate(kPointerSize));
- __ mov(eax, Operand(edx, 0));
- __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
- __ add(ebx, Immediate(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ cmp(ebx, ecx);
- __ j(less, &arguments_loop, Label::kNear);
-
- // Restore.
- __ pop(eax); // Address of arguments object.
- __ Drop(4);
-
- // Return.
- __ ret(0);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ pop(eax); // Remove saved mapped parameter count.
- __ pop(edi); // Pop saved function.
- __ pop(eax); // Remove saved parameter count.
- __ pop(eax); // Pop return address.
- __ push(edi); // Push function.
- __ push(edx); // Push parameters pointer.
- __ push(ecx); // Push parameter count.
- __ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
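GenerateNewSloppyFast sizes one allocation that holds up to three pieces: the optional parameter map, the backing store, and the arguments object itself. A sketch of the byte count computed in ebx above (constants passed in rather than assumed):

int SloppyArgumentsAllocationSize(int mapped_count, int arg_count,
                                  int fixed_array_header, int pointer_size,
                                  int arguments_object_size) {
  int size = 0;
  if (mapped_count > 0) {  // parameter map: header + context + store + slots
    size += fixed_array_header + (2 + mapped_count) * pointer_size;
  }
  size += fixed_array_header + arg_count * pointer_size;  // backing store
  size += arguments_object_size;                          // the JSObject itself
  return size;
}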
-
-
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // ecx : number of parameters (tagged)
- // edx : parameters pointer
- // edi : function
- // esp[0] : return address
-
- DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label try_allocate, runtime;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &try_allocate, Label::kNear);
-
- // Patch the arguments.length and the parameters pointer.
- __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(edx,
- Operand(ebx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
-
- // Try the new space allocation. Start out by computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ mov(eax, ecx);
- __ test(eax, eax);
- __ j(zero, &add_arguments_object, Label::kNear);
- __ lea(eax, Operand(eax, times_2, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ add(eax, Immediate(Heap::kStrictArgumentsObjectSize));
-
- // Do the allocation of both objects in one go.
- __ Allocate(eax, eax, ebx, no_reg, &runtime, TAG_OBJECT);
-
- // Get the arguments map from the current native context.
- __ mov(edi, NativeContextOperand());
- __ mov(edi, ContextOperand(edi, Context::STRICT_ARGUMENTS_MAP_INDEX));
-
- __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- masm->isolate()->factory()->empty_fixed_array());
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ AssertSmi(ecx);
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- ecx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ test(ecx, ecx);
- __ j(zero, &done, Label::kNear);
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, Heap::kStrictArgumentsObjectSize));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
-
- // Untag the length for the loop below.
- __ SmiUntag(ecx);
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
- __ add(edi, Immediate(kPointerSize));
- __ sub(edx, Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &loop);
-
- // Return.
- __ bind(&done);
- __ ret(0);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ pop(eax); // Pop return address.
- __ push(edi); // Push function.
- __ push(edx); // Push parameters pointer.
- __ push(ecx); // Push parameter count.
- __ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
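The strict-arguments copy loop walks the stack downward from the parameters pointer while filling the FixedArray upward; the receiver sits at the parameters pointer itself and is skipped. Roughly, as a C++ sketch of intent (not the stub's exact addressing):

#include <cstdint>
void CopyStrictArguments(intptr_t* elements, const intptr_t* params,
                         int count) {
  for (int i = 0; i < count; i++) {
    elements[i] = params[-1 - i];  // params[-1] is argument 0; receiver skipped
  }
}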
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // ecx : number of parameters (tagged)
- // edx : parameters pointer
- // ebx : rest parameter index (tagged)
- // esp[0] : return address
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ mov(edi, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(edi, StandardFrameConstants::kContextOffset));
- __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &runtime);
-
- // Patch the arguments.length and the parameters pointer.
- __ mov(ecx, Operand(edi, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(edx,
- Operand(edi, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
-
- __ bind(&runtime);
- __ pop(eax); // Save return address.
- __ push(ecx); // Push number of parameters.
- __ push(edx); // Push parameters pointer.
- __ push(ebx); // Push rest parameter index.
- __ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewRestParam);
-}
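Unlike the stubs above, this deleted stub has no inline allocation at all: both branches fall through to the runtime tail call, and the adaptor check merely rewrites the inputs first (its replacement, FastNewRestParameterStub below, does allocate inline). Old control flow as a comment sketch:

// if (caller is an arguments adaptor frame) {
//   count  = adaptor_frame.length;
//   params = &adaptor_frame.caller_sp[count];  // re-point at adaptor's args
// }
// return %NewRestParam(count, params, rest_index);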
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to the runtime if native RegExp is not selected at compile
// time, or if the regexp entry in generated code is turned off by a runtime switch, or
@@ -1689,16 +1236,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
__ cmp(edx, isolate()->factory()->undefined_value());
- if (is_strong(strength())) {
- // In strong mode, this comparison must throw, so call the runtime.
- __ j(equal, &runtime_call, Label::kFar);
- } else {
- Label check_for_nan;
- __ j(not_equal, &check_for_nan, Label::kNear);
- __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- __ ret(0);
- __ bind(&check_for_nan);
- }
+ Label check_for_nan;
+ __ j(not_equal, &check_for_nan, Label::kNear);
+ __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ ret(0);
+ __ bind(&check_for_nan);
}
// Test for NaN. Compare heap numbers in a general way,
@@ -1718,12 +1260,6 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call runtime on identical SIMD values since we must throw a TypeError.
__ cmpb(ecx, static_cast<uint8_t>(SIMD128_VALUE_TYPE));
__ j(equal, &runtime_call, Label::kFar);
- if (is_strong(strength())) {
- // We have already tested for smis and heap numbers, so if both
- // arguments are not strings we must proceed to the slow case.
- __ test(ecx, Immediate(kIsNotStringMask));
- __ j(not_zero, &runtime_call, Label::kFar);
- }
}
__ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
@@ -1868,7 +1404,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
- Label return_unequal;
+ Label return_unequal, undetectable;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
@@ -1877,26 +1413,32 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &runtime_call, Label::kNear);
- __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(not_zero, &undetectable, Label::kNear);
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(not_zero, &return_unequal, Label::kNear);
+
+ __ CmpInstanceType(ebx, FIRST_JS_RECEIVER_TYPE);
__ j(below, &runtime_call, Label::kNear);
- __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ebx);
+ __ CmpInstanceType(ecx, FIRST_JS_RECEIVER_TYPE);
__ j(below, &runtime_call, Label::kNear);
- // We do not bail out after this point. Both are JSObjects, and
- // they are equal if and only if both are undetectable.
- // The and of the undetectable flags is 1 if and only if they are equal.
+
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax.
+ __ ret(0); // eax, edx were pushed
+
+ __ bind(&undetectable);
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
__ j(zero, &return_unequal, Label::kNear);
- __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal, Label::kNear);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
__ Move(eax, Immediate(EQUAL));
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in eax,
- // or return equal if we fell through to here.
- __ ret(0); // rax, rdx were pushed
+ __ ret(0); // eax, edx were pushed
}
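The rewritten fast path decides non-strict heap-object equality as sketched below: undetectable objects (the document.all special case) compare equal only to each other, and two distinct JSReceivers are simply unequal. A hedged outline of the branch structure above:

enum class Cmp { kEqual, kUnequal, kRuntime };
// Flags mirror the Map::kIsUndetectable bit and the JSReceiver type checks.
Cmp NonStrictHeapObjectEquals(bool lhs_undetectable, bool rhs_undetectable,
                              bool lhs_is_receiver, bool rhs_is_receiver) {
  if (lhs_undetectable || rhs_undetectable) {
    return (lhs_undetectable && rhs_undetectable) ? Cmp::kEqual : Cmp::kUnequal;
  }
  if (!lhs_is_receiver || !rhs_is_receiver) return Cmp::kRuntime;
  return Cmp::kUnequal;  // identical pointers were already ruled out earlier
}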
__ bind(&runtime_call);
@@ -1917,8 +1459,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
- : Runtime::kCompare);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -2147,7 +1688,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&call_function);
__ Set(eax, argc);
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+ tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
@@ -2186,7 +1728,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&call);
__ Set(eax, argc);
- __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2303,16 +1845,22 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ // Reserve space on the stack for the three arguments passed to the call. If
+ // the result size is greater than what can be returned in registers, also
+ // reserve space for the hidden argument pointing at the result location, and
+ // for the result itself.
+ int arg_stack_space = result_size() < 3 ? 3 : 4 + result_size();
+
// Enter the exit frame that transitions from JavaScript to C++.
if (argv_in_register()) {
DCHECK(!save_doubles());
- __ EnterApiExitFrame(3);
+ __ EnterApiExitFrame(arg_stack_space);
// Move argc and argv into the correct registers.
__ mov(esi, ecx);
__ mov(edi, eax);
} else {
- __ EnterExitFrame(save_doubles());
+ __ EnterExitFrame(arg_stack_space, save_doubles());
}
// ebx: pointer to C function (C callee-saved)
@@ -2327,14 +1875,36 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ CheckStackAlignment();
}
-
// Call C function.
- __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
- __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
+ if (result_size() <= 2) {
+ __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
+ __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ } else {
+ DCHECK_EQ(3, result_size());
+ // Pass a pointer to the result location as the first argument.
+ __ lea(eax, Operand(esp, 4 * kPointerSize));
+ __ mov(Operand(esp, 0 * kPointerSize), eax);
+ __ mov(Operand(esp, 1 * kPointerSize), edi); // argc.
+ __ mov(Operand(esp, 2 * kPointerSize), esi); // argv.
+ __ mov(Operand(esp, 3 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ }
__ call(ebx);
- // Result is in eax or edx:eax - do not destroy these registers!
+
+ if (result_size() > 2) {
+ DCHECK_EQ(3, result_size());
+#ifndef _WIN32
+ // Restore the "hidden" argument on the stack which was popped by the callee.
+ __ sub(esp, Immediate(kPointerSize));
+#endif
+ // Read the result values from the stack; they are stored above the arguments.
+ __ mov(kReturnRegister0, Operand(esp, 4 * kPointerSize));
+ __ mov(kReturnRegister1, Operand(esp, 5 * kPointerSize));
+ __ mov(kReturnRegister2, Operand(esp, 6 * kPointerSize));
+ }
+ // Result is in eax, edx:eax or edi:edx:eax - do not destroy these registers!
// Check result for exception sentinel.
Label exception_returned;
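The three-word result path leans on the ia32 struct-return convention: the stub passes a hidden pointer to a stack slot as the first argument, and on non-Windows ABIs the callee pops that hidden word, hence the esp fixup after the call. Assumed C-side shape (a sketch, not the declared runtime signature):

struct ObjectTriple { void* first; void* second; void* third; };
// Effectively called as if it were:
//   ObjectTriple runtime_fn(int argc, void** argv, void* isolate);
// which the System V ia32 ABI lowers to a hidden result pointer in arg 0
// that the callee pops with `ret 4`.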
@@ -3130,6 +2700,42 @@ void ToStringStub::Generate(MacroAssembler* masm) {
}
+void ToNameStub::Generate(MacroAssembler* masm) {
+ // The ToName stub takes one argument in eax.
+ Label is_number;
+ __ JumpIfSmi(eax, &is_number, Label::kNear);
+
+ Label not_name;
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ CmpObjectType(eax, LAST_NAME_TYPE, edi);
+ // eax: receiver
+ // edi: receiver map
+ __ j(above, &not_name, Label::kNear);
+ __ Ret();
+ __ bind(&not_name);
+
+ Label not_heap_number;
+ __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &not_heap_number, Label::kNear);
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ CmpInstanceType(edi, ODDBALL_TYPE);
+ __ j(not_equal, &not_oddball, Label::kNear);
+ __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
+ __ Ret();
+ __ bind(&not_oddball);
+
+ __ pop(ecx); // Pop return address.
+ __ push(eax); // Push argument.
+ __ push(ecx); // Push return address.
+ __ TailCallRuntime(Runtime::kToName);
+}
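The new ToName stub dispatches on the argument's type before deferring to the runtime; its order of checks reads roughly as follows (comment sketch, helper names assumed):

// ToName(x):
//   if (IsSmi(x))        return NumberToString(x);  // tail-calls the stub
//   if (IsName(x))       return x;                  // strings and symbols
//   if (IsHeapNumber(x)) return NumberToString(x);
//   if (IsOddball(x))    return x.to_string;        // cached "true", "null", ...
//   return %ToName(x);                              // runtime fallback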
+
+
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -3333,21 +2939,17 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ JumpIfNotRoot(ecx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
__ JumpIfNotRoot(ebx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
- if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
- } else {
- if (!Token::IsEqualityOp(op())) {
- __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
- __ AssertSmi(eax);
- __ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
- __ AssertSmi(edx);
- __ push(eax);
- __ mov(eax, edx);
- __ pop(edx);
- }
- __ sub(eax, edx);
- __ Ret();
+ if (!Token::IsEqualityOp(op())) {
+ __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
+ __ AssertSmi(eax);
+ __ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
+ __ AssertSmi(edx);
+ __ push(eax);
+ __ mov(eax, edx);
+ __ pop(edx);
}
+ __ sub(eax, edx);
+ __ Ret();
__ bind(&miss);
GenerateMiss(masm);
@@ -3437,7 +3039,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -3680,8 +3282,6 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
if (Token::IsEqualityOp(op())) {
__ sub(eax, edx);
__ ret(0);
- } else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
__ PopReturnAddressTo(ecx);
__ Push(edx);
@@ -3985,11 +3585,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.scratch0(),
&dont_need_remembered_set);
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- not_zero,
- &dont_need_remembered_set);
+ __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+ &dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
@@ -5133,6 +4730,644 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edi : target
+ // -- edx : new target
+ // -- esi : context
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ AssertFunction(edi);
+ __ AssertReceiver(edx);
+
+ // Verify that the new target is a JSFunction.
+ Label new_object;
+ __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &new_object);
+
+ // Load the initial map and verify that it's in fact a map.
+ __ mov(ecx, FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(ecx, &new_object);
+ __ CmpObjectType(ecx, MAP_TYPE, ebx);
+ __ j(not_equal, &new_object);
+
+ // Fall back to runtime if the target differs from the new target's
+ // initial map constructor.
+ __ cmp(edi, FieldOperand(ecx, Map::kConstructorOrBackPointerOffset));
+ __ j(not_equal, &new_object);
+
+ // Allocate the JSObject on the heap.
+ Label allocate, done_allocate;
+ __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
+ __ lea(ebx, Operand(ebx, times_pointer_size, 0));
+ __ Allocate(ebx, eax, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
+ __ bind(&done_allocate);
+
+ // Initialize the JSObject fields.
+ __ mov(Operand(eax, JSObject::kMapOffset), ecx);
+ __ mov(Operand(eax, JSObject::kPropertiesOffset),
+ masm->isolate()->factory()->empty_fixed_array());
+ __ mov(Operand(eax, JSObject::kElementsOffset),
+ masm->isolate()->factory()->empty_fixed_array());
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ lea(ebx, Operand(eax, JSObject::kHeaderSize));
+
+ // ----------- S t a t e -------------
+ // -- eax : result (untagged)
+ // -- ebx : result fields (untagged)
+ // -- edi : result end (untagged)
+ // -- ecx : initial map
+ // -- esi : context
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Perform in-object slack tracking if requested.
+ Label slack_tracking;
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ __ test(FieldOperand(ecx, Map::kBitField3Offset),
+ Immediate(Map::ConstructionCounter::kMask));
+ __ j(not_zero, &slack_tracking, Label::kNear);
+ {
+ // Initialize all in-object fields with undefined.
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ InitializeFieldsWithFiller(ebx, edi, edx);
+
+ // Add the object tag to make the JSObject real.
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ inc(eax);
+ __ Ret();
+ }
+ __ bind(&slack_tracking);
+ {
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ __ sub(FieldOperand(ecx, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCounter::kShift));
+
+ // Initialize the in-object fields with undefined.
+ __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
+ __ neg(edx);
+ __ lea(edx, Operand(edi, edx, times_pointer_size, 0));
+ __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ InitializeFieldsWithFiller(ebx, edx, edi);
+
+ // Initialize the remaining (reserved) fields with one pointer filler map.
+ __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
+ __ lea(edx, Operand(ebx, edx, times_pointer_size, 0));
+ __ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(ebx, edx, edi);
+
+ // Add the object tag to make the JSObject real.
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ inc(eax);
+
+ // Check if we can finalize the instance size.
+ Label finalize;
+ STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+ __ test(FieldOperand(ecx, Map::kBitField3Offset),
+ Immediate(Map::ConstructionCounter::kMask));
+ __ j(zero, &finalize, Label::kNear);
+ __ Ret();
+
+ // Finalize the instance size.
+ __ bind(&finalize);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(eax);
+ __ Push(ecx);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(eax);
+ }
+ __ Ret();
+ }
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(ebx);
+ __ Push(ecx);
+ __ Push(ebx);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(ecx);
+ }
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ dec(eax);
+ __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
+ __ lea(edi, Operand(eax, ebx, times_pointer_size, 0));
+ __ jmp(&done_allocate);
+
+ // Fall back to %NewObject.
+ __ bind(&new_object);
+ __ PopReturnAddressTo(ecx);
+ __ Push(edi);
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ TailCallRuntime(Runtime::kNewObject);
+}
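FastNewObjectStub performs in-object slack tracking inline. A hedged sketch of the policy encoded above (field names mirror the Map accessors used by the stub):

struct MapSketch {
  int construction_counter;    // Map::ConstructionCounter bits of bit_field3
  int instance_size;           // generous size while tracking is active
  int unused_property_fields;  // trailing in-object slack
};
// While construction_counter != 0: initialize used fields with undefined,
// fill the trailing slack with the one-pointer filler map so the GC can walk
// it, and decrement the counter; once it reaches zero the stub calls
// Runtime::kFinalizeInstanceSize to shrink future instances.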
+
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edi : function
+ // -- esi : context
+ // -- ebp : frame pointer
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ AssertFunction(edi);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make edx point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ mov(edx, ebp);
+ __ jmp(&loop_entry, Label::kNear);
+ __ bind(&loop);
+ __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+ __ j(not_equal, &loop);
+ }
+
+ // Check if we have rest parameters (only possible if we have an
+ // arguments adaptor frame below the function frame).
+ Label no_rest_parameters;
+ __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &no_rest_parameters, Label::kNear);
+
+ // Check if the arguments adaptor frame contains more arguments than
+ // specified by the function's internal formal parameter count.
+ Label rest_parameters;
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sub(eax,
+ FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ j(greater, &rest_parameters);
+
+ // Return an empty rest parameter array.
+ __ bind(&no_rest_parameters);
+ {
+ // ----------- S t a t e -------------
+ // -- esi : context
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Allocate an empty rest parameter array.
+ Label allocate, done_allocate;
+ __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Set up the rest parameter array in eax.
+ __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
+ __ mov(FieldOperand(eax, JSArray::kMapOffset), ecx);
+ __ mov(ecx, isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSArray::kPropertiesOffset), ecx);
+ __ mov(FieldOperand(eax, JSArray::kElementsOffset), ecx);
+ __ mov(FieldOperand(eax, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(0)));
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(Smi::FromInt(JSArray::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ }
+ __ jmp(&done_allocate);
+ }
+
+ __ bind(&rest_parameters);
+ {
+ // Compute the pointer to the first rest parameter (skipping the receiver).
+ __ lea(ebx,
+ Operand(ebx, eax, times_half_pointer_size,
+ StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+
+ // ----------- S t a t e -------------
+ // -- esi : context
+ // -- eax : number of rest parameters (tagged)
+ // -- ebx : pointer to the first rest parameter
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Allocate space for the rest parameter array plus the backing store.
+ Label allocate, done_allocate;
+ __ lea(ecx, Operand(eax, times_half_pointer_size,
+ JSArray::kSize + FixedArray::kHeaderSize));
+ __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Set up the elements array in edx.
+ __ mov(FieldOperand(edx, FixedArray::kMapOffset),
+ isolate()->factory()->fixed_array_map());
+ __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
+ {
+ Label loop, done_loop;
+ __ Move(ecx, Smi::FromInt(0));
+ __ bind(&loop);
+ __ cmp(ecx, eax);
+ __ j(equal, &done_loop, Label::kNear);
+ __ mov(edi, Operand(ebx, 0 * kPointerSize));
+ __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ edi);
+ __ sub(ebx, Immediate(1 * kPointerSize));
+ __ add(ecx, Immediate(Smi::FromInt(1)));
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Set up the rest parameter array in edi.
+ __ lea(edi,
+ Operand(edx, eax, times_half_pointer_size, FixedArray::kHeaderSize));
+ __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
+ __ mov(FieldOperand(edi, JSArray::kMapOffset), ecx);
+ __ mov(FieldOperand(edi, JSArray::kPropertiesOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(edi, JSArray::kElementsOffset), edx);
+ __ mov(FieldOperand(edi, JSArray::kLengthOffset), eax);
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ mov(eax, edi);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(ecx);
+ __ Push(eax);
+ __ Push(ebx);
+ __ Push(ecx);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ mov(edx, eax);
+ __ Pop(ebx);
+ __ Pop(eax);
+ }
+ __ jmp(&done_allocate);
+ }
+}
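The rest-parameter count is just the surplus of actual over formal arguments; the stub computes it on smi-tagged values, which works because tagging is linear. Untagged sketch:

int RestParameterCount(int actual_args, int formal_params) {
  int surplus = actual_args - formal_params;
  return surplus > 0 ? surplus : 0;  // no adaptor frame or no surplus => empty
}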
+
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edi : function
+ // -- esi : context
+ // -- ebp : frame pointer
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ AssertFunction(edi);
+
+ // TODO(bmeurer): Clean up to match the FastNewStrictArgumentsStub.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx,
+ FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ lea(edx, Operand(ebp, ecx, times_half_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+
+ // ecx : number of parameters (tagged)
+ // edx : parameters pointer
+ // edi : function
+ // esp[0] : return address
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor_frame, Label::kNear);
+
+ // No adaptor, parameter count = argument count.
+ __ mov(ebx, ecx);
+ __ push(ecx);
+ __ jmp(&try_allocate, Label::kNear);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ mov(ebx, ecx);
+ __ push(ecx);
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx, Operand(edx, ecx, times_2,
+ StandardFrameConstants::kCallerSPOffset));
+
+ // ebx = parameter count (tagged)
+ // ecx = argument count (smi-tagged)
+ // Compute the mapped parameter count = min(ebx, ecx) in ebx.
+ __ cmp(ebx, ecx);
+ __ j(less_equal, &try_allocate, Label::kNear);
+ __ mov(ebx, ecx);
+
+ // Save mapped parameter count and function.
+ __ bind(&try_allocate);
+ __ push(edi);
+ __ push(ebx);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ Label no_parameter_map;
+ __ test(ebx, ebx);
+ __ j(zero, &no_parameter_map, Label::kNear);
+ __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
+ __ bind(&no_parameter_map);
+
+ // 2. Backing store.
+ __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ add(ebx, Immediate(JSSloppyArgumentsObject::kSize));
+
+ // Do the allocation of all three objects in one go.
+ __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
+
+ // eax = address of new object(s) (tagged)
+ // ecx = argument count (smi-tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[4] = function
+ // esp[8] = parameter count (tagged)
+ // Get the arguments map from the current native context into edi.
+ Label has_mapped_parameters, instantiate;
+ __ mov(edi, NativeContextOperand());
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ test(ebx, ebx);
+ __ j(not_zero, &has_mapped_parameters, Label::kNear);
+ __ mov(
+ edi,
+ Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX)));
+ __ jmp(&instantiate, Label::kNear);
+
+ __ bind(&has_mapped_parameters);
+ __ mov(edi, Operand(edi, Context::SlotOffset(
+ Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
+ __ bind(&instantiate);
+
+ // eax = address of new object (tagged)
+ // ebx = mapped parameter count (tagged)
+ // ecx = argument count (smi-tagged)
+ // edi = address of arguments map (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[4] = function
+ // esp[8] = parameter count (tagged)
+ // Copy the JS object part.
+ __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+ masm->isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+ masm->isolate()->factory()->empty_fixed_array());
+
+ // Set up the callee in-object property.
+ STATIC_ASSERT(JSSloppyArgumentsObject::kCalleeIndex == 1);
+ __ mov(edi, Operand(esp, 1 * kPointerSize));
+ __ AssertNotSmi(edi);
+ __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kCalleeOffset), edi);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(ecx);
+ __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kLengthOffset), ecx);
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, edi will point there, otherwise to the
+ // backing store.
+ __ lea(edi, Operand(eax, JSSloppyArgumentsObject::kSize));
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+
+ // eax = address of new object (tagged)
+ // ebx = mapped parameter count (tagged)
+ // ecx = argument count (tagged)
+ // edx = address of receiver argument
+ // edi = address of parameter map or backing store (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[4] = function
+ // esp[8] = parameter count (tagged)
+ // Free two registers.
+ __ push(edx);
+ __ push(eax);
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ test(ebx, ebx);
+ __ j(zero, &skip_parameter_map);
+
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
+ __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
+ __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ push(ecx);
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ add(ebx, Operand(esp, 5 * kPointerSize));
+ __ sub(ebx, eax);
+ __ mov(ecx, isolate()->factory()->the_hole_value());
+ __ mov(edx, edi);
+ __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
+ // eax = loop variable (tagged)
+ // ebx = mapping index (tagged)
+ // ecx = the hole value
+ // edx = address of parameter map (tagged)
+ // edi = address of backing store (tagged)
+ // esp[0] = argument count (tagged)
+ // esp[4] = address of new object (tagged)
+ // esp[8] = address of receiver argument
+ // esp[12] = mapped parameter count (tagged)
+ // esp[16] = function
+ // esp[20] = parameter count (tagged)
+ __ jmp(&parameters_test, Label::kNear);
+
+ __ bind(&parameters_loop);
+ __ sub(eax, Immediate(Smi::FromInt(1)));
+ __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
+ __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
+ __ add(ebx, Immediate(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ test(eax, eax);
+ __ j(not_zero, &parameters_loop, Label::kNear);
+ __ pop(ecx);
+
+ __ bind(&skip_parameter_map);
+
+ // ecx = argument count (tagged)
+ // edi = address of backing store (tagged)
+ // esp[0] = address of new object (tagged)
+ // esp[4] = address of receiver argument
+ // esp[8] = mapped parameter count (tagged)
+ // esp[12] = function
+ // esp[16] = parameter count (tagged)
+ // Copy arguments header and remaining slots (if there are any).
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(isolate()->factory()->fixed_array_map()));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+
+ Label arguments_loop, arguments_test;
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
+ __ sub(edx, ebx);
+ __ jmp(&arguments_test, Label::kNear);
+
+ __ bind(&arguments_loop);
+ __ sub(edx, Immediate(kPointerSize));
+ __ mov(eax, Operand(edx, 0));
+ __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
+ __ add(ebx, Immediate(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ cmp(ebx, ecx);
+ __ j(less, &arguments_loop, Label::kNear);
+
+ // Restore.
+ __ pop(eax); // Address of arguments object.
+ __ Drop(4);
+
+ // Return.
+ __ ret(0);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ pop(eax); // Remove saved mapped parameter count.
+ __ pop(edi); // Pop saved function.
+ __ pop(eax); // Remove saved parameter count.
+ __ pop(eax); // Pop return address.
+ __ push(edi); // Push function.
+ __ push(edx); // Push parameters pointer.
+ __ push(ecx); // Push parameter count.
+ __ push(eax); // Push return address.
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
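The parameter map built above is what makes sloppy arguments alias their formal parameters: slot i holds either a smi context index (mapped) or the hole (read the backing store instead). A sketch of the read path this layout implies (hypothetical helper, not a V8 API):

#include <cstdint>
// Layout per the code above: [context, backing_store, index_0, index_1, ...].
intptr_t SloppyArgLoad(const intptr_t* parameter_map, int i, int mapped_count,
                       intptr_t the_hole, const intptr_t* context,
                       const intptr_t* backing_store) {
  if (i < mapped_count && parameter_map[2 + i] != the_hole) {
    int slot = static_cast<int>(parameter_map[2 + i] >> 1);  // untag the smi
    return context[slot];  // aliased: reads (and writes) hit the context
  }
  return backing_store[i];  // unmapped: a plain element
}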
+
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edi : function
+ // -- esi : context
+ // -- ebp : frame pointer
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ AssertFunction(edi);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make edx point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ mov(edx, ebp);
+ __ jmp(&loop_entry, Label::kNear);
+ __ bind(&loop);
+ __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+ __ j(not_equal, &loop);
+ }
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &arguments_adaptor, Label::kNear);
+ {
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax,
+ FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ lea(ebx,
+ Operand(edx, eax, times_half_pointer_size,
+ StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+ }
+ __ jmp(&arguments_done, Label::kNear);
+ __ bind(&arguments_adaptor);
+ {
+ __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(ebx,
+ Operand(ebx, eax, times_half_pointer_size,
+ StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+ }
+ __ bind(&arguments_done);
+
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments (tagged)
+ // -- ebx : pointer to the first argument
+ // -- esi : context
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Allocate space for the strict arguments object plus the backing store.
+ Label allocate, done_allocate;
+ __ lea(ecx,
+ Operand(eax, times_half_pointer_size,
+ JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Set up the elements array in edx.
+ __ mov(FieldOperand(edx, FixedArray::kMapOffset),
+ isolate()->factory()->fixed_array_map());
+ __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
+ {
+ Label loop, done_loop;
+ __ Move(ecx, Smi::FromInt(0));
+ __ bind(&loop);
+ __ cmp(ecx, eax);
+ __ j(equal, &done_loop, Label::kNear);
+ __ mov(edi, Operand(ebx, 0 * kPointerSize));
+ __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ edi);
+ __ sub(ebx, Immediate(1 * kPointerSize));
+ __ add(ecx, Immediate(Smi::FromInt(1)));
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Set up the strict arguments object in edi.
+ __ lea(edi,
+ Operand(edx, eax, times_half_pointer_size, FixedArray::kHeaderSize));
+ __ LoadGlobalFunction(Context::STRICT_ARGUMENTS_MAP_INDEX, ecx);
+ __ mov(FieldOperand(edi, JSStrictArgumentsObject::kMapOffset), ecx);
+ __ mov(FieldOperand(edi, JSStrictArgumentsObject::kPropertiesOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(edi, JSStrictArgumentsObject::kElementsOffset), edx);
+ __ mov(FieldOperand(edi, JSStrictArgumentsObject::kLengthOffset), eax);
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ __ mov(eax, edi);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(ecx);
+ __ Push(eax);
+ __ Push(ebx);
+ __ Push(ecx);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ mov(edx, eax);
+ __ Pop(ebx);
+ __ Pop(eax);
+ }
+ __ jmp(&done_allocate);
+}
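A recurring trick in these stubs: a smi-tagged count already carries a factor of two, so scaling it with times_half_pointer_size yields a byte count with no untagging step. Sketch of the size computed in ecx above:

// tagged_count == 2 * n on ia32, so tagged_count * (kPointerSize / 2) == n * 4.
int StrictArgumentsAllocationSize(int tagged_count, int js_object_size,
                                  int fixed_array_header) {
  return js_object_size + fixed_array_header + tagged_count * 2;
}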
+
+
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context_reg = esi;
Register slot_reg = ebx;
@@ -5470,11 +5705,10 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ jmp(&leave_exit_frame);
}
-
static void CallApiFunctionStubHelper(MacroAssembler* masm,
const ParameterCount& argc,
bool return_first_arg,
- bool call_data_undefined) {
+ bool call_data_undefined, bool is_lazy) {
// ----------- S t a t e -------------
// -- edi : callee
// -- ebx : call_data
@@ -5548,8 +5782,10 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// push return address
__ push(return_address);
- // load context from callee
- __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+ if (!is_lazy) {
+ // load context from callee
+ __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+ }
// API function gets reference to the v8::Arguments. If CPU profiler
// is enabled wrapper function will be called and we need to pass
@@ -5621,7 +5857,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
bool call_data_undefined = this->call_data_undefined();
CallApiFunctionStubHelper(masm, ParameterCount(eax), false,
- call_data_undefined);
+ call_data_undefined, false);
}
@@ -5629,45 +5865,58 @@ void CallApiAccessorStub::Generate(MacroAssembler* masm) {
bool is_store = this->is_store();
int argc = this->argc();
bool call_data_undefined = this->call_data_undefined();
+ bool is_lazy = this->is_lazy();
CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined);
+ call_data_undefined, is_lazy);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- esp[0] : return address
+ // -- esp[4] : name
+ // -- esp[8 .. (8 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
// -- ...
- // -- edx : api_function_address
+ // -- edx : api_function_address
// -----------------------------------
DCHECK(edx.is(ApiGetterDescriptor::function_address()));
- // array for v8::Arguments::values_, handler for name and pointer
- // to the values (it considered as smi in GC).
- const int kStackSpace = PropertyCallbackArguments::kArgsLength + 2;
- // Allocate space for an optional callback address parameter in case
- // CPU profiler is active.
- const int kApiArgc = 2 + 1;
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Allocate v8::PropertyCallbackInfo object, arguments for callback and
+ // space for optional callback address parameter (in case CPU profiler is
+ // active) in non-GCed stack space.
+ const int kApiArgc = 3 + 1;
Register api_function_address = edx;
Register scratch = ebx;
- // load address of name
- __ lea(scratch, Operand(esp, 1 * kPointerSize));
+ // Load the address of the v8::PropertyCallbackInfo::args_ array.
+ __ lea(scratch, Operand(esp, 2 * kPointerSize));
PrepareCallApiFunction(masm, kApiArgc);
+ // Create a v8::PropertyCallbackInfo object on the stack and initialize
+ // its args_ field.
+ Operand info_object = ApiParameterOperand(3);
+ __ mov(info_object, scratch);
+
+ __ sub(scratch, Immediate(kPointerSize));
__ mov(ApiParameterOperand(0), scratch); // name.
- __ add(scratch, Immediate(kPointerSize));
+ __ lea(scratch, info_object);
__ mov(ApiParameterOperand(1), scratch); // arguments pointer.
+ // Reserve space for optional callback address parameter.
+ Operand thunk_last_arg = ApiParameterOperand(2);
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+ // +3 skips the prologue, the return address and the name handle.
+ Operand return_value_operand(
+ ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- ApiParameterOperand(2), kStackSpace, nullptr,
- Operand(ebp, 7 * kPointerSize), NULL);
+ thunk_last_arg, kStackUnwindSpace, nullptr,
+ return_value_operand, NULL);
}
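The reworked getter stub now materializes a real v8::PropertyCallbackInfo on the stack instead of handing the callback raw pointers. Assumed picture after PrepareCallApiFunction(kApiArgc == 4), per the stores above (a sketch of the layout, not authoritative):

// esp[0]                  return address
// esp[4]                  name handle (becomes the Local<Name> argument)
// esp[8] ..               v8::PropertyCallbackInfo::args_, kArgsLength words
// ApiParameterOperand(0)  &name (== args_ address minus one word)
// ApiParameterOperand(1)  &info, pointing at slot 3 below
// ApiParameterOperand(2)  reserved for the profiler thunk's extra argument
// ApiParameterOperand(3)  the info object itself: its single args_ field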
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index 121d12fe74..fc813f50c1 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -274,32 +274,12 @@ class RecordWriteStub: public PlatformCodeStub {
// registers are eax, ecx and edx. The three scratch registers (incl. ecx)
// will be restored by other means so we don't bother pushing them here.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
- if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
- if (mode == kSaveFPRegs) {
- masm->sub(esp,
- Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
- // Save all XMM registers except XMM0.
- for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
- XMMRegister reg = XMMRegister::from_code(i);
- masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
- }
- }
+ masm->PushCallerSaved(mode, ecx, scratch0_, scratch1_);
}
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
+ inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- // Restore all XMM registers except XMM0.
- for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
- XMMRegister reg = XMMRegister::from_code(i);
- masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
- }
- masm->add(esp,
- Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
- }
- if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
- if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
+ masm->PopCallerSaved(mode, ecx, scratch0_, scratch1_);
}
inline Register object() { return object_; }
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index efe6476203..a3756ae443 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -169,27 +169,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers ebp and esp are set to the correct values though.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
- }
-}
-
-
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
intptr_t handler =
@@ -207,10 +186,8 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
- int parameter_count =
- function->shared()->internal_formal_parameter_count() + 1;
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
+ int parameter_count = shared->internal_formal_parameter_count() + 1;
unsigned input_frame_size = input_->GetFrameSize();
unsigned alignment_state_offset =
input_frame_size - parameter_count * kPointerSize -
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 5a43280659..b11ff97752 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -28,32 +28,30 @@ struct ByteMnemonic {
OperandOrder op_order_;
};
-
static const ByteMnemonic two_operands_instr[] = {
- {0x01, "add", OPER_REG_OP_ORDER},
- {0x03, "add", REG_OPER_OP_ORDER},
- {0x09, "or", OPER_REG_OP_ORDER},
- {0x0B, "or", REG_OPER_OP_ORDER},
- {0x1B, "sbb", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER},
- {0x23, "and", REG_OPER_OP_ORDER},
- {0x29, "sub", OPER_REG_OP_ORDER},
- {0x2A, "subb", REG_OPER_OP_ORDER},
- {0x2B, "sub", REG_OPER_OP_ORDER},
- {0x31, "xor", OPER_REG_OP_ORDER},
- {0x33, "xor", REG_OPER_OP_ORDER},
- {0x38, "cmpb", OPER_REG_OP_ORDER},
- {0x3A, "cmpb", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER},
- {0x84, "test_b", REG_OPER_OP_ORDER},
- {0x85, "test", REG_OPER_OP_ORDER},
- {0x87, "xchg", REG_OPER_OP_ORDER},
- {0x8A, "mov_b", REG_OPER_OP_ORDER},
- {0x8B, "mov", REG_OPER_OP_ORDER},
- {0x8D, "lea", REG_OPER_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
+ {0x01, "add", OPER_REG_OP_ORDER},
+ {0x03, "add", REG_OPER_OP_ORDER},
+ {0x09, "or", OPER_REG_OP_ORDER},
+ {0x0B, "or", REG_OPER_OP_ORDER},
+ {0x1B, "sbb", REG_OPER_OP_ORDER},
+ {0x21, "and", OPER_REG_OP_ORDER},
+ {0x23, "and", REG_OPER_OP_ORDER},
+ {0x29, "sub", OPER_REG_OP_ORDER},
+ {0x2A, "subb", REG_OPER_OP_ORDER},
+ {0x2B, "sub", REG_OPER_OP_ORDER},
+ {0x31, "xor", OPER_REG_OP_ORDER},
+ {0x33, "xor", REG_OPER_OP_ORDER},
+ {0x38, "cmpb", OPER_REG_OP_ORDER},
+ {0x39, "cmp", OPER_REG_OP_ORDER},
+ {0x3A, "cmpb", REG_OPER_OP_ORDER},
+ {0x3B, "cmp", REG_OPER_OP_ORDER},
+ {0x84, "test_b", REG_OPER_OP_ORDER},
+ {0x85, "test", REG_OPER_OP_ORDER},
+ {0x87, "xchg", REG_OPER_OP_ORDER},
+ {0x8A, "mov_b", REG_OPER_OP_ORDER},
+ {0x8B, "mov", REG_OPER_OP_ORDER},
+ {0x8D, "lea", REG_OPER_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}};
static const ByteMnemonic zero_operands_instr[] = {
{0xC3, "ret", UNSET_OP_ORDER},
@@ -1630,7 +1628,15 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
} else if (*data == 0x3A) {
data++;
- if (*data == 0x0B) {
+ if (*data == 0x0A) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("roundss %s,%s,%d", NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm), static_cast<int>(imm8));
+ data += 2;
+ } else if (*data == 0x0B) {
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index ad381c7eb2..b36cf63b87 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -59,20 +59,6 @@ const Register StringCompareDescriptor::LeftRegister() { return edx; }
const Register StringCompareDescriptor::RightRegister() { return eax; }
-const Register ArgumentsAccessReadDescriptor::index() { return edx; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return eax; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return edi; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return ecx; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return edx; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return ecx; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return edx; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return ebx; }
-
-
const Register ApiGetterDescriptor::function_address() { return edx; }
@@ -101,6 +87,32 @@ void FastNewContextDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi, edx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -119,6 +131,10 @@ const Register ToStringDescriptor::ReceiverRegister() { return eax; }
// static
+const Register ToNameDescriptor::ReceiverRegister() { return eax; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return eax; }
@@ -171,13 +187,6 @@ void CreateWeakCellDescriptor::InitializePlatformSpecific(
}
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ecx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi};
@@ -413,6 +422,13 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+ kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
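// [annotation, not part of the patch] InterpreterDispatchDescriptor pins
// the four pieces of interpreter state that every Ignition bytecode handler
// threads through to the next one: the accumulator (last computed value),
// the register-file base, the current bytecode offset, and the
// BytecodeArray itself. The concrete ia32 register assignments live in
// macro-assembler-ia32.h (see the header diff later in this patch).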
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -424,7 +440,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -436,7 +451,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 5f80b4d52f..12daec8285 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -120,29 +120,62 @@ void MacroAssembler::PushRoot(Heap::RootListIndex index) {
Push(isolate()->heap()->root_handle(index));
}
+#define REG(Name) \
+ { Register::kCode_##Name }
-void MacroAssembler::InNewSpace(
- Register object,
- Register scratch,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- DCHECK(cc == equal || cc == not_equal);
- if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
- } else {
- mov(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
+static const Register saved_regs[] = {REG(eax), REG(ecx), REG(edx)};
+
+#undef REG
+
+static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+
+void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1, Register exclusion2,
+ Register exclusion3) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ for (int i = 0; i < kNumberOfSavedRegs; i++) {
+ Register reg = saved_regs[i];
+ if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ push(reg);
+ }
+ }
+ if (fp_mode == kSaveFPRegs) {
+ sub(esp, Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
+ // Save all XMM registers except XMM0.
+ for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
+ XMMRegister reg = XMMRegister::from_code(i);
+ movsd(Operand(esp, (i - 1) * kDoubleSize), reg);
+ }
+ }
+}
+
+void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ if (fp_mode == kSaveFPRegs) {
+ // Restore all XMM registers except XMM0.
+ for (int i = XMMRegister::kMaxNumRegisters - 1; i > 0; i--) {
+ XMMRegister reg = XMMRegister::from_code(i);
+ movsd(reg, Operand(esp, (i - 1) * kDoubleSize));
+ }
+ add(esp, Immediate(kDoubleSize * (XMMRegister::kMaxNumRegisters - 1)));
+ }
+
+ for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
+ Register reg = saved_regs[i];
+ if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ pop(reg);
+ }
}
- // Check that we can use a test_b.
- DCHECK(MemoryChunk::IN_FROM_SPACE < 8);
- DCHECK(MemoryChunk::IN_TO_SPACE < 8);
- int mask = (1 << MemoryChunk::IN_FROM_SPACE)
- | (1 << MemoryChunk::IN_TO_SPACE);
- // If non-zero, the page belongs to new-space.
- test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
- static_cast<uint8_t>(mask));
- j(cc, condition_met, condition_met_distance);
+}
+
+void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc,
+ Label* condition_met,
+ Label::Distance distance) {
+ const int mask =
+ (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+ CheckPageFlag(object, scratch, mask, cc, condition_met, distance);
}
@@ -571,6 +604,75 @@ void MacroAssembler::RecordWrite(
}
}
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+ Register code_entry,
+ Register scratch) {
+ const int offset = JSFunction::kCodeEntryOffset;
+
+ // Since a code entry (value) is always in old space, we don't need to update
+ // remembered set. If incremental marking is off, there is nothing for us to
+ // do.
+ if (!FLAG_incremental_marking) return;
+
+ DCHECK(!js_function.is(code_entry));
+ DCHECK(!js_function.is(scratch));
+ DCHECK(!code_entry.is(scratch));
+ AssertNotSmi(js_function);
+
+ if (emit_debug_code()) {
+ Label ok;
+ lea(scratch, FieldOperand(js_function, offset));
+ cmp(code_entry, Operand(scratch, 0));
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ CheckPageFlag(code_entry, scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
+ Label::kNear);
+ CheckPageFlag(js_function, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
+ Label::kNear);
+
+ // Save input registers.
+ push(js_function);
+ push(code_entry);
+
+ const Register dst = scratch;
+ lea(dst, FieldOperand(js_function, offset));
+
+ // Save caller-saved registers.
+ PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);
+
+ int argument_count = 3;
+ PrepareCallCFunction(argument_count, code_entry);
+ mov(Operand(esp, 0 * kPointerSize), js_function);
+ mov(Operand(esp, 1 * kPointerSize), dst); // Slot.
+ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(this);
+ CallCFunction(
+ ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate()),
+ argument_count);
+ }
+
+ // Restore caller-saved registers.
+ PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);
+
+ // Restore input registers.
+ pop(code_entry);
+ pop(js_function);
+
+ bind(&done);
+}
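// [annotation, not part of the patch] This is the standard write-barrier
// filter specialized for JSFunction::kCodeEntryOffset. The two
// CheckPageFlag tests above boil down to:
//
//   if (!(page(code_entry)->flags & kPointersToHereAreInterestingMask) ||
//       !(page(js_function)->flags & kPointersFromHereAreInterestingMask))
//     return;  // nothing for incremental marking to record
//   // otherwise: C call into the incremental marker with
//   // (js_function, slot address, isolate)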
void MacroAssembler::DebugBreak() {
Move(eax, Immediate(0));
@@ -587,6 +689,25 @@ void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
}
+void MacroAssembler::Cvtui2ss(XMMRegister dst, Register src, Register tmp) {
+ Label msb_set_src;
+ Label jmp_return;
+ test(src, src);
+ j(sign, &msb_set_src, Label::kNear);
+ cvtsi2ss(dst, src);
+ jmp(&jmp_return, Label::kNear);
+ bind(&msb_set_src);
+ mov(tmp, src);
+ shr(src, 1);
+ // Recover the least significant bit to avoid rounding errors.
+ and_(tmp, Immediate(1));
+ or_(src, tmp);
+ cvtsi2ss(dst, src);
+ addss(dst, dst);
+ bind(&jmp_return);
+}
+
+
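// [annotation, not part of the patch] Cvtui2ss exists because ia32 only
// offers a signed int32 -> float32 conversion (cvtsi2ss). When the MSB is
// set, the code halves the value, folds the shifted-out bit back in so
// rounding still sees it, converts, and doubles the result. A C model of
// the same computation (illustrative only):
//
//   float Uint32ToFloat(uint32_t v) {
//     if ((int32_t)v >= 0) return (float)(int32_t)v;  // fits in int32
//     uint32_t halved = (v >> 1) | (v & 1);           // low bit kept sticky
//     return (float)(int32_t)halved * 2.0f;           // the addss dst,dst
//   }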
bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
static const int kMaxImmediateBits = 17;
if (!RelocInfo::IsNone(x.rmode_)) return false;
@@ -833,6 +954,19 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
+void MacroAssembler::AssertReceiver(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAReceiver);
+ Push(object);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, object);
+ Pop(object);
+ Check(above_equal, kOperandIsNotAReceiver);
+ }
+}
+
+
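// [annotation, not part of the patch] AssertReceiver needs only one
// comparison because JS receiver types form the final contiguous range of
// the instance-type enum: the STATIC_ASSERT pins LAST_TYPE ==
// LAST_JS_RECEIVER_TYPE, so type >= FIRST_JS_RECEIVER_TYPE already implies
// type <= LAST_TYPE.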
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
Label done_checking;
@@ -967,7 +1101,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
}
-void MacroAssembler::EnterExitFrame(bool save_doubles) {
+void MacroAssembler::EnterExitFrame(int argc, bool save_doubles) {
EnterExitFramePrologue();
// Set up argc and argv in callee-saved registers.
@@ -976,7 +1110,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) {
lea(esi, Operand(ebp, eax, times_4, offset));
// Reserve space for argc, argv and isolate.
- EnterExitFrameEpilogue(3, save_doubles);
+ EnterExitFrameEpilogue(argc, save_doubles);
}
@@ -1768,13 +1902,13 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {
Label loop, entry;
- jmp(&entry);
+ jmp(&entry, Label::kNear);
bind(&loop);
mov(Operand(current_address, 0), filler);
add(current_address, Immediate(kPointerSize));
bind(&entry);
cmp(current_address, end_address);
- j(below, &loop);
+ j(below, &loop, Label::kNear);
}
@@ -1796,9 +1930,9 @@ void MacroAssembler::NegativeZeroTest(Register result,
Label* then_label) {
Label ok;
test(result, result);
- j(not_zero, &ok);
+ j(not_zero, &ok, Label::kNear);
test(op, op);
- j(sign, then_label);
+ j(sign, then_label, Label::kNear);
bind(&ok);
}
@@ -1810,10 +1944,10 @@ void MacroAssembler::NegativeZeroTest(Register result,
Label* then_label) {
Label ok;
test(result, result);
- j(not_zero, &ok);
+ j(not_zero, &ok, Label::kNear);
mov(scratch, op1);
or_(scratch, op2);
- j(sign, then_label);
+ j(sign, then_label, Label::kNear);
bind(&ok);
}
@@ -2044,7 +2178,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -2147,26 +2281,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
}
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- // Fake a parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- GetBuiltinFunction(edi, native_context_index);
- InvokeFunctionCode(edi, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the JavaScript builtin function from the builtins object.
- mov(target, NativeContextOperand());
- mov(target, ContextOperand(target, native_context_index));
-}
-
-
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
@@ -2641,9 +2755,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
}
// will not return here
int3();
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 76c4890027..9ebae1f463 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -16,6 +16,7 @@ namespace internal {
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_eax};
const Register kReturnRegister1 = {Register::kCode_edx};
+const Register kReturnRegister2 = {Register::kCode_edi};
const Register kJSFunctionRegister = {Register::kCode_edi};
const Register kContextRegister = {Register::kCode_esi};
const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
@@ -106,6 +107,16 @@ class MacroAssembler: public Assembler {
j(not_equal, if_not_equal, if_not_equal_distance);
}
+ // These functions do not arrange the registers in any particular order so
+ // they are not useful for calls that can cause a GC. The caller can
+ // exclude up to 3 registers that do not need to be saved and restored.
+ void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+ void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+
// ---------------------------------------------------------------------------
// GC Support
enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
@@ -206,6 +217,11 @@ class MacroAssembler: public Assembler {
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
+ // Notify the garbage collector that we wrote a code entry into a
+ // JSFunction. Only scratch is clobbered by the operation.
+ void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+ Register scratch);
+
// For page containing |object| mark the region covering the object's map
// dirty. |object| is the object being stored into, |map| is the Map object
// that was stored.
@@ -225,7 +241,7 @@ class MacroAssembler: public Assembler {
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
// esi.
- void EnterExitFrame(bool save_doubles);
+ void EnterExitFrame(int argc, bool save_doubles);
void EnterApiExitFrame(int argc);
@@ -270,6 +286,9 @@ class MacroAssembler: public Assembler {
void StoreToSafepointRegisterSlot(Register dst, Immediate src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
+ // Nop, because ia32 does not have a root register.
+ void InitializeRootRegister() {}
+
void LoadHeapObject(Register result, Handle<HeapObject> object);
void CmpHeapObject(Register reg, Handle<HeapObject> object);
void PushHeapObject(Handle<HeapObject> object);
@@ -330,13 +349,6 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- // Invoke specified builtin JavaScript function.
- void InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
// Expression support
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
@@ -344,6 +356,8 @@ class MacroAssembler: public Assembler {
void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
void Cvtsi2sd(XMMRegister dst, const Operand& src);
+ void Cvtui2ss(XMMRegister dst, Register src, Register tmp);
+
// Support for constant splitting.
bool IsUnsafeImmediate(const Immediate& x);
void SafeMove(Register dst, const Immediate& x);
@@ -528,6 +542,9 @@ class MacroAssembler: public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+ void AssertReceiver(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
@@ -776,6 +793,7 @@ class MacroAssembler: public Assembler {
// Move an immediate into an XMM register.
void Move(XMMRegister dst, uint32_t src);
void Move(XMMRegister dst, uint64_t src);
+ void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }
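// [annotation, not part of the patch] The new float overload mirrors the
// double one on the next line: bit_cast reinterprets the IEEE-754 bits
// without a value conversion, so for example
//
//   bit_cast<uint32_t>(1.5f) == 0x3FC00000  // sign 0, exp 127, mantissa .5
//
// and Move(xmm0, 1.5f) just materializes that 32-bit pattern.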
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index e293965e6f..5f2b861d08 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -4,8 +4,10 @@
#if V8_TARGET_ARCH_ARM
-#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
@@ -286,11 +288,17 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
- __ ldr(data,
- FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(data,
- FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ ldr(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ if (optimization.is_constant_call()) {
+ __ ldr(data,
+ FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(data,
+ FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ ldr(data,
+ FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ } else {
+ __ ldr(data,
+ FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+ }
__ ldr(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
@@ -309,7 +317,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(api_function_address, Operand(ref));
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ !optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -394,8 +403,7 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
__ b(ne, miss_label);
}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
Register value_reg,
Label* miss_label) {
Register map_reg = scratch1();
@@ -403,20 +411,11 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
DCHECK(!value_reg.is(map_reg));
DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
- HeapType::Iterator<Map> it = field_type->Classes();
- if (!it.Done()) {
+ if (field_type->IsClass()) {
__ ldr(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- Label do_store;
- while (true) {
- __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
- it.Advance();
- if (it.Done()) {
- __ b(ne, miss_label);
- break;
- }
- __ b(eq, &do_store);
- }
- __ bind(&do_store);
+ __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
+ scratch);
+ __ b(ne, miss_label);
}
}
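// [annotation, not part of the patch] The old HeapType::Iterator loop had
// to compare the stored value's map against every class in a type union.
// FieldType collapses that to three cases -- None, Any, or exactly one Map
// ("class") -- and only the single-Map case needs a guard, hence the lone
// CmpWeakValue followed by one conditional branch.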
@@ -594,38 +593,39 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<ExecutableAccessorInfo> callback) {
- // Build AccessorInfo::args_ list on the stack and push property name below
- // the exit frame to make GC aware of them and store pointers to them.
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
- DCHECK(!scratch2().is(reg));
- DCHECK(!scratch3().is(reg));
- DCHECK(!scratch4().is(reg));
+ Register reg, Handle<AccessorInfo> callback) {
+ DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
+ DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
__ push(receiver());
- // Push data from ExecutableAccessorInfo.
+ // Push data from AccessorInfo.
Handle<Object> data(callback->data(), isolate());
if (data->IsUndefined() || data->IsSmi()) {
- __ Move(scratch3(), data);
+ __ Move(scratch2(), data);
} else {
Handle<WeakCell> cell =
isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
// The callback is alive if this instruction is executed,
// so the weak cell is not cleared and points to data.
- __ GetWeakValue(scratch3(), cell);
+ __ GetWeakValue(scratch2(), cell);
}
- __ push(scratch3());
- __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
- __ mov(scratch4(), scratch3());
- __ Push(scratch3(), scratch4());
- __ mov(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
- __ Push(scratch4(), reg);
- __ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_
+ __ push(scratch2());
+ __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
+ __ Push(scratch2(), scratch2());
+ __ mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+ __ Push(scratch2(), reg);
+ __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
__ push(name());
// Abi for CallApiGetter
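// [annotation, not part of the patch] The pushes above lay out the new
// seven-slot v8::PropertyCallbackInfo::args_ array, with the property name
// below it. Reading the STATIC_ASSERT indices back off the stack:
//
//   args_[6] this (receiver)       pushed first
//   args_[5] callback data
//   args_[4] return value          (undefined)
//   args_[3] return value default  (undefined)
//   args_[2] isolate
//   args_[1] holder (reg)
//   args_[0] should_throw_on_error (Smi 0 == false)
//            name                  pushed last, below args_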
@@ -714,8 +714,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+ LanguageMode language_mode) {
Register holder_reg = Frontend(name);
__ push(receiver()); // receiver
@@ -732,6 +732,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(ip);
__ mov(ip, Operand(name));
__ Push(ip, value());
+ __ Push(Smi::FromInt(language_mode));
// Do tail-call to the runtime system.
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
@@ -780,7 +781,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
+ __ IncrementCounter(counters->ic_named_load_global_stub(), 1, r1, r3);
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index f59ac074be..14ed8b41a5 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -157,8 +157,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* slow,
- LanguageMode language_mode) {
+ Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -215,13 +214,8 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ jmp(&check_next_prototype);
__ bind(&absent);
- if (is_strong(language_mode)) {
- // Strong mode accesses must throw in this case, so call the runtime.
- __ jmp(slow);
- } else {
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
- }
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
@@ -264,8 +258,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
__ bind(&unique);
}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = r0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -280,7 +273,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
- GenerateRuntimeGetProperty(masm, language_mode);
+ GenerateRuntimeGetProperty(masm);
}
@@ -304,7 +297,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->load_miss(), 1, r4, r5);
+ __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r4, r5);
LoadIC_PushArgs(masm);
@@ -312,17 +305,14 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kLoadIC_Miss);
}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in lr.
__ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
__ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty);
+ __ TailCallRuntime(Runtime::kGetProperty);
}
@@ -332,7 +322,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r4, r5);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r4, r5);
LoadIC_PushArgs(masm);
@@ -340,22 +330,17 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in lr.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
// Perform tail call to the entry.
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -379,9 +364,9 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(r0, r3, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, &slow,
- language_mode);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3);
+ GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, &slow);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r4,
+ r3);
__ Ret();
__ bind(&check_number_dictionary);
@@ -400,9 +385,9 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
// Slow case, key and receiver still in r2 and r1.
__ bind(&slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r4,
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r4,
r3);
- GenerateRuntimeGetProperty(masm, language_mode);
+ GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);
@@ -446,8 +431,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
// Load the property to r0.
GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r4,
- r3);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
+ r4, r3);
__ Ret();
__ bind(&index_name);
@@ -793,11 +778,11 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
GenerateDictionaryStore(masm, &miss, dictionary, name, value, r6, r9);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, r6, r9);
+ __ IncrementCounter(counters->ic_store_normal_hit(), 1, r6, r9);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, r6, r9);
+ __ IncrementCounter(counters->ic_store_normal_miss(), 1, r6, r9);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 7cfef6a1b4..51ae3b50cf 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -4,8 +4,10 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
@@ -198,11 +200,17 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
- __ Ldr(data,
- FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(data,
- FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ Ldr(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ if (optimization.is_constant_call()) {
+ __ Ldr(data,
+ FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(data,
+ FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ Ldr(data,
+ FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ } else {
+ __ Ldr(data,
+ FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+ }
__ Ldr(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
@@ -221,7 +229,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ Mov(api_function_address, ref);
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ !optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -358,7 +367,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
+ __ IncrementCounter(counters->ic_named_load_global_stub(), 1, x1, x3);
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
@@ -441,8 +450,7 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
__ B(ne, miss_label);
}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
Register value_reg,
Label* miss_label) {
Register map_reg = scratch1();
@@ -450,20 +458,11 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
DCHECK(!value_reg.is(map_reg));
DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
- HeapType::Iterator<Map> it = field_type->Classes();
- if (!it.Done()) {
+ if (field_type->IsClass()) {
__ Ldr(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- Label do_store;
- while (true) {
- __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
- it.Advance();
- if (it.Done()) {
- __ B(ne, miss_label);
- break;
- }
- __ B(eq, &do_store);
- }
- __ Bind(&do_store);
+ __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
+ scratch);
+ __ B(ne, miss_label);
}
}
@@ -645,19 +644,20 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<ExecutableAccessorInfo> callback) {
+ Register reg, Handle<AccessorInfo> callback) {
+ DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
- // Build ExecutableAccessorInfo::args_ list on the stack and push property
- // name below the exit frame to make GC aware of them and store pointers to
- // them.
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
__ Push(receiver());
@@ -673,18 +673,9 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
}
__ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
__ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
- __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg, name());
-
- Register args_addr = scratch2();
- __ Add(args_addr, __ StackPointer(), kPointerSize);
-
- // Stack at this point:
- // sp[40] callback data
- // sp[32] undefined
- // sp[24] undefined
- // sp[16] isolate
- // args_addr -> sp[8] reg
- // sp[0] name
+ __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg);
+ __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
+ __ Push(name());
// Abi for CallApiGetter.
Register getter_address_reg = x2;
@@ -774,8 +765,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+ LanguageMode language_mode) {
ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreCallback");
Register holder_reg = Frontend(name);
@@ -795,6 +786,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
__ Mov(scratch2(), Operand(name));
__ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
+ __ Push(Smi::FromInt(language_mode));
// Do tail-call to the runtime system.
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index eb933c78ec..726a68e45f 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -164,8 +164,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* slow,
- LanguageMode language_mode) {
+ Register result, Label* slow) {
DCHECK(!AreAliased(receiver, key, elements, scratch1, scratch2));
Label check_prototypes, check_next_prototype;
@@ -203,13 +202,8 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ B(&check_next_prototype);
__ Bind(&absent);
- if (is_strong(language_mode)) {
- // Strong mode accesses must throw in this case, so call the runtime.
- __ B(slow);
- } else {
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ B(&done);
- }
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ B(&done);
__ Bind(&in_bounds);
// Fast case: Do the load.
@@ -260,8 +254,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
// Fall through if the key is a unique name.
}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = x0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -275,7 +268,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
// Dictionary load failed, go slow (but don't miss).
__ Bind(&slow);
- GenerateRuntimeGetProperty(masm, language_mode);
+ GenerateRuntimeGetProperty(masm);
}
@@ -286,7 +279,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
DCHECK(!AreAliased(x4, x5, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->load_miss(), 1, x4, x5);
+ __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, x4, x5);
// Perform tail call to the entry.
__ Push(LoadWithVectorDescriptor::ReceiverRegister(),
@@ -296,15 +289,12 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kLoadIC_Miss);
}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in lr.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty);
+ __ TailCallRuntime(Runtime::kGetProperty);
}
@@ -314,7 +304,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, x10, x11);
__ Push(LoadWithVectorDescriptor::ReceiverRegister(),
LoadWithVectorDescriptor::NameRegister(),
@@ -325,24 +315,19 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in lr.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-
static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
Register receiver, Register scratch1,
Register scratch2, Register scratch3,
Register scratch4, Register scratch5,
- Label* slow,
- LanguageMode language_mode) {
+ Label* slow) {
DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
scratch5));
@@ -358,8 +343,8 @@ static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
__ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
- result, slow, language_mode);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1,
+ result, slow);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1,
scratch1, scratch2);
__ Ret();
@@ -424,14 +409,12 @@ static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
// Load the property.
GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1,
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
scratch1, scratch2);
__ Ret();
}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name;
@@ -444,14 +427,13 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
__ Bind(&index_smi);
// Now the key is known to be a smi. This place is also jumped to from below
// where a numeric string is converted to a smi.
- GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow,
- language_mode);
+ GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
// Slow case.
__ Bind(&slow);
- __ IncrementCounter(masm->isolate()->counters()->keyed_load_generic_slow(), 1,
- x4, x3);
- GenerateRuntimeGetProperty(masm, language_mode);
+ __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_generic_slow(),
+ 1, x4, x3);
+ GenerateRuntimeGetProperty(masm);
__ Bind(&check_name);
GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);
@@ -783,12 +765,12 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
GenerateDictionaryStore(masm, &miss, dictionary, name, value, x6, x7);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, x6, x7);
+ __ IncrementCounter(counters->ic_store_normal_hit(), 1, x6, x7);
__ Ret();
// Cache miss: Jump to runtime.
__ Bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, x6, x7);
+ __ IncrementCounter(counters->ic_store_normal_miss(), 1, x6, x7);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/ic/call-optimization.cc b/deps/v8/src/ic/call-optimization.cc
index 45717b50a7..571b614dde 100644
--- a/deps/v8/src/ic/call-optimization.cc
+++ b/deps/v8/src/ic/call-optimization.cc
@@ -8,8 +8,16 @@
namespace v8 {
namespace internal {
-CallOptimization::CallOptimization(Handle<JSFunction> function) {
- Initialize(function);
+CallOptimization::CallOptimization(Handle<Object> function) {
+ constant_function_ = Handle<JSFunction>::null();
+ is_simple_api_call_ = false;
+ expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
+ api_call_info_ = Handle<CallHandlerInfo>::null();
+ if (function->IsJSFunction()) {
+ Initialize(Handle<JSFunction>::cast(function));
+ } else if (function->IsFunctionTemplateInfo()) {
+ Initialize(Handle<FunctionTemplateInfo>::cast(function));
+ }
}
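// [annotation, not part of the patch] The constructor now accepts any
// Object and classifies it itself: a JSFunction takes the old Initialize
// path, a FunctionTemplateInfo becomes a simple API call with no constant
// function, and anything else leaves the optimization disabled.
// Illustrative use (hypothetical handle 'getter'):
//
//   CallOptimization opt(getter);  // getter: Handle<Object>, either kind
//   opt.is_constant_call();        // true only if getter was a JSFunction
//   opt.is_simple_api_call();      // may be true for either kind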
@@ -27,9 +35,8 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
return Handle<JSObject>::null();
}
for (int depth = 1; true; depth++) {
- if (!object_map->prototype()->IsJSObject()) break;
+ if (!object_map->has_hidden_prototype()) break;
Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
- if (!prototype->map()->is_hidden_prototype()) break;
object_map = handle(prototype->map());
if (expected_receiver_type_->IsTemplateFor(*object_map)) {
*holder_lookup = kHolderFound;
@@ -80,13 +87,20 @@ bool CallOptimization::IsCompatibleReceiverMap(Handle<Map> map,
return false;
}
+void CallOptimization::Initialize(
+ Handle<FunctionTemplateInfo> function_template_info) {
+ if (function_template_info->call_code()->IsUndefined()) return;
+ api_call_info_ =
+ handle(CallHandlerInfo::cast(function_template_info->call_code()));
-void CallOptimization::Initialize(Handle<JSFunction> function) {
- constant_function_ = Handle<JSFunction>::null();
- is_simple_api_call_ = false;
- expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
- api_call_info_ = Handle<CallHandlerInfo>::null();
+ if (!function_template_info->signature()->IsUndefined()) {
+ expected_receiver_type_ =
+ handle(FunctionTemplateInfo::cast(function_template_info->signature()));
+ }
+ is_simple_api_call_ = true;
+}
+void CallOptimization::Initialize(Handle<JSFunction> function) {
if (function.is_null() || !function->is_compiled()) return;
constant_function_ = function;
diff --git a/deps/v8/src/ic/call-optimization.h b/deps/v8/src/ic/call-optimization.h
index 7963d1ce67..efabd3387c 100644
--- a/deps/v8/src/ic/call-optimization.h
+++ b/deps/v8/src/ic/call-optimization.h
@@ -15,7 +15,7 @@ namespace internal {
// Holds information about possible function call optimizations.
class CallOptimization BASE_EMBEDDED {
public:
- explicit CallOptimization(Handle<JSFunction> function);
+ explicit CallOptimization(Handle<Object> function);
bool is_constant_call() const { return !constant_function_.is_null(); }
@@ -51,6 +51,7 @@ class CallOptimization BASE_EMBEDDED {
private:
void Initialize(Handle<JSFunction> function);
+ void Initialize(Handle<FunctionTemplateInfo> function_template_info);
// Determines whether the given function can be called using the
// fast api call builtin.
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index b353628053..803281e24d 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -4,9 +4,10 @@
#include "src/ic/handler-compiler.h"
+#include "src/field-type.h"
#include "src/ic/call-optimization.h"
-#include "src/ic/ic.h"
#include "src/ic/ic-inl.h"
+#include "src/ic/ic.h"
#include "src/isolate-inl.h"
#include "src/profiler/cpu-profiler.h"
@@ -56,11 +57,7 @@ Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
if (name->IsPrivate()) {
// TODO(verwaest): Use nonexistent_private_symbol.
cache_name = name;
- JSReceiver* prototype = JSReceiver::cast(current_map->prototype());
- if (!prototype->map()->is_hidden_prototype() &&
- !prototype->map()->IsJSGlobalObjectMap()) {
- break;
- }
+ if (!current_map->has_hidden_prototype()) break;
}
last = handle(JSObject::cast(current_map->prototype()));
@@ -228,7 +225,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
- Handle<Name> name, Handle<ExecutableAccessorInfo> callback) {
+ Handle<Name> name, Handle<AccessorInfo> callback) {
Register reg = Frontend(name);
GenerateLoadCallback(reg, callback);
return GetCode(kind(), Code::FAST, name);
@@ -278,7 +275,7 @@ void NamedLoadHandlerCompiler::InterceptorVectorSlotPop(Register holder_reg,
Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
LookupIterator* it) {
// So far the most popular follow ups for interceptor loads are DATA and
- // ExecutableAccessorInfo, so inline only them. Other cases may be added
+ // AccessorInfo, so inline only them. Other cases may be added
// later.
bool inline_followup = false;
switch (it->state()) {
@@ -296,20 +293,20 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
break;
case LookupIterator::ACCESSOR: {
Handle<Object> accessors = it->GetAccessors();
- if (accessors->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(accessors);
- inline_followup = info->getter() != NULL &&
- ExecutableAccessorInfo::IsCompatibleReceiverMap(
- isolate(), info, map());
+ if (accessors->IsAccessorInfo()) {
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
+ inline_followup =
+ info->getter() != NULL &&
+ AccessorInfo::IsCompatibleReceiverMap(isolate(), info, map());
} else if (accessors->IsAccessorPair()) {
Handle<JSObject> property_holder(it->GetHolder<JSObject>());
Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
isolate());
- if (!getter->IsJSFunction()) break;
+ if (!(getter->IsJSFunction() || getter->IsFunctionTemplateInfo())) {
+ break;
+ }
if (!property_holder->HasFastProperties()) break;
- auto function = Handle<JSFunction>::cast(getter);
- CallOptimization call_optimization(function);
+ CallOptimization call_optimization(getter);
Handle<Map> receiver_map = map();
inline_followup = call_optimization.is_simple_api_call() &&
call_optimization.IsCompatibleReceiverMap(
@@ -396,14 +393,14 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
break;
}
case LookupIterator::ACCESSOR:
- if (it->GetAccessors()->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(it->GetAccessors());
+ if (it->GetAccessors()->IsAccessorInfo()) {
+ Handle<AccessorInfo> info =
+ Handle<AccessorInfo>::cast(it->GetAccessors());
DCHECK_NOT_NULL(info->getter());
GenerateLoadCallback(reg, info);
} else {
- auto function = handle(JSFunction::cast(
- AccessorPair::cast(*it->GetAccessors())->getter()));
+ Handle<Object> function = handle(
+ AccessorPair::cast(*it->GetAccessors())->getter(), isolate());
CallOptimization call_optimization(function);
GenerateApiAccessorCall(masm(), call_optimization, holder_map,
receiver(), scratch2(), false, no_reg, reg,
@@ -437,8 +434,9 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
PrototypeIterator::WhereToEnd end =
name->IsPrivate() ? PrototypeIterator::END_AT_NON_HIDDEN
: PrototypeIterator::END_AT_NULL;
- PrototypeIterator iter(isolate(), holder());
- while (!iter.IsAtEnd(end)) {
+ PrototypeIterator iter(isolate(), holder(),
+ PrototypeIterator::START_AT_PROTOTYPE, end);
+ while (!iter.IsAtEnd()) {
last = PrototypeIterator::GetCurrent<JSObject>(iter);
iter.Advance();
}
@@ -510,10 +508,9 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
return GetCode(kind(), Code::FAST, name);
}
-
bool NamedStoreHandlerCompiler::RequiresFieldTypeChecks(
- HeapType* field_type) const {
- return !field_type->Classes().Done();
+ FieldType* field_type) const {
+ return field_type->IsClass();
}
@@ -521,7 +518,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
Label miss;
DCHECK(it->representation().IsHeapObject());
- HeapType* field_type = *it->GetFieldType();
+ FieldType* field_type = *it->GetFieldType();
bool need_save_restore = false;
if (RequiresFieldTypeChecks(field_type)) {
need_save_restore = IC::ICUseVector(kind());
@@ -564,10 +561,8 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
#undef __
-
void ElementHandlerCompiler::CompileElementHandlers(
- MapHandleList* receiver_maps, CodeHandleList* handlers,
- LanguageMode language_mode) {
+ MapHandleList* receiver_maps, CodeHandleList* handlers) {
for (int i = 0; i < receiver_maps->length(); ++i) {
Handle<Map> receiver_map = receiver_maps->at(i);
Handle<Code> cached_stub;
@@ -575,9 +570,7 @@ void ElementHandlerCompiler::CompileElementHandlers(
if (receiver_map->IsStringMap()) {
cached_stub = LoadIndexedStringStub(isolate()).GetCode();
} else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
- cached_stub = is_strong(language_mode)
- ? isolate()->builtins()->KeyedLoadIC_Slow_Strong()
- : isolate()->builtins()->KeyedLoadIC_Slow();
+ cached_stub = isolate()->builtins()->KeyedLoadIC_Slow();
} else {
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
ElementsKind elements_kind = receiver_map->elements_kind();
@@ -586,9 +579,7 @@ void ElementHandlerCompiler::CompileElementHandlers(
// generated stub code needs to check that dynamically anyway.
bool convert_hole_to_undefined =
(is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
- *receiver_map ==
- isolate()->get_initial_js_array_map(elements_kind)) &&
- !is_strong(language_mode);
+ *receiver_map == isolate()->get_initial_js_array_map(elements_kind));
if (receiver_map->has_indexed_interceptor()) {
cached_stub = LoadIndexedInterceptorStub(isolate()).GetCode();
@@ -600,9 +591,7 @@ void ElementHandlerCompiler::CompileElementHandlers(
convert_hole_to_undefined).GetCode();
} else {
DCHECK(elements_kind == DICTIONARY_ELEMENTS);
- LoadICState state =
- LoadICState(is_strong(language_mode) ? LoadICState::kStrongModeState
- : kNoExtraICState);
+ LoadICState state = LoadICState(kNoExtraICState);
cached_stub = LoadDictionaryElementStub(isolate(), state).GetCode();
}
}
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index fe59210353..45d7d73089 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -123,7 +123,7 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
Handle<Code> CompileLoadField(Handle<Name> name, FieldIndex index);
Handle<Code> CompileLoadCallback(Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback);
+ Handle<AccessorInfo> callback);
Handle<Code> CompileLoadCallback(Handle<Name> name,
const CallOptimization& call_optimization,
@@ -180,8 +180,7 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
private:
Handle<Code> CompileLoadNonexistent(Handle<Name> name);
void GenerateLoadConstant(Handle<Object> value);
- void GenerateLoadCallback(Register reg,
- Handle<ExecutableAccessorInfo> callback);
+ void GenerateLoadCallback(Register reg, Handle<AccessorInfo> callback);
void GenerateLoadCallback(const CallOptimization& call_optimization,
Handle<Map> receiver_map);
@@ -224,7 +223,8 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
Handle<Name> name);
Handle<Code> CompileStoreField(LookupIterator* it);
Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback);
+ Handle<AccessorInfo> callback,
+ LanguageMode language_mode);
Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
const CallOptimization& call_optimization,
int accessor_index);
@@ -265,8 +265,8 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
Register value_reg, Register scratch,
Label* miss_label);
- bool RequiresFieldTypeChecks(HeapType* field_type) const;
- void GenerateFieldTypeChecks(HeapType* field_type, Register value_reg,
+ bool RequiresFieldTypeChecks(FieldType* field_type) const;
+ void GenerateFieldTypeChecks(FieldType* field_type, Register value_reg,
Label* miss_label);
static Builtins::Name SlowBuiltin(Code::Kind kind) {
@@ -295,8 +295,7 @@ class ElementHandlerCompiler : public PropertyHandlerCompiler {
virtual ~ElementHandlerCompiler() {}
void CompileElementHandlers(MapHandleList* receiver_maps,
- CodeHandleList* handlers,
- LanguageMode language_mode);
+ CodeHandleList* handlers);
static void GenerateStoreSlow(MacroAssembler* masm);
};
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 0b380b3ee2..3bdddf9b6d 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -4,8 +4,10 @@
#if V8_TARGET_ARCH_IA32
-#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
@@ -197,9 +199,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
call_data_undefined = true;
__ mov(data, Immediate(isolate->factory()->undefined_value()));
} else {
- __ mov(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ mov(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ mov(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ if (optimization.is_constant_call()) {
+ __ mov(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ mov(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ } else {
+ __ mov(data, FieldOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+ }
__ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
}
@@ -214,7 +220,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(api_function_address, Immediate(function_address));
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ !optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -399,8 +406,7 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
__ j(not_equal, miss_label);
}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
Register value_reg,
Label* miss_label) {
Register map_reg = scratch1();
@@ -408,20 +414,11 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
DCHECK(!value_reg.is(map_reg));
DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
- HeapType::Iterator<Map> it = field_type->Classes();
- if (!it.Done()) {
- Label do_store;
+ if (field_type->IsClass()) {
__ mov(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
- while (true) {
- __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
- it.Advance();
- if (it.Done()) {
- __ j(not_equal, miss_label);
- break;
- }
- __ j(equal, &do_store, Label::kNear);
- }
- __ bind(&do_store);
+ __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
+ scratch);
+ __ j(not_equal, miss_label);
}
}
@@ -593,24 +590,30 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<ExecutableAccessorInfo> callback) {
+ Register reg, Handle<AccessorInfo> callback) {
+ DCHECK(!AreAliased(scratch2(), scratch3(), receiver()));
+ DCHECK(!AreAliased(scratch2(), scratch3(), reg));
+
// Insert additional parameters into the stack frame above return address.
- DCHECK(!scratch3().is(reg));
__ pop(scratch3()); // Get return address to place it below.
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
__ push(receiver()); // receiver
- // Push data from ExecutableAccessorInfo.
+ // Push data from AccessorInfo.
Handle<Object> data(callback->data(), isolate());
if (data->IsUndefined() || data->IsSmi()) {
__ push(Immediate(data));
} else {
- DCHECK(!scratch2().is(reg));
Handle<WeakCell> cell =
isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
// The callback is alive if this instruction is executed,
@@ -623,13 +626,9 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ push(Immediate(isolate()->factory()->undefined_value()));
__ push(Immediate(reinterpret_cast<int>(isolate())));
__ push(reg); // holder
-
- // Save a pointer to where we pushed the arguments. This will be
- // passed as the const PropertyAccessorInfo& to the C++ callback.
- __ push(esp);
+ __ push(Immediate(Smi::FromInt(0))); // should_throw_on_error -> false
__ push(name()); // name
-
__ push(scratch3()); // Restore return address.
// Abi for CallApiGetter
@@ -731,8 +730,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+ LanguageMode language_mode) {
Register holder_reg = Frontend(name);
__ pop(scratch1()); // remove the return address
@@ -748,6 +747,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
__ Push(name);
__ push(value());
+ __ push(Immediate(Smi::FromInt(language_mode)));
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
@@ -802,7 +802,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1);
+ __ IncrementCounter(counters->ic_named_load_global_stub(), 1);
// The code above already loads the result into the return register.
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
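
The GenerateLoadCallback rewrite above grows the v8::PropertyCallbackInfo::args_ array by one slot: a should_throw_on_error Smi now sits at index 0 and every other index shifts up by one, replacing the old explicit push of esp that carried a pointer to the argument block. The layout, transcribed from the STATIC_ASSERTs in the hunk into a standalone enum (a sketch; the real constants live in V8's PropertyCallbackArguments):

// Stack slots built by the load-callback handler; name() is pushed after.
enum PropertyCallbackArgsLayoutModel {
  kShouldThrowOnErrorIndex = 0,  // Smi 0/1; new in this upgrade
  kHolderIndex = 1,
  kIsolateIndex = 2,
  kReturnValueDefaultValueIndex = 3,
  kReturnValueOffset = 4,
  kDataIndex = 5,
  kThisIndex = 6,
  kArgsLength = 7  // total args_ slots, up from 6
};
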
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index 88947e47e7..0eba42720d 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -167,7 +167,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register scratch,
Register scratch2, Register result,
- Label* slow, LanguageMode language_mode) {
+ Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// key - holds the key and is unchanged (must be a smi).
@@ -211,13 +211,8 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ jmp(&check_next_prototype);
__ bind(&absent);
- if (is_strong(language_mode)) {
- // Strong mode accesses must throw in this case, so call the runtime.
- __ jmp(slow);
- } else {
- __ mov(result, masm->isolate()->factory()->undefined_value());
- __ jmp(&done);
- }
+ __ mov(result, masm->isolate()->factory()->undefined_value());
+ __ jmp(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
@@ -262,9 +257,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
__ bind(&unique);
}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is on the stack.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -286,11 +279,10 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(eax, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow,
- language_mode);
+ GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
Isolate* isolate = masm->isolate();
Counters* counters = isolate->counters();
- __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
+ __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
__ ret(0);
__ bind(&check_number_dictionary);
@@ -318,8 +310,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
__ bind(&slow);
// Slow case: jump to runtime.
- __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm, language_mode);
+ __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
+ GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
@@ -363,7 +355,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
- __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
+ __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
__ ret(0);
__ bind(&index_name);
@@ -628,8 +620,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
GenerateMiss(masm);
}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = eax;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -644,7 +635,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
- GenerateRuntimeGetProperty(masm, language_mode);
+ GenerateRuntimeGetProperty(masm);
}
@@ -668,16 +659,14 @@ static void LoadIC_PushArgs(MacroAssembler* masm) {
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// Return address is on the stack.
- __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
+ __ IncrementCounter(masm->isolate()->counters()->ic_load_miss(), 1);
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kLoadIC_Miss);
}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// Return address is on the stack.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
@@ -689,14 +678,13 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
__ push(ebx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty);
+ __ TailCallRuntime(Runtime::kGetProperty);
}
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// Return address is on the stack.
- __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
+ __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_miss(), 1);
LoadIC_PushArgs(masm);
@@ -704,9 +692,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// Return address is on the stack.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
@@ -718,8 +704,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
__ push(ebx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty);
}
@@ -777,14 +762,14 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
receiver, edi);
__ Drop(3);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1);
+ __ IncrementCounter(counters->ic_store_normal_hit(), 1);
__ ret(0);
__ bind(&restore_miss);
__ pop(slot);
__ pop(vector);
__ pop(receiver);
- __ IncrementCounter(counters->store_normal_miss(), 1);
+ __ IncrementCounter(counters->ic_store_normal_miss(), 1);
GenerateMiss(masm);
}
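
The ic-ia32.cc hunks above are the ia32 leg of the strong-mode removal: every LanguageMode parameter disappears, the *_Strong runtime entries give way to plain Runtime::kGetProperty / Runtime::kKeyedGetProperty, and the IC counters gain a uniform ic_ prefix. In particular, the absent-element path in GenerateFastArrayLoad no longer branches to the slow path so that strong mode can throw; it always materializes undefined. A self-contained C++ model of the resulting semantics (a sketch with a vector standing in for the elements backing store; holes and the prototype walk are elided):

#include <cstddef>
#include <optional>
#include <vector>

using Value = std::optional<int>;  // std::nullopt plays the role of undefined

Value FastArrayLoadModel(const std::vector<int>& elements, size_t key) {
  if (key < elements.size()) return elements[key];  // in-bounds fast load
  return std::nullopt;  // absent: always undefined now, never a throw
}
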
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
index ae4b2a5d58..f74c69e50d 100644
--- a/deps/v8/src/ic/ic-compiler.cc
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -43,13 +43,11 @@ Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
// stub code needs to check that dynamically anyway.
bool convert_hole_to_undefined =
is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
- *receiver_map == isolate->get_initial_js_array_map(elements_kind) &&
- !(is_strong(LoadICState::GetLanguageMode(extra_ic_state)));
+ *receiver_map == isolate->get_initial_js_array_map(elements_kind);
Handle<Code> stub;
if (receiver_map->has_indexed_interceptor()) {
stub = LoadIndexedInterceptorStub(isolate).GetCode();
} else if (receiver_map->IsStringMap()) {
- // We have a string.
stub = LoadIndexedStringStub(isolate).GetCode();
} else if (receiver_map->has_sloppy_arguments_elements()) {
stub = KeyedLoadSloppyArgumentsStub(isolate).GetCode();
@@ -58,6 +56,7 @@ Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
stub = LoadFastElementStub(isolate, is_js_array, elements_kind,
convert_hole_to_undefined).GetCode();
} else {
+ DCHECK(receiver_map->has_dictionary_elements());
stub = LoadDictionaryElementStub(isolate, LoadICState(extra_ic_state))
.GetCode();
}
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 6dab006ad5..998bd8cf12 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -95,9 +95,6 @@ void LoadIC::set_target(Code* code) {
// The contextual mode must be preserved across IC patching.
DCHECK(LoadICState::GetTypeofMode(code->extra_ic_state()) ==
LoadICState::GetTypeofMode(target()->extra_ic_state()));
- // Strongness must be preserved across IC patching.
- DCHECK(LoadICState::GetLanguageMode(code->extra_ic_state()) ==
- LoadICState::GetLanguageMode(target()->extra_ic_state()));
IC::set_target(code);
}
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index 4bdaf3ff03..933803c653 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -37,7 +37,6 @@ BinaryOpICState::BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state)
isolate_(isolate) {
op_ =
static_cast<Token::Value>(FIRST_TOKEN + OpField::decode(extra_ic_state));
- strong_ = StrengthField::decode(extra_ic_state);
left_kind_ = LeftKindField::decode(extra_ic_state);
right_kind_ = fixed_right_arg_.IsJust()
? (Smi::IsValid(fixed_right_arg_.FromJust()) ? SMI : INT32)
@@ -51,7 +50,7 @@ BinaryOpICState::BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state)
ExtraICState BinaryOpICState::GetExtraICState() const {
ExtraICState extra_ic_state =
OpField::encode(op_ - FIRST_TOKEN) | LeftKindField::encode(left_kind_) |
- ResultKindField::encode(result_kind_) | StrengthField::encode(strong_) |
+ ResultKindField::encode(result_kind_) |
HasFixedRightArgField::encode(fixed_right_arg_.IsJust());
if (fixed_right_arg_.IsJust()) {
extra_ic_state = FixedRightArgValueField::update(
@@ -72,7 +71,7 @@ void BinaryOpICState::GenerateAheadOfTime(
// Generated list of commonly used stubs
#define GENERATE(op, left_kind, right_kind, result_kind) \
do { \
- BinaryOpICState state(isolate, op, Strength::WEAK); \
+ BinaryOpICState state(isolate, op); \
state.left_kind_ = left_kind; \
state.fixed_right_arg_ = Nothing<int>(); \
state.right_kind_ = right_kind; \
@@ -174,7 +173,7 @@ void BinaryOpICState::GenerateAheadOfTime(
#undef GENERATE
#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind) \
do { \
- BinaryOpICState state(isolate, op, Strength::WEAK); \
+ BinaryOpICState state(isolate, op); \
state.left_kind_ = left_kind; \
state.fixed_right_arg_ = Just(fixed_right_arg_value); \
state.right_kind_ = SMI; \
@@ -208,7 +207,6 @@ Type* BinaryOpICState::GetResultType() const {
std::ostream& operator<<(std::ostream& os, const BinaryOpICState& s) {
os << "(" << Token::Name(s.op_);
if (s.CouldCreateAllocationMementos()) os << "_CreateAllocationMementos";
- if (is_strong(s.strength())) os << "_Strong";
os << ":" << BinaryOpICState::KindToString(s.left_kind_) << "*";
if (s.fixed_right_arg_.IsJust()) {
os << s.fixed_right_arg_.FromJust();
@@ -371,25 +369,25 @@ const char* CompareICState::GetStateName(State state) {
Type* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) {
switch (state) {
case UNINITIALIZED:
- return Type::None(zone);
+ return Type::None();
case BOOLEAN:
- return Type::Boolean(zone);
+ return Type::Boolean();
case SMI:
- return Type::SignedSmall(zone);
+ return Type::SignedSmall();
case NUMBER:
- return Type::Number(zone);
+ return Type::Number();
case STRING:
- return Type::String(zone);
+ return Type::String();
case INTERNALIZED_STRING:
- return Type::InternalizedString(zone);
+ return Type::InternalizedString();
case UNIQUE_NAME:
- return Type::UniqueName(zone);
+ return Type::UniqueName();
case RECEIVER:
- return Type::Receiver(zone);
+ return Type::Receiver();
case KNOWN_RECEIVER:
- return map.is_null() ? Type::Receiver(zone) : Type::Class(map, zone);
+ return map.is_null() ? Type::Receiver() : Type::Class(map, zone);
case GENERIC:
- return Type::Any(zone);
+ return Type::Any();
}
UNREACHABLE();
return NULL;
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index 1982fbe08b..e1d33f8678 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -25,9 +25,11 @@ class CallICState final BASE_EMBEDDED {
public:
explicit CallICState(ExtraICState extra_ic_state)
: bit_field_(extra_ic_state) {}
- CallICState(int argc, ConvertReceiverMode convert_mode)
+ CallICState(int argc, ConvertReceiverMode convert_mode,
+ TailCallMode tail_call_mode)
: bit_field_(ArgcBits::encode(argc) |
- ConvertModeBits::encode(convert_mode)) {}
+ ConvertModeBits::encode(convert_mode) |
+ TailCallModeBits::encode(tail_call_mode)) {}
ExtraICState GetExtraICState() const { return bit_field_; }
@@ -39,11 +41,14 @@ class CallICState final BASE_EMBEDDED {
ConvertReceiverMode convert_mode() const {
return ConvertModeBits::decode(bit_field_);
}
+ TailCallMode tail_call_mode() const {
+ return TailCallModeBits::decode(bit_field_);
+ }
private:
typedef BitField<int, 0, Code::kArgumentsBits> ArgcBits;
- typedef BitField<ConvertReceiverMode, Code::kArgumentsBits, 2>
- ConvertModeBits;
+ typedef BitField<ConvertReceiverMode, ArgcBits::kNext, 2> ConvertModeBits;
+ typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;
int const bit_field_;
};
@@ -55,9 +60,8 @@ std::ostream& operator<<(std::ostream& os, const CallICState& s);
class BinaryOpICState final BASE_EMBEDDED {
public:
BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state);
- BinaryOpICState(Isolate* isolate, Token::Value op, Strength strength)
+ BinaryOpICState(Isolate* isolate, Token::Value op)
: op_(op),
- strong_(is_strong(strength)),
left_kind_(NONE),
right_kind_(NONE),
result_kind_(NONE),
@@ -104,10 +108,6 @@ class BinaryOpICState final BASE_EMBEDDED {
return Max(left_kind_, right_kind_) == GENERIC;
}
- Strength strength() const {
- return strong_ ? Strength::STRONG : Strength::WEAK;
- }
-
// Returns true if the IC should enable the inline smi code (i.e. if either
// parameter may be a smi).
bool UseInlinedSmiCode() const {
@@ -146,15 +146,13 @@ class BinaryOpICState final BASE_EMBEDDED {
class OpField : public BitField<int, 0, 4> {};
class ResultKindField : public BitField<Kind, 4, 3> {};
class LeftKindField : public BitField<Kind, 7, 3> {};
- class StrengthField : public BitField<bool, 10, 1> {};
// When fixed right arg is set, we don't need to store the right kind.
// Thus the two fields can overlap.
- class HasFixedRightArgField : public BitField<bool, 11, 1> {};
- class FixedRightArgValueField : public BitField<int, 12, 4> {};
- class RightKindField : public BitField<Kind, 12, 3> {};
+ class HasFixedRightArgField : public BitField<bool, 10, 1> {};
+ class FixedRightArgValueField : public BitField<int, 11, 4> {};
+ class RightKindField : public BitField<Kind, 11, 3> {};
Token::Value op_;
- bool strong_;
Kind left_kind_;
Kind right_kind_;
Kind result_kind_;
@@ -204,38 +202,24 @@ class CompareICState {
class LoadICState final BASE_EMBEDDED {
private:
class TypeofModeBits : public BitField<TypeofMode, 0, 1> {};
- class LanguageModeBits
- : public BitField<LanguageMode, TypeofModeBits::kNext, 2> {};
STATIC_ASSERT(static_cast<int>(INSIDE_TYPEOF) == 0);
const ExtraICState state_;
public:
- static const uint32_t kNextBitFieldOffset = LanguageModeBits::kNext;
-
- static const ExtraICState kStrongModeState = STRONG
- << LanguageModeBits::kShift;
+ static const uint32_t kNextBitFieldOffset = TypeofModeBits::kNext;
explicit LoadICState(ExtraICState extra_ic_state) : state_(extra_ic_state) {}
- explicit LoadICState(TypeofMode typeof_mode, LanguageMode language_mode)
- : state_(TypeofModeBits::encode(typeof_mode) |
- LanguageModeBits::encode(language_mode)) {}
+ explicit LoadICState(TypeofMode typeof_mode)
+ : state_(TypeofModeBits::encode(typeof_mode)) {}
ExtraICState GetExtraICState() const { return state_; }
TypeofMode typeof_mode() const { return TypeofModeBits::decode(state_); }
- LanguageMode language_mode() const {
- return LanguageModeBits::decode(state_);
- }
-
static TypeofMode GetTypeofMode(ExtraICState state) {
return LoadICState(state).typeof_mode();
}
-
- static LanguageMode GetLanguageMode(ExtraICState state) {
- return LoadICState(state).language_mode();
- }
};
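
In ic-state.h above, CallICState picks up a TailCallMode bit and BinaryOpICState drops its StrengthField, repacking HasFixedRightArg/FixedRightArgValue/RightKind one bit lower. Both rely on V8's chained BitField encoding, where each field starts at the previous field's ::kNext. A self-contained model (a sketch: the 8-bit argc width and the enumerator spellings are assumptions; only the field order and widths are taken from the hunk):

#include <cstdint>

template <typename T, int shift, int size>
struct BitField {
  static const int kNext = shift + size;  // chaining point for the next field
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field >> shift) & ((1u << size) - 1));
  }
};

enum ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };
enum TailCallMode { kDisallow, kAllow };

typedef BitField<int, 0, 8> ArgcBits;  // Code::kArgumentsBits stands in here
typedef BitField<ConvertReceiverMode, ArgcBits::kNext, 2> ConvertModeBits;
typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;

int main() {
  uint32_t state = ArgcBits::encode(2) | ConvertModeBits::encode(kAny) |
                   TailCallModeBits::encode(kAllow);
  return TailCallModeBits::decode(state) == kAllow ? 0 : 1;  // exits 0
}
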
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 73ac666a41..4c2b20ca1b 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -11,6 +11,7 @@
#include "src/codegen.h"
#include "src/conversions.h"
#include "src/execution.h"
+#include "src/field-type.h"
#include "src/frames-inl.h"
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
@@ -21,6 +22,8 @@
#include "src/macro-assembler.h"
#include "src/prototype.h"
#include "src/runtime/runtime.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -545,8 +548,7 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
CompareICStub stub(target->stub_key(), isolate);
// Only clear CompareICs that can retain objects.
if (stub.state() != CompareICState::KNOWN_RECEIVER) return;
- SetTargetAtAddress(address,
- GetRawUninitialized(isolate, stub.op(), stub.strength()),
+ SetTargetAtAddress(address, GetRawUninitialized(isolate, stub.op()),
constant_pool);
PatchInlinedSmiCode(isolate, address, DISABLE_INLINED_SMI_CHECK);
}
@@ -558,9 +560,7 @@ Handle<Code> KeyedLoadIC::ChooseMegamorphicStub(Isolate* isolate,
if (FLAG_compiled_keyed_generic_loads) {
return KeyedLoadGenericStub(isolate, LoadICState(extra_state)).GetCode();
} else {
- return is_strong(LoadICState::GetLanguageMode(extra_state))
- ? isolate->builtins()->KeyedLoadIC_Megamorphic_Strong()
- : isolate->builtins()->KeyedLoadIC_Megamorphic();
+ return isolate->builtins()->KeyedLoadIC_Megamorphic();
}
}
@@ -667,9 +667,9 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
TRACE_GENERIC_IC(isolate(), "LoadIC", "name as array index");
}
Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::GetElement(isolate(), object, index, language_mode()), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ Object::GetElement(isolate(), object, index),
+ Object);
return result;
}
@@ -685,9 +685,9 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
ScriptContextTable::LookupResult lookup_result;
if (ScriptContextTable::Lookup(script_contexts, str_name, &lookup_result)) {
Handle<Object> result =
- FixedArray::get(ScriptContextTable::GetContext(
+ FixedArray::get(*ScriptContextTable::GetContext(
script_contexts, lookup_result.context_index),
- lookup_result.slot_index);
+ lookup_result.slot_index, isolate());
if (*result == *isolate()->factory()->the_hole_value()) {
// Do not install stubs and stay pre-monomorphic for
// uninitialized accesses.
@@ -713,8 +713,8 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
// Get the property.
Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Object::GetProperty(&it, language_mode()), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result, Object::GetProperty(&it),
+ Object);
if (it.IsFound()) {
return result;
} else if (!ShouldThrowReferenceError(object)) {
@@ -899,9 +899,7 @@ Handle<Code> KeyedLoadIC::initialize_stub_in_optimized_code(
if (initialization_state != MEGAMORPHIC) {
return KeyedLoadICStub(isolate, LoadICState(extra_state)).GetCode();
}
- return is_strong(LoadICState::GetLanguageMode(extra_state))
- ? isolate->builtins()->KeyedLoadIC_Megamorphic_Strong()
- : isolate->builtins()->KeyedLoadIC_Megamorphic();
+ return isolate->builtins()->KeyedLoadIC_Megamorphic();
}
@@ -976,29 +974,34 @@ bool IsCompatibleReceiver(LookupIterator* lookup, Handle<Map> receiver_map) {
DCHECK(lookup->state() == LookupIterator::ACCESSOR);
Isolate* isolate = lookup->isolate();
Handle<Object> accessors = lookup->GetAccessors();
- if (accessors->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(accessors);
+ if (accessors->IsAccessorInfo()) {
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
if (info->getter() != NULL &&
- !ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate, info,
- receiver_map)) {
+ !AccessorInfo::IsCompatibleReceiverMap(isolate, info, receiver_map)) {
return false;
}
} else if (accessors->IsAccessorPair()) {
Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
isolate);
+ if (!getter->IsJSFunction() && !getter->IsFunctionTemplateInfo())
+ return false;
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
Handle<Object> receiver = lookup->GetReceiver();
- if (getter->IsJSFunction() && holder->HasFastProperties()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
- if (receiver->IsJSObject() || function->shared()->IsBuiltin() ||
- !is_sloppy(function->shared()->language_mode())) {
- CallOptimization call_optimization(function);
- if (call_optimization.is_simple_api_call() &&
- !call_optimization.IsCompatibleReceiverMap(receiver_map, holder)) {
+ if (holder->HasFastProperties()) {
+ if (getter->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+ if (!receiver->IsJSObject() && !function->shared()->IsBuiltin() &&
+ is_sloppy(function->shared()->language_mode())) {
+ // Calling sloppy non-builtins with a value as the receiver
+ // requires boxing.
return false;
}
}
+ CallOptimization call_optimization(getter);
+ if (call_optimization.is_simple_api_call() &&
+ !call_optimization.IsCompatibleReceiverMap(receiver_map, holder)) {
+ return false;
+ }
}
}
return true;
@@ -1019,7 +1022,7 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
lookup->state() == LookupIterator::ACCESS_CHECK) {
code = slow_stub();
} else if (!lookup->IsFound()) {
- if (kind() == Code::LOAD_IC && !is_strong(language_mode())) {
+ if (kind() == Code::LOAD_IC) {
code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
receiver_map());
// TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
@@ -1168,50 +1171,39 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
return stub.GetCode();
}
- Handle<Object> accessors = lookup->GetAccessors();
- if (accessors->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(accessors);
- if (v8::ToCData<Address>(info->getter()) == 0) break;
- if (!ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate(), info,
- map)) {
- // This case should be already handled in LoadIC::UpdateCaches.
- UNREACHABLE();
- break;
- }
- if (!holder->HasFastProperties()) break;
- NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
- return compiler.CompileLoadCallback(lookup->name(), info);
- }
- if (accessors->IsAccessorPair()) {
- Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
- isolate());
- if (!getter->IsJSFunction()) break;
- if (!holder->HasFastProperties()) break;
- // When debugging we need to go the slow path to flood the accessor.
- if (GetSharedFunctionInfo()->HasDebugInfo()) break;
- Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
- if (!receiver->IsJSObject() && !function->shared()->IsBuiltin() &&
- is_sloppy(function->shared()->language_mode())) {
- // Calling sloppy non-builtins with a value as the receiver
- // requires boxing.
- break;
- }
- CallOptimization call_optimization(function);
- NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
- if (call_optimization.is_simple_api_call()) {
- if (call_optimization.IsCompatibleReceiver(receiver, holder)) {
+ if (IsCompatibleReceiver(lookup, map)) {
+ Handle<Object> accessors = lookup->GetAccessors();
+ if (accessors->IsAccessorPair()) {
+ if (!holder->HasFastProperties()) break;
+ // When debugging we need to go the slow path to flood the accessor.
+ if (GetSharedFunctionInfo()->HasDebugInfo()) break;
+ Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
+ isolate());
+ CallOptimization call_optimization(getter);
+ NamedLoadHandlerCompiler compiler(isolate(), map, holder,
+ cache_holder);
+ if (call_optimization.is_simple_api_call()) {
return compiler.CompileLoadCallback(
lookup->name(), call_optimization, lookup->GetAccessorIndex());
- } else {
+ }
+ int expected_arguments = Handle<JSFunction>::cast(getter)
+ ->shared()
+ ->internal_formal_parameter_count();
+ return compiler.CompileLoadViaGetter(
+ lookup->name(), lookup->GetAccessorIndex(), expected_arguments);
+ } else if (accessors->IsAccessorInfo()) {
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
+ if (v8::ToCData<Address>(info->getter()) == 0) break;
+ if (!AccessorInfo::IsCompatibleReceiverMap(isolate(), info, map)) {
// This case should be already handled in LoadIC::UpdateCaches.
UNREACHABLE();
+ break;
}
+ if (!holder->HasFastProperties()) break;
+ NamedLoadHandlerCompiler compiler(isolate(), map, holder,
+ cache_holder);
+ return compiler.CompileLoadCallback(lookup->name(), info);
}
- int expected_arguments =
- function->shared()->internal_formal_parameter_count();
- return compiler.CompileLoadViaGetter(
- lookup->name(), lookup->GetAccessorIndex(), expected_arguments);
}
break;
}
@@ -1237,9 +1229,7 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
// property must be found in the object for the stub to be
// applicable.
if (!receiver_is_holder) break;
- return is_strong(language_mode())
- ? isolate()->builtins()->LoadIC_Normal_Strong()
- : isolate()->builtins()->LoadIC_Normal();
+ return isolate()->builtins()->LoadIC_Normal();
}
// -------------- Fields --------------
@@ -1349,8 +1339,7 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
CodeHandleList handlers(target_receiver_maps.length());
ElementHandlerCompiler compiler(isolate());
- compiler.CompileElementHandlers(&target_receiver_maps, &handlers,
- language_mode());
+ compiler.CompileElementHandlers(&target_receiver_maps, &handlers);
ConfigureVectorState(Handle<Name>::null(), &target_receiver_maps, &handlers);
return null_handle;
}
@@ -1361,8 +1350,7 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
if (MigrateDeprecated(object)) {
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Runtime::GetObjectProperty(isolate(), object, key, language_mode()),
+ isolate(), result, Runtime::GetObjectProperty(isolate(), object, key),
Object);
return result;
}
@@ -1378,7 +1366,8 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
ASSIGN_RETURN_ON_EXCEPTION(isolate(), load_handle,
LoadIC::Load(object, Handle<Name>::cast(key)),
Object);
- } else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
+ } else if (FLAG_use_ic && !object->IsAccessCheckNeeded() &&
+ !object->IsJSValue()) {
if (object->IsJSObject() || (object->IsString() && key->IsNumber())) {
Handle<HeapObject> receiver = Handle<HeapObject>::cast(object);
if (object->IsString() || key->IsSmi()) stub = LoadElementStub(receiver);
@@ -1399,10 +1388,9 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
if (!load_handle.is_null()) return load_handle;
Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Runtime::GetObjectProperty(isolate(), object, key, language_mode()),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ Runtime::GetObjectProperty(isolate(), object, key),
+ Object);
return result;
}
@@ -1410,9 +1398,10 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode) {
// Disable ICs for non-JSObjects for now.
- Handle<Object> receiver = it->GetReceiver();
- if (!receiver->IsJSObject()) return false;
- DCHECK(!Handle<JSObject>::cast(receiver)->map()->is_deprecated());
+ Handle<Object> object = it->GetReceiver();
+ if (!object->IsJSObject()) return false;
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ DCHECK(!receiver->map()->is_deprecated());
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -1451,21 +1440,24 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
}
// Receiver != holder.
- PrototypeIterator iter(it->isolate(), receiver);
if (receiver->IsJSGlobalProxy()) {
+ PrototypeIterator iter(it->isolate(), receiver);
return it->GetHolder<Object>().is_identical_to(
PrototypeIterator::GetCurrent(iter));
}
if (it->HolderIsReceiverOrHiddenPrototype()) return false;
- it->PrepareTransitionToDataProperty(value, NONE, store_mode);
+ if (it->ExtendingNonExtensible(receiver)) return false;
+ it->PrepareTransitionToDataProperty(receiver, value, NONE, store_mode);
return it->IsCacheableTransition();
}
}
}
- it->PrepareTransitionToDataProperty(value, NONE, store_mode);
+ receiver = it->GetStoreTarget();
+ if (it->ExtendingNonExtensible(receiver)) return false;
+ it->PrepareTransitionToDataProperty(receiver, value, NONE, store_mode);
return it->IsCacheableTransition();
}
@@ -1510,7 +1502,7 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
}
Handle<Object> previous_value =
- FixedArray::get(script_context, lookup_result.slot_index);
+ FixedArray::get(*script_context, lookup_result.slot_index, isolate());
if (*previous_value == *isolate()->factory()->the_hole_value()) {
// Do not install stubs and stay pre-monomorphic for
@@ -1564,18 +1556,18 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
return value;
}
-
Handle<Code> CallIC::initialize_stub(Isolate* isolate, int argc,
- ConvertReceiverMode mode) {
- CallICTrampolineStub stub(isolate, CallICState(argc, mode));
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
+ CallICTrampolineStub stub(isolate, CallICState(argc, mode, tail_call_mode));
Handle<Code> code = stub.GetCode();
return code;
}
-
Handle<Code> CallIC::initialize_stub_in_optimized_code(
- Isolate* isolate, int argc, ConvertReceiverMode mode) {
- CallICStub stub(isolate, CallICState(argc, mode));
+ Isolate* isolate, int argc, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
+ CallICStub stub(isolate, CallICState(argc, mode, tail_call_mode));
Handle<Code> code = stub.GetCode();
return code;
}
@@ -1693,8 +1685,7 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
// This is currently guaranteed by checks in StoreIC::Store.
Handle<JSObject> receiver = Handle<JSObject>::cast(lookup->GetReceiver());
Handle<JSObject> holder = lookup->GetHolder<JSObject>();
- DCHECK(!receiver->IsAccessCheckNeeded() ||
- isolate()->IsInternallyUsedPropertyName(lookup->name()));
+ DCHECK(!receiver->IsAccessCheckNeeded() || lookup->name()->IsPrivate());
switch (lookup->state()) {
case LookupIterator::TRANSITION: {
@@ -1733,9 +1724,8 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
break;
}
Handle<Object> accessors = lookup->GetAccessors();
- if (accessors->IsExecutableAccessorInfo()) {
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(accessors);
+ if (accessors->IsAccessorInfo()) {
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
if (v8::ToCData<Address>(info->setter()) == 0) {
TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == 0");
break;
@@ -1746,13 +1736,14 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
"special data property in prototype chain");
break;
}
- if (!ExecutableAccessorInfo::IsCompatibleReceiverMap(isolate(), info,
- receiver_map())) {
+ if (!AccessorInfo::IsCompatibleReceiverMap(isolate(), info,
+ receiver_map())) {
TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
break;
}
NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
- return compiler.CompileStoreCallback(receiver, lookup->name(), info);
+ return compiler.CompileStoreCallback(receiver, lookup->name(), info,
+ language_mode());
} else if (accessors->IsAccessorPair()) {
Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
isolate());
@@ -1800,9 +1791,8 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
bool use_stub = true;
if (lookup->representation().IsHeapObject()) {
// Only use a generic stub if no types need to be tracked.
- Handle<HeapType> field_type = lookup->GetFieldType();
- HeapType::Iterator<Map> it = field_type->Classes();
- use_stub = it.Done();
+ Handle<FieldType> field_type = lookup->GetFieldType();
+ use_stub = !field_type->IsClass();
}
if (use_stub) {
StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
@@ -2220,6 +2210,7 @@ void CallIC::HandleMiss(Handle<Object> function) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
DCHECK(args.length() == 3);
Handle<Object> function = args.at<Object>(0);
@@ -2236,6 +2227,7 @@ RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Name> key = args.at<Name>(1);
@@ -2268,6 +2260,7 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
// Used from ic-<arch>.cc
RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
@@ -2287,6 +2280,7 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
@@ -2308,6 +2302,7 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Name> key = args.at<Name>(1);
@@ -2339,6 +2334,7 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Name> key = args.at<Name>(1);
@@ -2391,6 +2387,7 @@ RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
@@ -2412,6 +2409,7 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
@@ -2469,6 +2467,7 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
// Length == 5 or 6, depending on whether the vector slot
// is passed in a virtual register or not.
@@ -2504,60 +2503,52 @@ MaybeHandle<Object> BinaryOpIC::Transition(
default:
UNREACHABLE();
case Token::ADD:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::Add(isolate(), left, right, state.strength()), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ Object::Add(isolate(), left, right), Object);
break;
case Token::SUB:
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::Subtract(isolate(), left, right, state.strength()), Object);
+ isolate(), result, Object::Subtract(isolate(), left, right), Object);
break;
case Token::MUL:
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::Multiply(isolate(), left, right, state.strength()), Object);
+ isolate(), result, Object::Multiply(isolate(), left, right), Object);
break;
case Token::DIV:
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::Divide(isolate(), left, right, state.strength()), Object);
+ isolate(), result, Object::Divide(isolate(), left, right), Object);
break;
case Token::MOD:
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::Modulus(isolate(), left, right, state.strength()), Object);
+ isolate(), result, Object::Modulus(isolate(), left, right), Object);
break;
case Token::BIT_OR:
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::BitwiseOr(isolate(), left, right, state.strength()), Object);
+ isolate(), result, Object::BitwiseOr(isolate(), left, right), Object);
break;
case Token::BIT_AND:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::BitwiseAnd(isolate(), left, right, state.strength()), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ Object::BitwiseAnd(isolate(), left, right),
+ Object);
break;
case Token::BIT_XOR:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::BitwiseXor(isolate(), left, right, state.strength()), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ Object::BitwiseXor(isolate(), left, right),
+ Object);
break;
case Token::SAR:
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::ShiftRight(isolate(), left, right, state.strength()), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+ Object::ShiftRight(isolate(), left, right),
+ Object);
break;
case Token::SHR:
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::ShiftRightLogical(isolate(), left, right, state.strength()),
+ isolate(), result, Object::ShiftRightLogical(isolate(), left, right),
Object);
break;
case Token::SHL:
ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result,
- Object::ShiftLeft(isolate(), left, right, state.strength()), Object);
+ isolate(), result, Object::ShiftLeft(isolate(), left, right), Object);
break;
}
@@ -2624,6 +2615,7 @@ MaybeHandle<Object> BinaryOpIC::Transition(
RUNTIME_FUNCTION(Runtime_BinaryOpIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
Handle<Object> left = args.at<Object>(BinaryOpICStub::kLeft);
@@ -2639,6 +2631,7 @@ RUNTIME_FUNCTION(Runtime_BinaryOpIC_Miss) {
RUNTIME_FUNCTION(Runtime_BinaryOpIC_MissWithAllocationSite) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
Handle<AllocationSite> allocation_site =
@@ -2653,10 +2646,8 @@ RUNTIME_FUNCTION(Runtime_BinaryOpIC_MissWithAllocationSite) {
return *result;
}
-
-Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op,
- Strength strength) {
- CompareICStub stub(isolate, op, strength, CompareICState::UNINITIALIZED,
+Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
+ CompareICStub stub(isolate, op, CompareICState::UNINITIALIZED,
CompareICState::UNINITIALIZED,
CompareICState::UNINITIALIZED);
Code* code = NULL;
@@ -2664,10 +2655,8 @@ Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op,
return code;
}
-
-Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op,
- Strength strength) {
- CompareICStub stub(isolate, op, strength, CompareICState::UNINITIALIZED,
+Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) {
+ CompareICStub stub(isolate, op, CompareICState::UNINITIALIZED,
CompareICState::UNINITIALIZED,
CompareICState::UNINITIALIZED);
return stub.GetCode();
@@ -2684,8 +2673,7 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
CompareICState::State state = CompareICState::TargetState(
old_stub.state(), old_stub.left(), old_stub.right(), op_,
HasInlinedSmiCode(address()), x, y);
- CompareICStub stub(isolate(), op_, old_stub.strength(), new_left, new_right,
- state);
+ CompareICStub stub(isolate(), op_, new_left, new_right, state);
if (state == CompareICState::KNOWN_RECEIVER) {
stub.set_known_map(
Handle<Map>(Handle<JSReceiver>::cast(x)->map(), isolate()));
@@ -2718,6 +2706,7 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
// Used from CompareICStub::GenerateMiss in code-stubs-<arch>.cc.
RUNTIME_FUNCTION(Runtime_CompareIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
DCHECK(args.length() == 3);
CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
@@ -2740,15 +2729,6 @@ void CompareNilIC::Clear(Address address, Code* target, Address constant_pool) {
}
-Handle<Object> CompareNilIC::DoCompareNilSlow(Isolate* isolate, NilValue nil,
- Handle<Object> object) {
- if (object->IsNull() || object->IsUndefined()) {
- return isolate->factory()->true_value();
- }
- return isolate->factory()->ToBoolean(object->IsUndetectableObject());
-}
-
-
Handle<Object> CompareNilIC::CompareNil(Handle<Object> object) {
ExtraICState extra_ic_state = target()->extra_ic_state();
@@ -2760,8 +2740,6 @@ Handle<Object> CompareNilIC::CompareNil(Handle<Object> object) {
stub.UpdateStatus(object);
- NilValue nil = stub.nil_value();
-
// Find or create the specialized stub to support the new set of types.
Handle<Code> code;
if (stub.IsMonomorphic()) {
@@ -2773,12 +2751,13 @@ Handle<Object> CompareNilIC::CompareNil(Handle<Object> object) {
code = stub.GetCode();
}
set_target(*code);
- return DoCompareNilSlow(isolate(), nil, object);
+ return isolate()->factory()->ToBoolean(object->IsUndetectableObject());
}
RUNTIME_FUNCTION(Runtime_CompareNilIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
Handle<Object> object = args.at<Object>(0);
CompareNilIC ic(isolate);
@@ -2804,6 +2783,7 @@ Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
RUNTIME_FUNCTION(Runtime_ToBooleanIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
DCHECK(args.length() == 1);
HandleScope scope(isolate);
Handle<Object> object = args.at<Object>(0);
@@ -2818,13 +2798,13 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
Handle<HeapObject> callback_or_cell = args.at<HeapObject>(2);
Handle<Name> name = args.at<Name>(3);
Handle<Object> value = args.at<Object>(4);
+ CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 5);
HandleScope scope(isolate);
- Handle<ExecutableAccessorInfo> callback(
+ Handle<AccessorInfo> callback(
callback_or_cell->IsWeakCell()
- ? ExecutableAccessorInfo::cast(
- WeakCell::cast(*callback_or_cell)->value())
- : ExecutableAccessorInfo::cast(*callback_or_cell));
+ ? AccessorInfo::cast(WeakCell::cast(*callback_or_cell)->value())
+ : AccessorInfo::cast(*callback_or_cell));
DCHECK(callback->IsCompatibleReceiver(*receiver));
@@ -2834,8 +2814,10 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
DCHECK(fun != NULL);
LOG(isolate, ApiNamedPropertyAccess("store", *receiver, *name));
+ Object::ShouldThrow should_throw =
+ is_sloppy(language_mode) ? Object::DONT_THROW : Object::THROW_ON_ERROR;
PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver,
- *holder);
+ *holder, should_throw);
custom_args.Call(fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return *value;
@@ -2914,9 +2896,10 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
Handle<Object> value = args.at<Object>(2);
#ifdef DEBUG
PrototypeIterator iter(isolate, receiver,
- PrototypeIterator::START_AT_RECEIVER);
+ PrototypeIterator::START_AT_RECEIVER,
+ PrototypeIterator::END_AT_NON_HIDDEN);
bool found = false;
- for (; !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
+ for (; !iter.IsAtEnd(); iter.Advance()) {
Handle<Object> current = PrototypeIterator::GetCurrent(iter);
if (current->IsJSObject() &&
Handle<JSObject>::cast(current)->HasNamedInterceptor()) {
@@ -2941,17 +2924,15 @@ RUNTIME_FUNCTION(Runtime_LoadElementWithInterceptor) {
DCHECK(args.smi_at(1) >= 0);
uint32_t index = args.smi_at(1);
Handle<Object> result;
- // TODO(conradw): Investigate strong mode semantics for this.
- LanguageMode language_mode = SLOPPY;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Object::GetElement(isolate, receiver, index, language_mode));
+ isolate, result, Object::GetElement(isolate, receiver, index));
return *result;
}
RUNTIME_FUNCTION(Runtime_LoadIC_MissFromStubFailure) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss");
HandleScope scope(isolate);
Handle<Object> receiver = args.at<Object>(0);
Handle<Name> key = args.at<Name>(1);
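
Besides the strong-mode and AccessorInfo changes, ic.cc above threads a TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.IcMiss") into every IC miss runtime function, hooking them into the trace_event infrastructure this upgrade imports (see the base/trace_event/common entry in the diffstat and the new src/tracing/trace-event.h include). The macro is a scoped begin/end event; a self-contained model of that RAII shape (a sketch that prints instead of recording into a trace buffer):

#include <cstdio>

class ScopedTraceEventModel {
 public:
  ScopedTraceEventModel(const char* category, const char* name)
      : category_(category), name_(name) {
    std::printf("BEGIN %s %s\n", category_, name_);  // event opens here
  }
  ~ScopedTraceEventModel() {
    std::printf("END   %s %s\n", category_, name_);  // closes on scope exit
  }

 private:
  const char* category_;
  const char* name_;
};

#define TRACE_EVENT0_MODEL(category, name) \
  ScopedTraceEventModel trace_scope(category, name)

void Runtime_LoadIC_Miss_Model() {
  TRACE_EVENT0_MODEL("disabled-by-default-v8", "V8.IcMiss");
  // ... the actual miss handling would run inside the scoped event ...
}
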
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index a3265d70b9..2ce182b76e 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -284,9 +284,11 @@ class CallIC : public IC {
// Code generator routines.
static Handle<Code> initialize_stub(Isolate* isolate, int argc,
- ConvertReceiverMode mode);
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode);
static Handle<Code> initialize_stub_in_optimized_code(
- Isolate* isolate, int argc, ConvertReceiverMode mode);
+ Isolate* isolate, int argc, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode);
static void Clear(Isolate* isolate, Code* host, CallICNexus* nexus);
};
@@ -294,19 +296,14 @@ class CallIC : public IC {
class LoadIC : public IC {
public:
- static ExtraICState ComputeExtraICState(TypeofMode typeof_mode,
- LanguageMode language_mode) {
- return LoadICState(typeof_mode, language_mode).GetExtraICState();
+ static ExtraICState ComputeExtraICState(TypeofMode typeof_mode) {
+ return LoadICState(typeof_mode).GetExtraICState();
}
TypeofMode typeof_mode() const {
return LoadICState::GetTypeofMode(extra_ic_state());
}
- LanguageMode language_mode() const {
- return LoadICState::GetLanguageMode(extra_ic_state());
- }
-
LoadIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
: IC(depth, isolate, nexus) {
DCHECK(nexus != NULL);
@@ -321,9 +318,8 @@ class LoadIC : public IC {
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode);
- static void GenerateNormal(MacroAssembler* masm, LanguageMode language_mode);
+ static void GenerateRuntimeGetProperty(MacroAssembler* masm);
+ static void GenerateNormal(MacroAssembler* masm);
static Handle<Code> initialize_stub(Isolate* isolate,
ExtraICState extra_state);
@@ -340,14 +336,10 @@ class LoadIC : public IC {
Handle<Code> slow_stub() const {
if (kind() == Code::LOAD_IC) {
- return is_strong(language_mode())
- ? isolate()->builtins()->LoadIC_Slow_Strong()
- : isolate()->builtins()->LoadIC_Slow();
+ return isolate()->builtins()->LoadIC_Slow();
} else {
DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
- return is_strong(language_mode())
- ? isolate()->builtins()->KeyedLoadIC_Slow_Strong()
- : isolate()->builtins()->KeyedLoadIC_Slow();
+ return isolate()->builtins()->KeyedLoadIC_Slow();
}
}
@@ -377,9 +369,8 @@ class KeyedLoadIC : public LoadIC {
: public BitField<IcCheckType, LoadICState::kNextBitFieldOffset, 1> {};
static ExtraICState ComputeExtraICState(TypeofMode typeof_mode,
- LanguageMode language_mode,
IcCheckType key_type) {
- return LoadICState(typeof_mode, language_mode).GetExtraICState() |
+ return LoadICState(typeof_mode).GetExtraICState() |
IcCheckTypeField::encode(key_type);
}
@@ -399,11 +390,9 @@ class KeyedLoadIC : public LoadIC {
// Code generator routines.
static void GenerateMiss(MacroAssembler* masm);
- static void GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode);
+ static void GenerateRuntimeGetProperty(MacroAssembler* masm);
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
- static void GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode);
+ static void GenerateMegamorphic(MacroAssembler* masm);
// Bit mask to be tested against bit field for the cases when
// generic stub should go into slow case.
@@ -616,8 +605,7 @@ class CompareIC : public IC {
static Condition ComputeCondition(Token::Value op);
// Factory method for getting an uninitialized compare stub.
- static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op,
- Strength strength);
+ static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op);
private:
static bool HasInlinedSmiCode(Address address);
@@ -625,8 +613,7 @@ class CompareIC : public IC {
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return ComputeCondition(op_); }
- static Code* GetRawUninitialized(Isolate* isolate, Token::Value op,
- Strength strength);
+ static Code* GetRawUninitialized(Isolate* isolate, Token::Value op);
static void Clear(Isolate* isolate, Address address, Code* target,
Address constant_pool);
@@ -646,9 +633,6 @@ class CompareNilIC : public IC {
static Handle<Code> GetUninitialized();
static void Clear(Address address, Code* target, Address constant_pool);
-
- static Handle<Object> DoCompareNilSlow(Isolate* isolate, NilValue nil,
- Handle<Object> object);
};
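
The ic.h hunks above finish removing the strength plumbing from CompareIC and delete CompareNilIC::DoCompareNilSlow, whose body (per the ic.cc hunk) collapses into a single ToBoolean(object->IsUndetectableObject()) call. That only works if null and undefined themselves test undetectable, which this sketch assumes (consistent with V8 marking the null/undefined oddball maps undetectable):

#include <cstdio>

struct ObjectModel {
  bool is_undetectable;  // set for null, undefined, document.all-style objects
};

// Model of the simplified CompareNil slow path: one flag test replaces the
// old IsNull() || IsUndefined() || IsUndetectableObject() cascade.
bool CompareNilModel(const ObjectModel& o) { return o.is_undetectable; }

int main() {
  ObjectModel null_like{true};
  ObjectModel plain{false};
  std::printf("%d %d\n", CompareNilModel(null_like), CompareNilModel(plain));
  return 0;  // prints "1 0"
}
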
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index 554d0c56ff..f3af1cf537 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -4,8 +4,10 @@
#if V8_TARGET_ARCH_MIPS
-#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
@@ -279,9 +281,16 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
- __ lw(data, FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ lw(data, FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ lw(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ if (optimization.is_constant_call()) {
+ __ lw(data,
+ FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(data,
+ FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ lw(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ } else {
+ __ lw(data,
+ FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+ }
__ lw(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
@@ -299,7 +308,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ li(api_function_address, Operand(ref));
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ !optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -383,8 +393,7 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
__ Branch(miss_label, ne, value_reg, Operand(scratch));
}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
Register value_reg,
Label* miss_label) {
Register map_reg = scratch1();
@@ -392,21 +401,11 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
DCHECK(!value_reg.is(map_reg));
DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
- HeapType::Iterator<Map> it = field_type->Classes();
- if (!it.Done()) {
+ if (field_type->IsClass()) {
__ lw(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- Label do_store;
- while (true) {
- // Compare map directly within the Branch() functions.
- __ GetWeakValue(scratch, Map::WeakCellForMap(it.Current()));
- it.Advance();
- if (it.Done()) {
- __ Branch(miss_label, ne, map_reg, Operand(scratch));
- break;
- }
- __ Branch(&do_store, eq, map_reg, Operand(scratch));
- }
- __ bind(&do_store);
+ // Compare map directly within the Branch() functions.
+ __ GetWeakValue(scratch, Map::WeakCellForMap(field_type->AsClass()));
+ __ Branch(miss_label, ne, map_reg, Operand(scratch));
}
}
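This FieldTypeChecks rewrite is repeated verbatim (modulo load instructions) for every architecture below. Since a FieldType is at most one class, the old HeapType multi-map compare loop and its do_store label collapse to a single branch. In pseudocode:

    // if value_reg is a Smi            -> miss
    // if field_type->IsClass():
    //   scratch = weak cell for field_type->AsClass()'s map
    //   if value_reg->map() != scratch -> miss
    // (a non-class field type, e.g. Any, emits no map check at all)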
@@ -584,42 +583,51 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<ExecutableAccessorInfo> callback) {
- // Build AccessorInfo::args_ list on the stack and push property name below
- // the exit frame to make GC aware of them and store pointers to them.
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
- DCHECK(!scratch2().is(reg));
- DCHECK(!scratch3().is(reg));
- DCHECK(!scratch4().is(reg));
- __ push(receiver());
+ Register reg, Handle<AccessorInfo> callback) {
+ DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
+ DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ // Here and below +1 is for name() pushed after the args_ array.
+ typedef PropertyCallbackArguments PCA;
+ __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
+ __ sw(receiver(), MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
Handle<Object> data(callback->data(), isolate());
if (data->IsUndefined() || data->IsSmi()) {
- __ li(scratch3(), data);
+ __ li(scratch2(), data);
} else {
Handle<WeakCell> cell =
isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
// The callback is alive if this instruction is executed,
// so the weak cell is not cleared and points to data.
- __ GetWeakValue(scratch3(), cell);
+ __ GetWeakValue(scratch2(), cell);
}
- __ Subu(sp, sp, 6 * kPointerSize);
- __ sw(scratch3(), MemOperand(sp, 5 * kPointerSize));
- __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
- __ sw(scratch3(), MemOperand(sp, 4 * kPointerSize));
- __ sw(scratch3(), MemOperand(sp, 3 * kPointerSize));
- __ li(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
- __ sw(scratch4(), MemOperand(sp, 2 * kPointerSize));
- __ sw(reg, MemOperand(sp, 1 * kPointerSize));
+ __ sw(scratch2(), MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+ __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
+ __ sw(scratch2(),
+ MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ __ sw(scratch2(), MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+ kPointerSize));
+ __ li(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+ __ sw(scratch2(), MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+ __ sw(reg, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+ // should_throw_on_error -> false
+ DCHECK(Smi::FromInt(0) == nullptr);
+ __ sw(zero_reg,
+ MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
+
__ sw(name(), MemOperand(sp, 0 * kPointerSize));
- __ Addu(scratch2(), sp, 1 * kPointerSize);
- __ mov(a2, scratch2()); // Saved in case scratch2 == a1.
// Abi for CallApiGetter.
Register getter_address_reg = ApiGetterDescriptor::function_address();
@@ -705,8 +713,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+ LanguageMode language_mode) {
Register holder_reg = Frontend(name);
__ Push(receiver(), holder_reg); // Receiver.
@@ -721,6 +729,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(at);
__ li(at, Operand(name));
__ Push(at, value());
+ __ Push(Smi::FromInt(language_mode));
// Do tail-call to the runtime system.
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
@@ -769,7 +778,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+ __ IncrementCounter(counters->ic_named_load_global_stub(), 1, a1, a3);
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
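For reference, the stack picture the rewritten MIPS GenerateLoadCallback leaves before tail-calling CallApiGetter, reconstructed from the STATIC_ASSERTs and the `(index + 1) * kPointerSize` stores above (name() sits at sp + 0):

    sp + 0*kPointerSize : name
    sp + 1*kPointerSize : should_throw_on_error (Smi 0, i.e. false)
    sp + 2*kPointerSize : holder (reg)
    sp + 3*kPointerSize : isolate
    sp + 4*kPointerSize : return value default (undefined)
    sp + 5*kPointerSize : return value (undefined)
    sp + 6*kPointerSize : data
    sp + 7*kPointerSize : this (receiver)

The new kShouldThrowOnErrorIndex slot is what grows kArgsLength from 6 to 7; the getter path always passes false.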
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index a27d6b56f7..ae3615e3bb 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -159,8 +159,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* slow,
- LanguageMode language_mode) {
+ Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -216,13 +215,8 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ Branch(&check_next_prototype);
__ bind(&absent);
- if (is_strong(language_mode)) {
- // Strong mode accesses must throw in this case, so call the runtime.
- __ Branch(slow);
- } else {
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ Branch(&done);
- }
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Branch(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
@@ -230,8 +224,7 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// The key is a smi.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(at, at, scratch1);
+ __ Lsa(at, scratch1, key, kPointerSizeLog2 - kSmiTagSize);
__ lw(scratch2, MemOperand(at));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -271,8 +264,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
__ bind(&unique);
}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = a0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -287,7 +279,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
- GenerateRuntimeGetProperty(masm, language_mode);
+ GenerateRuntimeGetProperty(masm);
}
@@ -311,7 +303,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->load_miss(), 1, t0, t1);
+ __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, t0, t1);
LoadIC_PushArgs(masm);
@@ -319,17 +311,14 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kLoadIC_Miss);
}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in ra.
__ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
__ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty);
+ __ TailCallRuntime(Runtime::kGetProperty);
}
@@ -339,7 +328,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, t0, t1);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, t0, t1);
LoadIC_PushArgs(masm);
@@ -347,21 +336,16 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in ra.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in ra.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -385,9 +369,9 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(a0, a3, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, &slow,
- language_mode);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, t0, a3);
+ GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, &slow);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, t0,
+ a3);
__ Ret();
__ bind(&check_number_dictionary);
@@ -405,9 +389,9 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
// Slow case, key and receiver still in a2 and a1.
__ bind(&slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, t0,
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, t0,
a3);
- GenerateRuntimeGetProperty(masm, language_mode);
+ GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
@@ -451,8 +435,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
// Load the property to v0.
GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, t0,
- a3);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
+ t0, a3);
__ Ret();
__ bind(&index_name);
@@ -491,8 +475,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
// there may be a callback on the element.
Label holecheck_passed1;
__ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(address, address, at);
+ __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
__ lw(scratch, MemOperand(address));
__ Branch(&holecheck_passed1, ne, scratch,
Operand(masm->isolate()->factory()->the_hole_value()));
@@ -511,8 +494,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
}
// It's irrelevant whether array is smi-only or not when writing a smi.
__ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch);
+ __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
__ sw(value, MemOperand(address));
__ Ret();
@@ -528,8 +510,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch);
+ __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
__ sw(value, MemOperand(address));
// Update write barrier for the elements array address.
__ mov(scratch, value); // Preserve the value which is returned.
@@ -550,8 +531,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
// go to the runtime.
__ Addu(address, elements, Operand(FixedDoubleArray::kHeaderSize +
kHoleNanUpper32Offset - kHeapObjectTag));
- __ sll(at, key, kPointerSizeLog2);
- __ addu(address, address, at);
+ __ Lsa(address, address, key, kPointerSizeLog2);
__ lw(scratch, MemOperand(address));
__ Branch(&fast_double_without_map_check, ne, scratch,
Operand(kHoleNanUpper32));
@@ -791,11 +771,11 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
GenerateDictionaryStore(masm, &miss, dictionary, name, value, t2, t5);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, t2, t5);
+ __ IncrementCounter(counters->ic_store_normal_hit(), 1, t2, t5);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, t2, t5);
+ __ IncrementCounter(counters->ic_store_normal_miss(), 1, t2, t5);
GenerateMiss(masm);
}
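The recurring `sll`/`addu` to `Lsa` rewrites in this file are a pure peephole; no behavior changes. Sketch of the equivalence, assuming (as the macro name suggests) that Lsa(rd, base, index, sa) computes rd = base + (index << sa):

    // before:
    //   sll  at, key, kPointerSizeLog2 - kSmiTagSize
    //   addu at, at, scratch1
    // after:
    //   Lsa  at, scratch1, key, kPointerSizeLog2 - kSmiTagSize

One macro call replaces the two-instruction shift-and-add, and on MIPS32R6 the macro can expand to the native lsa instruction.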
diff --git a/deps/v8/src/ic/mips/stub-cache-mips.cc b/deps/v8/src/ic/mips/stub-cache-mips.cc
index 1a9897e8f3..039763c4cf 100644
--- a/deps/v8/src/ic/mips/stub-cache-mips.cc
+++ b/deps/v8/src/ic/mips/stub-cache-mips.cc
@@ -42,13 +42,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
scratch = no_reg;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ sll(offset_scratch, offset, 1);
- __ Addu(offset_scratch, offset_scratch, offset);
+ __ Lsa(offset_scratch, offset, offset, 1);
// Calculate the base address of the entry.
__ li(base_addr, Operand(key_offset));
- __ sll(at, offset_scratch, kPointerSizeLog2);
- __ Addu(base_addr, base_addr, at);
+ __ Lsa(base_addr, base_addr, offset_scratch, kPointerSizeLog2);
// Check that the key in the entry matches the name.
__ lw(at, MemOperand(base_addr, 0));
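Worked example for the hunk above: Lsa(offset_scratch, offset, offset, 1) computes offset + (offset << 1) = 3 * offset, the entry stride, since each stub-cache entry spans three table slots (name, code, map). The second Lsa then scales the result into a byte address:

    base_addr = key_offset + ((3 * offset) << kPointerSizeLog2)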
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index d94a292228..968effdd7f 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -4,8 +4,10 @@
#if V8_TARGET_ARCH_MIPS64
-#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
@@ -279,9 +281,16 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
- __ ld(data, FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ ld(data, FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ if (optimization.is_constant_call()) {
+ __ ld(data,
+ FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(data,
+ FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ } else {
+ __ ld(data,
+ FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+ }
__ ld(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
@@ -299,7 +308,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ li(api_function_address, Operand(ref));
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ !optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -383,8 +393,7 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
__ Branch(miss_label, ne, value_reg, Operand(scratch));
}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
Register value_reg,
Label* miss_label) {
Register map_reg = scratch1();
@@ -392,21 +401,11 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
DCHECK(!value_reg.is(map_reg));
DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
- HeapType::Iterator<Map> it = field_type->Classes();
- if (!it.Done()) {
+ if (field_type->IsClass()) {
__ ld(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- Label do_store;
- while (true) {
- // Compare map directly within the Branch() functions.
- __ GetWeakValue(scratch, Map::WeakCellForMap(it.Current()));
- it.Advance();
- if (it.Done()) {
- __ Branch(miss_label, ne, map_reg, Operand(scratch));
- break;
- }
- __ Branch(&do_store, eq, map_reg, Operand(scratch));
- }
- __ bind(&do_store);
+ // Compare map directly within the Branch() functions.
+ __ GetWeakValue(scratch, Map::WeakCellForMap(field_type->AsClass()));
+ __ Branch(miss_label, ne, map_reg, Operand(scratch));
}
}
@@ -584,42 +583,51 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<ExecutableAccessorInfo> callback) {
- // Build AccessorInfo::args_ list on the stack and push property name below
- // the exit frame to make GC aware of them and store pointers to them.
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
- DCHECK(!scratch2().is(reg));
- DCHECK(!scratch3().is(reg));
- DCHECK(!scratch4().is(reg));
- __ push(receiver());
+ Register reg, Handle<AccessorInfo> callback) {
+ DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
+ DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ // Here and below +1 is for name() pushed after the args_ array.
+ typedef PropertyCallbackArguments PCA;
+ __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
+ __ sd(receiver(), MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
Handle<Object> data(callback->data(), isolate());
if (data->IsUndefined() || data->IsSmi()) {
- __ li(scratch3(), data);
+ __ li(scratch2(), data);
} else {
Handle<WeakCell> cell =
isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
// The callback is alive if this instruction is executed,
// so the weak cell is not cleared and points to data.
- __ GetWeakValue(scratch3(), cell);
+ __ GetWeakValue(scratch2(), cell);
}
- __ Dsubu(sp, sp, 6 * kPointerSize);
- __ sd(scratch3(), MemOperand(sp, 5 * kPointerSize));
- __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
- __ sd(scratch3(), MemOperand(sp, 4 * kPointerSize));
- __ sd(scratch3(), MemOperand(sp, 3 * kPointerSize));
- __ li(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
- __ sd(scratch4(), MemOperand(sp, 2 * kPointerSize));
- __ sd(reg, MemOperand(sp, 1 * kPointerSize));
+ __ sd(scratch2(), MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+ __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
+ __ sd(scratch2(),
+ MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ __ sd(scratch2(), MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+ kPointerSize));
+ __ li(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+ __ sd(scratch2(), MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+ __ sd(reg, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+ // should_throw_on_error -> false
+ DCHECK(Smi::FromInt(0) == nullptr);
+ __ sd(zero_reg,
+ MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
+
__ sd(name(), MemOperand(sp, 0 * kPointerSize));
- __ Daddu(scratch2(), sp, 1 * kPointerSize);
- __ mov(a2, scratch2()); // Saved in case scratch2 == a1.
// Abi for CallApiGetter.
Register getter_address_reg = ApiGetterDescriptor::function_address();
@@ -705,8 +713,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+ LanguageMode language_mode) {
Register holder_reg = Frontend(name);
__ Push(receiver(), holder_reg); // Receiver.
@@ -721,6 +729,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(at);
__ li(at, Operand(name));
__ Push(at, value());
+ __ Push(Smi::FromInt(language_mode));
// Do tail-call to the runtime system.
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
@@ -769,7 +778,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+ __ IncrementCounter(counters->ic_named_load_global_stub(), 1, a1, a3);
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
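A note on the DCHECK(Smi::FromInt(0) == nullptr) / zero_reg pairing used in both MIPS files above. The reasoning, assuming V8's standard Smi tagging with kSmiTag == 0 (STATIC_ASSERTed elsewhere in this patch):

    // 32-bit: Smi::FromInt(v) encodes as (v << kSmiTagSize) | kSmiTag,
    //         so v == 0 yields the all-zero word.
    // 64-bit: the payload lives in the upper 32 bits, low bits zero,
    //         so v == 0 is again the all-zero word.
    // Storing zero_reg therefore writes a valid tagged Smi 0, read back
    // as should_throw_on_error == false, without materializing a constant.

The DCHECK pins exactly that encoding assumption at the point of use.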
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index c5da5fbb42..f46c9dcb26 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -158,8 +158,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* slow,
- LanguageMode language_mode) {
+ Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -215,12 +214,8 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ Branch(&check_next_prototype);
__ bind(&absent);
- if (is_strong(language_mode)) {
- __ Branch(slow);
- } else {
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ Branch(&done);
- }
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ Branch(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
@@ -269,8 +264,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
__ bind(&unique);
}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = a0;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -284,7 +278,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
- GenerateRuntimeGetProperty(masm, language_mode);
+ GenerateRuntimeGetProperty(masm);
}
@@ -308,7 +302,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->load_miss(), 1, a4, a5);
+ __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, a4, a5);
LoadIC_PushArgs(masm);
@@ -316,17 +310,14 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kLoadIC_Miss);
}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in ra.
__ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
__ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty);
+ __ TailCallRuntime(Runtime::kGetProperty);
}
@@ -336,7 +327,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a4, a5);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, a4, a5);
LoadIC_PushArgs(masm);
@@ -344,21 +335,16 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in ra.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in ra.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -382,9 +368,9 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(a0, a3, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, &slow,
- language_mode);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a4, a3);
+ GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, &slow);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, a4,
+ a3);
__ Ret();
__ bind(&check_number_dictionary);
@@ -402,9 +388,9 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
// Slow case, key and receiver still in a2 and a1.
__ bind(&slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, a4,
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, a4,
a3);
- GenerateRuntimeGetProperty(masm, language_mode);
+ GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
@@ -448,8 +434,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
// Load the property to v0.
GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, a4,
- a3);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
+ a4, a3);
__ Ret();
__ bind(&index_name);
@@ -787,11 +773,11 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
GenerateDictionaryStore(masm, &miss, dictionary, name, value, a6, a7);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, a6, a7);
+ __ IncrementCounter(counters->ic_store_normal_hit(), 1, a6, a7);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, a6, a7);
+ __ IncrementCounter(counters->ic_store_normal_miss(), 1, a6, a7);
GenerateMiss(masm);
}
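Every is_strong(language_mode) deletion in this file follows one pattern; the absent-property path of GenerateFastArrayLoad is representative. A sketch of the generated fast path after the change (pseudocode, not literal source):

    // absent:
    //   result = undefined     // strong mode used to branch to slow here
    //   goto done
    // runtime tail calls keep only the plain entries:
    //   Runtime::kGetProperty / Runtime::kKeyedGetProperty

With language mode no longer observable at load time, the parameter disappears from GenerateNormal, GenerateRuntimeGetProperty and GenerateMegamorphic alike.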
diff --git a/deps/v8/src/ic/mips64/stub-cache-mips64.cc b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
index 4ab9f8e5b2..0bd7dd0f2d 100644
--- a/deps/v8/src/ic/mips64/stub-cache-mips64.cc
+++ b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
@@ -42,13 +42,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
scratch = no_reg;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ dsll(offset_scratch, offset, 1);
- __ Daddu(offset_scratch, offset_scratch, offset);
+ __ Dlsa(offset_scratch, offset, offset, 1);
// Calculate the base address of the entry.
__ li(base_addr, Operand(key_offset));
- __ dsll(at, offset_scratch, kPointerSizeLog2);
- __ Daddu(base_addr, base_addr, at);
+ __ Dlsa(base_addr, base_addr, offset_scratch, kPointerSizeLog2);
// Check that the key in the entry matches the name.
__ ld(at, MemOperand(base_addr, 0));
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 8b48755bbf..6e7d78afd3 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -4,8 +4,10 @@
#if V8_TARGET_ARCH_PPC
-#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
@@ -284,12 +286,17 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
- __ LoadP(data,
- FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(data,
- FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ LoadP(data,
- FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ if (optimization.is_constant_call()) {
+ __ LoadP(data,
+ FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(data,
+ FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadP(data,
+ FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ } else {
+ __ LoadP(data,
+ FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+ }
__ LoadP(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
@@ -308,7 +315,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(api_function_address, Operand(ref));
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ !optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -393,8 +401,7 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
__ bne(miss_label);
}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
Register value_reg,
Label* miss_label) {
Register map_reg = scratch1();
@@ -402,20 +409,11 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
DCHECK(!value_reg.is(map_reg));
DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
- HeapType::Iterator<Map> it = field_type->Classes();
- if (!it.Done()) {
+ if (field_type->IsClass()) {
__ LoadP(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- Label do_store;
- while (true) {
- __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
- it.Advance();
- if (it.Done()) {
- __ bne(miss_label);
- break;
- }
- __ beq(&do_store);
- }
- __ bind(&do_store);
+ __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
+ scratch);
+ __ bne(miss_label);
}
}
@@ -592,38 +590,40 @@ void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<ExecutableAccessorInfo> callback) {
- // Build AccessorInfo::args_ list on the stack and push property name below
- // the exit frame to make GC aware of them and store pointers to them.
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
- DCHECK(!scratch2().is(reg));
- DCHECK(!scratch3().is(reg));
- DCHECK(!scratch4().is(reg));
+ Register reg, Handle<AccessorInfo> callback) {
+ DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), receiver()));
+ DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
__ push(receiver());
- // Push data from ExecutableAccessorInfo.
+ // Push data from AccessorInfo.
Handle<Object> data(callback->data(), isolate());
if (data->IsUndefined() || data->IsSmi()) {
- __ Move(scratch3(), data);
+ __ Move(scratch2(), data);
} else {
Handle<WeakCell> cell =
isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
// The callback is alive if this instruction is executed,
// so the weak cell is not cleared and points to data.
- __ GetWeakValue(scratch3(), cell);
+ __ GetWeakValue(scratch2(), cell);
}
- __ push(scratch3());
- __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
- __ mr(scratch4(), scratch3());
- __ Push(scratch3(), scratch4());
- __ mov(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
- __ Push(scratch4(), reg);
- __ push(name());
+ __ push(scratch2());
+ __ LoadRoot(scratch2(), Heap::kUndefinedValueRootIndex);
+ __ Push(scratch2(), scratch2());
+ __ mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+ // should_throw_on_error -> false
+ __ mov(scratch3(), Operand(Smi::FromInt(0)));
+ __ Push(scratch2(), reg, scratch3(), name());
// Abi for CallApiGetter
Register getter_address_reg = ApiGetterDescriptor::function_address();
@@ -711,8 +711,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+ LanguageMode language_mode) {
Register holder_reg = Frontend(name);
__ Push(receiver(), holder_reg); // receiver
@@ -728,6 +728,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
__ push(ip);
__ mov(ip, Operand(name));
__ Push(ip, value());
+ __ Push(Smi::FromInt(language_mode));
// Do tail-call to the runtime system.
__ TailCallRuntime(Runtime::kStoreCallbackProperty);
@@ -776,7 +777,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1, r4, r6);
+ __ IncrementCounter(counters->ic_named_load_global_stub(), 1, r4, r6);
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
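Unlike MIPS, the PPC variant builds the same seven-slot args_ array with pushes instead of a pre-reserved frame and fixed-offset stores. Sketch of the push order (later pushes land nearer the stack top, so the slots still line up with the STATIC_ASSERTs):

    push receiver                // kThisIndex
    push data                    // kDataIndex
    push undefined, undefined    // return value, return value default
    push isolate, holder
    push Smi(0), name            // should_throw_on_error == false, then name

The rewrite also narrows register pressure: where the old sequence burned scratch3() and scratch4(), the new one works with scratch2(), using scratch3() only briefly for the Smi.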
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index 78daac2657..567296c4c5 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -163,8 +163,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch1, Register scratch2,
- Register result, Label* slow,
- LanguageMode language_mode) {
+ Register result, Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -221,13 +220,8 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ jmp(&check_next_prototype);
__ bind(&absent);
- if (is_strong(language_mode)) {
- // Strong mode accesses must throw in this case, so call the runtime.
- __ jmp(slow);
- } else {
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
- }
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
@@ -274,8 +268,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
__ bind(&unique);
}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = r3;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -290,7 +283,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
- GenerateRuntimeGetProperty(masm, language_mode);
+ GenerateRuntimeGetProperty(masm);
}
@@ -314,7 +307,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->load_miss(), 1, r7, r8);
+ __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r7, r8);
LoadIC_PushArgs(masm);
@@ -322,17 +315,14 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kLoadIC_Miss);
}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in lr.
__ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
__ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty);
+ __ TailCallRuntime(Runtime::kGetProperty);
}
@@ -342,7 +332,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r7, r8);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r7, r8);
LoadIC_PushArgs(masm);
@@ -350,21 +340,16 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is in lr.
__ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty);
}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is in lr.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -388,9 +373,9 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(r3, r6, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, &slow,
- language_mode);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r7, r6);
+ GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, &slow);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, r7,
+ r6);
__ Ret();
__ bind(&check_number_dictionary);
@@ -409,9 +394,9 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
// Slow case, key and receiver still in r3 and r4.
__ bind(&slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r7,
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_slow(), 1, r7,
r6);
- GenerateRuntimeGetProperty(masm, language_mode);
+ GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, r3, r6, &index_name, &slow);
@@ -456,8 +441,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
GenerateGlobalInstanceTypeCheck(masm, r3, &slow);
// Load the property to r3.
GenerateDictionaryLoad(masm, &slow, r6, key, r3, r8, r7);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r7,
- r6);
+ __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1,
+ r7, r6);
__ Ret();
__ bind(&index_name);
@@ -797,11 +782,11 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
GenerateDictionaryStore(masm, &miss, dictionary, name, value, r9, r10);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, r9, r10);
+ __ IncrementCounter(counters->ic_store_normal_hit(), 1, r9, r10);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, r9, r10);
+ __ IncrementCounter(counters->ic_store_normal_miss(), 1, r9, r10);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index c09eca68dd..ac3dd9a367 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -4,8 +4,10 @@
#if V8_TARGET_ARCH_X64
-#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
@@ -181,9 +183,16 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
- __ movp(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ movp(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ movp(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ if (optimization.is_constant_call()) {
+ __ movp(data,
+ FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(data,
+ FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ movp(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ } else {
+ __ movp(data,
+ FieldOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+ }
__ movp(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
}
@@ -200,7 +209,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
RelocInfo::EXTERNAL_REFERENCE);
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ !optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -395,8 +405,7 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
__ j(not_equal, miss_label);
}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
Register value_reg,
Label* miss_label) {
Register map_reg = scratch1();
@@ -404,20 +413,12 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
DCHECK(!value_reg.is(map_reg));
DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
- HeapType::Iterator<Map> it = field_type->Classes();
- if (!it.Done()) {
+ if (field_type->IsClass()) {
Label do_store;
__ movp(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
- while (true) {
- __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
- it.Advance();
- if (it.Done()) {
- __ j(not_equal, miss_label);
- break;
- }
- __ j(equal, &do_store, Label::kNear);
- }
- __ bind(&do_store);
+ __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
+ scratch);
+ __ j(not_equal, miss_label);
}
}
@@ -592,24 +593,29 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<ExecutableAccessorInfo> callback) {
+ Register reg, Handle<AccessorInfo> callback) {
+ DCHECK(!AreAliased(kScratchRegister, scratch2(), scratch3(), receiver()));
+ DCHECK(!AreAliased(kScratchRegister, scratch2(), scratch3(), reg));
+
// Insert additional parameters into the stack frame above return address.
- DCHECK(!scratch4().is(reg));
- __ PopReturnAddressTo(scratch4());
-
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
- STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+ __ PopReturnAddressTo(scratch3());
+
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
__ Push(receiver()); // receiver
Handle<Object> data(callback->data(), isolate());
if (data->IsUndefined() || data->IsSmi()) {
__ Push(data);
} else {
- DCHECK(!scratch2().is(reg));
Handle<WeakCell> cell =
isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
// The callback is alive if this instruction is executed,
@@ -617,17 +623,15 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ GetWeakValue(scratch2(), cell);
__ Push(scratch2());
}
- DCHECK(!kScratchRegister.is(reg));
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ Push(kScratchRegister); // return value
__ Push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(isolate()));
__ Push(reg); // holder
- __ Push(name()); // name
- // Save a pointer to where we pushed the arguments pointer. This will be
- // passed as the const PropertyAccessorInfo& to the C++ callback.
+ __ Push(Smi::FromInt(0)); // should_throw_on_error -> false
- __ PushReturnAddressFrom(scratch4());
+ __ Push(name()); // name
+ __ PushReturnAddressFrom(scratch3());
// Abi for CallApiGetter
Register api_function_address = ApiGetterDescriptor::function_address();
@@ -722,8 +726,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+ LanguageMode language_mode) {
Register holder_reg = Frontend(name);
__ PopReturnAddressTo(scratch1());
@@ -739,6 +743,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
__ Push(name);
__ Push(value());
+ __ Push(Smi::FromInt(language_mode));
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
@@ -794,7 +799,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1);
+ __ IncrementCounter(counters->ic_named_load_global_stub(), 1);
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
}
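On x64 the args_ array has to end up above the return address, so the rewrite brackets the pushes with return-address juggling. Sketch of the sequence:

    PopReturnAddressTo(scratch3())     // lift the return address out of the way
    Push receiver, data, undefined x2, isolate_address, holder
    Push Smi(0)                        // should_throw_on_error -> false
    Push name()
    PushReturnAddressFrom(scratch3())  // restore; name now sits on top

The return-address carrier changes from scratch4() to scratch3(), the old arguments-pointer comment goes away, and the only new slot is the Smi(0) pushed between holder and name.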
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index bf4ad96f69..247116d7fe 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -167,7 +167,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register elements,
Register scratch, Register result,
- Label* slow, LanguageMode language_mode) {
+ Label* slow) {
// Register use:
//
// receiver - holds the receiver on entry.
@@ -222,13 +222,8 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ jmp(&check_next_prototype);
__ bind(&absent);
- if (is_strong(language_mode)) {
- // Strong mode accesses must throw in this case, so call the runtime.
- __ jmp(slow);
- } else {
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
- }
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
@@ -274,9 +269,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
__ bind(&unique);
}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is on the stack.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -298,10 +291,9 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(rax, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, &slow,
- language_mode);
+ GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, &slow);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
+ __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
__ ret(0);
__ bind(&check_number_dictionary);
@@ -319,8 +311,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
__ bind(&slow);
// Slow case: Jump to runtime.
- __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
- KeyedLoadIC::GenerateRuntimeGetProperty(masm, language_mode);
+ __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
+ KeyedLoadIC::GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, rax, rbx, &index_name, &slow);
@@ -366,7 +358,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
GenerateGlobalInstanceTypeCheck(masm, rax, &slow);
GenerateDictionaryLoad(masm, &slow, rbx, key, rax, rdi, rax);
- __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
+ __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
__ ret(0);
__ bind(&index_name);
@@ -626,8 +618,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
GenerateMiss(masm);
}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = rax;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -642,7 +633,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
- LoadIC::GenerateRuntimeGetProperty(masm, language_mode);
+ LoadIC::GenerateRuntimeGetProperty(masm);
}
@@ -667,7 +658,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is on the stack.
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->load_miss(), 1);
+ __ IncrementCounter(counters->ic_load_miss(), 1);
LoadIC_PushArgs(masm);
@@ -675,9 +666,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kLoadIC_Miss);
}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is on the stack.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
@@ -690,15 +679,14 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty);
+ __ TailCallRuntime(Runtime::kGetProperty);
}
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is on the stack.
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_miss(), 1);
+ __ IncrementCounter(counters->ic_keyed_load_miss(), 1);
LoadIC_PushArgs(masm);
@@ -706,9 +694,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// The return address is on the stack.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
@@ -721,8 +707,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty);
}
@@ -774,11 +759,11 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
__ movp(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1);
+ __ IncrementCounter(counters->ic_store_normal_hit(), 1);
__ ret(0);
__ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1);
+ __ IncrementCounter(counters->ic_store_normal_miss(), 1);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index cc43ed298d..1b25f06347 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -4,8 +4,10 @@
#if V8_TARGET_ARCH_X87
-#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
+
+#include "src/field-type.h"
+#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
@@ -197,9 +199,13 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
call_data_undefined = true;
__ mov(data, Immediate(isolate->factory()->undefined_value()));
} else {
- __ mov(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ mov(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ mov(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ if (optimization.is_constant_call()) {
+ __ mov(data, FieldOperand(callee, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(data, FieldOperand(data, SharedFunctionInfo::kFunctionDataOffset));
+ __ mov(data, FieldOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ } else {
+ __ mov(data, FieldOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
+ }
__ mov(data, FieldOperand(data, CallHandlerInfo::kDataOffset));
}
@@ -214,7 +220,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(api_function_address, Immediate(function_address));
// Jump to stub.
- CallApiAccessorStub stub(isolate, is_store, call_data_undefined);
+ CallApiAccessorStub stub(isolate, is_store, call_data_undefined,
+ !optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -399,8 +406,7 @@ void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
__ j(not_equal, miss_label);
}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
Register value_reg,
Label* miss_label) {
Register map_reg = scratch1();
@@ -408,20 +414,11 @@ void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
DCHECK(!value_reg.is(map_reg));
DCHECK(!value_reg.is(scratch));
__ JumpIfSmi(value_reg, miss_label);
- HeapType::Iterator<Map> it = field_type->Classes();
- if (!it.Done()) {
- Label do_store;
+ if (field_type->IsClass()) {
__ mov(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
- while (true) {
- __ CmpWeakValue(map_reg, Map::WeakCellForMap(it.Current()), scratch);
- it.Advance();
- if (it.Done()) {
- __ j(not_equal, miss_label);
- break;
- }
- __ j(equal, &do_store, Label::kNear);
- }
- __ bind(&do_store);
+ __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
+ scratch);
+ __ j(not_equal, miss_label);
}
}
@@ -593,24 +590,30 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
void NamedLoadHandlerCompiler::GenerateLoadCallback(
- Register reg, Handle<ExecutableAccessorInfo> callback) {
+ Register reg, Handle<AccessorInfo> callback) {
+ DCHECK(!AreAliased(scratch2(), scratch3(), receiver()));
+ DCHECK(!AreAliased(scratch2(), scratch3(), reg));
+
// Insert additional parameters into the stack frame above return address.
- DCHECK(!scratch3().is(reg));
__ pop(scratch3()); // Get return address to place it below.
- STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
- STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
- STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
- STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
+ // name below the exit frame to make GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
__ push(receiver()); // receiver
- // Push data from ExecutableAccessorInfo.
+ // Push data from AccessorInfo.
Handle<Object> data(callback->data(), isolate());
if (data->IsUndefined() || data->IsSmi()) {
__ push(Immediate(data));
} else {
- DCHECK(!scratch2().is(reg));
Handle<WeakCell> cell =
isolate()->factory()->NewWeakCell(Handle<HeapObject>::cast(data));
// The callback is alive if this instruction is executed,
@@ -623,13 +626,9 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback(
__ push(Immediate(isolate()->factory()->undefined_value()));
__ push(Immediate(reinterpret_cast<int>(isolate())));
__ push(reg); // holder
-
- // Save a pointer to where we pushed the arguments. This will be
- // passed as the const PropertyAccessorInfo& to the C++ callback.
- __ push(esp);
+ __ push(Immediate(Smi::FromInt(0))); // should_throw_on_error -> false
__ push(name()); // name
-
__ push(scratch3()); // Restore return address.
// Abi for CallApiGetter
@@ -731,8 +730,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name,
- Handle<ExecutableAccessorInfo> callback) {
+ Handle<JSObject> object, Handle<Name> name, Handle<AccessorInfo> callback,
+ LanguageMode language_mode) {
Register holder_reg = Frontend(name);
__ pop(scratch1()); // remove the return address
@@ -748,6 +747,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
}
__ Push(name);
__ push(value());
+ __ push(Immediate(Smi::FromInt(language_mode)));
__ push(scratch1()); // restore return address
// Do tail-call to the runtime system.
@@ -802,7 +802,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
}
Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->named_load_global_stub(), 1);
+ __ IncrementCounter(counters->ic_named_load_global_stub(), 1);
// The code above already loads the result into the return register.
if (IC::ICUseVector(kind())) {
DiscardVectorAndSlot();
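
The rewritten GenerateLoadCallback hand-builds the v8::PropertyCallbackInfo::args_ array on the machine stack, so the pushes must happen in exactly the reverse of the index order, and the STATIC_ASSERT ladder pins the assembler's push sequence to the header's index constants. A simplified standalone sketch of that pattern (the indices mirror the asserts above, but the value encodings are illustrative, not the real V8 layout):

    #include <array>
    #include <cassert>
    #include <cstdint>

    // Illustrative index constants mirroring the STATIC_ASSERT ladder.
    struct CallbackArgs {
      static const int kShouldThrowOnErrorIndex = 0;
      static const int kHolderIndex = 1;
      static const int kIsolateIndex = 2;
      static const int kReturnValueDefaultValueIndex = 3;
      static const int kReturnValueIndex = 4;
      static const int kDataIndex = 5;
      static const int kThisIndex = 6;
      static const int kArgsLength = 7;
    };

    // Compile-time checks catch any drift between the index constants
    // and the push order in the hand-written assembler.
    static_assert(CallbackArgs::kShouldThrowOnErrorIndex == 0, "layout drift");
    static_assert(CallbackArgs::kArgsLength == 7, "layout drift");

    int main() {
      // The stack grows downward, so pushing kThisIndex first and
      // kShouldThrowOnErrorIndex last leaves the array in index order.
      std::array<intptr_t, CallbackArgs::kArgsLength> args{};
      int sp = CallbackArgs::kArgsLength;
      auto push = [&](intptr_t value) { args[--sp] = value; };
      push(/*receiver*/ 106);        // kThisIndex = 6
      push(/*data*/ 105);            // kDataIndex = 5
      push(/*return value*/ 104);    // kReturnValueIndex = 4
      push(/*return default*/ 103);  // kReturnValueDefaultValueIndex = 3
      push(/*isolate*/ 102);         // kIsolateIndex = 2
      push(/*holder*/ 101);          // kHolderIndex = 1
      push(/*should_throw*/ 0);      // kShouldThrowOnErrorIndex = 0 -> false
      assert(args[CallbackArgs::kThisIndex] == 106);
      assert(args[CallbackArgs::kShouldThrowOnErrorIndex] == 0);
    }

The new kShouldThrowOnErrorIndex slot is also why the old `__ push(esp)` trick (passing a pointer to the pushed arguments) disappears: the callback-info layout itself now carries the extra flag.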
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index d4cc3ce80a..5bbd9c5814 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -167,7 +167,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
Register key, Register scratch,
Register scratch2, Register result,
- Label* slow, LanguageMode language_mode) {
+ Label* slow) {
// Register use:
// receiver - holds the receiver and is unchanged.
// key - holds the key and is unchanged (must be a smi).
@@ -211,13 +211,8 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
__ jmp(&check_next_prototype);
__ bind(&absent);
- if (is_strong(language_mode)) {
- // Strong mode accesses must throw in this case, so call the runtime.
- __ jmp(slow);
- } else {
- __ mov(result, masm->isolate()->factory()->undefined_value());
- __ jmp(&done);
- }
+ __ mov(result, masm->isolate()->factory()->undefined_value());
+ __ jmp(&done);
__ bind(&in_bounds);
// Fast case: Do the load.
@@ -262,9 +257,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
__ bind(&unique);
}
-
-void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// The return address is on the stack.
Label slow, check_name, index_smi, index_name, property_array_property;
Label probe_dictionary, check_number_dictionary;
@@ -286,11 +279,10 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
// Check the receiver's map to see if it has fast elements.
__ CheckFastElements(eax, &check_number_dictionary);
- GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow,
- language_mode);
+ GenerateFastArrayLoad(masm, receiver, key, eax, ebx, eax, &slow);
Isolate* isolate = masm->isolate();
Counters* counters = isolate->counters();
- __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
+ __ IncrementCounter(counters->ic_keyed_load_generic_smi(), 1);
__ ret(0);
__ bind(&check_number_dictionary);
@@ -318,8 +310,8 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
__ bind(&slow);
// Slow case: jump to runtime.
- __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
- GenerateRuntimeGetProperty(masm, language_mode);
+ __ IncrementCounter(counters->ic_keyed_load_generic_slow(), 1);
+ GenerateRuntimeGetProperty(masm);
__ bind(&check_name);
GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
@@ -363,7 +355,7 @@ void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
- __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
+ __ IncrementCounter(counters->ic_keyed_load_generic_symbol(), 1);
__ ret(0);
__ bind(&index_name);
@@ -628,8 +620,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
GenerateMiss(masm);
}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
Register dictionary = eax;
DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -644,7 +635,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
// Dictionary load failed, go slow (but don't miss).
__ bind(&slow);
- GenerateRuntimeGetProperty(masm, language_mode);
+ GenerateRuntimeGetProperty(masm);
}
@@ -668,16 +659,14 @@ static void LoadIC_PushArgs(MacroAssembler* masm) {
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// Return address is on the stack.
- __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
+ __ IncrementCounter(masm->isolate()->counters()->ic_load_miss(), 1);
LoadIC_PushArgs(masm);
// Perform tail call to the entry.
__ TailCallRuntime(Runtime::kLoadIC_Miss);
}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// Return address is on the stack.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
@@ -689,14 +678,13 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
__ push(ebx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
- : Runtime::kGetProperty);
+ __ TailCallRuntime(Runtime::kGetProperty);
}
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// Return address is on the stack.
- __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
+ __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_miss(), 1);
LoadIC_PushArgs(masm);
@@ -704,9 +692,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
- LanguageMode language_mode) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// Return address is on the stack.
Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadDescriptor::NameRegister();
@@ -718,8 +704,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
__ push(ebx);
// Do tail-call to runtime routine.
- __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
- : Runtime::kKeyedGetProperty);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty);
}
@@ -777,14 +762,14 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
receiver, edi);
__ Drop(3);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1);
+ __ IncrementCounter(counters->ic_store_normal_hit(), 1);
__ ret(0);
__ bind(&restore_miss);
__ pop(slot);
__ pop(vector);
__ pop(receiver);
- __ IncrementCounter(counters->store_normal_miss(), 1);
+ __ IncrementCounter(counters->ic_store_normal_miss(), 1);
GenerateMiss(masm);
}
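
One semantic consequence of the strong-mode removal is visible in GenerateFastArrayLoad above: the absent/out-of-bounds case no longer has a throwing variant, so the generated code unconditionally materializes undefined instead of branching to the slow path. In plain C++ terms the fast-path contract after this patch is roughly the following sketch (std::nullopt models undefined; this is a simplification, not the real element machinery):

    #include <cassert>
    #include <cstddef>
    #include <optional>
    #include <vector>

    // Rough sketch of the fast-array-load contract after the change:
    // an in-bounds element is returned, anything absent yields
    // "undefined" (std::nullopt here); nothing throws.
    std::optional<int> FastArrayLoad(const std::vector<int>& elements,
                                     size_t key) {
      if (key < elements.size()) return elements[key];  // fast path
      return std::nullopt;  // absent: undefined, never a throw
    }

    int main() {
      std::vector<int> a{1, 2, 3};
      assert(FastArrayLoad(a, 1) == 2);
      assert(!FastArrayLoad(a, 99).has_value());  // was a strong-mode throw
    }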
diff --git a/deps/v8/src/identity-map.cc b/deps/v8/src/identity-map.cc
index 723cdfa2a6..97b70ae2fd 100644
--- a/deps/v8/src/identity-map.cc
+++ b/deps/v8/src/identity-map.cc
@@ -4,7 +4,7 @@
#include "src/identity-map.h"
-#include "src/heap/heap.h"
+#include "src/base/functional.h"
#include "src/heap/heap-inl.h"
#include "src/zone-containers.h"
@@ -14,10 +14,17 @@ namespace internal {
static const int kInitialIdentityMapSize = 4;
static const int kResizeFactor = 4;
-IdentityMapBase::~IdentityMapBase() {
- if (keys_) heap_->UnregisterStrongRoots(keys_);
-}
+IdentityMapBase::~IdentityMapBase() { Clear(); }
+void IdentityMapBase::Clear() {
+ if (keys_) {
+ heap_->UnregisterStrongRoots(keys_);
+ keys_ = nullptr;
+ values_ = nullptr;
+ size_ = 0;
+ mask_ = 0;
+ }
+}
IdentityMapBase::RawEntry IdentityMapBase::Lookup(Object* key) {
int index = LookupIndex(key);
@@ -35,8 +42,7 @@ IdentityMapBase::RawEntry IdentityMapBase::Insert(Object* key) {
int IdentityMapBase::Hash(Object* address) {
CHECK_NE(address, heap_->not_mapped_symbol());
uintptr_t raw_address = reinterpret_cast<uintptr_t>(address);
- // Xor some of the upper bits, since the lower 2 or 3 are usually aligned.
- return static_cast<int>((raw_address >> 11) ^ raw_address);
+ return static_cast<int>(hasher_(raw_address));
}
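
The Hash() change above swaps the inline `(raw_address >> 11) ^ raw_address` mix for delegation to a `base::hash<uintptr_t>` member: pointer keys are word-aligned, so their low bits carry little entropy and a dedicated integer hasher is a better fit than an ad-hoc shift-xor. A standalone sketch of the structural change, with std::hash standing in for v8::base::hash (note that some standard libraries implement integer std::hash as the identity, whereas V8's base::hash applies a real mixing function):

    #include <cstdint>
    #include <functional>
    #include <iostream>

    // Sketch: the map owns a hasher functor and delegates to it,
    // mirroring the new hasher_ member in IdentityMapBase.
    class PointerKeyedMap {
     public:
      int Hash(const void* key) const {
        uintptr_t raw_address = reinterpret_cast<uintptr_t>(key);
        return static_cast<int>(hasher_(raw_address));
      }

     private:
      std::hash<uintptr_t> hasher_;  // stand-in for base::hash<uintptr_t>
    };

    int main() {
      PointerKeyedMap map;
      int x = 0;
      std::cout << "bucket bits: " << (map.Hash(&x) & 0xff) << "\n";
    }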
diff --git a/deps/v8/src/identity-map.h b/deps/v8/src/identity-map.h
index 2c4a0f3399..ad2a260769 100644
--- a/deps/v8/src/identity-map.h
+++ b/deps/v8/src/identity-map.h
@@ -5,6 +5,7 @@
#ifndef V8_IDENTITY_MAP_H_
#define V8_IDENTITY_MAP_H_
+#include "src/base/functional.h"
#include "src/handles.h"
namespace v8 {
@@ -36,6 +37,7 @@ class IdentityMapBase {
RawEntry GetEntry(Object* key);
RawEntry FindEntry(Object* key);
+ void Clear();
private:
// Internal implementation should not be called directly by subclasses.
@@ -47,6 +49,7 @@ class IdentityMapBase {
RawEntry Insert(Object* key);
int Hash(Object* address);
+ base::hash<uintptr_t> hasher_;
Heap* heap_;
Zone* zone_;
int gc_counter_;
@@ -85,6 +88,9 @@ class IdentityMap : public IdentityMapBase {
// Set the value for the given key.
void Set(Handle<Object> key, V v) { Set(*key, v); }
void Set(Object* key, V v) { *(reinterpret_cast<V*>(GetEntry(key))) = v; }
+
+ // Removes all elements from the map.
+ void Clear() { IdentityMapBase::Clear(); }
};
} // namespace internal
} // namespace v8
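
The new Clear() both empties the table and unregisters the keys array as a strong GC root, and the destructor now simply delegates to it, so callers can drop all entries without destroying the map. A minimal non-GC sketch of that destructor/Clear split (root registration is modelled by a counter; this is an illustration of the shape, not V8's heap API):

    #include <cassert>
    #include <cstdlib>

    // Teardown logic lives in Clear(), the destructor delegates to it,
    // and Clear() is safe to call repeatedly because it nulls its state.
    class IdentityMapSketch {
     public:
      explicit IdentityMapSketch(int capacity)
          : keys_(static_cast<int*>(std::calloc(capacity, sizeof(int)))),
            size_(capacity) {
        ++registered_roots;  // heap_->RegisterStrongRoots(...) stand-in
      }
      ~IdentityMapSketch() { Clear(); }

      void Clear() {
        if (keys_ != nullptr) {
          --registered_roots;  // heap_->UnregisterStrongRoots(keys_)
          std::free(keys_);
          keys_ = nullptr;
          size_ = 0;
        }
      }

      static int registered_roots;

     private:
      int* keys_;
      int size_;
    };

    int IdentityMapSketch::registered_roots = 0;

    int main() {
      {
        IdentityMapSketch map(4);
        map.Clear();  // explicit clear...
        map.Clear();  // ...is idempotent
      }               // ...and the destructor is then a no-op
      assert(IdentityMapSketch::registered_roots == 0);
    }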
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index 94ed7020c3..cc46a56d94 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -31,12 +31,12 @@ Type* ExternalPointer(Zone* zone) {
}
} // namespace
-
-Type::FunctionType* CallInterfaceDescriptor::BuildDefaultFunctionType(
+FunctionType* CallInterfaceDescriptor::BuildDefaultFunctionType(
Isolate* isolate, int parameter_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(zone), Type::Undefined(), parameter_count, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), parameter_count, zone)
+ ->AsFunction();
while (parameter_count-- != 0) {
function->InitParameter(parameter_count, AnyTagged(zone));
}
@@ -86,12 +86,11 @@ void VoidDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr);
}
-
-Type::FunctionType* LoadDescriptor::BuildCallInterfaceDescriptorFunctionType(
+FunctionType* LoadDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
function->InitParameter(0, AnyTagged(zone));
function->InitParameter(1, AnyTagged(zone));
function->InitParameter(2, SmiType(zone));
@@ -136,13 +135,12 @@ void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
}
}
-
-Type::FunctionType*
+FunctionType*
StoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
function->InitParameter(0, AnyTagged(zone)); // Receiver
function->InitParameter(1, AnyTagged(zone)); // Name
function->InitParameter(2, AnyTagged(zone)); // Value
@@ -150,13 +148,12 @@ StoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
return function;
}
-
-Type::FunctionType*
+FunctionType*
LoadGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 1, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 1, zone)->AsFunction();
function->InitParameter(0, UntaggedIntegral32(zone));
return function;
}
@@ -168,13 +165,12 @@ void LoadGlobalViaContextDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-Type::FunctionType*
+FunctionType*
StoreGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
function->InitParameter(0, UntaggedIntegral32(zone));
function->InitParameter(1, AnyTagged(zone));
return function;
@@ -216,6 +212,13 @@ void ToStringDescriptor::InitializePlatformSpecific(
}
+void ToNameDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ToObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ReceiverRegister()};
@@ -236,13 +239,12 @@ void MathPowIntegerDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-Type::FunctionType*
+FunctionType*
LoadWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
function->InitParameter(0, AnyTagged(zone));
function->InitParameter(1, AnyTagged(zone));
function->InitParameter(2, SmiType(zone));
@@ -258,15 +260,15 @@ void LoadWithVectorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-Type::FunctionType*
+FunctionType*
VectorStoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
bool has_slot = !VectorStoreTransitionDescriptor::SlotRegister().is(no_reg);
int arg_count = has_slot ? 6 : 5;
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(zone), Type::Undefined(), arg_count, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), arg_count, zone)
+ ->AsFunction();
int index = 0;
function->InitParameter(index++, AnyTagged(zone)); // receiver
function->InitParameter(index++, AnyTagged(zone)); // name
@@ -279,13 +281,11 @@ VectorStoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
return function;
}
-
-Type::FunctionType*
-VectorStoreICDescriptor::BuildCallInterfaceDescriptorFunctionType(
+FunctionType* VectorStoreICDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 5, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 5, zone)->AsFunction();
function->InitParameter(0, AnyTagged(zone));
function->InitParameter(1, AnyTagged(zone));
function->InitParameter(2, AnyTagged(zone));
@@ -302,13 +302,12 @@ void VectorStoreICDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-Type::FunctionType*
+FunctionType*
VectorStoreICTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
function->InitParameter(0, AnyTagged(zone));
function->InitParameter(1, AnyTagged(zone));
function->InitParameter(2, AnyTagged(zone));
@@ -324,13 +323,11 @@ void VectorStoreICTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-Type::FunctionType*
-ApiGetterDescriptor::BuildCallInterfaceDescriptorFunctionType(
+FunctionType* ApiGetterDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 1, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 1, zone)->AsFunction();
function->InitParameter(0, ExternalPointer(zone));
return function;
}
@@ -343,54 +340,6 @@ void ApiGetterDescriptor::InitializePlatformSpecific(
}
-void ArgumentsAccessReadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {index(), parameter_count()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-Type::FunctionType*
-ArgumentsAccessNewDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
- function->InitParameter(0, AnyTagged(zone));
- function->InitParameter(1, SmiType(zone));
- function->InitParameter(2, ExternalPointer(zone));
- return function;
-}
-
-
-void ArgumentsAccessNewDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {function(), parameter_count(), parameter_pointer()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-Type::FunctionType*
-RestParamAccessDescriptor::BuildCallInterfaceDescriptorFunctionType(
- Isolate* isolate, int paramater_count) {
- Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
- function->InitParameter(0, SmiType(zone));
- function->InitParameter(1, ExternalPointer(zone));
- function->InitParameter(2, SmiType(zone));
- return function;
-}
-
-
-void RestParamAccessDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {parameter_count(), parameter_pointer(),
- rest_parameter_index()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void ContextOnlyDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr);
@@ -403,13 +352,12 @@ void GrowArrayElementsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
-Type::FunctionType*
+FunctionType*
FastCloneRegExpDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
function->InitParameter(0, AnyTagged(zone)); // closure
function->InitParameter(1, SmiType(zone)); // literal_index
function->InitParameter(2, AnyTagged(zone)); // pattern
@@ -417,63 +365,57 @@ FastCloneRegExpDescriptor::BuildCallInterfaceDescriptorFunctionType(
return function;
}
-
-Type::FunctionType*
+FunctionType*
FastCloneShallowArrayDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
function->InitParameter(0, AnyTagged(zone));
function->InitParameter(1, SmiType(zone));
function->InitParameter(2, AnyTagged(zone));
return function;
}
-
-Type::FunctionType*
+FunctionType*
CreateAllocationSiteDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
function->InitParameter(0, AnyTagged(zone));
function->InitParameter(1, SmiType(zone));
return function;
}
-
-Type::FunctionType*
+FunctionType*
CreateWeakCellDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
function->InitParameter(0, AnyTagged(zone));
function->InitParameter(1, SmiType(zone));
function->InitParameter(2, AnyTagged(zone));
return function;
}
-
-Type::FunctionType*
+FunctionType*
CallTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
function->InitParameter(0, AnyTagged(zone)); // target
function->InitParameter(1, UntaggedIntegral32(zone)); // actual #arguments
return function;
}
-
-Type::FunctionType*
-ConstructStubDescriptor::BuildCallInterfaceDescriptorFunctionType(
+FunctionType* ConstructStubDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
function->InitParameter(0, AnyTagged(zone)); // target
function->InitParameter(1, AnyTagged(zone)); // new.target
function->InitParameter(2, UntaggedIntegral32(zone)); // actual #arguments
@@ -481,76 +423,70 @@ ConstructStubDescriptor::BuildCallInterfaceDescriptorFunctionType(
return function;
}
-
-Type::FunctionType*
+FunctionType*
ConstructTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
function->InitParameter(0, AnyTagged(zone)); // target
function->InitParameter(1, AnyTagged(zone)); // new.target
function->InitParameter(2, UntaggedIntegral32(zone)); // actual #arguments
return function;
}
-
-Type::FunctionType*
+FunctionType*
CallFunctionWithFeedbackDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
function->InitParameter(0, Type::Receiver()); // JSFunction
function->InitParameter(1, SmiType(zone));
return function;
}
-
-Type::FunctionType* CallFunctionWithFeedbackAndVectorDescriptor::
+FunctionType* CallFunctionWithFeedbackAndVectorDescriptor::
BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
function->InitParameter(0, Type::Receiver()); // JSFunction
function->InitParameter(1, SmiType(zone));
function->InitParameter(2, AnyTagged(zone));
return function;
}
-
-Type::FunctionType*
+FunctionType*
ArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 3, zone)->AsFunction();
function->InitParameter(0, Type::Receiver()); // JSFunction
function->InitParameter(1, AnyTagged(zone));
function->InitParameter(2, UntaggedIntegral32(zone));
return function;
}
-
-Type::FunctionType*
+FunctionType*
InternalArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 2, zone)->AsFunction();
function->InitParameter(0, Type::Receiver()); // JSFunction
function->InitParameter(1, UntaggedIntegral32(zone));
return function;
}
-
-Type::FunctionType*
+FunctionType*
ArgumentAdaptorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
function->InitParameter(0, Type::Receiver()); // JSFunction
function->InitParameter(1, AnyTagged(zone)); // the new target
function->InitParameter(2, UntaggedIntegral32(zone)); // actual #arguments
@@ -558,13 +494,11 @@ ArgumentAdaptorDescriptor::BuildCallInterfaceDescriptorFunctionType(
return function;
}
-
-Type::FunctionType*
-ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
+FunctionType* ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 5, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 5, zone)->AsFunction();
function->InitParameter(0, AnyTagged(zone)); // callee
function->InitParameter(1, AnyTagged(zone)); // call_data
function->InitParameter(2, AnyTagged(zone)); // holder
@@ -573,13 +507,11 @@ ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
return function;
}
-
-Type::FunctionType*
-ApiAccessorDescriptor::BuildCallInterfaceDescriptorFunctionType(
+FunctionType* ApiAccessorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int paramater_count) {
Zone* zone = isolate->interface_descriptor_zone();
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 4, zone)->AsFunction();
function->InitParameter(0, AnyTagged(zone)); // callee
function->InitParameter(1, AnyTagged(zone)); // call_data
function->InitParameter(2, AnyTagged(zone)); // holder
@@ -587,6 +519,19 @@ ApiAccessorDescriptor::BuildCallInterfaceDescriptorFunctionType(
return function;
}
+FunctionType*
+InterpreterDispatchDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ FunctionType* function =
+ Type::Function(AnyTagged(zone), Type::Undefined(), 5, zone)->AsFunction();
+ function->InitParameter(kAccumulatorParameter, AnyTagged(zone));
+ function->InitParameter(kRegisterFileParameter, ExternalPointer(zone));
+ function->InitParameter(kBytecodeOffsetParameter, UntaggedIntegral32(zone));
+ function->InitParameter(kBytecodeArrayParameter, AnyTagged(zone));
+ function->InitParameter(kDispatchTableParameter, AnyTagged(zone));
+ return function;
+}
} // namespace internal
} // namespace v8
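
Nearly every hunk in this file is the same rewrite: the nested constructor `Type::FunctionType::New(...)` becomes the flat factory-plus-downcast `Type::Function(...)->AsFunction()`, which is why the diff is so repetitive. The shape of that API move, sketched with hypothetical stand-in classes (V8 zone-allocates and tag-checks where this sketch news and static_casts):

    #include <cassert>
    #include <memory>
    #include <vector>

    // Stand-in hierarchy: the factory lives on the base class and
    // returns Type*, and callers downcast with an As*() accessor.
    class Type {
     public:
      virtual ~Type() = default;
      class FunctionType;
      static Type* Function(int parameter_count);  // base-class factory
      FunctionType* AsFunction();                  // checked downcast in V8
    };

    class Type::FunctionType : public Type {
     public:
      explicit FunctionType(int parameter_count)
          : parameters_(parameter_count) {}
      void InitParameter(int i, int type) { parameters_[i] = type; }
      int ParameterCount() const {
        return static_cast<int>(parameters_.size());
      }

     private:
      std::vector<int> parameters_;  // parameter type per slot
    };

    Type* Type::Function(int parameter_count) {
      return new FunctionType(parameter_count);  // zone-allocated in V8
    }

    Type::FunctionType* Type::AsFunction() {
      return static_cast<FunctionType*>(this);  // V8 verifies the tag first
    }

    int main() {
      // New style: factory on Type, then downcast, then fill parameters.
      std::unique_ptr<Type> t(Type::Function(3));
      Type::FunctionType* function = t->AsFunction();
      for (int i = 0; i < 3; ++i) function->InitParameter(i, /*AnyTagged*/ 1);
      assert(function->ParameterCount() == 3);
    }

Hoisting FunctionType out of Type's scope (plain `FunctionType*` in all the signatures) is the other half of the same refactor.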
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 2814daeded..fb1969d8ef 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -25,9 +25,14 @@ class PlatformInterfaceDescriptor;
V(LoadWithVector) \
V(FastNewClosure) \
V(FastNewContext) \
+ V(FastNewObject) \
+ V(FastNewRestParameter) \
+ V(FastNewSloppyArguments) \
+ V(FastNewStrictArguments) \
V(ToNumber) \
V(ToLength) \
V(ToString) \
+ V(ToName) \
V(ToObject) \
V(NumberToString) \
V(Typeof) \
@@ -66,21 +71,17 @@ class PlatformInterfaceDescriptor;
V(ApiFunction) \
V(ApiAccessor) \
V(ApiGetter) \
- V(ArgumentsAccessRead) \
- V(ArgumentsAccessNew) \
- V(RestParamAccess) \
- V(StoreArrayLiteralElement) \
V(LoadGlobalViaContext) \
V(StoreGlobalViaContext) \
V(MathPowTagged) \
V(MathPowInteger) \
V(ContextOnly) \
V(GrowArrayElements) \
+ V(InterpreterDispatch) \
V(InterpreterPushArgsAndCall) \
V(InterpreterPushArgsAndConstruct) \
V(InterpreterCEntry)
-
class CallInterfaceDescriptorData {
public:
CallInterfaceDescriptorData()
@@ -89,7 +90,7 @@ class CallInterfaceDescriptorData {
// A copy of the passed in registers and param_representations is made
// and owned by the CallInterfaceDescriptorData.
- void InitializePlatformIndependent(Type::FunctionType* function_type) {
+ void InitializePlatformIndependent(FunctionType* function_type) {
function_type_ = function_type;
}
@@ -112,7 +113,7 @@ class CallInterfaceDescriptorData {
return platform_specific_descriptor_;
}
- Type::FunctionType* function_type() const { return function_type_; }
+ FunctionType* function_type() const { return function_type_; }
private:
int register_param_count_;
@@ -124,7 +125,7 @@ class CallInterfaceDescriptorData {
base::SmartArrayPointer<Register> register_params_;
// Specifies types for parameters and return
- Type::FunctionType* function_type_;
+ FunctionType* function_type_;
PlatformInterfaceDescriptor* platform_specific_descriptor_;
@@ -175,21 +176,19 @@ class CallInterfaceDescriptor {
return data()->platform_specific_descriptor();
}
- Type::FunctionType* GetFunctionType() const {
- return data()->function_type();
- }
+ FunctionType* GetFunctionType() const { return data()->function_type(); }
static const Register ContextRegister();
const char* DebugName(Isolate* isolate) const;
- static Type::FunctionType* BuildDefaultFunctionType(Isolate* isolate,
- int paramater_count);
+ static FunctionType* BuildDefaultFunctionType(Isolate* isolate,
+ int paramater_count);
protected:
const CallInterfaceDescriptorData* data() const { return data_; }
- virtual Type::FunctionType* BuildCallInterfaceDescriptorFunctionType(
+ virtual FunctionType* BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int register_param_count) {
return BuildDefaultFunctionType(isolate, register_param_count);
}
@@ -202,9 +201,8 @@ class CallInterfaceDescriptor {
if (!data()->IsInitialized()) {
CallInterfaceDescriptorData* d = isolate->call_descriptor_data(key);
InitializePlatformSpecific(d);
- Type::FunctionType* function_type =
- BuildCallInterfaceDescriptorFunctionType(isolate,
- d->register_param_count());
+ FunctionType* function_type = BuildCallInterfaceDescriptorFunctionType(
+ isolate, d->register_param_count());
d->InitializePlatformIndependent(function_type);
}
}
@@ -226,16 +224,14 @@ class CallInterfaceDescriptor {
public: \
static inline CallDescriptors::Key key();
-
#define DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(name, base) \
DECLARE_DESCRIPTOR(name, base) \
protected: \
- Type::FunctionType* BuildCallInterfaceDescriptorFunctionType( \
+ FunctionType* BuildCallInterfaceDescriptorFunctionType( \
Isolate* isolate, int register_param_count) override; \
\
public:
-
class VoidDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
@@ -379,6 +375,28 @@ class FastNewContextDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(FastNewContextDescriptor, CallInterfaceDescriptor)
};
+class FastNewObjectDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(FastNewObjectDescriptor, CallInterfaceDescriptor)
+};
+
+class FastNewRestParameterDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(FastNewRestParameterDescriptor, CallInterfaceDescriptor)
+};
+
+class FastNewSloppyArgumentsDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(FastNewSloppyArgumentsDescriptor,
+ CallInterfaceDescriptor)
+};
+
+class FastNewStrictArgumentsDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(FastNewStrictArgumentsDescriptor,
+ CallInterfaceDescriptor)
+};
+
class ToNumberDescriptor : public CallInterfaceDescriptor {
public:
@@ -406,6 +424,16 @@ class ToStringDescriptor : public CallInterfaceDescriptor {
};
+class ToNameDescriptor : public CallInterfaceDescriptor {
+ public:
+ enum ParameterIndices { kReceiverIndex };
+
+ DECLARE_DESCRIPTOR(ToNameDescriptor, CallInterfaceDescriptor)
+
+ static const Register ReceiverRegister();
+};
+
+
class ToObjectDescriptor : public CallInterfaceDescriptor {
public:
enum ParameterIndices { kReceiverIndex };
@@ -692,43 +720,6 @@ class ApiGetterDescriptor : public CallInterfaceDescriptor {
};
-class ArgumentsAccessReadDescriptor : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR(ArgumentsAccessReadDescriptor, CallInterfaceDescriptor)
-
- static const Register index();
- static const Register parameter_count();
-};
-
-
-class ArgumentsAccessNewDescriptor : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ArgumentsAccessNewDescriptor,
- CallInterfaceDescriptor)
-
- static const Register function();
- static const Register parameter_count();
- static const Register parameter_pointer();
-};
-
-
-class RestParamAccessDescriptor : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(RestParamAccessDescriptor,
- CallInterfaceDescriptor)
- static const Register parameter_count();
- static const Register parameter_pointer();
- static const Register rest_parameter_index();
-};
-
-
-class StoreArrayLiteralElementDescriptor : public CallInterfaceDescriptor {
- public:
- DECLARE_DESCRIPTOR(StoreArrayLiteralElementDescriptor,
- CallInterfaceDescriptor)
-};
-
-
class MathPowTaggedDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(MathPowTaggedDescriptor, CallInterfaceDescriptor)
@@ -760,6 +751,18 @@ class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
static const Register KeyRegister();
};
+class InterpreterDispatchDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(InterpreterDispatchDescriptor,
+ CallInterfaceDescriptor)
+
+ static const int kAccumulatorParameter = 0;
+ static const int kRegisterFileParameter = 1;
+ static const int kBytecodeOffsetParameter = 2;
+ static const int kBytecodeArrayParameter = 3;
+ static const int kDispatchTableParameter = 4;
+ static const int kContextParameter = 5;
+};
class InterpreterPushArgsAndCallDescriptor : public CallInterfaceDescriptor {
public:
@@ -781,7 +784,6 @@ class InterpreterCEntryDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(InterpreterCEntryDescriptor, CallInterfaceDescriptor)
};
-
#undef DECLARE_DESCRIPTOR
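
The new InterpreterDispatchDescriptor names its five dispatch parameters with index constants, so the builder added in interface-descriptors.cc can initialize them positionally without magic numbers. A sketch of that named-index pattern with stand-in types (not the real descriptor machinery):

    #include <array>
    #include <cassert>
    #include <string>

    // Stand-in for a call descriptor whose parameters are addressed by
    // named index constants, mirroring kAccumulatorParameter ..
    // kDispatchTableParameter above.
    struct DispatchDescriptorSketch {
      static const int kAccumulatorParameter = 0;
      static const int kRegisterFileParameter = 1;
      static const int kBytecodeOffsetParameter = 2;
      static const int kBytecodeArrayParameter = 3;
      static const int kDispatchTableParameter = 4;
      static const int kParameterCount = 5;

      std::array<std::string, kParameterCount> parameter_types;

      void InitParameter(int index, const std::string& type) {
        parameter_types[index] = type;
      }
    };

    int main() {
      DispatchDescriptorSketch d;
      d.InitParameter(DispatchDescriptorSketch::kAccumulatorParameter,
                      "AnyTagged");
      d.InitParameter(DispatchDescriptorSketch::kRegisterFileParameter,
                      "ExternalPointer");
      d.InitParameter(DispatchDescriptorSketch::kBytecodeOffsetParameter,
                      "UntaggedIntegral32");
      d.InitParameter(DispatchDescriptorSketch::kBytecodeArrayParameter,
                      "AnyTagged");
      d.InitParameter(DispatchDescriptorSketch::kDispatchTableParameter,
                      "AnyTagged");
      assert(d.parameter_types[2] == "UntaggedIntegral32");
    }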
diff --git a/deps/v8/src/interpreter/DEPS b/deps/v8/src/interpreter/DEPS
deleted file mode 100644
index f8d6b98fd8..0000000000
--- a/deps/v8/src/interpreter/DEPS
+++ /dev/null
@@ -1,3 +0,0 @@
-include_rules = [
- "+src/compiler/interpreter-assembler.h",
-]
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 1b15fc6668..7103c72178 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -3,12 +3,13 @@
// found in the LICENSE file.
#include "src/interpreter/bytecode-array-builder.h"
+#include "src/compiler.h"
namespace v8 {
namespace internal {
namespace interpreter {
-class BytecodeArrayBuilder::PreviousBytecodeHelper {
+class BytecodeArrayBuilder::PreviousBytecodeHelper BASE_EMBEDDED {
public:
explicit PreviousBytecodeHelper(const BytecodeArrayBuilder& array_builder)
: array_builder_(array_builder),
@@ -37,9 +38,9 @@ class BytecodeArrayBuilder::PreviousBytecodeHelper {
Bytecodes::GetOperandOffset(bytecode, operand_index);
OperandSize size = Bytecodes::GetOperandSize(bytecode, operand_index);
switch (size) {
- default:
case OperandSize::kNone:
UNREACHABLE();
+ break;
case OperandSize::kByte:
return static_cast<uint32_t>(
array_builder_.bytecodes()->at(operand_offset));
@@ -49,6 +50,7 @@ class BytecodeArrayBuilder::PreviousBytecodeHelper {
array_builder_.bytecodes()->at(operand_offset + 1);
return static_cast<uint32_t>(operand);
}
+ return 0;
}
Handle<Object> GetConstantForIndexOperand(int operand_index) const {
@@ -63,43 +65,31 @@ class BytecodeArrayBuilder::PreviousBytecodeHelper {
DISALLOW_COPY_AND_ASSIGN(PreviousBytecodeHelper);
};
-
-BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone)
+BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone,
+ int parameter_count,
+ int context_count, int locals_count)
: isolate_(isolate),
zone_(zone),
bytecodes_(zone),
bytecode_generated_(false),
constant_array_builder_(isolate, zone),
+ handler_table_builder_(isolate, zone),
+ source_position_table_builder_(isolate, zone),
last_block_end_(0),
last_bytecode_start_(~0),
exit_seen_in_block_(false),
unbound_jumps_(0),
- parameter_count_(-1),
- local_register_count_(-1),
- context_register_count_(-1),
- temporary_register_count_(0),
- free_temporaries_(zone) {}
-
-
-BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
-
-
-void BytecodeArrayBuilder::set_locals_count(int number_of_locals) {
- local_register_count_ = number_of_locals;
- DCHECK_LE(context_register_count_, 0);
-}
-
-
-void BytecodeArrayBuilder::set_parameter_count(int number_of_parameters) {
- parameter_count_ = number_of_parameters;
-}
-
-
-void BytecodeArrayBuilder::set_context_count(int number_of_contexts) {
- context_register_count_ = number_of_contexts;
+ parameter_count_(parameter_count),
+ local_register_count_(locals_count),
+ context_register_count_(context_count),
+ temporary_allocator_(zone, fixed_register_count()),
+ register_translator_(this) {
+ DCHECK_GE(parameter_count_, 0);
+ DCHECK_GE(context_register_count_, 0);
DCHECK_GE(local_register_count_, 0);
}
+BytecodeArrayBuilder::~BytecodeArrayBuilder() { DCHECK_EQ(0, unbound_jumps_); }
Register BytecodeArrayBuilder::first_context_register() const {
DCHECK_GT(context_register_count_, 0);
@@ -113,18 +103,6 @@ Register BytecodeArrayBuilder::last_context_register() const {
}
-Register BytecodeArrayBuilder::first_temporary_register() const {
- DCHECK_GT(temporary_register_count_, 0);
- return Register(fixed_register_count());
-}
-
-
-Register BytecodeArrayBuilder::last_temporary_register() const {
- DCHECK_GT(temporary_register_count_, 0);
- return Register(fixed_register_count() + temporary_register_count_ - 1);
-}
-
-
Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
DCHECK_GE(parameter_index, 0);
return Register::FromParameterIndex(parameter_index, parameter_count());
@@ -136,25 +114,23 @@ bool BytecodeArrayBuilder::RegisterIsParameterOrLocal(Register reg) const {
}
-bool BytecodeArrayBuilder::RegisterIsTemporary(Register reg) const {
- return temporary_register_count_ > 0 && first_temporary_register() <= reg &&
- reg <= last_temporary_register();
-}
-
-
Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
DCHECK_EQ(bytecode_generated_, false);
- EnsureReturn();
+ DCHECK(exit_seen_in_block_);
int bytecode_size = static_cast<int>(bytecodes_.size());
- int register_count = fixed_register_count() + temporary_register_count_;
+ int register_count =
+ fixed_and_temporary_register_count() + translation_register_count();
int frame_size = register_count * kPointerSize;
- Factory* factory = isolate_->factory();
- Handle<FixedArray> constant_pool =
- constant_array_builder()->ToFixedArray(factory);
- Handle<BytecodeArray> output =
- factory->NewBytecodeArray(bytecode_size, &bytecodes_.front(), frame_size,
- parameter_count(), constant_pool);
+ Handle<FixedArray> constant_pool = constant_array_builder()->ToFixedArray();
+ Handle<FixedArray> handler_table = handler_table_builder()->ToHandlerTable();
+ Handle<FixedArray> source_position_table =
+ source_position_table_builder()->ToFixedArray();
+ Handle<BytecodeArray> output = isolate_->factory()->NewBytecodeArray(
+ bytecode_size, &bytecodes_.front(), frame_size, parameter_count(),
+ constant_pool);
+ output->set_handler_table(*handler_table);
+ output->set_source_position_table(*source_position_table);
bytecode_generated_ = true;
return output;
}
@@ -163,16 +139,28 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
template <size_t N>
void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t(&operands)[N]) {
// Don't output dead code.
- if (exit_seen_in_block_) return;
+ if (exit_seen_in_block_) {
+ source_position_table_builder_.RevertPosition(bytecodes()->size());
+ return;
+ }
+
+ int operand_count = static_cast<int>(N);
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
+
+ int register_operand_count = Bytecodes::NumberOfRegisterOperands(bytecode);
+ if (register_operand_count > 0) {
+ register_translator()->TranslateInputRegisters(bytecode, operands,
+ operand_count);
+ }
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), static_cast<int>(N));
last_bytecode_start_ = bytecodes()->size();
bytecodes()->push_back(Bytecodes::ToByte(bytecode));
- for (int i = 0; i < static_cast<int>(N); i++) {
+ for (int i = 0; i < operand_count; i++) {
DCHECK(OperandIsValid(bytecode, i, operands[i]));
switch (Bytecodes::GetOperandSize(bytecode, i)) {
case OperandSize::kNone:
UNREACHABLE();
+ break;
case OperandSize::kByte:
bytecodes()->push_back(static_cast<uint8_t>(operands[i]));
break;
@@ -185,6 +173,10 @@ void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t(&operands)[N]) {
}
}
}
+
+ if (register_operand_count > 0) {
+ register_translator()->TranslateOutputRegisters();
+ }
}
@@ -218,32 +210,23 @@ void BytecodeArrayBuilder::Output(Bytecode bytecode, uint32_t operand0) {
void BytecodeArrayBuilder::Output(Bytecode bytecode) {
// Don't output dead code.
- if (exit_seen_in_block_) return;
+ if (exit_seen_in_block_) {
+ source_position_table_builder_.RevertPosition(bytecodes()->size());
+ return;
+ }
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
last_bytecode_start_ = bytecodes()->size();
bytecodes()->push_back(Bytecodes::ToByte(bytecode));
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
- Register reg,
- Strength strength) {
- if (is_strong(strength)) {
- UNIMPLEMENTED();
- }
-
- Output(BytecodeForBinaryOperation(op), reg.ToOperand());
+ Register reg) {
+ Output(BytecodeForBinaryOperation(op), reg.ToRawOperand());
return *this;
}
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op,
- Strength strength) {
- if (is_strong(strength)) {
- UNIMPLEMENTED();
- }
-
+BytecodeArrayBuilder& BytecodeArrayBuilder::CountOperation(Token::Value op) {
Output(BytecodeForCountOperation(op));
return *this;
}
@@ -260,14 +243,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::TypeOf() {
return *this;
}
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
- Token::Value op, Register reg, Strength strength) {
- if (is_strong(strength)) {
- UNIMPLEMENTED();
- }
-
- Output(BytecodeForCompareOperation(op), reg.ToOperand());
+BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(Token::Value op,
+ Register reg) {
+ Output(BytecodeForCompareOperation(op), reg.ToRawOperand());
return *this;
}
@@ -338,11 +316,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadBooleanConstant(bool value) {
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
Register reg) {
if (!IsRegisterInAccumulator(reg)) {
- Output(Bytecode::kLdar, reg.ToOperand());
+ Output(Bytecode::kLdar, reg.ToRawOperand());
}
return *this;
}
@@ -350,15 +327,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAccumulatorWithRegister(
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
Register reg) {
- // TODO(oth): Avoid storing the accumulator in the register if the
- // previous bytecode loaded the accumulator with the same register.
- //
- // TODO(oth): If the previous bytecode is a MOV into this register,
- // the previous instruction can be removed. The logic for determining
- // these redundant MOVs appears complex.
- Output(Bytecode::kStar, reg.ToOperand());
if (!IsRegisterInAccumulator(reg)) {
- Output(Bytecode::kStar, reg.ToOperand());
+ Output(Bytecode::kStar, reg.ToRawOperand());
}
return *this;
}
@@ -367,31 +337,37 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
Register to) {
DCHECK(from != to);
- Output(Bytecode::kMov, from.ToOperand(), to.ToOperand());
+ if (FitsInReg8Operand(from) && FitsInReg8Operand(to)) {
+ Output(Bytecode::kMov, from.ToRawOperand(), to.ToRawOperand());
+ } else if (FitsInReg16Operand(from) && FitsInReg16Operand(to)) {
+ Output(Bytecode::kMovWide, from.ToRawOperand(), to.ToRawOperand());
+ } else {
+ UNIMPLEMENTED();
+ }
return *this;
}
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::ExchangeRegisters(Register reg0,
- Register reg1) {
- DCHECK(reg0 != reg1);
- if (FitsInReg8Operand(reg0)) {
- Output(Bytecode::kExchange, reg0.ToOperand(), reg1.ToWideOperand());
- } else if (FitsInReg8Operand(reg1)) {
- Output(Bytecode::kExchange, reg1.ToOperand(), reg0.ToWideOperand());
+void BytecodeArrayBuilder::MoveRegisterUntranslated(Register from,
+ Register to) {
+ // Move bytecodes modify the stack. Checking validity is an
+ // essential mitigation against corrupting the stack.
+ if (FitsInReg8OperandUntranslated(from)) {
+ CHECK(RegisterIsValid(from, OperandType::kReg8) &&
+ RegisterIsValid(to, OperandType::kReg16));
+ } else if (FitsInReg8OperandUntranslated(to)) {
+ CHECK(RegisterIsValid(from, OperandType::kReg16) &&
+ RegisterIsValid(to, OperandType::kReg8));
} else {
- Output(Bytecode::kExchangeWide, reg0.ToWideOperand(), reg1.ToWideOperand());
+ UNIMPLEMENTED();
}
- return *this;
+ Output(Bytecode::kMovWide, from.ToRawOperand(), to.ToRawOperand());
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
- const Handle<String> name, int feedback_slot, LanguageMode language_mode,
- TypeofMode typeof_mode) {
- // TODO(rmcilroy): Potentially store language and typeof information in an
+ const Handle<String> name, int feedback_slot, TypeofMode typeof_mode) {
+ // TODO(rmcilroy): Potentially store typeof information in an
// operand rather than having extra bytecodes.
- Bytecode bytecode = BytecodeForLoadGlobal(language_mode, typeof_mode);
+ Bytecode bytecode = BytecodeForLoadGlobal(typeof_mode);
size_t name_index = GetConstantPoolEntry(name);
if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
Output(bytecode, static_cast<uint8_t>(name_index),
@@ -429,10 +405,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
int slot_index) {
DCHECK(slot_index >= 0);
if (FitsInIdx8Operand(slot_index)) {
- Output(Bytecode::kLdaContextSlot, context.ToOperand(),
+ Output(Bytecode::kLdaContextSlot, context.ToRawOperand(),
static_cast<uint8_t>(slot_index));
} else if (FitsInIdx16Operand(slot_index)) {
- Output(Bytecode::kLdaContextSlotWide, context.ToOperand(),
+ Output(Bytecode::kLdaContextSlotWide, context.ToRawOperand(),
static_cast<uint16_t>(slot_index));
} else {
UNIMPLEMENTED();
@@ -445,10 +421,10 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
int slot_index) {
DCHECK(slot_index >= 0);
if (FitsInIdx8Operand(slot_index)) {
- Output(Bytecode::kStaContextSlot, context.ToOperand(),
+ Output(Bytecode::kStaContextSlot, context.ToRawOperand(),
static_cast<uint8_t>(slot_index));
} else if (FitsInIdx16Operand(slot_index)) {
- Output(Bytecode::kStaContextSlotWide, context.ToOperand(),
+ Output(Bytecode::kStaContextSlotWide, context.ToRawOperand(),
static_cast<uint16_t>(slot_index));
} else {
UNIMPLEMENTED();
@@ -490,18 +466,16 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
- Register object, const Handle<String> name, int feedback_slot,
- LanguageMode language_mode) {
- Bytecode bytecode = BytecodeForLoadIC(language_mode);
+ Register object, const Handle<Name> name, int feedback_slot) {
size_t name_index = GetConstantPoolEntry(name);
if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
- Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
+ Output(Bytecode::kLoadIC, object.ToRawOperand(),
+ static_cast<uint8_t>(name_index),
static_cast<uint8_t>(feedback_slot));
} else if (FitsInIdx16Operand(name_index) &&
FitsInIdx16Operand(feedback_slot)) {
- Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+ Output(Bytecode::kLoadICWide, object.ToRawOperand(),
static_cast<uint16_t>(name_index),
static_cast<uint16_t>(feedback_slot));
} else {
@@ -510,14 +484,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
- Register object, int feedback_slot, LanguageMode language_mode) {
- Bytecode bytecode = BytecodeForKeyedLoadIC(language_mode);
+ Register object, int feedback_slot) {
if (FitsInIdx8Operand(feedback_slot)) {
- Output(bytecode, object.ToOperand(), static_cast<uint8_t>(feedback_slot));
+ Output(Bytecode::kKeyedLoadIC, object.ToRawOperand(),
+ static_cast<uint8_t>(feedback_slot));
} else if (FitsInIdx16Operand(feedback_slot)) {
- Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+ Output(Bytecode::kKeyedLoadICWide, object.ToRawOperand(),
static_cast<uint16_t>(feedback_slot));
} else {
UNIMPLEMENTED();
@@ -525,18 +498,17 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
- Register object, const Handle<String> name, int feedback_slot,
+ Register object, const Handle<Name> name, int feedback_slot,
LanguageMode language_mode) {
Bytecode bytecode = BytecodeForStoreIC(language_mode);
size_t name_index = GetConstantPoolEntry(name);
if (FitsInIdx8Operand(name_index) && FitsInIdx8Operand(feedback_slot)) {
- Output(bytecode, object.ToOperand(), static_cast<uint8_t>(name_index),
+ Output(bytecode, object.ToRawOperand(), static_cast<uint8_t>(name_index),
static_cast<uint8_t>(feedback_slot));
} else if (FitsInIdx16Operand(name_index) &&
FitsInIdx16Operand(feedback_slot)) {
- Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
+ Output(BytecodeForWideOperands(bytecode), object.ToRawOperand(),
static_cast<uint16_t>(name_index),
static_cast<uint16_t>(feedback_slot));
} else {
@@ -551,11 +523,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
LanguageMode language_mode) {
Bytecode bytecode = BytecodeForKeyedStoreIC(language_mode);
if (FitsInIdx8Operand(feedback_slot)) {
- Output(bytecode, object.ToOperand(), key.ToOperand(),
+ Output(bytecode, object.ToRawOperand(), key.ToRawOperand(),
static_cast<uint8_t>(feedback_slot));
} else if (FitsInIdx16Operand(feedback_slot)) {
- Output(BytecodeForWideOperands(bytecode), object.ToOperand(),
- key.ToOperand(), static_cast<uint16_t>(feedback_slot));
+ Output(BytecodeForWideOperands(bytecode), object.ToRawOperand(),
+ key.ToRawOperand(), static_cast<uint16_t>(feedback_slot));
} else {
UNIMPLEMENTED();
}
@@ -653,13 +625,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
BytecodeArrayBuilder& BytecodeArrayBuilder::PushContext(Register context) {
- Output(Bytecode::kPushContext, context.ToOperand());
+ Output(Bytecode::kPushContext, context.ToRawOperand());
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::PopContext(Register context) {
- Output(Bytecode::kPopContext, context.ToOperand());
+ Output(Bytecode::kPopContext, context.ToRawOperand());
return *this;
}
@@ -766,6 +738,8 @@ Bytecode BytecodeArrayBuilder::GetJumpWithConstantOperand(
return Bytecode::kJumpIfToBooleanTrueConstant;
case Bytecode::kJumpIfToBooleanFalse:
return Bytecode::kJumpIfToBooleanFalseConstant;
+ case Bytecode::kJumpIfNotHole:
+ return Bytecode::kJumpIfNotHoleConstant;
case Bytecode::kJumpIfNull:
return Bytecode::kJumpIfNullConstant;
case Bytecode::kJumpIfUndefined:
@@ -791,6 +765,8 @@ Bytecode BytecodeArrayBuilder::GetJumpWithConstantWideOperand(
return Bytecode::kJumpIfToBooleanTrueConstantWide;
case Bytecode::kJumpIfToBooleanFalse:
return Bytecode::kJumpIfToBooleanFalseConstantWide;
+ case Bytecode::kJumpIfNotHole:
+ return Bytecode::kJumpIfNotHoleConstantWide;
case Bytecode::kJumpIfNull:
return Bytecode::kJumpIfNullConstantWide;
case Bytecode::kJumpIfUndefined:
@@ -808,6 +784,7 @@ Bytecode BytecodeArrayBuilder::GetJumpWithToBoolean(Bytecode jump_bytecode) {
case Bytecode::kJump:
case Bytecode::kJumpIfNull:
case Bytecode::kJumpIfUndefined:
+ case Bytecode::kJumpIfNotHole:
return jump_bytecode;
case Bytecode::kJumpIfTrue:
return Bytecode::kJumpIfToBooleanTrue;
@@ -883,7 +860,10 @@ void BytecodeArrayBuilder::PatchJump(
BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
BytecodeLabel* label) {
// Don't emit dead code.
- if (exit_seen_in_block_) return *this;
+ if (exit_seen_in_block_) {
+ source_position_table_builder_.RevertPosition(bytecodes()->size());
+ return *this;
+ }
// Check if the value in the accumulator is boolean; if not, choose an
// appropriate JumpIfToBoolean bytecode.
@@ -965,6 +945,15 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
return OutputJump(Bytecode::kJumpIfUndefined, label);
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck() {
+ Output(Bytecode::kStackCheck);
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
+ BytecodeLabel* label) {
+ return OutputJump(Bytecode::kJumpIfNotHole, label);
+}
BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
Output(Bytecode::kThrow);
@@ -973,40 +962,86 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::ReThrow() {
+ Output(Bytecode::kReThrow);
+ exit_seen_in_block_ = true;
+ return *this;
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
Output(Bytecode::kReturn);
exit_seen_in_block_ = true;
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::Debugger() {
+ Output(Bytecode::kDebugger);
+ return *this;
+}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInPrepare(
- Register cache_type, Register cache_array, Register cache_length) {
- Output(Bytecode::kForInPrepare, cache_type.ToOperand(),
- cache_array.ToOperand(), cache_length.ToOperand());
+ Register cache_info_triple) {
+ if (FitsInReg8Operand(cache_info_triple)) {
+ Output(Bytecode::kForInPrepare, cache_info_triple.ToRawOperand());
+ } else if (FitsInReg16Operand(cache_info_triple)) {
+ Output(Bytecode::kForInPrepareWide, cache_info_triple.ToRawOperand());
+ } else {
+ UNIMPLEMENTED();
+ }
return *this;
}
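+// Editor's sketch (an assumption drawn from the old signature, not patch
+// text): the single |cache_info_triple| operand names the first of three
+// consecutive registers carrying what was previously passed separately:
+//   Register triple = ...;                     // cache_type lives here
+//   Register cache_array(triple.index() + 1);  // then the cache array
+//   Register cache_length(triple.index() + 2); // then the cache length
+//   builder.ForInPrepare(triple);  // replaces ForInPrepare(type, array, len)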
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInDone(Register index,
Register cache_length) {
- Output(Bytecode::kForInDone, index.ToOperand(), cache_length.ToOperand());
+ Output(Bytecode::kForInDone, index.ToRawOperand(),
+ cache_length.ToRawOperand());
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(Register receiver,
- Register cache_type,
- Register cache_array,
- Register index) {
- Output(Bytecode::kForInNext, receiver.ToOperand(), cache_type.ToOperand(),
- cache_array.ToOperand(), index.ToOperand());
+BytecodeArrayBuilder& BytecodeArrayBuilder::ForInNext(
+ Register receiver, Register index, Register cache_type_array_pair) {
+ if (FitsInReg8Operand(receiver) && FitsInReg8Operand(index) &&
+ FitsInReg8Operand(cache_type_array_pair)) {
+ Output(Bytecode::kForInNext, receiver.ToRawOperand(), index.ToRawOperand(),
+ cache_type_array_pair.ToRawOperand());
+ } else if (FitsInReg16Operand(receiver) && FitsInReg16Operand(index) &&
+ FitsInReg16Operand(cache_type_array_pair)) {
+ Output(Bytecode::kForInNextWide, receiver.ToRawOperand(),
+ index.ToRawOperand(), cache_type_array_pair.ToRawOperand());
+ } else {
+ UNIMPLEMENTED();
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::ForInStep(Register index) {
- Output(Bytecode::kForInStep, index.ToOperand());
+ Output(Bytecode::kForInStep, index.ToRawOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::MarkHandler(int handler_id,
+ bool will_catch) {
+ handler_table_builder()->SetHandlerTarget(handler_id, bytecodes()->size());
+ handler_table_builder()->SetPrediction(handler_id, will_catch);
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryBegin(int handler_id,
+ Register context) {
+ handler_table_builder()->SetTryRegionStart(handler_id, bytecodes()->size());
+ handler_table_builder()->SetContextRegister(handler_id, context);
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::MarkTryEnd(int handler_id) {
+ handler_table_builder()->SetTryRegionEnd(handler_id, bytecodes()->size());
return *this;
}
@@ -1016,27 +1051,33 @@ void BytecodeArrayBuilder::LeaveBasicBlock() {
exit_seen_in_block_ = false;
}
-
-void BytecodeArrayBuilder::EnsureReturn() {
+void BytecodeArrayBuilder::EnsureReturn(FunctionLiteral* literal) {
if (!exit_seen_in_block_) {
LoadUndefined();
+ SetReturnPosition(literal);
Return();
}
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
- Register receiver,
- size_t arg_count,
- int feedback_slot) {
- if (FitsInIdx8Operand(arg_count) && FitsInIdx8Operand(feedback_slot)) {
- Output(Bytecode::kCall, callable.ToOperand(), receiver.ToOperand(),
- static_cast<uint8_t>(arg_count),
+ Register receiver_args,
+ size_t receiver_args_count,
+ int feedback_slot,
+ TailCallMode tail_call_mode) {
+ Bytecode bytecode = BytecodeForCall(tail_call_mode);
+ if (FitsInReg8Operand(callable) && FitsInReg8Operand(receiver_args) &&
+ FitsInIdx8Operand(receiver_args_count) &&
+ FitsInIdx8Operand(feedback_slot)) {
+ Output(bytecode, callable.ToRawOperand(), receiver_args.ToRawOperand(),
+ static_cast<uint8_t>(receiver_args_count),
static_cast<uint8_t>(feedback_slot));
- } else if (FitsInIdx16Operand(arg_count) &&
+ } else if (FitsInReg16Operand(callable) &&
+ FitsInReg16Operand(receiver_args) &&
+ FitsInIdx16Operand(receiver_args_count) &&
FitsInIdx16Operand(feedback_slot)) {
- Output(Bytecode::kCallWide, callable.ToOperand(), receiver.ToOperand(),
- static_cast<uint16_t>(arg_count),
+ bytecode = BytecodeForWideOperands(bytecode);
+ Output(bytecode, callable.ToRawOperand(), receiver_args.ToRawOperand(),
+ static_cast<uint16_t>(receiver_args_count),
static_cast<uint16_t>(feedback_slot));
} else {
UNIMPLEMENTED();
@@ -1044,7 +1085,6 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
return *this;
}
-
BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
Register first_arg,
size_t arg_count) {
@@ -1052,9 +1092,17 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
DCHECK_EQ(0u, arg_count);
first_arg = Register(0);
}
- DCHECK(FitsInIdx8Operand(arg_count));
- Output(Bytecode::kNew, constructor.ToOperand(), first_arg.ToOperand(),
- static_cast<uint8_t>(arg_count));
+ if (FitsInReg8Operand(constructor) && FitsInReg8Operand(first_arg) &&
+ FitsInIdx8Operand(arg_count)) {
+ Output(Bytecode::kNew, constructor.ToRawOperand(), first_arg.ToRawOperand(),
+ static_cast<uint8_t>(arg_count));
+ } else if (FitsInReg16Operand(constructor) && FitsInReg16Operand(first_arg) &&
+ FitsInIdx16Operand(arg_count)) {
+ Output(Bytecode::kNewWide, constructor.ToRawOperand(),
+ first_arg.ToRawOperand(), static_cast<uint16_t>(arg_count));
+ } else {
+ UNIMPLEMENTED();
+ }
return *this;
}
@@ -1063,13 +1111,19 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntime(
Runtime::FunctionId function_id, Register first_arg, size_t arg_count) {
DCHECK_EQ(1, Runtime::FunctionForId(function_id)->result_size);
DCHECK(FitsInIdx16Operand(function_id));
- DCHECK(FitsInIdx8Operand(arg_count));
if (!first_arg.is_valid()) {
DCHECK_EQ(0u, arg_count);
first_arg = Register(0);
}
- Output(Bytecode::kCallRuntime, static_cast<uint16_t>(function_id),
- first_arg.ToOperand(), static_cast<uint8_t>(arg_count));
+ if (FitsInReg8Operand(first_arg) && FitsInIdx8Operand(arg_count)) {
+ Output(Bytecode::kCallRuntime, static_cast<uint16_t>(function_id),
+ first_arg.ToRawOperand(), static_cast<uint8_t>(arg_count));
+ } else if (FitsInReg16Operand(first_arg) && FitsInIdx16Operand(arg_count)) {
+ Output(Bytecode::kCallRuntimeWide, static_cast<uint16_t>(function_id),
+ first_arg.ToRawOperand(), static_cast<uint16_t>(arg_count));
+ } else {
+ UNIMPLEMENTED();
+ }
return *this;
}
@@ -1079,38 +1133,49 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallRuntimeForPair(
Register first_return) {
DCHECK_EQ(2, Runtime::FunctionForId(function_id)->result_size);
DCHECK(FitsInIdx16Operand(function_id));
- DCHECK(FitsInIdx8Operand(arg_count));
if (!first_arg.is_valid()) {
DCHECK_EQ(0u, arg_count);
first_arg = Register(0);
}
- Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
- first_arg.ToOperand(), static_cast<uint8_t>(arg_count),
- first_return.ToOperand());
+ if (FitsInReg8Operand(first_arg) && FitsInIdx8Operand(arg_count) &&
+ FitsInReg8Operand(first_return)) {
+ Output(Bytecode::kCallRuntimeForPair, static_cast<uint16_t>(function_id),
+ first_arg.ToRawOperand(), static_cast<uint8_t>(arg_count),
+ first_return.ToRawOperand());
+ } else if (FitsInReg16Operand(first_arg) && FitsInIdx16Operand(arg_count) &&
+ FitsInReg16Operand(first_return)) {
+ Output(Bytecode::kCallRuntimeForPairWide,
+ static_cast<uint16_t>(function_id), first_arg.ToRawOperand(),
+ static_cast<uint16_t>(arg_count), first_return.ToRawOperand());
+ } else {
+ UNIMPLEMENTED();
+ }
return *this;
}
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
- Register receiver,
- size_t arg_count) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(
+ int context_index, Register receiver_args, size_t receiver_args_count) {
DCHECK(FitsInIdx16Operand(context_index));
- DCHECK(FitsInIdx8Operand(arg_count));
- Output(Bytecode::kCallJSRuntime, static_cast<uint16_t>(context_index),
- receiver.ToOperand(), static_cast<uint8_t>(arg_count));
+ if (FitsInReg8Operand(receiver_args) &&
+ FitsInIdx8Operand(receiver_args_count)) {
+ Output(Bytecode::kCallJSRuntime, static_cast<uint16_t>(context_index),
+ receiver_args.ToRawOperand(),
+ static_cast<uint8_t>(receiver_args_count));
+ } else if (FitsInReg16Operand(receiver_args) &&
+ FitsInIdx16Operand(receiver_args_count)) {
+ Output(Bytecode::kCallJSRuntimeWide, static_cast<uint16_t>(context_index),
+ receiver_args.ToRawOperand(),
+ static_cast<uint16_t>(receiver_args_count));
+ } else {
+ UNIMPLEMENTED();
+ }
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
LanguageMode language_mode) {
- Output(BytecodeForDelete(language_mode), object.ToOperand());
- return *this;
-}
-
-
-BytecodeArrayBuilder& BytecodeArrayBuilder::DeleteLookupSlot() {
- Output(Bytecode::kDeleteLookupSlot);
+ Output(BytecodeForDelete(language_mode), object.ToRawOperand());
return *this;
}
@@ -1119,126 +1184,62 @@ size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
return constant_array_builder()->Insert(object);
}
-
-int BytecodeArrayBuilder::BorrowTemporaryRegister() {
- if (free_temporaries_.empty()) {
- temporary_register_count_ += 1;
- return last_temporary_register().index();
- } else {
- auto pos = free_temporaries_.begin();
- int retval = *pos;
- free_temporaries_.erase(pos);
- return retval;
- }
-}
-
-
-int BytecodeArrayBuilder::BorrowTemporaryRegisterNotInRange(int start_index,
- int end_index) {
- auto index = free_temporaries_.lower_bound(start_index);
- if (index == free_temporaries_.begin()) {
- // If start_index is the first free register, check for a register
- // greater than end_index.
- index = free_temporaries_.upper_bound(end_index);
- if (index == free_temporaries_.end()) {
- temporary_register_count_ += 1;
- return last_temporary_register().index();
- }
- } else {
- // If there is a free register < start_index
- index--;
- }
-
- int retval = *index;
- free_temporaries_.erase(index);
- return retval;
-}
-
-
-void BytecodeArrayBuilder::BorrowConsecutiveTemporaryRegister(int reg_index) {
- DCHECK(free_temporaries_.find(reg_index) != free_temporaries_.end());
- free_temporaries_.erase(reg_index);
+void BytecodeArrayBuilder::SetReturnPosition(FunctionLiteral* fun) {
+ int pos = std::max(fun->start_position(), fun->end_position() - 1);
+ source_position_table_builder_.AddStatementPosition(bytecodes_.size(), pos);
}
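+// Editor's note (an assumption, not part of this patch): taking
+// std::max(start_position, end_position - 1) pins the implicit return to the
+// last character of the function, normally its closing brace, while the max()
+// guards against an empty source range. For example, for `function f() {}`
+// the Return emitted by EnsureReturn() reports the position of `}`.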
-
-void BytecodeArrayBuilder::ReturnTemporaryRegister(int reg_index) {
- DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end());
- free_temporaries_.insert(reg_index);
+void BytecodeArrayBuilder::SetStatementPosition(Statement* stmt) {
+ if (stmt->position() == RelocInfo::kNoPosition) return;
+ source_position_table_builder_.AddStatementPosition(bytecodes_.size(),
+ stmt->position());
}
-
-int BytecodeArrayBuilder::PrepareForConsecutiveTemporaryRegisters(
- size_t count) {
- if (count == 0) {
- return -1;
- }
-
- // Search within existing temporaries for a run.
- auto start = free_temporaries_.begin();
- size_t run_length = 0;
- for (auto run_end = start; run_end != free_temporaries_.end(); run_end++) {
- if (*run_end != *start + static_cast<int>(run_length)) {
- start = run_end;
- run_length = 0;
- }
- if (++run_length == count) {
- return *start;
- }
- }
-
- // Continue run if possible across existing last temporary.
- if (temporary_register_count_ > 0 &&
- (start == free_temporaries_.end() ||
- *start + static_cast<int>(run_length) !=
- last_temporary_register().index() + 1)) {
- run_length = 0;
- }
-
- // Ensure enough registers for run.
- while (run_length++ < count) {
- temporary_register_count_++;
- free_temporaries_.insert(last_temporary_register().index());
- }
- return last_temporary_register().index() - static_cast<int>(count) + 1;
+void BytecodeArrayBuilder::SetExpressionPosition(Expression* expr) {
+ if (expr->position() == RelocInfo::kNoPosition) return;
+ source_position_table_builder_.AddExpressionPosition(bytecodes_.size(),
+ expr->position());
}
-
bool BytecodeArrayBuilder::TemporaryRegisterIsLive(Register reg) const {
- if (temporary_register_count_ > 0) {
- DCHECK(reg.index() >= first_temporary_register().index() &&
- reg.index() <= last_temporary_register().index());
- return free_temporaries_.find(reg.index()) == free_temporaries_.end();
- } else {
- return false;
- }
+ return temporary_register_allocator()->RegisterIsLive(reg);
}
-
-bool BytecodeArrayBuilder::RegisterIsValid(Register reg) const {
- if (reg.is_function_context() || reg.is_function_closure() ||
- reg.is_new_target()) {
- return true;
- } else if (reg.is_parameter()) {
- int parameter_index = reg.ToParameterIndex(parameter_count_);
- return parameter_index >= 0 && parameter_index < parameter_count_;
- } else if (reg.index() < fixed_register_count()) {
- return true;
- } else {
- return TemporaryRegisterIsLive(reg);
- }
-}
-
-
bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
uint32_t operand_value) const {
OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
switch (operand_type) {
case OperandType::kNone:
return false;
- case OperandType::kCount16:
+ case OperandType::kRegCount16: {
+ // Expect kRegCount16 to be part of a register range: the previous
+ // operand must be a valid operand to start such a range.
+ if (operand_index > 0) {
+ OperandType previous_operand_type =
+ Bytecodes::GetOperandType(bytecode, operand_index - 1);
+ return ((previous_operand_type == OperandType::kMaybeReg16 ||
+ previous_operand_type == OperandType::kReg16) &&
+ static_cast<uint16_t>(operand_value) == operand_value);
+ } else {
+ return false;
+ }
+ }
+ case OperandType::kRegCount8: {
+ // Expect kRegCount8 to be part of a register range: the previous
+ // operand must be a valid operand to start such a range.
+ if (operand_index > 0) {
+ OperandType previous_operand_type =
+ Bytecodes::GetOperandType(bytecode, operand_index - 1);
+ return ((previous_operand_type == OperandType::kMaybeReg8 ||
+ previous_operand_type == OperandType::kReg8 ||
+ previous_operand_type == OperandType::kMaybeReg16) &&
+ static_cast<uint8_t>(operand_value) == operand_value);
+ } else {
+ return false;
+ }
+ }
case OperandType::kIdx16:
return static_cast<uint16_t>(operand_value) == operand_value;
- case OperandType::kCount8:
case OperandType::kImm8:
case OperandType::kIdx8:
return static_cast<uint8_t>(operand_value) == operand_value;
@@ -1248,27 +1249,84 @@ bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
}
// Fall-through to kReg8 case.
case OperandType::kReg8:
- return RegisterIsValid(
- Register::FromOperand(static_cast<uint8_t>(operand_value)));
- case OperandType::kRegPair8: {
- Register reg0 =
- Register::FromOperand(static_cast<uint8_t>(operand_value));
+ case OperandType::kRegOut8:
+ return RegisterIsValid(Register::FromRawOperand(operand_value),
+ operand_type);
+ case OperandType::kRegOutPair8:
+ case OperandType::kRegOutPair16:
+ case OperandType::kRegPair8:
+ case OperandType::kRegPair16: {
+ Register reg0 = Register::FromRawOperand(operand_value);
Register reg1 = Register(reg0.index() + 1);
- return RegisterIsValid(reg0) && RegisterIsValid(reg1);
+ return RegisterIsValid(reg0, operand_type) &&
+ RegisterIsValid(reg1, operand_type);
}
- case OperandType::kReg16:
- if (bytecode != Bytecode::kExchange &&
- bytecode != Bytecode::kExchangeWide) {
- return false;
+ case OperandType::kRegOutTriple8:
+ case OperandType::kRegOutTriple16: {
+ Register reg0 = Register::FromRawOperand(operand_value);
+ Register reg1 = Register(reg0.index() + 1);
+ Register reg2 = Register(reg0.index() + 2);
+ return RegisterIsValid(reg0, operand_type) &&
+ RegisterIsValid(reg1, operand_type) &&
+ RegisterIsValid(reg2, operand_type);
+ }
+ case OperandType::kMaybeReg16:
+ if (operand_value == 0) {
+ return true;
}
- return RegisterIsValid(
- Register::FromWideOperand(static_cast<uint16_t>(operand_value)));
+ // Fall-through to kReg16 case.
+ case OperandType::kReg16:
+ case OperandType::kRegOut16: {
+ Register reg = Register::FromRawOperand(operand_value);
+ return RegisterIsValid(reg, operand_type);
+ }
}
UNREACHABLE();
return false;
}
+bool BytecodeArrayBuilder::RegisterIsValid(Register reg,
+ OperandType reg_type) const {
+ if (!reg.is_valid()) {
+ return false;
+ }
+
+ switch (Bytecodes::SizeOfOperand(reg_type)) {
+ case OperandSize::kByte:
+ if (!FitsInReg8OperandUntranslated(reg)) {
+ return false;
+ }
+ break;
+ case OperandSize::kShort:
+ if (!FitsInReg16OperandUntranslated(reg)) {
+ return false;
+ }
+ break;
+ case OperandSize::kNone:
+ UNREACHABLE();
+ return false;
+ }
+
+ if (reg.is_current_context() || reg.is_function_closure() ||
+ reg.is_new_target()) {
+ return true;
+ } else if (reg.is_parameter()) {
+ int parameter_index = reg.ToParameterIndex(parameter_count());
+ return parameter_index >= 0 && parameter_index < parameter_count();
+ } else if (RegisterTranslator::InTranslationWindow(reg)) {
+ return translation_register_count() > 0;
+ } else {
+ reg = RegisterTranslator::UntranslateRegister(reg);
+ if (reg.index() < fixed_register_count()) {
+ return true;
+ } else {
+ return TemporaryRegisterIsLive(reg);
+ }
+ }
+}
+
+
bool BytecodeArrayBuilder::LastBytecodeInSameBlock() const {
return last_bytecode_start_ < bytecodes()->size() &&
last_bytecode_start_ >= last_block_end_;
@@ -1279,9 +1337,10 @@ bool BytecodeArrayBuilder::IsRegisterInAccumulator(Register reg) {
if (LastBytecodeInSameBlock()) {
PreviousBytecodeHelper previous_bytecode(*this);
Bytecode bytecode = previous_bytecode.GetBytecode();
- if ((bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) &&
- (reg == Register::FromOperand(previous_bytecode.GetOperand(0)))) {
- return true;
+ if (bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar) {
+ Register previous_reg =
+ Register::FromOperand(previous_bytecode.GetOperand(0));
+ return previous_reg == reg;
}
}
return false;
@@ -1367,14 +1426,14 @@ Bytecode BytecodeArrayBuilder::BytecodeForCompareOperation(Token::Value op) {
// static
Bytecode BytecodeArrayBuilder::BytecodeForWideOperands(Bytecode bytecode) {
switch (bytecode) {
- case Bytecode::kLoadICSloppy:
- return Bytecode::kLoadICSloppyWide;
- case Bytecode::kLoadICStrict:
- return Bytecode::kLoadICStrictWide;
- case Bytecode::kKeyedLoadICSloppy:
- return Bytecode::kKeyedLoadICSloppyWide;
- case Bytecode::kKeyedLoadICStrict:
- return Bytecode::kKeyedLoadICStrictWide;
+ case Bytecode::kCall:
+ return Bytecode::kCallWide;
+ case Bytecode::kTailCall:
+ return Bytecode::kTailCallWide;
+ case Bytecode::kLoadIC:
+ return Bytecode::kLoadICWide;
+ case Bytecode::kKeyedLoadIC:
+ return Bytecode::kKeyedLoadICWide;
case Bytecode::kStoreICSloppy:
return Bytecode::kStoreICSloppyWide;
case Bytecode::kStoreICStrict:
@@ -1383,14 +1442,10 @@ Bytecode BytecodeArrayBuilder::BytecodeForWideOperands(Bytecode bytecode) {
return Bytecode::kKeyedStoreICSloppyWide;
case Bytecode::kKeyedStoreICStrict:
return Bytecode::kKeyedStoreICStrictWide;
- case Bytecode::kLdaGlobalSloppy:
- return Bytecode::kLdaGlobalSloppyWide;
- case Bytecode::kLdaGlobalStrict:
- return Bytecode::kLdaGlobalStrictWide;
- case Bytecode::kLdaGlobalInsideTypeofSloppy:
- return Bytecode::kLdaGlobalInsideTypeofSloppyWide;
- case Bytecode::kLdaGlobalInsideTypeofStrict:
- return Bytecode::kLdaGlobalInsideTypeofStrictWide;
+ case Bytecode::kLdaGlobal:
+ return Bytecode::kLdaGlobalWide;
+ case Bytecode::kLdaGlobalInsideTypeof:
+ return Bytecode::kLdaGlobalInsideTypeofWide;
case Bytecode::kStaGlobalSloppy:
return Bytecode::kStaGlobalSloppyWide;
case Bytecode::kStaGlobalStrict:
@@ -1411,39 +1466,6 @@ Bytecode BytecodeArrayBuilder::BytecodeForWideOperands(Bytecode bytecode) {
// static
-Bytecode BytecodeArrayBuilder::BytecodeForLoadIC(LanguageMode language_mode) {
- switch (language_mode) {
- case SLOPPY:
- return Bytecode::kLoadICSloppy;
- case STRICT:
- return Bytecode::kLoadICStrict;
- case STRONG:
- UNIMPLEMENTED();
- default:
- UNREACHABLE();
- }
- return static_cast<Bytecode>(-1);
-}
-
-
-// static
-Bytecode BytecodeArrayBuilder::BytecodeForKeyedLoadIC(
- LanguageMode language_mode) {
- switch (language_mode) {
- case SLOPPY:
- return Bytecode::kKeyedLoadICSloppy;
- case STRICT:
- return Bytecode::kKeyedLoadICStrict;
- case STRONG:
- UNIMPLEMENTED();
- default:
- UNREACHABLE();
- }
- return static_cast<Bytecode>(-1);
-}
-
-
-// static
Bytecode BytecodeArrayBuilder::BytecodeForStoreIC(LanguageMode language_mode) {
switch (language_mode) {
case SLOPPY:
@@ -1477,23 +1499,9 @@ Bytecode BytecodeArrayBuilder::BytecodeForKeyedStoreIC(
// static
-Bytecode BytecodeArrayBuilder::BytecodeForLoadGlobal(LanguageMode language_mode,
- TypeofMode typeof_mode) {
- switch (language_mode) {
- case SLOPPY:
- return typeof_mode == INSIDE_TYPEOF
- ? Bytecode::kLdaGlobalInsideTypeofSloppy
- : Bytecode::kLdaGlobalSloppy;
- case STRICT:
- return typeof_mode == INSIDE_TYPEOF
- ? Bytecode::kLdaGlobalInsideTypeofStrict
- : Bytecode::kLdaGlobalStrict;
- case STRONG:
- UNIMPLEMENTED();
- default:
- UNREACHABLE();
- }
- return static_cast<Bytecode>(-1);
+Bytecode BytecodeArrayBuilder::BytecodeForLoadGlobal(TypeofMode typeof_mode) {
+ return typeof_mode == INSIDE_TYPEOF ? Bytecode::kLdaGlobalInsideTypeof
+ : Bytecode::kLdaGlobal;
}
@@ -1530,7 +1538,6 @@ Bytecode BytecodeArrayBuilder::BytecodeForStoreLookupSlot(
return static_cast<Bytecode>(-1);
}
-
// static
Bytecode BytecodeArrayBuilder::BytecodeForCreateArguments(
CreateArgumentsType type) {
@@ -1539,9 +1546,10 @@ Bytecode BytecodeArrayBuilder::BytecodeForCreateArguments(
return Bytecode::kCreateMappedArguments;
case CreateArgumentsType::kUnmappedArguments:
return Bytecode::kCreateUnmappedArguments;
- default:
- UNREACHABLE();
+ case CreateArgumentsType::kRestParameter:
+ return Bytecode::kCreateRestParameter;
}
+ UNREACHABLE();
return static_cast<Bytecode>(-1);
}
@@ -1561,6 +1569,18 @@ Bytecode BytecodeArrayBuilder::BytecodeForDelete(LanguageMode language_mode) {
return static_cast<Bytecode>(-1);
}
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForCall(TailCallMode tail_call_mode) {
+ switch (tail_call_mode) {
+ case TailCallMode::kDisallow:
+ return Bytecode::kCall;
+ case TailCallMode::kAllow:
+ return Bytecode::kTailCall;
+ default:
+ UNREACHABLE();
+ }
+ return static_cast<Bytecode>(-1);
+}
// static
bool BytecodeArrayBuilder::FitsInIdx8Operand(int value) {
@@ -1594,13 +1614,23 @@ bool BytecodeArrayBuilder::FitsInIdx16Operand(size_t value) {
// static
bool BytecodeArrayBuilder::FitsInReg8Operand(Register value) {
- return kMinInt8 <= value.index() && value.index() <= kMaxInt8;
+ return RegisterTranslator::FitsInReg8Operand(value);
+}
+
+// static
+bool BytecodeArrayBuilder::FitsInReg8OperandUntranslated(Register value) {
+ return value.is_byte_operand();
}
// static
bool BytecodeArrayBuilder::FitsInReg16Operand(Register value) {
- return kMinInt16 <= value.index() && value.index() <= kMaxInt16;
+ return RegisterTranslator::FitsInReg16Operand(value);
+}
+
+// static
+bool BytecodeArrayBuilder::FitsInReg16OperandUntranslated(Register value) {
+ return value.is_short_operand();
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index 7c23dc3f22..fe69337184 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -6,8 +6,12 @@
#define V8_INTERPRETER_BYTECODE_ARRAY_BUILDER_H_
#include "src/ast/ast.h"
+#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/constant-array-builder.h"
+#include "src/interpreter/handler-table-builder.h"
+#include "src/interpreter/register-translator.h"
+#include "src/interpreter/source-position-table.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -18,36 +22,29 @@ class Isolate;
namespace interpreter {
class BytecodeLabel;
-class ConstantArrayBuilder;
class Register;
-// TODO(rmcilroy): Unify this with CreateArgumentsParameters::Type in Turbofan
-// when rest parameters implementation has settled down.
-enum class CreateArgumentsType { kMappedArguments, kUnmappedArguments };
-
-class BytecodeArrayBuilder final {
+class BytecodeArrayBuilder final : public ZoneObject, private RegisterMover {
public:
- BytecodeArrayBuilder(Isolate* isolate, Zone* zone);
+ BytecodeArrayBuilder(Isolate* isolate, Zone* zone, int parameter_count,
+ int context_count, int locals_count);
~BytecodeArrayBuilder();
Handle<BytecodeArray> ToBytecodeArray();
- // Set the number of parameters expected by function.
- void set_parameter_count(int number_of_params);
+ // Get the number of parameters expected by function.
int parameter_count() const {
DCHECK_GE(parameter_count_, 0);
return parameter_count_;
}
- // Set the number of locals required for bytecode array.
- void set_locals_count(int number_of_locals);
+ // Get the number of locals required for bytecode array.
int locals_count() const {
DCHECK_GE(local_register_count_, 0);
return local_register_count_;
}
- // Set number of contexts required for bytecode array.
- void set_context_count(int number_of_contexts);
+ // Get number of contexts required for bytecode array.
int context_count() const {
DCHECK_GE(context_register_count_, 0);
return context_register_count_;
@@ -59,14 +56,30 @@ class BytecodeArrayBuilder final {
// Returns the number of fixed (non-temporary) registers.
int fixed_register_count() const { return context_count() + locals_count(); }
+ // Returns the number of fixed and temporary registers.
+ int fixed_and_temporary_register_count() const {
+ return fixed_register_count() + temporary_register_count();
+ }
+
+ int temporary_register_count() const {
+ return temporary_register_allocator()->allocation_count();
+ }
+
+ // Returns the number of registers used for translating wide
+ // register operands into byte-sized register operands.
+ int translation_register_count() const {
+ return RegisterTranslator::RegisterCountAdjustment(
+ fixed_and_temporary_register_count(), parameter_count());
+ }
+
Register Parameter(int parameter_index) const;
// Return true if the register |reg| represents a parameter or a
// local.
bool RegisterIsParameterOrLocal(Register reg) const;
- // Return true if the register |reg| represents a temporary register.
- bool RegisterIsTemporary(Register reg) const;
+ // Returns true if the register |reg| is a live temporary register.
+ bool TemporaryRegisterIsLive(Register reg) const;
// Constant loads to accumulator.
BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
@@ -80,7 +93,6 @@ class BytecodeArrayBuilder final {
// Global loads to the accumulator and stores from the accumulator.
BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
- LanguageMode language_mode,
TypeofMode typeof_mode);
BytecodeArrayBuilder& StoreGlobal(const Handle<String> name,
int feedback_slot,
@@ -98,20 +110,17 @@ class BytecodeArrayBuilder final {
// Register-register transfer.
BytecodeArrayBuilder& MoveRegister(Register from, Register to);
- BytecodeArrayBuilder& ExchangeRegisters(Register reg0, Register reg1);
// Named load property.
BytecodeArrayBuilder& LoadNamedProperty(Register object,
- const Handle<String> name,
- int feedback_slot,
- LanguageMode language_mode);
+ const Handle<Name> name,
+ int feedback_slot);
// Keyed load property. The key should be in the accumulator.
- BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot,
- LanguageMode language_mode);
+ BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot);
// Store properties. The value to be stored should be in the accumulator.
BytecodeArrayBuilder& StoreNamedProperty(Register object,
- const Handle<String> name,
+ const Handle<Name> name,
int feedback_slot,
LanguageMode language_mode);
BytecodeArrayBuilder& StoreKeyedProperty(Register object, Register key,
@@ -149,44 +158,51 @@ class BytecodeArrayBuilder final {
BytecodeArrayBuilder& PopContext(Register context);
// Call a JS function. The JSFunction or Callable to be called should be in
- // |callable|, the receiver should be in |receiver| and all subsequent
- // arguments should be in registers <receiver + 1> to
- // <receiver + 1 + arg_count>.
- BytecodeArrayBuilder& Call(Register callable, Register receiver,
- size_t arg_count, int feedback_slot);
-
- // Call the new operator. The |constructor| register is followed by
- // |arg_count| consecutive registers containing arguments to be
- // applied to the constructor.
+ // |callable|, the receiver should be in |receiver_args| and all subsequent
+ // arguments should be in registers <receiver_args + 1> to
+ // <receiver_args + receiver_arg_count - 1>.
+ BytecodeArrayBuilder& Call(
+ Register callable, Register receiver_args, size_t receiver_arg_count,
+ int feedback_slot, TailCallMode tail_call_mode = TailCallMode::kDisallow);
+
+ BytecodeArrayBuilder& TailCall(Register callable, Register receiver_args,
+ size_t receiver_arg_count, int feedback_slot) {
+ return Call(callable, receiver_args, receiver_arg_count, feedback_slot,
+ TailCallMode::kAllow);
+ }
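+  // Editor's sketch of the expected register layout (an illustration based
+  // on the comment above, not part of this patch), for a call f.g(a, b):
+  //   callable          -> register holding f.g
+  //   receiver_args     -> register holding f (the receiver)
+  //   receiver_args + 1 -> a
+  //   receiver_args + 2 -> b
+  //   receiver_arg_count = 3 (the receiver plus two arguments)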
+
+ // Call the new operator. The accumulator holds the |new_target|.
+ // The |constructor| is in a register followed by |arg_count|
+ // consecutive arguments starting at |first_arg| for the constructor
+ // invocation.
BytecodeArrayBuilder& New(Register constructor, Register first_arg,
size_t arg_count);
// Call the runtime function with |function_id|. The first argument should be
// in |first_arg| and all subsequent arguments should be in registers
- // <first_arg + 1> to <first_arg + 1 + arg_count>.
+ // <first_arg + 1> to <first_arg + arg_count - 1>.
BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id,
Register first_arg, size_t arg_count);
// Call the runtime function with |function_id| that returns a pair of values.
// The first argument should be in |first_arg| and all subsequent arguments
- // should be in registers <first_arg + 1> to <first_arg + 1 + arg_count>. The
+ // should be in registers <first_arg + 1> to <first_arg + arg_count - 1>. The
// return values will be returned in <first_return> and <first_return + 1>.
BytecodeArrayBuilder& CallRuntimeForPair(Runtime::FunctionId function_id,
Register first_arg, size_t arg_count,
Register first_return);
// Call the JS runtime function with |context_index|. The receiver should
- // be in |receiver| and all subsequent arguments should be in registers
- // <receiver + 1> to <receiver + 1 + arg_count>.
- BytecodeArrayBuilder& CallJSRuntime(int context_index, Register receiver,
- size_t arg_count);
+ // be in |receiver_args| and all subsequent arguments should be in registers
+ // <receiver_args + 1> to <receiver_args + receiver_args_count - 1>.
+ BytecodeArrayBuilder& CallJSRuntime(int context_index, Register receiver_args,
+ size_t receiver_args_count);
// Operators (register holds the lhs value, accumulator holds the rhs value).
- BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg,
- Strength strength);
+ BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg);
// Count Operators (value stored in accumulator).
- BytecodeArrayBuilder& CountOperation(Token::Value op, Strength strength);
+ BytecodeArrayBuilder& CountOperation(Token::Value op);
// Unary Operators.
BytecodeArrayBuilder& LogicalNot();
@@ -195,11 +211,9 @@ class BytecodeArrayBuilder final {
// Deletes property from an object. This expects that accumulator contains
// the key to be deleted and the register contains a reference to the object.
BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
- BytecodeArrayBuilder& DeleteLookupSlot();
// Tests.
- BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg,
- Strength strength);
+ BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg);
// Casts.
BytecodeArrayBuilder& CastAccumulatorToBoolean();
@@ -214,48 +228,65 @@ class BytecodeArrayBuilder final {
BytecodeArrayBuilder& Jump(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfTrue(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfFalse(BytecodeLabel* label);
+ BytecodeArrayBuilder& JumpIfNotHole(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
+ BytecodeArrayBuilder& StackCheck();
+
BytecodeArrayBuilder& Throw();
+ BytecodeArrayBuilder& ReThrow();
BytecodeArrayBuilder& Return();
+ // Debugger.
+ BytecodeArrayBuilder& Debugger();
+
// Complex flow control.
- BytecodeArrayBuilder& ForInPrepare(Register cache_type, Register cache_array,
- Register cache_length);
+ BytecodeArrayBuilder& ForInPrepare(Register cache_info_triple);
BytecodeArrayBuilder& ForInDone(Register index, Register cache_length);
- BytecodeArrayBuilder& ForInNext(Register receiver, Register cache_type,
- Register cache_array, Register index);
+ BytecodeArrayBuilder& ForInNext(Register receiver, Register index,
+ Register cache_type_array_pair);
BytecodeArrayBuilder& ForInStep(Register index);
+ // Exception handling.
+ BytecodeArrayBuilder& MarkHandler(int handler_id, bool will_catch);
+ BytecodeArrayBuilder& MarkTryBegin(int handler_id, Register context);
+ BytecodeArrayBuilder& MarkTryEnd(int handler_id);
+
+ // Creates a new handler table entry and returns a {handler_id} identifying the
+ // entry, so that it can be referenced by above exception handling support.
+ int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
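+
+  // Editor's sketch of the intended emission order (an assumption based on
+  // the methods above, not code from this patch), for `try { ... } catch { ... }`:
+  //   int id = builder.NewHandlerEntry();
+  //   builder.MarkTryBegin(id, context_reg);
+  //   /* emit try body */
+  //   builder.MarkTryEnd(id);
+  //   /* emit jump over the handler */
+  //   builder.MarkHandler(id, true);  // will_catch
+  //   /* emit catch body */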
+
+ void SetStatementPosition(Statement* stmt);
+ void SetExpressionPosition(Expression* expr);
+
// Accessors
Zone* zone() const { return zone_; }
-
- private:
- ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
- const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; }
- Isolate* isolate() const { return isolate_; }
- ConstantArrayBuilder* constant_array_builder() {
- return &constant_array_builder_;
+ TemporaryRegisterAllocator* temporary_register_allocator() {
+ return &temporary_allocator_;
}
- const ConstantArrayBuilder* constant_array_builder() const {
- return &constant_array_builder_;
+ const TemporaryRegisterAllocator* temporary_register_allocator() const {
+ return &temporary_allocator_;
}
+ void EnsureReturn(FunctionLiteral* literal);
+
+ private:
+ class PreviousBytecodeHelper;
+ friend class BytecodeRegisterAllocator;
+
static Bytecode BytecodeForBinaryOperation(Token::Value op);
static Bytecode BytecodeForCountOperation(Token::Value op);
static Bytecode BytecodeForCompareOperation(Token::Value op);
static Bytecode BytecodeForWideOperands(Bytecode bytecode);
- static Bytecode BytecodeForLoadIC(LanguageMode language_mode);
- static Bytecode BytecodeForKeyedLoadIC(LanguageMode language_mode);
static Bytecode BytecodeForStoreIC(LanguageMode language_mode);
static Bytecode BytecodeForKeyedStoreIC(LanguageMode language_mode);
- static Bytecode BytecodeForLoadGlobal(LanguageMode language_mode,
- TypeofMode typeof_mode);
+ static Bytecode BytecodeForLoadGlobal(TypeofMode typeof_mode);
static Bytecode BytecodeForStoreGlobal(LanguageMode language_mode);
static Bytecode BytecodeForStoreLookupSlot(LanguageMode language_mode);
static Bytecode BytecodeForCreateArguments(CreateArgumentsType type);
static Bytecode BytecodeForDelete(LanguageMode language_mode);
+ static Bytecode BytecodeForCall(TailCallMode tail_call_mode);
static bool FitsInIdx8Operand(int value);
static bool FitsInIdx8Operand(size_t value);
@@ -263,15 +294,17 @@ class BytecodeArrayBuilder final {
static bool FitsInIdx16Operand(int value);
static bool FitsInIdx16Operand(size_t value);
static bool FitsInReg8Operand(Register value);
+ static bool FitsInReg8OperandUntranslated(Register value);
static bool FitsInReg16Operand(Register value);
+ static bool FitsInReg16OperandUntranslated(Register value);
+
+ // RegisterMover interface.
+ void MoveRegisterUntranslated(Register from, Register to) override;
static Bytecode GetJumpWithConstantOperand(Bytecode jump_smi8_operand);
static Bytecode GetJumpWithConstantWideOperand(Bytecode jump_smi8_operand);
static Bytecode GetJumpWithToBoolean(Bytecode jump_smi8_operand);
- Register MapRegister(Register reg);
- Register MapRegisters(Register reg, Register args_base, int args_length = 1);
-
template <size_t N>
INLINE(void Output(Bytecode bytecode, uint32_t(&operands)[N]));
void Output(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
@@ -292,49 +325,54 @@ class BytecodeArrayBuilder final {
const ZoneVector<uint8_t>::iterator& jump_location, int delta);
void LeaveBasicBlock();
- void EnsureReturn();
bool OperandIsValid(Bytecode bytecode, int operand_index,
uint32_t operand_value) const;
- bool LastBytecodeInSameBlock() const;
+ bool RegisterIsValid(Register reg, OperandType reg_type) const;
+ bool LastBytecodeInSameBlock() const;
bool NeedToBooleanCast();
bool IsRegisterInAccumulator(Register reg);
- bool RegisterIsValid(Register reg) const;
-
- // Temporary register management.
- int BorrowTemporaryRegister();
- int BorrowTemporaryRegisterNotInRange(int start_index, int end_index);
- void ReturnTemporaryRegister(int reg_index);
- int PrepareForConsecutiveTemporaryRegisters(size_t count);
- void BorrowConsecutiveTemporaryRegister(int reg_index);
- bool TemporaryRegisterIsLive(Register reg) const;
-
- Register first_temporary_register() const;
- Register last_temporary_register() const;
+ // Set position for implicit return.
+ void SetReturnPosition(FunctionLiteral* fun);
// Gets a constant pool entry for the |object|.
size_t GetConstantPoolEntry(Handle<Object> object);
+ ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
+ const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; }
+ Isolate* isolate() const { return isolate_; }
+ ConstantArrayBuilder* constant_array_builder() {
+ return &constant_array_builder_;
+ }
+ const ConstantArrayBuilder* constant_array_builder() const {
+ return &constant_array_builder_;
+ }
+ HandlerTableBuilder* handler_table_builder() {
+ return &handler_table_builder_;
+ }
+ SourcePositionTableBuilder* source_position_table_builder() {
+ return &source_position_table_builder_;
+ }
+ RegisterTranslator* register_translator() { return &register_translator_; }
+
Isolate* isolate_;
Zone* zone_;
ZoneVector<uint8_t> bytecodes_;
bool bytecode_generated_;
ConstantArrayBuilder constant_array_builder_;
+ HandlerTableBuilder handler_table_builder_;
+ SourcePositionTableBuilder source_position_table_builder_;
size_t last_block_end_;
size_t last_bytecode_start_;
bool exit_seen_in_block_;
int unbound_jumps_;
-
int parameter_count_;
int local_register_count_;
int context_register_count_;
- int temporary_register_count_;
- ZoneSet<int> free_temporaries_;
-
- class PreviousBytecodeHelper;
- friend class BytecodeRegisterAllocator;
+ TemporaryRegisterAllocator temporary_allocator_;
+ RegisterTranslator register_translator_;
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayBuilder);
};
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index d09d72f01a..0fea985efe 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -47,14 +47,14 @@ uint32_t BytecodeArrayIterator::GetRawOperand(int operand_index,
bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
Bytecodes::GetOperandOffset(current_bytecode(), operand_index);
switch (Bytecodes::SizeOfOperand(operand_type)) {
- default:
- case OperandSize::kNone:
- UNREACHABLE();
case OperandSize::kByte:
return static_cast<uint32_t>(*operand_start);
case OperandSize::kShort:
return ReadUnalignedUInt16(operand_start);
+ case OperandSize::kNone:
+ UNREACHABLE();
}
+ return 0;
}
@@ -63,12 +63,11 @@ int8_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
return static_cast<int8_t>(operand);
}
-
-int BytecodeArrayIterator::GetCountOperand(int operand_index) const {
+int BytecodeArrayIterator::GetRegisterCountOperand(int operand_index) const {
OperandSize size =
Bytecodes::GetOperandSize(current_bytecode(), operand_index);
- OperandType type = (size == OperandSize::kByte) ? OperandType::kCount8
- : OperandType::kCount16;
+ OperandType type = (size == OperandSize::kByte) ? OperandType::kRegCount8
+ : OperandType::kRegCount16;
uint32_t operand = GetRawOperand(operand_index, type);
return static_cast<int>(operand);
}
@@ -87,19 +86,63 @@ int BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
OperandType operand_type =
Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK(operand_type == OperandType::kReg8 ||
- operand_type == OperandType::kRegPair8 ||
- operand_type == OperandType::kMaybeReg8 ||
- operand_type == OperandType::kReg16);
+ DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
uint32_t operand = GetRawOperand(operand_index, operand_type);
- return Register::FromOperand(operand);
+ Register reg;
+ switch (Bytecodes::GetOperandSize(current_bytecode(), operand_index)) {
+ case OperandSize::kByte:
+ reg = Register::FromOperand(static_cast<uint8_t>(operand));
+ break;
+ case OperandSize::kShort:
+ reg = Register::FromWideOperand(static_cast<uint16_t>(operand));
+ break;
+ case OperandSize::kNone:
+ UNREACHABLE();
+ reg = Register::invalid_value();
+ break;
+ }
+ DCHECK_GE(reg.index(),
+ Register::FromParameterIndex(0, bytecode_array()->parameter_count())
+ .index());
+ DCHECK(reg.index() < bytecode_array()->register_count() ||
+ (reg.index() == 0 &&
+ Bytecodes::IsMaybeRegisterOperandType(
+ Bytecodes::GetOperandType(current_bytecode(), operand_index))));
+ return reg;
}
+int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
+ interpreter::OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
+ switch (operand_type) {
+ case OperandType::kRegPair8:
+ case OperandType::kRegPair16:
+ case OperandType::kRegOutPair8:
+ case OperandType::kRegOutPair16:
+ return 2;
+ case OperandType::kRegOutTriple8:
+ case OperandType::kRegOutTriple16:
+ return 3;
+ default: {
+ if (operand_index + 1 !=
+ Bytecodes::NumberOfOperands(current_bytecode())) {
+ OperandType next_operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index + 1);
+ if (Bytecodes::IsRegisterCountOperandType(next_operand_type)) {
+ return GetRegisterCountOperand(operand_index + 1);
+ }
+ }
+ return 1;
+ }
+ }
+}
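+
+// Editor's note (illustrative, not part of this patch): pair and triple
+// operand types have a fixed range, while a plain register operand that is
+// immediately followed by a kRegCount operand covers a dynamic range. For
+// example, in a Call bytecode laid out as <callable, receiver_args, count,
+// slot>, GetRegisterOperandRange(1) on the receiver_args operand returns the
+// value of the trailing count operand rather than 1.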
Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
int operand_index) const {
- Handle<FixedArray> constants = handle(bytecode_array()->constant_pool());
- return FixedArray::get(constants, GetIndexOperand(operand_index));
+ return FixedArray::get(bytecode_array()->constant_pool(),
+ GetIndexOperand(operand_index),
+ bytecode_array()->GetIsolate());
}
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index e67fa974bd..5379bbf028 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -21,6 +21,7 @@ class BytecodeArrayIterator {
bool done() const;
Bytecode current_bytecode() const;
int current_bytecode_size() const;
+ void set_current_offset(int offset) { bytecode_offset_ = offset; }
int current_offset() const { return bytecode_offset_; }
const Handle<BytecodeArray>& bytecode_array() const {
return bytecode_array_;
@@ -28,8 +29,9 @@ class BytecodeArrayIterator {
int8_t GetImmediateOperand(int operand_index) const;
int GetIndexOperand(int operand_index) const;
- int GetCountOperand(int operand_index) const;
+ int GetRegisterCountOperand(int operand_index) const;
Register GetRegisterOperand(int operand_index) const;
+ int GetRegisterOperandRange(int operand_index) const;
Handle<Object> GetConstantForIndexOperand(int operand_index) const;
// Get the raw byte for the given operand. Note: you should prefer using the
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 959e155149..6f4dc275c1 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -27,19 +27,26 @@ class BytecodeGenerator::ContextScope BASE_EMBEDDED {
: generator_(generator),
scope_(scope),
outer_(generator_->execution_context()),
- register_(generator_->NextContextRegister()),
+ register_(Register::current_context()),
depth_(0),
should_pop_context_(should_pop_context) {
if (outer_) {
depth_ = outer_->depth_ + 1;
- generator_->builder()->PushContext(register_);
+
+ // Push the outer context into a new context register.
+ Register outer_context_reg(builder()->first_context_register().index() +
+ outer_->depth_);
+ outer_->set_register(outer_context_reg);
+ generator_->builder()->PushContext(outer_context_reg);
}
generator_->set_execution_context(this);
}
~ContextScope() {
if (outer_ && should_pop_context_) {
+ DCHECK_EQ(register_.index(), Register::current_context().index());
generator_->builder()->PopContext(outer_->reg());
+ outer_->set_register(register_);
}
generator_->set_execution_context(outer_);
}
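+
+// Editor's note (an assumption pieced together from the hunks above, not
+// patch text): the current context now always lives in the dedicated
+// Register::current_context() slot. Entering a nested scope spills the outer
+// context into context register <first_context_register + outer depth> via
+// PushContext, and the destructor's PopContext restores it, so outer_->reg()
+// only temporarily names that spill slot.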
@@ -67,6 +74,10 @@ class BytecodeGenerator::ContextScope BASE_EMBEDDED {
Register reg() const { return register_; }
private:
+ const BytecodeArrayBuilder* builder() const { return generator_->builder(); }
+
+ void set_register(Register reg) { register_ = reg; }
+
BytecodeGenerator* generator_;
Scope* scope_;
ContextScope* outer_;
@@ -81,30 +92,141 @@ class BytecodeGenerator::ContextScope BASE_EMBEDDED {
class BytecodeGenerator::ControlScope BASE_EMBEDDED {
public:
explicit ControlScope(BytecodeGenerator* generator)
- : generator_(generator), outer_(generator->execution_control()) {
+ : generator_(generator), outer_(generator->execution_control()),
+ context_(generator->execution_context()) {
generator_->set_execution_control(this);
}
virtual ~ControlScope() { generator_->set_execution_control(outer()); }
void Break(Statement* stmt) { PerformCommand(CMD_BREAK, stmt); }
void Continue(Statement* stmt) { PerformCommand(CMD_CONTINUE, stmt); }
+ void ReturnAccumulator() { PerformCommand(CMD_RETURN, nullptr); }
+ void ReThrowAccumulator() { PerformCommand(CMD_RETHROW, nullptr); }
+
+ class DeferredCommands;
protected:
- enum Command { CMD_BREAK, CMD_CONTINUE };
+ enum Command { CMD_BREAK, CMD_CONTINUE, CMD_RETURN, CMD_RETHROW };
void PerformCommand(Command command, Statement* statement);
virtual bool Execute(Command command, Statement* statement) = 0;
BytecodeGenerator* generator() const { return generator_; }
ControlScope* outer() const { return outer_; }
+ ContextScope* context() const { return context_; }
private:
BytecodeGenerator* generator_;
ControlScope* outer_;
+ ContextScope* context_;
DISALLOW_COPY_AND_ASSIGN(ControlScope);
};
+// Helper class for a try-finally control scope. It can record intercepted
+// control-flow commands that cause entry into a finally-block and re-apply
+// them once that block has been left again. Special tokens identify the
+// paths going through the finally-block, so control can dispatch on them
+// after leaving the block.
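+// Editor's sketch of the token dispatch (illustrative, not patch code), for
+// `try { return x; } finally { cleanup(); }`:
+//   token 0  <- RecordCommand(CMD_RETURN, nullptr) on the return path
+//   token 1  <- RecordHandlerReThrowPath() on the exception path
+//   token -1 <- RecordFallThroughPath() when the try block runs off its end
+// After the finally body, ApplyDeferredCommands() compares the token register
+// against each recorded token and re-issues the matching command; -1 hits the
+// default case and execution resumes normally.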
+class BytecodeGenerator::ControlScope::DeferredCommands final {
+ public:
+ DeferredCommands(BytecodeGenerator* generator, Register token_register,
+ Register result_register)
+ : generator_(generator),
+ deferred_(generator->zone()),
+ token_register_(token_register),
+ result_register_(result_register) {}
+
+ // One recorded control-flow command.
+ struct Entry {
+ Command command; // The command type being applied on this path.
+ Statement* statement; // The target statement for the command or {nullptr}.
+ int token; // A token identifying this particular path.
+ };
+
+ // Records a control-flow command while entering the finally-block. This also
+ // generates a new dispatch token that identifies one particular path. This
+ // expects the result to be in the accumulator.
+ void RecordCommand(Command command, Statement* statement) {
+ int token = static_cast<int>(deferred_.size());
+ deferred_.push_back({command, statement, token});
+
+ builder()->StoreAccumulatorInRegister(result_register_);
+ builder()->LoadLiteral(Smi::FromInt(token));
+ builder()->StoreAccumulatorInRegister(token_register_);
+ }
+
+ // Records the dispatch token to be used to identify the re-throw path when
+ // the finally-block has been entered through the exception handler. This
+ // expects the exception to be in the accumulator.
+ void RecordHandlerReThrowPath() {
+ // The accumulator contains the exception object.
+ RecordCommand(CMD_RETHROW, nullptr);
+ }
+
+ // Records the dispatch token to be used to identify the implicit fall-through
+ // path at the end of a try-block into the corresponding finally-block.
+ void RecordFallThroughPath() {
+ builder()->LoadLiteral(Smi::FromInt(-1));
+ builder()->StoreAccumulatorInRegister(token_register_);
+ }
+
+ // Applies all recorded control-flow commands after the finally-block again.
+ // This generates a dynamic dispatch on the token from the entry point.
+ void ApplyDeferredCommands() {
+ // The fall-through path is covered by the default case, hence +1 here.
+ SwitchBuilder dispatch(builder(), static_cast<int>(deferred_.size() + 1));
+ for (size_t i = 0; i < deferred_.size(); ++i) {
+ Entry& entry = deferred_[i];
+ builder()->LoadLiteral(Smi::FromInt(entry.token));
+ builder()->CompareOperation(Token::EQ_STRICT, token_register_);
+ dispatch.Case(static_cast<int>(i));
+ }
+ dispatch.DefaultAt(static_cast<int>(deferred_.size()));
+ for (size_t i = 0; i < deferred_.size(); ++i) {
+ Entry& entry = deferred_[i];
+ dispatch.SetCaseTarget(static_cast<int>(i));
+ builder()->LoadAccumulatorWithRegister(result_register_);
+ execution_control()->PerformCommand(entry.command, entry.statement);
+ }
+ dispatch.SetCaseTarget(static_cast<int>(deferred_.size()));
+ }
+
+ BytecodeArrayBuilder* builder() { return generator_->builder(); }
+ ControlScope* execution_control() { return generator_->execution_control(); }
+
+ private:
+ BytecodeGenerator* generator_;
+ ZoneVector<Entry> deferred_;
+ Register token_register_;
+ Register result_register_;
+};
+
+
+// Scoped class for dealing with control flow reaching the function level.
+class BytecodeGenerator::ControlScopeForTopLevel final
+ : public BytecodeGenerator::ControlScope {
+ public:
+ explicit ControlScopeForTopLevel(BytecodeGenerator* generator)
+ : ControlScope(generator) {}
+
+ protected:
+ bool Execute(Command command, Statement* statement) override {
+ switch (command) {
+ case CMD_BREAK:
+ case CMD_CONTINUE:
+ break;
+ case CMD_RETURN:
+ generator()->builder()->Return();
+ return true;
+ case CMD_RETHROW:
+ generator()->builder()->ReThrow();
+ return true;
+ }
+ return false;
+ }
+};
+
+
// Scoped class for enabling break inside blocks and switch blocks.
class BytecodeGenerator::ControlScopeForBreakable final
: public BytecodeGenerator::ControlScope {
@@ -117,13 +239,15 @@ class BytecodeGenerator::ControlScopeForBreakable final
control_builder_(control_builder) {}
protected:
- virtual bool Execute(Command command, Statement* statement) {
+ bool Execute(Command command, Statement* statement) override {
if (statement != statement_) return false;
switch (command) {
case CMD_BREAK:
control_builder_->Break();
return true;
case CMD_CONTINUE:
+ case CMD_RETURN:
+ case CMD_RETHROW:
break;
}
return false;
@@ -148,7 +272,7 @@ class BytecodeGenerator::ControlScopeForIteration final
loop_builder_(loop_builder) {}
protected:
- virtual bool Execute(Command command, Statement* statement) {
+ bool Execute(Command command, Statement* statement) override {
if (statement != statement_) return false;
switch (command) {
case CMD_BREAK:
@@ -157,6 +281,9 @@ class BytecodeGenerator::ControlScopeForIteration final
case CMD_CONTINUE:
loop_builder_->Continue();
return true;
+ case CMD_RETURN:
+ case CMD_RETHROW:
+ break;
}
return false;
}
@@ -167,12 +294,84 @@ class BytecodeGenerator::ControlScopeForIteration final
};
+// Scoped class for enabling 'throw' in try-catch constructs.
+class BytecodeGenerator::ControlScopeForTryCatch final
+ : public BytecodeGenerator::ControlScope {
+ public:
+ ControlScopeForTryCatch(BytecodeGenerator* generator,
+ TryCatchBuilder* try_catch_builder)
+ : ControlScope(generator) {
+ generator->try_catch_nesting_level_++;
+ }
+ virtual ~ControlScopeForTryCatch() {
+ generator()->try_catch_nesting_level_--;
+ }
+
+ protected:
+ bool Execute(Command command, Statement* statement) override {
+ switch (command) {
+ case CMD_BREAK:
+ case CMD_CONTINUE:
+ case CMD_RETURN:
+ break;
+ case CMD_RETHROW:
+ generator()->builder()->ReThrow();
+ return true;
+ }
+ return false;
+ }
+};
+
+
+// Scoped class for enabling control flow through try-finally constructs.
+class BytecodeGenerator::ControlScopeForTryFinally final
+ : public BytecodeGenerator::ControlScope {
+ public:
+ ControlScopeForTryFinally(BytecodeGenerator* generator,
+ TryFinallyBuilder* try_finally_builder,
+ DeferredCommands* commands)
+ : ControlScope(generator),
+ try_finally_builder_(try_finally_builder),
+ commands_(commands) {
+ generator->try_finally_nesting_level_++;
+ }
+ virtual ~ControlScopeForTryFinally() {
+ generator()->try_finally_nesting_level_--;
+ }
+
+ protected:
+ bool Execute(Command command, Statement* statement) override {
+ switch (command) {
+ case CMD_BREAK:
+ case CMD_CONTINUE:
+ case CMD_RETURN:
+ case CMD_RETHROW:
+ commands_->RecordCommand(command, statement);
+ try_finally_builder_->LeaveTry();
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ TryFinallyBuilder* try_finally_builder_;
+ DeferredCommands* commands_;
+};
+
+
void BytecodeGenerator::ControlScope::PerformCommand(Command command,
Statement* statement) {
ControlScope* current = this;
+ ContextScope* context = this->context();
do {
- if (current->Execute(command, statement)) return;
+ if (current->Execute(command, statement)) { return; }
current = current->outer();
+ if (current->context() != context) {
+ // Pop context to the expected depth.
+ // TODO(rmcilroy): Only emit a single context pop.
+ generator()->builder()->PopContext(current->context()->reg());
+ context = current->context();
+ }
} while (current != nullptr);
UNREACHABLE();
}
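+
+// Editor's note (illustrative, not part of this patch): the loop above
+// unwinds one control scope per iteration until some scope consumes the
+// command, and whenever the next scope was entered under a different context
+// it emits a PopContext, so that, e.g., a `break` out of a scoped block
+// leaves the bytecode running in that block's outer context.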
@@ -183,7 +382,8 @@ class BytecodeGenerator::RegisterAllocationScope {
explicit RegisterAllocationScope(BytecodeGenerator* generator)
: generator_(generator),
outer_(generator->register_allocator()),
- allocator_(builder()) {
+ allocator_(builder()->zone(),
+ builder()->temporary_register_allocator()) {
generator_->set_register_allocator(this);
}
@@ -205,11 +405,11 @@ class BytecodeGenerator::RegisterAllocationScope {
// walk the full context chain and compute the list of consecutive
// reservations in the innerscopes.
UNIMPLEMENTED();
- return Register(-1);
+ return Register::invalid_value();
}
}
- void PrepareForConsecutiveAllocations(size_t count) {
+ void PrepareForConsecutiveAllocations(int count) {
allocator_.PrepareForConsecutiveAllocations(count);
}
@@ -330,7 +530,7 @@ class BytecodeGenerator::RegisterResultScope final
virtual void SetResultInRegister(Register reg) {
DCHECK(builder()->RegisterIsParameterOrLocal(reg) ||
- (builder()->RegisterIsTemporary(reg) &&
+ (builder()->TemporaryRegisterIsLive(reg) &&
!allocator()->RegisterIsAllocatedInThisScope(reg)));
result_register_ = reg;
set_result_identified();
@@ -342,32 +542,36 @@ class BytecodeGenerator::RegisterResultScope final
Register result_register_;
};
-
BytecodeGenerator::BytecodeGenerator(Isolate* isolate, Zone* zone)
: isolate_(isolate),
zone_(zone),
- builder_(isolate, zone),
+ builder_(nullptr),
info_(nullptr),
scope_(nullptr),
globals_(0, zone),
execution_control_(nullptr),
execution_context_(nullptr),
execution_result_(nullptr),
- register_allocator_(nullptr) {
+ register_allocator_(nullptr),
+ try_catch_nesting_level_(0),
+ try_finally_nesting_level_(0) {
InitializeAstVisitor(isolate);
}
-
Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
set_info(info);
set_scope(info->scope());
+ // Initialize bytecode array builder.
+ set_builder(new (zone()) BytecodeArrayBuilder(
+ isolate(), zone(), info->num_parameters_including_this(),
+ scope()->MaxNestedContextChainLength(), scope()->num_stack_slots()));
+
// Initialize the incoming context.
ContextScope incoming_context(this, scope(), false);
- builder()->set_parameter_count(info->num_parameters_including_this());
- builder()->set_locals_count(scope()->num_stack_slots());
- builder()->set_context_count(scope()->MaxNestedContextChainLength());
+ // Initialize control scope.
+ ControlScopeForTopLevel control(this);
// Build function context only if there are context allocated variables.
if (scope()->NeedsContext()) {
@@ -380,9 +584,10 @@ Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
MakeBytecodeBody();
}
+ builder()->EnsureReturn(info->literal());
set_scope(nullptr);
set_info(nullptr);
- return builder_.ToBytecodeArray();
+ return builder()->ToBytecodeArray();
}
@@ -390,11 +595,10 @@ void BytecodeGenerator::MakeBytecodeBody() {
// Build the arguments object if it is used.
VisitArgumentsObject(scope()->arguments());
- // TODO(mythria): Build rest arguments array if it is used.
+ // Build rest arguments array if it is used.
int rest_index;
- if (scope()->rest_parameter(&rest_index)) {
- UNIMPLEMENTED();
- }
+ Variable* rest_parameter = scope()->rest_parameter(&rest_index);
+ VisitRestArgumentsArray(rest_parameter);
// Build assignment to {.this_function} variable if it is used.
VisitThisFunctionVariable(scope()->this_function_var());
@@ -409,37 +613,40 @@ void BytecodeGenerator::MakeBytecodeBody() {
// Visit illegal re-declaration and bail out if it exists.
if (scope()->HasIllegalRedeclaration()) {
- Visit(scope()->GetIllegalRedeclaration());
+ VisitForEffect(scope()->GetIllegalRedeclaration());
return;
}
// Visit declarations within the function scope.
VisitDeclarations(scope()->declarations());
+ // Perform a stack-check before the body.
+ builder()->StackCheck();
+
// Visit statements in the function body.
VisitStatements(info()->literal()->body());
}
void BytecodeGenerator::VisitBlock(Block* stmt) {
- BlockBuilder block_builder(this->builder());
- ControlScopeForBreakable execution_control(this, stmt, &block_builder);
-
- if (stmt->scope() == NULL) {
- // Visit statements in the same scope, no declarations.
- VisitStatements(stmt->statements());
+ // Visit declarations and statements.
+ if (stmt->scope() != nullptr && stmt->scope()->NeedsContext()) {
+ VisitNewLocalBlockContext(stmt->scope());
+ ContextScope scope(this, stmt->scope());
+ VisitBlockDeclarationsAndStatements(stmt);
} else {
- // Visit declarations and statements in a block scope.
- if (stmt->scope()->NeedsContext()) {
- VisitNewLocalBlockContext(stmt->scope());
- ContextScope scope(this, stmt->scope());
- VisitDeclarations(stmt->scope()->declarations());
- VisitStatements(stmt->statements());
- } else {
- VisitDeclarations(stmt->scope()->declarations());
- VisitStatements(stmt->statements());
- }
+ VisitBlockDeclarationsAndStatements(stmt);
+ }
+}
+
+
+void BytecodeGenerator::VisitBlockDeclarationsAndStatements(Block* stmt) {
+ BlockBuilder block_builder(builder());
+ ControlScopeForBreakable execution_control(this, stmt, &block_builder);
+ if (stmt->scope() != nullptr) {
+ VisitDeclarations(stmt->scope()->declarations());
}
+ VisitStatements(stmt->statements());
if (stmt->labels() != nullptr) block_builder.EndBlock();
}
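A sketch of the two paths in VisitBlock above, in plain JavaScript: a block scope with a context-allocated variable (one captured by a closure) needs a fresh block context, while a purely stack-allocated block does not.

    let inc;
    {
      let counter = 0;         // captured below, so context-allocated:
      inc = () => ++counter;   // this block gets its own context
    }
    {
      let x = 1;               // stack-allocated only: no block context
    }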
@@ -480,9 +687,31 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
variable->index());
}
break;
- case VariableLocation::LOOKUP:
- UNIMPLEMENTED();
+ case VariableLocation::LOOKUP: {
+ DCHECK(IsDeclaredVariableMode(mode));
+
+ register_allocator()->PrepareForConsecutiveAllocations(3);
+ Register name = register_allocator()->NextConsecutiveRegister();
+ Register init_value = register_allocator()->NextConsecutiveRegister();
+ Register attributes = register_allocator()->NextConsecutiveRegister();
+
+ builder()->LoadLiteral(variable->name()).StoreAccumulatorInRegister(name);
+ if (hole_init) {
+ builder()->LoadTheHole().StoreAccumulatorInRegister(init_value);
+ } else {
+ // For variables, we must not use an initial value (such as 'undefined')
+ // because we may have a (legal) redeclaration and we must not destroy
+ // the current value.
+ builder()
+ ->LoadLiteral(Smi::FromInt(0))
+ .StoreAccumulatorInRegister(init_value);
+ }
+ builder()
+ ->LoadLiteral(Smi::FromInt(variable->DeclarationPropertyAttributes()))
+ .StoreAccumulatorInRegister(attributes)
+ .CallRuntime(Runtime::kDeclareLookupSlot, name, 3);
break;
+ }
}
}
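The LOOKUP case above covers declarations whose slot cannot be resolved statically and must go through Runtime::kDeclareLookupSlot. A hedged JavaScript example of code that takes this path is a 'var' declared inside a sloppy-mode eval:

    function g() {
      eval("var y = 1;");  // 'y' is declared dynamically via a lookup slot
      return y;            // resolved at runtime, not through a fixed slot
    }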
@@ -503,7 +732,10 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
VisitForAccumulatorValue(decl->fun());
- VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+ DCHECK(variable->mode() == LET || variable->mode() == VAR ||
+ variable->mode() == CONST);
+ VisitVariableAssignment(variable, Token::INIT,
+ FeedbackVectorSlot::Invalid());
break;
}
case VariableLocation::CONTEXT: {
@@ -513,8 +745,20 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
variable->index());
break;
}
- case VariableLocation::LOOKUP:
- UNIMPLEMENTED();
+ case VariableLocation::LOOKUP: {
+ register_allocator()->PrepareForConsecutiveAllocations(3);
+ Register name = register_allocator()->NextConsecutiveRegister();
+ Register literal = register_allocator()->NextConsecutiveRegister();
+ Register attributes = register_allocator()->NextConsecutiveRegister();
+ builder()->LoadLiteral(variable->name()).StoreAccumulatorInRegister(name);
+
+ VisitForAccumulatorValue(decl->fun());
+ builder()
+ ->StoreAccumulatorInRegister(literal)
+ .LoadLiteral(Smi::FromInt(variable->DeclarationPropertyAttributes()))
+ .StoreAccumulatorInRegister(attributes)
+ .CallRuntime(Runtime::kDeclareLookupSlot, name, 3);
+ }
}
}
@@ -533,7 +777,10 @@ void BytecodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
RegisterAllocationScope register_scope(this);
DCHECK(globals()->empty());
- AstVisitor::VisitDeclarations(declarations);
+ for (int i = 0; i < declarations->length(); i++) {
+ RegisterAllocationScope register_scope(this);
+ Visit(declarations->at(i));
+ }
if (globals()->empty()) return;
int array_index = 0;
Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
@@ -569,6 +816,7 @@ void BytecodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
+ builder()->SetStatementPosition(stmt);
VisitForEffect(stmt->expression());
}
@@ -624,12 +872,16 @@ void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
VisitForAccumulatorValue(stmt->expression());
- builder()->Return();
+ builder()->SetStatementPosition(stmt);
+ execution_control()->ReturnAccumulator();
}
void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
- UNIMPLEMENTED();
+ VisitForAccumulatorValue(stmt->expression());
+ builder()->CastAccumulatorToJSObject();
+ VisitNewLocalWithContext();
+ VisitInScope(stmt->statement(), stmt->scope());
}
@@ -657,8 +909,7 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Perform label comparison as if via '===' with tag.
VisitForAccumulatorValue(clause->label());
- builder()->CompareOperation(Token::Value::EQ_STRICT, tag,
- language_mode_strength());
+ builder()->CompareOperation(Token::Value::EQ_STRICT, tag);
switch_builder.Case(i);
}
@@ -688,20 +939,25 @@ void BytecodeGenerator::VisitCaseClause(CaseClause* clause) {
UNREACHABLE();
}
+void BytecodeGenerator::VisitIterationBody(IterationStatement* stmt,
+ LoopBuilder* loop_builder) {
+ ControlScopeForIteration execution_control(this, stmt, loop_builder);
+ builder()->StackCheck();
+ Visit(stmt->body());
+}
void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder loop_builder(builder());
- ControlScopeForIteration execution_control(this, stmt, &loop_builder);
loop_builder.LoopHeader();
if (stmt->cond()->ToBooleanIsFalse()) {
- Visit(stmt->body());
+ VisitIterationBody(stmt, &loop_builder);
loop_builder.Condition();
} else if (stmt->cond()->ToBooleanIsTrue()) {
loop_builder.Condition();
- Visit(stmt->body());
+ VisitIterationBody(stmt, &loop_builder);
loop_builder.JumpToHeader();
} else {
- Visit(stmt->body());
+ VisitIterationBody(stmt, &loop_builder);
loop_builder.Condition();
VisitForAccumulatorValue(stmt->cond());
loop_builder.JumpToHeaderIfTrue();
@@ -709,7 +965,6 @@ void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
loop_builder.EndLoop();
}
-
void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
if (stmt->cond()->ToBooleanIsFalse()) {
// If the condition is false there is no need to generate the loop.
@@ -717,14 +972,13 @@ void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
}
LoopBuilder loop_builder(builder());
- ControlScopeForIteration execution_control(this, stmt, &loop_builder);
loop_builder.LoopHeader();
loop_builder.Condition();
if (!stmt->cond()->ToBooleanIsTrue()) {
VisitForAccumulatorValue(stmt->cond());
loop_builder.BreakIfFalse();
}
- Visit(stmt->body());
+ VisitIterationBody(stmt, &loop_builder);
loop_builder.JumpToHeader();
loop_builder.EndLoop();
}
@@ -741,15 +995,13 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
}
LoopBuilder loop_builder(builder());
- ControlScopeForIteration execution_control(this, stmt, &loop_builder);
-
loop_builder.LoopHeader();
loop_builder.Condition();
if (stmt->cond() && !stmt->cond()->ToBooleanIsTrue()) {
VisitForAccumulatorValue(stmt->cond());
loop_builder.BreakIfFalse();
}
- Visit(stmt->body());
+ VisitIterationBody(stmt, &loop_builder);
if (stmt->next() != nullptr) {
loop_builder.Next();
Visit(stmt->next());
@@ -770,7 +1022,7 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
switch (assign_type) {
case VARIABLE: {
Variable* variable = expr->AsVariableProxy()->var();
- VisitVariableAssignment(variable, slot);
+ VisitVariableAssignment(variable, Token::ASSIGN, slot);
break;
}
case NAMED_PROPERTY: {
@@ -795,9 +1047,40 @@ void BytecodeGenerator::VisitForInAssignment(Expression* expr,
language_mode());
break;
}
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNIMPLEMENTED();
+ case NAMED_SUPER_PROPERTY: {
+ RegisterAllocationScope register_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(4);
+ Register receiver = register_allocator()->NextConsecutiveRegister();
+ Register home_object = register_allocator()->NextConsecutiveRegister();
+ Register name = register_allocator()->NextConsecutiveRegister();
+ Register value = register_allocator()->NextConsecutiveRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ SuperPropertyReference* super_property =
+ property->obj()->AsSuperPropertyReference();
+ VisitForRegisterValue(super_property->this_var(), receiver);
+ VisitForRegisterValue(super_property->home_object(), home_object);
+ builder()
+ ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+ .StoreAccumulatorInRegister(name);
+ BuildNamedSuperPropertyStore(receiver, home_object, name, value);
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ RegisterAllocationScope register_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(4);
+ Register receiver = register_allocator()->NextConsecutiveRegister();
+ Register home_object = register_allocator()->NextConsecutiveRegister();
+ Register key = register_allocator()->NextConsecutiveRegister();
+ Register value = register_allocator()->NextConsecutiveRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ SuperPropertyReference* super_property =
+ property->obj()->AsSuperPropertyReference();
+ VisitForRegisterValue(super_property->this_var(), receiver);
+ VisitForRegisterValue(super_property->home_object(), home_object);
+ VisitForRegisterValue(property->key(), key);
+ BuildKeyedSuperPropertyStore(receiver, home_object, key, value);
+ break;
+ }
}
}
@@ -810,7 +1093,6 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
}
LoopBuilder loop_builder(builder());
- ControlScopeForIteration control_scope(this, stmt, &loop_builder);
BytecodeLabel subject_null_label, subject_undefined_label, not_object_label;
// Prepare the state for executing ForIn.
@@ -821,10 +1103,14 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->CastAccumulatorToJSObject();
builder()->JumpIfNull(&not_object_label);
builder()->StoreAccumulatorInRegister(receiver);
- Register cache_type = register_allocator()->NewRegister();
- Register cache_array = register_allocator()->NewRegister();
- Register cache_length = register_allocator()->NewRegister();
- builder()->ForInPrepare(cache_type, cache_array, cache_length);
+
+ register_allocator()->PrepareForConsecutiveAllocations(3);
+ Register cache_type = register_allocator()->NextConsecutiveRegister();
+ Register cache_array = register_allocator()->NextConsecutiveRegister();
+ Register cache_length = register_allocator()->NextConsecutiveRegister();
+ // Used as kRegTriple8 and kRegPair8 in ForInPrepare and ForInNext.
+ USE(cache_array);
+ builder()->ForInPrepare(cache_type);
// Set up loop counter
Register index = register_allocator()->NewRegister();
@@ -836,10 +1122,11 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
loop_builder.Condition();
builder()->ForInDone(index, cache_length);
loop_builder.BreakIfTrue();
- builder()->ForInNext(receiver, cache_type, cache_array, index);
+ DCHECK(Register::AreContiguous(cache_type, cache_array));
+ builder()->ForInNext(receiver, index, cache_type);
loop_builder.ContinueIfUndefined();
VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
- Visit(stmt->body());
+ VisitIterationBody(stmt, &loop_builder);
loop_builder.Next();
builder()->ForInStep(index);
builder()->StoreAccumulatorInRegister(index);
@@ -852,31 +1139,127 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
- UNIMPLEMENTED();
+ LoopBuilder loop_builder(builder());
+ ControlScopeForIteration control_scope(this, stmt, &loop_builder);
+
+ VisitForEffect(stmt->assign_iterator());
+
+ loop_builder.LoopHeader();
+ loop_builder.Next();
+ VisitForEffect(stmt->next_result());
+ VisitForAccumulatorValue(stmt->result_done());
+ loop_builder.BreakIfTrue();
+
+ VisitForEffect(stmt->assign_each());
+ VisitIterationBody(stmt, &loop_builder);
+ loop_builder.JumpToHeader();
+ loop_builder.EndLoop();
}
void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
- if (FLAG_ignition_fake_try_catch) {
+ TryCatchBuilder try_control_builder(builder());
+ Register no_reg;
+
+ // Preserve the context in a dedicated register, so that it can be restored
+ // when the handler is entered by the stack-unwinding machinery.
+ // TODO(mstarzinger): Be smarter about register allocation.
+ Register context = register_allocator()->NewRegister();
+ builder()->MoveRegister(Register::current_context(), context);
+
+ // Evaluate the try-block inside a control scope. This simulates a handler
+ // that is intercepting 'throw' control commands.
+ try_control_builder.BeginTry(context);
+ {
+ ControlScopeForTryCatch scope(this, &try_control_builder);
Visit(stmt->try_block());
- return;
}
- UNIMPLEMENTED();
+ try_control_builder.EndTry();
+
+ // Create a catch scope that binds the exception.
+ VisitNewLocalCatchContext(stmt->variable());
+ builder()->StoreAccumulatorInRegister(context);
+
+ // Clear message object as we enter the catch block.
+ builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0);
+
+ // Load the catch context into the accumulator.
+ builder()->LoadAccumulatorWithRegister(context);
+
+ // Evaluate the catch-block.
+ VisitInScope(stmt->catch_block(), stmt->scope());
+ try_control_builder.EndCatch();
}
void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- if (FLAG_ignition_fake_try_catch) {
+ TryFinallyBuilder try_control_builder(builder(), IsInsideTryCatch());
+ Register no_reg;
+
+ // We keep a record of all paths that enter the finally-block to be able to
+ // dispatch to the correct continuation point after the statements in the
+ // finally-block have been evaluated.
+ //
+ // The try-finally construct can enter the finally-block in three ways:
+ // 1. By exiting the try-block normally, falling through at the end.
+ // 2. By exiting the try-block with a function-local control flow transfer
+ // (i.e. through break/continue/return statements).
+ // 3. By exiting the try-block with a thrown exception.
+ //
+ // The result register semantics depend on how the block was entered:
+ // - ReturnStatement: It represents the return value being returned.
+ // - ThrowStatement: It represents the exception being thrown.
+ // - BreakStatement/ContinueStatement: Undefined and not used.
+ // - Falling through into finally-block: Undefined and not used.
+ Register token = register_allocator()->NewRegister();
+ Register result = register_allocator()->NewRegister();
+ ControlScope::DeferredCommands commands(this, token, result);
+
+ // Preserve the context in a dedicated register, so that it can be restored
+ // when the handler is entered by the stack-unwinding machinery.
+ // TODO(mstarzinger): Be smarter about register allocation.
+ Register context = register_allocator()->NewRegister();
+ builder()->MoveRegister(Register::current_context(), context);
+
+ // Evaluate the try-block inside a control scope. This simulates a handler
+ // that is intercepting all control commands.
+ try_control_builder.BeginTry(context);
+ {
+ ControlScopeForTryFinally scope(this, &try_control_builder, &commands);
Visit(stmt->try_block());
- Visit(stmt->finally_block());
- return;
}
- UNIMPLEMENTED();
+ try_control_builder.EndTry();
+
+ // Record fall-through and exception cases.
+ commands.RecordFallThroughPath();
+ try_control_builder.LeaveTry();
+ try_control_builder.BeginHandler();
+ commands.RecordHandlerReThrowPath();
+
+ // Pending message object is saved on entry.
+ try_control_builder.BeginFinally();
+ Register message = context; // Reuse register.
+
+ // Clear message object as we enter the finally block.
+ builder()
+ ->CallRuntime(Runtime::kInterpreterClearPendingMessage, no_reg, 0)
+ .StoreAccumulatorInRegister(message);
+
+ // Evaluate the finally-block.
+ Visit(stmt->finally_block());
+ try_control_builder.EndFinally();
+
+ // Pending message object is restored on exit.
+ builder()->CallRuntime(Runtime::kInterpreterSetPendingMessage, message, 1);
+
+ // Dynamic dispatch after the finally-block.
+ commands.ApplyDeferredCommands();
}
void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
- UNIMPLEMENTED();
+ builder()->SetStatementPosition(stmt);
+ builder()->Debugger();
}
@@ -892,18 +1275,166 @@ void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
- UNIMPLEMENTED();
+ if (expr->scope()->ContextLocalCount() > 0) {
+ VisitNewLocalBlockContext(expr->scope());
+ ContextScope scope(this, expr->scope());
+ VisitDeclarations(expr->scope()->declarations());
+ VisitClassLiteralContents(expr);
+ } else {
+ VisitDeclarations(expr->scope()->declarations());
+ VisitClassLiteralContents(expr);
+ }
+}
+
+void BytecodeGenerator::VisitClassLiteralContents(ClassLiteral* expr) {
+ VisitClassLiteralForRuntimeDefinition(expr);
+
+ // Load the "prototype" from the constructor.
+ register_allocator()->PrepareForConsecutiveAllocations(2);
+ Register literal = register_allocator()->NextConsecutiveRegister();
+ Register prototype = register_allocator()->NextConsecutiveRegister();
+ Handle<String> name = isolate()->factory()->prototype_string();
+ FeedbackVectorSlot slot = expr->PrototypeSlot();
+ builder()
+ ->StoreAccumulatorInRegister(literal)
+ .LoadNamedProperty(literal, name, feedback_index(slot))
+ .StoreAccumulatorInRegister(prototype);
+
+ VisitClassLiteralProperties(expr, literal, prototype);
+ builder()->CallRuntime(Runtime::kFinalizeClassDefinition, literal, 2);
+ // Assign to class variable.
+ if (expr->class_variable_proxy() != nullptr) {
+ Variable* var = expr->class_variable_proxy()->var();
+ FeedbackVectorSlot slot = expr->NeedsProxySlot()
+ ? expr->ProxySlot()
+ : FeedbackVectorSlot::Invalid();
+ VisitVariableAssignment(var, Token::INIT, slot);
+ }
+ execution_result()->SetResultInAccumulator();
}
+void BytecodeGenerator::VisitClassLiteralForRuntimeDefinition(
+ ClassLiteral* expr) {
+ AccumulatorResultScope result_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(4);
+ Register extends = register_allocator()->NextConsecutiveRegister();
+ Register constructor = register_allocator()->NextConsecutiveRegister();
+ Register start_position = register_allocator()->NextConsecutiveRegister();
+ Register end_position = register_allocator()->NextConsecutiveRegister();
+
+ VisitForAccumulatorValueOrTheHole(expr->extends());
+ builder()->StoreAccumulatorInRegister(extends);
+
+ VisitForAccumulatorValue(expr->constructor());
+ builder()
+ ->StoreAccumulatorInRegister(constructor)
+ .LoadLiteral(Smi::FromInt(expr->start_position()))
+ .StoreAccumulatorInRegister(start_position)
+ .LoadLiteral(Smi::FromInt(expr->end_position()))
+ .StoreAccumulatorInRegister(end_position)
+ .CallRuntime(Runtime::kDefineClass, extends, 4);
+ result_scope.SetResultInAccumulator();
+}
+
+void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
+ Register literal,
+ Register prototype) {
+ RegisterAllocationScope register_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(5);
+ Register receiver = register_allocator()->NextConsecutiveRegister();
+ Register key = register_allocator()->NextConsecutiveRegister();
+ Register value = register_allocator()->NextConsecutiveRegister();
+ Register attr = register_allocator()->NextConsecutiveRegister();
+ Register set_function_name = register_allocator()->NextConsecutiveRegister();
+
+ bool attr_assigned = false;
+ Register old_receiver = Register::invalid_value();
+
+ // Create nodes to store method values into the literal.
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+
+ // Set up the receiver.
+ Register new_receiver = property->is_static() ? literal : prototype;
+ if (new_receiver != old_receiver) {
+ builder()->MoveRegister(new_receiver, receiver);
+ old_receiver = new_receiver;
+ }
+
+ VisitForAccumulatorValue(property->key());
+ builder()->CastAccumulatorToName().StoreAccumulatorInRegister(key);
+ // The static "prototype" property is read-only. The parser already handles
+ // the non-computed property name case. Since a computed static name is the
+ // only case where we need to check for an own read-only property, we
+ // special-case it here instead of checking every property.
+ if (property->is_static() && property->is_computed_name()) {
+ VisitClassLiteralStaticPrototypeWithComputedName(key);
+ }
+ VisitForAccumulatorValue(property->value());
+ builder()->StoreAccumulatorInRegister(value);
+
+ VisitSetHomeObject(value, receiver, property);
+
+ if (!attr_assigned) {
+ builder()
+ ->LoadLiteral(Smi::FromInt(DONT_ENUM))
+ .StoreAccumulatorInRegister(attr);
+ attr_assigned = true;
+ }
+
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ case ObjectLiteral::Property::PROTOTYPE:
+ // Invalid properties for ES6 classes.
+ UNREACHABLE();
+ break;
+ case ObjectLiteral::Property::COMPUTED: {
+ builder()
+ ->LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
+ .StoreAccumulatorInRegister(set_function_name);
+ builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral, receiver,
+ 5);
+ break;
+ }
+ case ObjectLiteral::Property::GETTER: {
+ builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked,
+ receiver, 4);
+ break;
+ }
+ case ObjectLiteral::Property::SETTER: {
+ builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked,
+ receiver, 4);
+ break;
+ }
+ }
+ }
+}
+
+void BytecodeGenerator::VisitClassLiteralStaticPrototypeWithComputedName(
+ Register key) {
+ BytecodeLabel done;
+ builder()
+ ->LoadLiteral(isolate()->factory()->prototype_string())
+ .CompareOperation(Token::Value::EQ_STRICT, key)
+ .JumpIfFalse(&done)
+ .CallRuntime(Runtime::kThrowStaticPrototypeError, Register(0), 0)
+ .Bind(&done);
+}
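The runtime check emitted above handles the one case the parser cannot reject statically: a computed static property name that evaluates to "prototype". Sketch:

    class A { static helper() {} }      // fine; checked by the parser
    const name = "proto" + "type";
    class B { static [name]() {} }      // TypeError at class definition time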
void BytecodeGenerator::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
- UNIMPLEMENTED();
+ // Find or build a shared function info for the native function template.
+ Handle<SharedFunctionInfo> shared_info =
+ Compiler::GetSharedFunctionInfoForNative(expr->extension(), expr->name());
+ builder()->CreateClosure(shared_info, NOT_TENURED);
+ execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitDoExpression(DoExpression* expr) {
- UNIMPLEMENTED();
+ VisitBlock(expr->block());
+ VisitVariableProxy(expr->result());
}
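Do-expressions were an experimental feature behind a harmony flag at the time; the block is visited as a statement and the trailing result proxy yields its completion value. Sketch, assuming the flag is enabled:

    // requires the do-expressions harmony flag
    let x = do {
      let tmp = 2;
      tmp * 3;     // the block's completion value becomes the result
    };             // x === 6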
@@ -964,10 +1495,13 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
builder()->CreateObjectLiteral(expr->constant_properties(),
expr->literal_index(),
expr->ComputeFlags(true));
- Register literal;
+
+ // Allocate in the outer scope since this register is used to return the
+ // expression's results to the caller.
+ Register literal = register_allocator()->outer()->NewRegister();
+ builder()->StoreAccumulatorInRegister(literal);
// Store computed values into the literal.
- bool literal_in_accumulator = true;
int property_index = 0;
AccessorTable accessor_table(zone());
for (; property_index < expr->properties()->length(); property_index++) {
@@ -975,12 +1509,6 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
- if (literal_in_accumulator) {
- literal = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(literal);
- literal_in_accumulator = false;
- }
-
RegisterAllocationScope inner_register_scope(this);
Literal* literal_key = property->key()->AsLiteral();
switch (property->kind()) {
@@ -995,21 +1523,31 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (literal_key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(property->value());
- builder()->StoreNamedProperty(
- literal, literal_key->AsPropertyName(),
- feedback_index(property->GetSlot(0)), language_mode());
+ if (FunctionLiteral::NeedsHomeObject(property->value())) {
+ RegisterAllocationScope register_scope(this);
+ Register value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ builder()->StoreNamedProperty(
+ literal, literal_key->AsPropertyName(),
+ feedback_index(property->GetSlot(0)), language_mode());
+ VisitSetHomeObject(value, literal, property, 1);
+ } else {
+ builder()->StoreNamedProperty(
+ literal, literal_key->AsPropertyName(),
+ feedback_index(property->GetSlot(0)), language_mode());
+ }
} else {
VisitForEffect(property->value());
}
} else {
- register_allocator()->PrepareForConsecutiveAllocations(3);
+ register_allocator()->PrepareForConsecutiveAllocations(4);
+ Register literal_argument =
+ register_allocator()->NextConsecutiveRegister();
Register key = register_allocator()->NextConsecutiveRegister();
Register value = register_allocator()->NextConsecutiveRegister();
Register language = register_allocator()->NextConsecutiveRegister();
- // TODO(oth): This is problematic - can't assume contiguous here.
- // literal is allocated in outer register scope, whereas key, value,
- // language are in another.
- DCHECK(Register::AreContiguous(literal, key, value, language));
+
+ builder()->MoveRegister(literal, literal_argument);
VisitForAccumulatorValue(property->key());
builder()->StoreAccumulatorInRegister(key);
VisitForAccumulatorValue(property->value());
@@ -1018,20 +1556,23 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
builder()
->LoadLiteral(Smi::FromInt(SLOPPY))
.StoreAccumulatorInRegister(language)
- .CallRuntime(Runtime::kSetProperty, literal, 4);
+ .CallRuntime(Runtime::kSetProperty, literal_argument, 4);
VisitSetHomeObject(value, literal, property);
}
}
break;
}
case ObjectLiteral::Property::PROTOTYPE: {
- register_allocator()->PrepareForConsecutiveAllocations(1);
DCHECK(property->emit_store());
+ register_allocator()->PrepareForConsecutiveAllocations(2);
+ Register literal_argument =
+ register_allocator()->NextConsecutiveRegister();
Register value = register_allocator()->NextConsecutiveRegister();
- DCHECK(Register::AreContiguous(literal, value));
+
+ builder()->MoveRegister(literal, literal_argument);
VisitForAccumulatorValue(property->value());
builder()->StoreAccumulatorInRegister(value).CallRuntime(
- Runtime::kInternalSetPrototype, literal, 2);
+ Runtime::kInternalSetPrototype, literal_argument, 2);
break;
}
case ObjectLiteral::Property::GETTER:
@@ -1052,12 +1593,14 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end(); ++it) {
RegisterAllocationScope inner_register_scope(this);
- register_allocator()->PrepareForConsecutiveAllocations(4);
+ register_allocator()->PrepareForConsecutiveAllocations(5);
+ Register literal_argument = register_allocator()->NextConsecutiveRegister();
Register name = register_allocator()->NextConsecutiveRegister();
Register getter = register_allocator()->NextConsecutiveRegister();
Register setter = register_allocator()->NextConsecutiveRegister();
Register attr = register_allocator()->NextConsecutiveRegister();
- DCHECK(Register::AreContiguous(literal, name, getter, setter, attr));
+
+ builder()->MoveRegister(literal, literal_argument);
VisitForAccumulatorValue(it->first);
builder()->StoreAccumulatorInRegister(name);
VisitObjectLiteralAccessor(literal, it->second->getter, getter);
@@ -1065,7 +1608,8 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
builder()
->LoadLiteral(Smi::FromInt(NONE))
.StoreAccumulatorInRegister(attr)
- .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, literal, 5);
+ .CallRuntime(Runtime::kDefineAccessorPropertyUnchecked,
+ literal_argument, 5);
}
// Object literals have two parts. The "static" part on the left contains no
@@ -1078,67 +1622,69 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// compile them into a series of "SetOwnProperty" runtime calls. This will
// preserve insertion order.
for (; property_index < expr->properties()->length(); property_index++) {
- if (literal_in_accumulator) {
- literal = register_allocator()->NewRegister();
- builder()->StoreAccumulatorInRegister(literal);
- literal_in_accumulator = false;
- }
-
ObjectLiteral::Property* property = expr->properties()->at(property_index);
RegisterAllocationScope inner_register_scope(this);
+
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
DCHECK(property->emit_store());
- Register value = register_allocator()->NewRegister();
- DCHECK(Register::AreContiguous(literal, value));
+ register_allocator()->PrepareForConsecutiveAllocations(2);
+ Register literal_argument =
+ register_allocator()->NextConsecutiveRegister();
+ Register value = register_allocator()->NextConsecutiveRegister();
+
+ builder()->MoveRegister(literal, literal_argument);
VisitForAccumulatorValue(property->value());
builder()->StoreAccumulatorInRegister(value).CallRuntime(
- Runtime::kInternalSetPrototype, literal, 2);
+ Runtime::kInternalSetPrototype, literal_argument, 2);
continue;
}
- register_allocator()->PrepareForConsecutiveAllocations(3);
+ register_allocator()->PrepareForConsecutiveAllocations(5);
+ Register literal_argument = register_allocator()->NextConsecutiveRegister();
Register key = register_allocator()->NextConsecutiveRegister();
Register value = register_allocator()->NextConsecutiveRegister();
Register attr = register_allocator()->NextConsecutiveRegister();
- DCHECK(Register::AreContiguous(literal, key, value, attr));
+ DCHECK(Register::AreContiguous(literal_argument, key, value, attr));
+ Register set_function_name =
+ register_allocator()->NextConsecutiveRegister();
+ builder()->MoveRegister(literal, literal_argument);
VisitForAccumulatorValue(property->key());
builder()->CastAccumulatorToName().StoreAccumulatorInRegister(key);
VisitForAccumulatorValue(property->value());
builder()->StoreAccumulatorInRegister(value);
VisitSetHomeObject(value, literal, property);
builder()->LoadLiteral(Smi::FromInt(NONE)).StoreAccumulatorInRegister(attr);
- Runtime::FunctionId function_id = static_cast<Runtime::FunctionId>(-1);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- function_id = Runtime::kDefineDataPropertyUnchecked;
+ builder()
+ ->LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
+ .StoreAccumulatorInRegister(set_function_name);
+ builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral,
+ literal_argument, 5);
break;
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE(); // Handled specially above.
break;
case ObjectLiteral::Property::GETTER:
- function_id = Runtime::kDefineGetterPropertyUnchecked;
+ builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked,
+ literal_argument, 4);
break;
case ObjectLiteral::Property::SETTER:
- function_id = Runtime::kDefineSetterPropertyUnchecked;
+ builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked,
+ literal_argument, 4);
break;
}
- builder()->CallRuntime(function_id, literal, 4);
}
// Transform literals that contain functions to fast properties.
if (expr->has_function()) {
- DCHECK(!literal_in_accumulator);
builder()->CallRuntime(Runtime::kToFastProperties, literal, 1);
}
- if (!literal_in_accumulator) {
- // Restore literal array into accumulator.
- builder()->LoadAccumulatorWithRegister(literal);
- }
- execution_result()->SetResultInAccumulator();
+ execution_result()->SetResultInRegister(literal);
}
@@ -1156,10 +1702,7 @@ void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
array_index++) {
Expression* subexpr = expr->values()->at(array_index);
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
- if (subexpr->IsSpread()) {
- // TODO(rmcilroy): Deal with spread expressions.
- UNIMPLEMENTED();
- }
+ DCHECK(!subexpr->IsSpread());
if (literal_in_accumulator) {
index = register_allocator()->NewRegister();
@@ -1189,14 +1732,25 @@ void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
VisitVariableLoad(proxy->var(), proxy->VariableFeedbackSlot());
}
+void BytecodeGenerator::BuildHoleCheckForVariableLoad(VariableMode mode,
+ Handle<String> name) {
+ if (mode == CONST_LEGACY) {
+ BytecodeLabel end_label;
+ builder()->JumpIfNotHole(&end_label).LoadUndefined().Bind(&end_label);
+ } else if (mode == LET || mode == CONST) {
+ BuildThrowIfHole(name);
+ }
+}
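The hole checks above implement the ES6 temporal dead zone for let/const loads, while CONST_LEGACY loads silently replace the hole with undefined. Sketch of the throwing case:

    function tdz() {
      console.log(a);  // ReferenceError: 'a' is still the hole (TDZ)
      let a = 1;       // initialization ends the temporal dead zone
    }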
void BytecodeGenerator::VisitVariableLoad(Variable* variable,
FeedbackVectorSlot slot,
TypeofMode typeof_mode) {
+ VariableMode mode = variable->mode();
switch (variable->location()) {
case VariableLocation::LOCAL: {
Register source(Register(variable->index()));
builder()->LoadAccumulatorWithRegister(source);
+ BuildHoleCheckForVariableLoad(mode, variable->name());
execution_result()->SetResultInAccumulator();
break;
}
@@ -1205,13 +1759,14 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
// index -1 but is parameter index 0 in BytecodeArrayBuilder).
Register source = builder()->Parameter(variable->index() + 1);
builder()->LoadAccumulatorWithRegister(source);
+ BuildHoleCheckForVariableLoad(mode, variable->name());
execution_result()->SetResultInAccumulator();
break;
}
case VariableLocation::GLOBAL:
case VariableLocation::UNALLOCATED: {
builder()->LoadGlobal(variable->name(), feedback_index(slot),
- language_mode(), typeof_mode);
+ typeof_mode);
execution_result()->SetResultInAccumulator();
break;
}
@@ -1237,10 +1792,10 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
.StoreAccumulatorInRegister(context_reg);
}
}
+
builder()->LoadContextSlot(context_reg, variable->index());
+ BuildHoleCheckForVariableLoad(mode, variable->name());
execution_result()->SetResultInAccumulator();
- // TODO(rmcilroy): Perform check for uninitialized legacy const, const and
- // let variables.
break;
}
case VariableLocation::LOOKUP: {
@@ -1251,14 +1806,12 @@ void BytecodeGenerator::VisitVariableLoad(Variable* variable,
}
}
-
void BytecodeGenerator::VisitVariableLoadForAccumulatorValue(
Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
AccumulatorResultScope accumulator_result(this);
VisitVariableLoad(variable, slot, typeof_mode);
}
-
Register BytecodeGenerator::VisitVariableLoadForRegisterValue(
Variable* variable, FeedbackVectorSlot slot, TypeofMode typeof_mode) {
RegisterResultScope register_scope(this);
@@ -1266,20 +1819,150 @@ Register BytecodeGenerator::VisitVariableLoadForRegisterValue(
return register_scope.ResultRegister();
}
+void BytecodeGenerator::BuildNamedSuperPropertyLoad(Register receiver,
+ Register home_object,
+ Register name) {
+ DCHECK(Register::AreContiguous(receiver, home_object, name));
+ builder()->CallRuntime(Runtime::kLoadFromSuper, receiver, 3);
+}
+
+void BytecodeGenerator::BuildKeyedSuperPropertyLoad(Register receiver,
+ Register home_object,
+ Register key) {
+ DCHECK(Register::AreContiguous(receiver, home_object, key));
+ builder()->CallRuntime(Runtime::kLoadKeyedFromSuper, receiver, 3);
+}
+
+void BytecodeGenerator::BuildNamedSuperPropertyStore(Register receiver,
+ Register home_object,
+ Register name,
+ Register value) {
+ DCHECK(Register::AreContiguous(receiver, home_object, name, value));
+ Runtime::FunctionId function_id = is_strict(language_mode())
+ ? Runtime::kStoreToSuper_Strict
+ : Runtime::kStoreToSuper_Sloppy;
+ builder()->CallRuntime(function_id, receiver, 4);
+}
+
+void BytecodeGenerator::BuildKeyedSuperPropertyStore(Register receiver,
+ Register home_object,
+ Register key,
+ Register value) {
+ DCHECK(Register::AreContiguous(receiver, home_object, key, value));
+ Runtime::FunctionId function_id = is_strict(language_mode())
+ ? Runtime::kStoreKeyedToSuper_Strict
+ : Runtime::kStoreKeyedToSuper_Sloppy;
+ builder()->CallRuntime(function_id, receiver, 4);
+}
+
+void BytecodeGenerator::BuildThrowReferenceError(Handle<String> name) {
+ RegisterAllocationScope register_scope(this);
+ Register name_reg = register_allocator()->NewRegister();
+ builder()->LoadLiteral(name).StoreAccumulatorInRegister(name_reg).CallRuntime(
+ Runtime::kThrowReferenceError, name_reg, 1);
+}
+
+void BytecodeGenerator::BuildThrowIfHole(Handle<String> name) {
+ // TODO(interpreter): Can the parser reduce the number of checks
+ // performed? Or should there be a ThrowIfHole bytecode?
+ BytecodeLabel no_reference_error;
+ builder()->JumpIfNotHole(&no_reference_error);
+ BuildThrowReferenceError(name);
+ builder()->Bind(&no_reference_error);
+}
+
+void BytecodeGenerator::BuildThrowIfNotHole(Handle<String> name) {
+ // TODO(interpreter): Can the parser reduce the number of checks
+ // performed? Or should there be a ThrowIfNotHole bytecode?
+ BytecodeLabel no_reference_error, reference_error;
+ builder()
+ ->JumpIfNotHole(&reference_error)
+ .Jump(&no_reference_error)
+ .Bind(&reference_error);
+ BuildThrowReferenceError(name);
+ builder()->Bind(&no_reference_error);
+}
+
+void BytecodeGenerator::BuildThrowReassignConstant(Handle<String> name) {
+ // TODO(mythria): This will be replaced by a new bytecode that throws an
+ // appropriate error depending on whether the value is a hole or not.
+ BytecodeLabel const_assign_error;
+ builder()->JumpIfNotHole(&const_assign_error);
+ BuildThrowReferenceError(name);
+ builder()
+ ->Bind(&const_assign_error)
+ .CallRuntime(Runtime::kThrowConstAssignError, Register(), 0);
+}
+
+void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
+ Token::Value op) {
+ VariableMode mode = variable->mode();
+ DCHECK(mode != CONST_LEGACY);
+ if (mode == CONST && op != Token::INIT) {
+ // Non-initializing assignments to constants are not allowed.
+ BuildThrowReassignConstant(variable->name());
+ } else if (mode == LET && op != Token::INIT) {
+ // Perform an initialization check for let-declared variables.
+ // E.g. let x = (x = 20); is not allowed.
+ BuildThrowIfHole(variable->name());
+ } else {
+ DCHECK(variable->is_this() && mode == CONST && op == Token::INIT);
+ // Perform an initialization check for 'this'. 'this' variable is the
+ // only variable able to trigger bind operations outside the TDZ
+ // via 'super' calls.
+ BuildThrowIfNotHole(variable->name());
+ }
+}
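Sketch of the assignment-time checks above, matching the example in the comment: re-assigning a const throws a TypeError, and assigning to a let binding that is still the hole throws a ReferenceError.

    const c = 1;
    // c = 2;            // TypeError: Assignment to constant variable
    function f() {
      let x = (x = 20);  // ReferenceError: 'x' assigned while still in its TDZ
    }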
void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
+ Token::Value op,
FeedbackVectorSlot slot) {
+ VariableMode mode = variable->mode();
+ RegisterAllocationScope assignment_register_scope(this);
+ BytecodeLabel end_label;
+ bool hole_check_required =
+ (mode == CONST_LEGACY) || (mode == LET && op != Token::INIT) ||
+ (mode == CONST && op != Token::INIT) ||
+ (mode == CONST && op == Token::INIT && variable->is_this());
switch (variable->location()) {
+ case VariableLocation::PARAMETER:
case VariableLocation::LOCAL: {
- // TODO(rmcilroy): support const mode initialization.
- Register destination(variable->index());
- builder()->StoreAccumulatorInRegister(destination);
- break;
- }
- case VariableLocation::PARAMETER: {
- // The parameter indices are shifted by 1 (receiver is variable
- // index -1 but is parameter index 0 in BytecodeArrayBuilder).
- Register destination(builder()->Parameter(variable->index() + 1));
+ Register destination;
+ if (VariableLocation::PARAMETER == variable->location()) {
+ destination = Register(builder()->Parameter(variable->index() + 1));
+ } else {
+ destination = Register(variable->index());
+ }
+
+ if (hole_check_required) {
+ // Load destination to check for hole.
+ Register value_temp = register_allocator()->NewRegister();
+ builder()
+ ->StoreAccumulatorInRegister(value_temp)
+ .LoadAccumulatorWithRegister(destination);
+
+ if (mode == CONST_LEGACY && op == Token::INIT) {
+ // Perform an initialization check for legacy constants.
+ builder()
+ ->JumpIfNotHole(&end_label)
+ .MoveRegister(value_temp, destination)
+ .Bind(&end_label)
+ .LoadAccumulatorWithRegister(value_temp);
+ // Break here because the value should not be stored unconditionally.
+ break;
+ } else if (mode == CONST_LEGACY && op != Token::INIT) {
+ DCHECK(!is_strict(language_mode()));
+ // Ensure accumulator is in the correct state.
+ builder()->LoadAccumulatorWithRegister(value_temp);
+ // Break here; non-initializing assignments to legacy constants are
+ // ignored.
+ break;
+ } else {
+ BuildHoleCheckForVariableAssignment(variable, op);
+ builder()->LoadAccumulatorWithRegister(value_temp);
+ }
+ }
+
builder()->StoreAccumulatorInRegister(destination);
break;
}
@@ -1290,10 +1973,10 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
break;
}
case VariableLocation::CONTEXT: {
- // TODO(rmcilroy): support const mode initialization.
int depth = execution_context()->ContextChainDepth(variable->scope());
ContextScope* context = execution_context()->Previous(depth);
Register context_reg;
+
if (context) {
context_reg = context->reg();
} else {
@@ -1315,11 +1998,63 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
}
builder()->LoadAccumulatorWithRegister(value_temp);
}
+
+ if (hole_check_required) {
+ // Load destination to check for hole.
+ Register value_temp = register_allocator()->NewRegister();
+ builder()
+ ->StoreAccumulatorInRegister(value_temp)
+ .LoadContextSlot(context_reg, variable->index());
+
+ if (mode == CONST_LEGACY && op == Token::INIT) {
+ // Perform an initialization check for legacy constants.
+ builder()
+ ->JumpIfNotHole(&end_label)
+ .LoadAccumulatorWithRegister(value_temp)
+ .StoreContextSlot(context_reg, variable->index())
+ .Bind(&end_label);
+ builder()->LoadAccumulatorWithRegister(value_temp);
+ // Break here because the value should not be stored unconditionally.
+ // The above code performs the store conditionally.
+ break;
+ } else if (mode == CONST_LEGACY && op != Token::INIT) {
+ DCHECK(!is_strict(language_mode()));
+ // Ensure accumulator is in the correct state.
+ builder()->LoadAccumulatorWithRegister(value_temp);
+ // Break here; non-initializing assignments to legacy constants are
+ // ignored.
+ break;
+ } else {
+ BuildHoleCheckForVariableAssignment(variable, op);
+ builder()->LoadAccumulatorWithRegister(value_temp);
+ }
+ }
+
builder()->StoreContextSlot(context_reg, variable->index());
break;
}
case VariableLocation::LOOKUP: {
- builder()->StoreLookupSlot(variable->name(), language_mode());
+ if (mode == CONST_LEGACY && op == Token::INIT) {
+ register_allocator()->PrepareForConsecutiveAllocations(3);
+ Register value = register_allocator()->NextConsecutiveRegister();
+ Register context = register_allocator()->NextConsecutiveRegister();
+ Register name = register_allocator()->NextConsecutiveRegister();
+
+ // The InitializeLegacyConstLookupSlot runtime call returns the 'value'
+ // passed to it, so the accumulator will have its original contents when
+ // the runtime call returns.
+ builder()
+ ->StoreAccumulatorInRegister(value)
+ .MoveRegister(execution_context()->reg(), context)
+ .LoadLiteral(variable->name())
+ .StoreAccumulatorInRegister(name)
+ .CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, value, 3);
+ } else if (mode == CONST_LEGACY && op != Token::INIT) {
+ // Non-initializing assignments to legacy constants are ignored.
+ DCHECK(!is_strict(language_mode()));
+ } else {
+ builder()->StoreLookupSlot(variable->name(), language_mode());
+ }
break;
}
}
@@ -1327,8 +2062,8 @@ void BytecodeGenerator::VisitVariableAssignment(Variable* variable,
void BytecodeGenerator::VisitAssignment(Assignment* expr) {
- DCHECK(expr->target()->IsValidReferenceExpression());
- Register object, key;
+ DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
+ Register object, key, home_object, value;
Handle<String> name;
// Left-hand side can only be a property, a global or a variable slot.
@@ -1358,9 +2093,35 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
}
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNIMPLEMENTED();
+ case NAMED_SUPER_PROPERTY: {
+ register_allocator()->PrepareForConsecutiveAllocations(4);
+ object = register_allocator()->NextConsecutiveRegister();
+ home_object = register_allocator()->NextConsecutiveRegister();
+ key = register_allocator()->NextConsecutiveRegister();
+ value = register_allocator()->NextConsecutiveRegister();
+ SuperPropertyReference* super_property =
+ property->obj()->AsSuperPropertyReference();
+ VisitForRegisterValue(super_property->this_var(), object);
+ VisitForRegisterValue(super_property->home_object(), home_object);
+ builder()
+ ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+ .StoreAccumulatorInRegister(key);
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ register_allocator()->PrepareForConsecutiveAllocations(4);
+ object = register_allocator()->NextConsecutiveRegister();
+ home_object = register_allocator()->NextConsecutiveRegister();
+ key = register_allocator()->NextConsecutiveRegister();
+ value = register_allocator()->NextConsecutiveRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ SuperPropertyReference* super_property =
+ property->obj()->AsSuperPropertyReference();
+ VisitForRegisterValue(super_property->this_var(), object);
+ VisitForRegisterValue(super_property->home_object(), home_object);
+ VisitForRegisterValue(property->key(), key);
+ break;
+ }
}
// Evaluate the value and potentially handle compound assignments by loading
@@ -1378,8 +2139,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
old_value = register_allocator()->NewRegister();
builder()
- ->LoadNamedProperty(object, name, feedback_index(slot),
- language_mode())
+ ->LoadNamedProperty(object, name, feedback_index(slot))
.StoreAccumulatorInRegister(old_value);
break;
}
@@ -1389,18 +2149,25 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
old_value = register_allocator()->NewRegister();
builder()
- ->LoadKeyedProperty(object, feedback_index(slot), language_mode())
+ ->LoadKeyedProperty(object, feedback_index(slot))
.StoreAccumulatorInRegister(old_value);
break;
}
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNIMPLEMENTED();
+ case NAMED_SUPER_PROPERTY: {
+ old_value = register_allocator()->NewRegister();
+ BuildNamedSuperPropertyLoad(object, home_object, key);
+ builder()->StoreAccumulatorInRegister(old_value);
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ old_value = register_allocator()->NewRegister();
+ BuildKeyedSuperPropertyLoad(object, home_object, key);
+ builder()->StoreAccumulatorInRegister(old_value);
break;
+ }
}
VisitForAccumulatorValue(expr->value());
- builder()->BinaryOperation(expr->binary_op(), old_value,
- language_mode_strength());
+ builder()->BinaryOperation(expr->binary_op(), old_value);
} else {
VisitForAccumulatorValue(expr->value());
}
@@ -1412,7 +2179,7 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
// TODO(oth): The VisitVariableAssignment() call is hard to reason about.
// Is the value in the accumulator safe? Yes, but scary.
Variable* variable = expr->target()->AsVariableProxy()->var();
- VisitVariableAssignment(variable, slot);
+ VisitVariableAssignment(variable, expr->op(), slot);
break;
}
case NAMED_PROPERTY:
@@ -1423,9 +2190,16 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
builder()->StoreKeyedProperty(object, key, feedback_index(slot),
language_mode());
break;
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNIMPLEMENTED();
+ case NAMED_SUPER_PROPERTY: {
+ builder()->StoreAccumulatorInRegister(value);
+ BuildNamedSuperPropertyStore(object, home_object, key, value);
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ builder()->StoreAccumulatorInRegister(value);
+ BuildKeyedSuperPropertyStore(object, home_object, key, value);
+ break;
+ }
}
execution_result()->SetResultInAccumulator();
}
@@ -1437,6 +2211,11 @@ void BytecodeGenerator::VisitYield(Yield* expr) { UNIMPLEMENTED(); }
void BytecodeGenerator::VisitThrow(Throw* expr) {
VisitForAccumulatorValue(expr->exception());
builder()->Throw();
+ // Throw statements are modeled as expressions instead of statements. They
+ // are converted from assignment statements by the Rewriter::ReWrite pass. An
+ // assignment statement expects a value in the accumulator. This is a hack to
+ // avoid DCHECK failures asserting that the accumulator has been set.
+ execution_result()->SetResultInAccumulator();
}
@@ -1449,34 +2228,84 @@ void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
case NAMED_PROPERTY: {
builder()->LoadNamedProperty(obj,
expr->key()->AsLiteral()->AsPropertyName(),
- feedback_index(slot), language_mode());
+ feedback_index(slot));
break;
}
case KEYED_PROPERTY: {
VisitForAccumulatorValue(expr->key());
- builder()->LoadKeyedProperty(obj, feedback_index(slot), language_mode());
+ builder()->LoadKeyedProperty(obj, feedback_index(slot));
break;
}
case NAMED_SUPER_PROPERTY:
+ VisitNamedSuperPropertyLoad(expr, Register::invalid_value());
+ break;
case KEYED_SUPER_PROPERTY:
- UNIMPLEMENTED();
+ VisitKeyedSuperPropertyLoad(expr, Register::invalid_value());
+ break;
}
execution_result()->SetResultInAccumulator();
}
-
void BytecodeGenerator::VisitPropertyLoadForAccumulator(Register obj,
Property* expr) {
AccumulatorResultScope result_scope(this);
VisitPropertyLoad(obj, expr);
}
+void BytecodeGenerator::VisitNamedSuperPropertyLoad(Property* property,
+ Register opt_receiver_out) {
+ RegisterAllocationScope register_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(3);
+
+ Register receiver, home_object, name;
+ receiver = register_allocator()->NextConsecutiveRegister();
+ home_object = register_allocator()->NextConsecutiveRegister();
+ name = register_allocator()->NextConsecutiveRegister();
+ SuperPropertyReference* super_property =
+ property->obj()->AsSuperPropertyReference();
+ VisitForRegisterValue(super_property->this_var(), receiver);
+ VisitForRegisterValue(super_property->home_object(), home_object);
+ builder()
+ ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+ .StoreAccumulatorInRegister(name);
+ BuildNamedSuperPropertyLoad(receiver, home_object, name);
-void BytecodeGenerator::VisitProperty(Property* expr) {
- Register obj = VisitForRegisterValue(expr->obj());
- VisitPropertyLoad(obj, expr);
+ if (opt_receiver_out.is_valid()) {
+ builder()->MoveRegister(receiver, opt_receiver_out);
+ }
}
+void BytecodeGenerator::VisitKeyedSuperPropertyLoad(Property* property,
+ Register opt_receiver_out) {
+ RegisterAllocationScope register_scope(this);
+ register_allocator()->PrepareForConsecutiveAllocations(3);
+
+ Register receiver, home_object, key;
+ receiver = register_allocator()->NextConsecutiveRegister();
+ home_object = register_allocator()->NextConsecutiveRegister();
+ key = register_allocator()->NextConsecutiveRegister();
+ SuperPropertyReference* super_property =
+ property->obj()->AsSuperPropertyReference();
+ VisitForRegisterValue(super_property->this_var(), receiver);
+ VisitForRegisterValue(super_property->home_object(), home_object);
+ VisitForRegisterValue(property->key(), key);
+ BuildKeyedSuperPropertyLoad(receiver, home_object, key);
+
+ if (opt_receiver_out.is_valid()) {
+ builder()->MoveRegister(receiver, opt_receiver_out);
+ }
+}
+
+void BytecodeGenerator::VisitProperty(Property* expr) {
+ LhsKind property_kind = Property::GetAssignType(expr);
+ if (property_kind != NAMED_SUPER_PROPERTY &&
+ property_kind != KEYED_SUPER_PROPERTY) {
+ Register obj = VisitForRegisterValue(expr->obj());
+ VisitPropertyLoad(obj, expr);
+ } else {
+ VisitPropertyLoad(Register::invalid_value(), expr);
+ }
+}
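JavaScript that exercises the named and keyed super-property loads above; both pass (receiver, home object, name/key) in consecutive registers to the runtime:

    class Base { m() { return 1; } }
    class Derived extends Base {
      named() { return super.m(); }     // named super property load
      keyed(k) { return super[k](); }   // keyed super property load
    }
    new Derived().keyed("m");  // 1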
Register BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args) {
if (args->length() == 0) {
@@ -1510,18 +2339,21 @@ Register BytecodeGenerator::VisitArguments(ZoneList<Expression*>* args) {
return first_arg;
}
-
void BytecodeGenerator::VisitCall(Call* expr) {
Expression* callee_expr = expr->expression();
Call::CallType call_type = expr->GetCallType(isolate());
+ if (call_type == Call::SUPER_CALL) {
+ return VisitCallSuper(expr);
+ }
+
// Prepare the callee and the receiver for the function call. This depends on
// the semantics of the underlying call type.
// The receiver and arguments need to be allocated consecutively for
// Call(). We allocate the callee and receiver consecutively for calls to
- // kLoadLookupSlot. Future optimizations could avoid this there are no
- // arguments or the receiver and arguments are already consecutive.
+ // %LoadLookupSlotForCall. Future optimizations could avoid this when there
+ // are no arguments or the receiver and arguments are already consecutive.
ZoneList<Expression*>* args = expr->arguments();
register_allocator()->PrepareForConsecutiveAllocations(args->length() + 2);
Register callee = register_allocator()->NextConsecutiveRegister();
@@ -1551,18 +2383,16 @@ void BytecodeGenerator::VisitCall(Call* expr) {
case Call::POSSIBLY_EVAL_CALL: {
if (callee_expr->AsVariableProxy()->var()->IsLookupSlot()) {
RegisterAllocationScope inner_register_scope(this);
- register_allocator()->PrepareForConsecutiveAllocations(2);
- Register context = register_allocator()->NextConsecutiveRegister();
- Register name = register_allocator()->NextConsecutiveRegister();
+ Register name = register_allocator()->NewRegister();
- // Call LoadLookupSlot to get the callee and receiver.
+ // Call %LoadLookupSlotForCall to get the callee and receiver.
DCHECK(Register::AreContiguous(callee, receiver));
Variable* variable = callee_expr->AsVariableProxy()->var();
builder()
- ->MoveRegister(Register::function_context(), context)
- .LoadLiteral(variable->name())
+ ->LoadLiteral(variable->name())
.StoreAccumulatorInRegister(name)
- .CallRuntimeForPair(Runtime::kLoadLookupSlot, context, 2, callee);
+ .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, name, 1,
+ callee);
break;
}
// Fall through.
@@ -1574,10 +2404,21 @@ void BytecodeGenerator::VisitCall(Call* expr) {
builder()->StoreAccumulatorInRegister(callee);
break;
}
- case Call::NAMED_SUPER_PROPERTY_CALL:
- case Call::KEYED_SUPER_PROPERTY_CALL:
+ case Call::NAMED_SUPER_PROPERTY_CALL: {
+ Property* property = callee_expr->AsProperty();
+ VisitNamedSuperPropertyLoad(property, receiver);
+ builder()->StoreAccumulatorInRegister(callee);
+ break;
+ }
+ case Call::KEYED_SUPER_PROPERTY_CALL: {
+ Property* property = callee_expr->AsProperty();
+ VisitKeyedSuperPropertyLoad(property, receiver);
+ builder()->StoreAccumulatorInRegister(callee);
+ break;
+ }
case Call::SUPER_CALL:
- UNIMPLEMENTED();
+ UNREACHABLE();
+ break;
}
// Evaluate all arguments to the function call and store in sequential
@@ -1615,12 +2456,39 @@ void BytecodeGenerator::VisitCall(Call* expr) {
.StoreAccumulatorInRegister(callee);
}
- // TODO(rmcilroy): Use CallIC to allow call type feedback.
- builder()->Call(callee, receiver, args->length(),
- feedback_index(expr->CallFeedbackICSlot()));
+ builder()->SetExpressionPosition(expr);
+ builder()->Call(callee, receiver, 1 + args->length(),
+ feedback_index(expr->CallFeedbackICSlot()),
+ expr->tail_call_mode());
execution_result()->SetResultInAccumulator();
}
+void BytecodeGenerator::VisitCallSuper(Call* expr) {
+ RegisterAllocationScope register_scope(this);
+ SuperCallReference* super = expr->expression()->AsSuperCallReference();
+
+  // Prepare the constructor for the super call.
+ Register this_function = register_allocator()->NewRegister();
+ VisitForAccumulatorValue(super->this_function_var());
+ builder()
+ ->StoreAccumulatorInRegister(this_function)
+ .CallRuntime(Runtime::kInlineGetSuperConstructor, this_function, 1);
+
+ Register constructor = this_function; // Re-use dead this_function register.
+ builder()->StoreAccumulatorInRegister(constructor);
+
+ ZoneList<Expression*>* args = expr->arguments();
+ Register first_arg = VisitArguments(args);
+
+ // The new target is loaded into the accumulator from the
+ // {new.target} variable.
+ VisitForAccumulatorValue(super->new_target_var());
+
+ // Call construct.
+ builder()->SetExpressionPosition(expr);
+ builder()->New(constructor, first_arg, args->length());
+ execution_result()->SetResultInAccumulator();
+}
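+// In summary, super(args) is compiled by fetching the super constructor from
+// this_function, evaluating the arguments into consecutive registers, loading
+// new.target into the accumulator, and finally emitting New, which takes the
+// new target from the accumulator.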
void BytecodeGenerator::VisitCallNew(CallNew* expr) {
Register constructor = register_allocator()->NewRegister();
@@ -1629,27 +2497,31 @@ void BytecodeGenerator::VisitCallNew(CallNew* expr) {
ZoneList<Expression*>* args = expr->arguments();
Register first_arg = VisitArguments(args);
- builder()->New(constructor, first_arg, args->length());
+
+ builder()->SetExpressionPosition(expr);
+  // The accumulator holds the new target, which is the same as the
+  // constructor for CallNew.
+ builder()
+ ->LoadAccumulatorWithRegister(constructor)
+ .New(constructor, first_arg, args->length());
execution_result()->SetResultInAccumulator();
}
void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
- Register receiver;
if (expr->is_jsruntime()) {
// Allocate a register for the receiver and load it with undefined.
- register_allocator()->PrepareForConsecutiveAllocations(args->length() + 1);
- receiver = register_allocator()->NextConsecutiveRegister();
+ register_allocator()->PrepareForConsecutiveAllocations(1 + args->length());
+ Register receiver = register_allocator()->NextConsecutiveRegister();
builder()->LoadUndefined().StoreAccumulatorInRegister(receiver);
- }
- // Evaluate all arguments to the runtime call.
- Register first_arg = VisitArguments(args);
-
- if (expr->is_jsruntime()) {
- DCHECK(args->length() == 0 || first_arg.index() == receiver.index() + 1);
- builder()->CallJSRuntime(expr->context_index(), receiver, args->length());
+ Register first_arg = VisitArguments(args);
+ CHECK(args->length() == 0 || first_arg.index() == receiver.index() + 1);
+ builder()->CallJSRuntime(expr->context_index(), receiver,
+ 1 + args->length());
} else {
+ // Evaluate all arguments to the runtime call.
+ Register first_arg = VisitArguments(args);
Runtime::FunctionId function_id = expr->function()->function_id;
builder()->CallRuntime(function_id, first_arg, args->length());
}
@@ -1755,7 +2627,11 @@ void BytecodeGenerator::VisitDelete(UnaryOperation* expr) {
break;
}
case VariableLocation::LOOKUP: {
- builder()->LoadLiteral(variable->name()).DeleteLookupSlot();
+ Register name_reg = register_allocator()->NewRegister();
+ builder()
+ ->LoadLiteral(variable->name())
+ .StoreAccumulatorInRegister(name_reg)
+ .CallRuntime(Runtime::kDeleteLookupSlot, name_reg, 1);
break;
}
default:
@@ -1781,7 +2657,7 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
bool is_postfix = expr->is_postfix();
// Evaluate LHS expression and get old value.
- Register obj, key, old_value;
+ Register object, home_object, key, old_value, value;
Handle<String> name;
switch (assign_type) {
case VARIABLE: {
@@ -1792,26 +2668,53 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
}
case NAMED_PROPERTY: {
FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
- obj = VisitForRegisterValue(property->obj());
+ object = VisitForRegisterValue(property->obj());
name = property->key()->AsLiteral()->AsPropertyName();
- builder()->LoadNamedProperty(obj, name, feedback_index(slot),
- language_mode());
+ builder()->LoadNamedProperty(object, name, feedback_index(slot));
break;
}
case KEYED_PROPERTY: {
FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
- obj = VisitForRegisterValue(property->obj());
+ object = VisitForRegisterValue(property->obj());
      // Use VisitForAccumulatorValue here since we need the key in the
      // accumulator for the LoadKeyedProperty.
key = register_allocator()->NewRegister();
VisitForAccumulatorValue(property->key());
builder()->StoreAccumulatorInRegister(key).LoadKeyedProperty(
- obj, feedback_index(slot), language_mode());
+ object, feedback_index(slot));
+ break;
+ }
+ case NAMED_SUPER_PROPERTY: {
+ register_allocator()->PrepareForConsecutiveAllocations(4);
+ object = register_allocator()->NextConsecutiveRegister();
+ home_object = register_allocator()->NextConsecutiveRegister();
+ key = register_allocator()->NextConsecutiveRegister();
+ value = register_allocator()->NextConsecutiveRegister();
+ SuperPropertyReference* super_property =
+ property->obj()->AsSuperPropertyReference();
+ VisitForRegisterValue(super_property->this_var(), object);
+ VisitForRegisterValue(super_property->home_object(), home_object);
+ builder()
+ ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+ .StoreAccumulatorInRegister(key);
+ BuildNamedSuperPropertyLoad(object, home_object, key);
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ register_allocator()->PrepareForConsecutiveAllocations(4);
+ object = register_allocator()->NextConsecutiveRegister();
+ home_object = register_allocator()->NextConsecutiveRegister();
+ key = register_allocator()->NextConsecutiveRegister();
+ value = register_allocator()->NextConsecutiveRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ SuperPropertyReference* super_property =
+ property->obj()->AsSuperPropertyReference();
+ VisitForRegisterValue(super_property->this_var(), object);
+ VisitForRegisterValue(super_property->home_object(), home_object);
+ VisitForRegisterValue(property->key(), key);
+ BuildKeyedSuperPropertyLoad(object, home_object, key);
break;
}
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNIMPLEMENTED();
}
// Convert old value into a number.
@@ -1826,29 +2729,36 @@ void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
}
// Perform +1/-1 operation.
- builder()->CountOperation(expr->binary_op(), language_mode_strength());
+ builder()->CountOperation(expr->binary_op());
// Store the value.
FeedbackVectorSlot feedback_slot = expr->CountSlot();
switch (assign_type) {
case VARIABLE: {
Variable* variable = expr->expression()->AsVariableProxy()->var();
- VisitVariableAssignment(variable, feedback_slot);
+ VisitVariableAssignment(variable, expr->op(), feedback_slot);
break;
}
case NAMED_PROPERTY: {
- builder()->StoreNamedProperty(obj, name, feedback_index(feedback_slot),
+ builder()->StoreNamedProperty(object, name, feedback_index(feedback_slot),
language_mode());
break;
}
case KEYED_PROPERTY: {
- builder()->StoreKeyedProperty(obj, key, feedback_index(feedback_slot),
+ builder()->StoreKeyedProperty(object, key, feedback_index(feedback_slot),
language_mode());
break;
}
- case NAMED_SUPER_PROPERTY:
- case KEYED_SUPER_PROPERTY:
- UNIMPLEMENTED();
+ case NAMED_SUPER_PROPERTY: {
+ builder()->StoreAccumulatorInRegister(value);
+ BuildNamedSuperPropertyStore(object, home_object, key, value);
+ break;
+ }
+ case KEYED_SUPER_PROPERTY: {
+ builder()->StoreAccumulatorInRegister(value);
+ BuildKeyedSuperPropertyStore(object, home_object, key, value);
+ break;
+ }
}
// Restore old value for postfix expressions.
@@ -1881,7 +2791,7 @@ void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
- builder()->CompareOperation(expr->op(), lhs, language_mode_strength());
+ builder()->CompareOperation(expr->op(), lhs);
execution_result()->SetResultInAccumulator();
}
@@ -1889,7 +2799,7 @@ void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
- builder()->BinaryOperation(expr->op(), lhs, language_mode_strength());
+ builder()->BinaryOperation(expr->op(), lhs);
execution_result()->SetResultInAccumulator();
}
@@ -1908,13 +2818,15 @@ void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* expr) {
- UNIMPLEMENTED();
+ // Handled by VisitCall().
+ UNREACHABLE();
}
void BytecodeGenerator::VisitSuperPropertyReference(
SuperPropertyReference* expr) {
- UNIMPLEMENTED();
+ builder()->CallRuntime(Runtime::kThrowUnsupportedSuperError, Register(0), 0);
+ execution_result()->SetResultInAccumulator();
}
@@ -1962,8 +2874,7 @@ void BytecodeGenerator::VisitLogicalAndExpression(BinaryOperation* binop) {
}
-void BytecodeGenerator::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* expr) {
+void BytecodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
Visit(expr->expression());
}
@@ -2040,6 +2951,40 @@ void BytecodeGenerator::VisitNewLocalBlockContext(Scope* scope) {
execution_result()->SetResultInAccumulator();
}
+void BytecodeGenerator::VisitNewLocalWithContext() {
+ AccumulatorResultScope accumulator_execution_result(this);
+
+ register_allocator()->PrepareForConsecutiveAllocations(2);
+ Register extension_object = register_allocator()->NextConsecutiveRegister();
+ Register closure = register_allocator()->NextConsecutiveRegister();
+
+ builder()->StoreAccumulatorInRegister(extension_object);
+ VisitFunctionClosureForContext();
+ builder()->StoreAccumulatorInRegister(closure).CallRuntime(
+ Runtime::kPushWithContext, extension_object, 2);
+ execution_result()->SetResultInAccumulator();
+}
+
+void BytecodeGenerator::VisitNewLocalCatchContext(Variable* variable) {
+ AccumulatorResultScope accumulator_execution_result(this);
+ DCHECK(variable->IsContextSlot());
+
+  // Allocate a new local catch context.
+ register_allocator()->PrepareForConsecutiveAllocations(3);
+ Register name = register_allocator()->NextConsecutiveRegister();
+ Register exception = register_allocator()->NextConsecutiveRegister();
+ Register closure = register_allocator()->NextConsecutiveRegister();
+
+ builder()
+ ->StoreAccumulatorInRegister(exception)
+ .LoadLiteral(variable->name())
+ .StoreAccumulatorInRegister(name);
+ VisitFunctionClosureForContext();
+ builder()->StoreAccumulatorInRegister(closure).CallRuntime(
+ Runtime::kPushCatchContext, name, 3);
+ execution_result()->SetResultInAccumulator();
+}
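+// Both context helpers above depend on the runtime functions consuming
+// consecutive registers: %PushWithContext(extension_object, closure) and
+// %PushCatchContext(name, exception, closure).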
+
void BytecodeGenerator::VisitObjectLiteralAccessor(
Register home_object, ObjectLiteralProperty* property, Register value_out) {
@@ -2053,14 +2998,17 @@ void BytecodeGenerator::VisitObjectLiteralAccessor(
}
}
-
void BytecodeGenerator::VisitSetHomeObject(Register value, Register home_object,
ObjectLiteralProperty* property,
int slot_number) {
Expression* expr = property->value();
- if (!FunctionLiteral::NeedsHomeObject(expr)) return;
-
- UNIMPLEMENTED();
+ if (FunctionLiteral::NeedsHomeObject(expr)) {
+ Handle<Name> name = isolate()->factory()->home_object_symbol();
+ FeedbackVectorSlot slot = property->GetSlot(slot_number);
+ builder()
+ ->LoadAccumulatorWithRegister(home_object)
+ .StoreNamedProperty(value, name, feedback_index(slot), language_mode());
+ }
}
@@ -2076,19 +3024,26 @@ void BytecodeGenerator::VisitArgumentsObject(Variable* variable) {
? CreateArgumentsType::kUnmappedArguments
: CreateArgumentsType::kMappedArguments;
builder()->CreateArguments(type);
- VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+ VisitVariableAssignment(variable, Token::ASSIGN,
+ FeedbackVectorSlot::Invalid());
}
+void BytecodeGenerator::VisitRestArgumentsArray(Variable* rest) {
+ if (rest == nullptr) return;
+
+  // Allocate and initialize a new rest parameter and assign it to the
+  // {rest} variable.
+ builder()->CreateArguments(CreateArgumentsType::kRestParameter);
+ DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
+ VisitVariableAssignment(rest, Token::ASSIGN, FeedbackVectorSlot::Invalid());
+}
void BytecodeGenerator::VisitThisFunctionVariable(Variable* variable) {
if (variable == nullptr) return;
- // TODO(rmcilroy): Remove once we have tests which exercise this code path.
- UNIMPLEMENTED();
-
// Store the closure we were called with in the given variable.
builder()->LoadAccumulatorWithRegister(Register::function_closure());
- VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+ VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
}
@@ -2097,7 +3052,7 @@ void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
// Store the new target we were called with in the given variable.
builder()->LoadAccumulatorWithRegister(Register::new_target());
- VisitVariableAssignment(variable, FeedbackVectorSlot::Invalid());
+ VisitVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid());
}
@@ -2114,6 +3069,12 @@ void BytecodeGenerator::VisitFunctionClosureForContext() {
Context::NATIVE_CONTEXT_INDEX)
.StoreAccumulatorInRegister(native_context)
.LoadContextSlot(native_context, Context::CLOSURE_INDEX);
+ } else if (closure_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ builder()->LoadContextSlot(execution_context()->reg(),
+ Context::CLOSURE_INDEX);
} else {
DCHECK(closure_scope->is_function_scope());
builder()->LoadAccumulatorWithRegister(Register::function_closure());
@@ -2128,6 +3089,13 @@ void BytecodeGenerator::VisitForAccumulatorValue(Expression* expr) {
Visit(expr);
}
+void BytecodeGenerator::VisitForAccumulatorValueOrTheHole(Expression* expr) {
+ if (expr == nullptr) {
+ builder()->LoadTheHole();
+ } else {
+ VisitForAccumulatorValue(expr);
+ }
+}
// Visits the expression |expr| and discards the result.
void BytecodeGenerator::VisitForEffect(Expression* expr) {
@@ -2144,22 +3112,19 @@ Register BytecodeGenerator::VisitForRegisterValue(Expression* expr) {
return register_scope.ResultRegister();
}
+// Visits the expression |expr| and stores the expression result in
+// |destination|.
+void BytecodeGenerator::VisitForRegisterValue(Expression* expr,
+ Register destination) {
+ AccumulatorResultScope register_scope(this);
+ Visit(expr);
+ builder()->StoreAccumulatorInRegister(destination);
+}
-Register BytecodeGenerator::NextContextRegister() const {
- if (execution_context() == nullptr) {
- // Return the incoming function context for the outermost execution context.
- return Register::function_context();
- }
- Register previous = execution_context()->reg();
- if (previous == Register::function_context()) {
- // If the previous context was the incoming function context, then the next
- // context register is the first local context register.
- return builder_.first_context_register();
- } else {
- // Otherwise use the next local context register.
- DCHECK_LT(previous.index(), builder_.last_context_register().index());
- return Register(previous.index() + 1);
- }
+void BytecodeGenerator::VisitInScope(Statement* stmt, Scope* scope) {
+ ContextScope context_scope(this, scope);
+ DCHECK(scope->declarations()->is_empty());
+ Visit(stmt);
}
@@ -2168,11 +3133,6 @@ LanguageMode BytecodeGenerator::language_mode() const {
}
-Strength BytecodeGenerator::language_mode_strength() const {
- return strength(language_mode());
-}
-
-
int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
return info()->feedback_vector()->GetIndex(slot);
}
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 8bda7be301..4ef173890c 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -13,6 +13,8 @@ namespace v8 {
namespace internal {
namespace interpreter {
+class LoopBuilder;
+
class BytecodeGenerator final : public AstVisitor {
public:
BytecodeGenerator(Isolate* isolate, Zone* zone);
@@ -32,6 +34,9 @@ class BytecodeGenerator final : public AstVisitor {
class ControlScope;
class ControlScopeForBreakable;
class ControlScopeForIteration;
+ class ControlScopeForTopLevel;
+ class ControlScopeForTryCatch;
+ class ControlScopeForTryFinally;
class ExpressionResultScope;
class EffectResultScope;
class AccumulatorResultScope;
@@ -39,7 +44,6 @@ class BytecodeGenerator final : public AstVisitor {
class RegisterAllocationScope;
void MakeBytecodeBody();
- Register NextContextRegister() const;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -61,6 +65,20 @@ class BytecodeGenerator final : public AstVisitor {
// Helper visitors which perform common operations.
Register VisitArguments(ZoneList<Expression*>* arguments);
+ // Visit a keyed super property load. The optional
+ // |opt_receiver_out| register will have the receiver stored to it
+ // if it's a valid register. The loaded value is placed in the
+ // accumulator.
+ void VisitKeyedSuperPropertyLoad(Property* property,
+ Register opt_receiver_out);
+
+ // Visit a named super property load. The optional
+ // |opt_receiver_out| register will have the receiver stored to it
+ // if it's a valid register. The loaded value is placed in the
+ // accumulator.
+ void VisitNamedSuperPropertyLoad(Property* property,
+ Register opt_receiver_out);
+
void VisitPropertyLoad(Register obj, Property* expr);
void VisitPropertyLoadForAccumulator(Register obj, Property* expr);
@@ -72,14 +90,41 @@ class BytecodeGenerator final : public AstVisitor {
MUST_USE_RESULT Register
VisitVariableLoadForRegisterValue(Variable* variable, FeedbackVectorSlot slot,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
- void VisitVariableAssignment(Variable* variable, FeedbackVectorSlot slot);
+ void VisitVariableAssignment(Variable* variable, Token::Value op,
+ FeedbackVectorSlot slot);
+
+ void BuildNamedSuperPropertyStore(Register receiver, Register home_object,
+ Register name, Register value);
+ void BuildKeyedSuperPropertyStore(Register receiver, Register home_object,
+ Register key, Register value);
+ void BuildNamedSuperPropertyLoad(Register receiver, Register home_object,
+ Register name);
+ void BuildKeyedSuperPropertyLoad(Register receiver, Register home_object,
+ Register key);
+
+ void BuildThrowIfHole(Handle<String> name);
+ void BuildThrowIfNotHole(Handle<String> name);
+ void BuildThrowReassignConstant(Handle<String> name);
+ void BuildThrowReferenceError(Handle<String> name);
+ void BuildHoleCheckForVariableLoad(VariableMode mode, Handle<String> name);
+ void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
void VisitArgumentsObject(Variable* variable);
+ void VisitRestArgumentsArray(Variable* rest);
+ void VisitCallSuper(Call* call);
+ void VisitClassLiteralContents(ClassLiteral* expr);
+ void VisitClassLiteralForRuntimeDefinition(ClassLiteral* expr);
+ void VisitClassLiteralProperties(ClassLiteral* expr, Register literal,
+ Register prototype);
+ void VisitClassLiteralStaticPrototypeWithComputedName(Register name);
void VisitThisFunctionVariable(Variable* variable);
void VisitNewTargetVariable(Variable* variable);
void VisitNewLocalFunctionContext();
void VisitBuildLocalActivationContext();
+ void VisitBlockDeclarationsAndStatements(Block* stmt);
void VisitNewLocalBlockContext(Scope* scope);
+ void VisitNewLocalCatchContext(Variable* variable);
+ void VisitNewLocalWithContext();
void VisitFunctionClosureForContext();
void VisitSetHomeObject(Register value, Register home_object,
ObjectLiteralProperty* property, int slot_number = 0);
@@ -88,17 +133,34 @@ class BytecodeGenerator final : public AstVisitor {
Register value_out);
void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
+ // Visit the body of a loop iteration.
+ void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop_builder);
+
+  // Visit a statement and switch scopes; the context is in the accumulator.
+ void VisitInScope(Statement* stmt, Scope* scope);
+
// Visitors for obtaining expression result in the accumulator, in a
// register, or just getting the effect.
- void VisitForAccumulatorValue(Expression* expression);
- MUST_USE_RESULT Register VisitForRegisterValue(Expression* expression);
- void VisitForEffect(Expression* node);
+ void VisitForAccumulatorValue(Expression* expr);
+ void VisitForAccumulatorValueOrTheHole(Expression* expr);
+ MUST_USE_RESULT Register VisitForRegisterValue(Expression* expr);
+ void VisitForRegisterValue(Expression* expr, Register destination);
+ void VisitForEffect(Expression* expr);
  // Methods for tracking and remapping registers.
void RecordStoreToRegister(Register reg);
Register LoadFromAliasedRegister(Register reg);
- inline BytecodeArrayBuilder* builder() { return &builder_; }
+ // Methods for tracking try-block nesting.
+ bool IsInsideTryCatch() const { return try_catch_nesting_level_ > 0; }
+ bool IsInsideTryFinally() const { return try_finally_nesting_level_ > 0; }
+
+ // Initialize an array of temporary registers with consecutive registers.
+ template <size_t N>
+ void InitializeWithConsecutiveRegisters(Register (&registers)[N]);
+
+ inline void set_builder(BytecodeArrayBuilder* builder) { builder_ = builder; }
+ inline BytecodeArrayBuilder* builder() const { return builder_; }
inline Isolate* isolate() const { return isolate_; }
inline Zone* zone() const { return zone_; }
@@ -130,12 +192,11 @@ class BytecodeGenerator final : public AstVisitor {
ZoneVector<Handle<Object>>* globals() { return &globals_; }
inline LanguageMode language_mode() const;
- Strength language_mode_strength() const;
int feedback_index(FeedbackVectorSlot slot) const;
Isolate* isolate_;
Zone* zone_;
- BytecodeArrayBuilder builder_;
+ BytecodeArrayBuilder* builder_;
CompilationInfo* info_;
Scope* scope_;
ZoneVector<Handle<Object>> globals_;
@@ -143,6 +204,8 @@ class BytecodeGenerator final : public AstVisitor {
ContextScope* execution_context_;
ExpressionResultScope* execution_result_;
RegisterAllocationScope* register_allocator_;
+ int try_catch_nesting_level_;
+ int try_finally_nesting_level_;
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.cc b/deps/v8/src/interpreter/bytecode-register-allocator.cc
index 4efb612db5..0a617c048a 100644
--- a/deps/v8/src/interpreter/bytecode-register-allocator.cc
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.cc
@@ -10,17 +10,173 @@ namespace v8 {
namespace internal {
namespace interpreter {
+TemporaryRegisterAllocator::TemporaryRegisterAllocator(Zone* zone,
+ int allocation_base)
+ : free_temporaries_(zone),
+ allocation_base_(allocation_base),
+ allocation_count_(0) {}
+
+Register TemporaryRegisterAllocator::first_temporary_register() const {
+ DCHECK(allocation_count() > 0);
+ return Register(allocation_base());
+}
+
+Register TemporaryRegisterAllocator::last_temporary_register() const {
+ DCHECK(allocation_count() > 0);
+ return Register(allocation_base() + allocation_count() - 1);
+}
+
+int TemporaryRegisterAllocator::AllocateTemporaryRegister() {
+ allocation_count_ += 1;
+ return allocation_base() + allocation_count() - 1;
+}
+
+int TemporaryRegisterAllocator::BorrowTemporaryRegister() {
+ if (free_temporaries_.empty()) {
+ return AllocateTemporaryRegister();
+ } else {
+ auto pos = free_temporaries_.begin();
+ int retval = *pos;
+ free_temporaries_.erase(pos);
+ return retval;
+ }
+}
+
+int TemporaryRegisterAllocator::BorrowTemporaryRegisterNotInRange(
+ int start_index, int end_index) {
+ if (free_temporaries_.empty()) {
+ int next_allocation = allocation_base() + allocation_count();
+ while (next_allocation >= start_index && next_allocation <= end_index) {
+ free_temporaries_.insert(AllocateTemporaryRegister());
+ next_allocation += 1;
+ }
+ return AllocateTemporaryRegister();
+ }
+
+ ZoneSet<int>::iterator index = free_temporaries_.lower_bound(start_index);
+ if (index == free_temporaries_.begin()) {
+ // If start_index is the first free register, check for a register
+ // greater than end_index.
+ index = free_temporaries_.upper_bound(end_index);
+ if (index == free_temporaries_.end()) {
+ return AllocateTemporaryRegister();
+ }
+ } else {
+    // Otherwise use the largest free register below start_index.
+ index--;
+ }
+
+ int retval = *index;
+ free_temporaries_.erase(index);
+ return retval;
+}
+
+int TemporaryRegisterAllocator::PrepareForConsecutiveTemporaryRegisters(
+ size_t count) {
+ if (count == 0) {
+ return -1;
+ }
+
+ // TODO(oth): replace use of set<> here for free_temporaries with a
+ // more efficient structure. And/or partition into two searches -
+ // one before the translation window and one after.
+
+ // A run will require at least |count| free temporaries.
+ while (free_temporaries_.size() < count) {
+ free_temporaries_.insert(AllocateTemporaryRegister());
+ }
+
+ // Search within existing temporaries for a run.
+ auto start = free_temporaries_.begin();
+ size_t run_length = 0;
+ for (auto run_end = start; run_end != free_temporaries_.end(); run_end++) {
+ int expected = *start + static_cast<int>(run_length);
+ if (*run_end != expected) {
+ start = run_end;
+ run_length = 0;
+ }
+ Register reg_start(*start);
+ Register reg_expected(expected);
+ if (RegisterTranslator::DistanceToTranslationWindow(reg_start) > 0 &&
+ RegisterTranslator::DistanceToTranslationWindow(reg_expected) <= 0) {
+ // Run straddles the lower edge of the translation window. Registers
+ // after the start of this boundary are displaced by the register
+      // translator to provide a hole for translation. Runs on either side
+      // of the boundary are fine.
+ start = run_end;
+ run_length = 0;
+ }
+ if (++run_length == count) {
+ return *start;
+ }
+ }
+
+ // Continue run if possible across existing last temporary.
+ if (allocation_count_ > 0 && (start == free_temporaries_.end() ||
+ *start + static_cast<int>(run_length) !=
+ last_temporary_register().index() + 1)) {
+ run_length = 0;
+ }
+
+ // Pad temporaries if extended run would cross translation boundary.
+ Register reg_first(*start);
+ Register reg_last(*start + static_cast<int>(count) - 1);
+ DCHECK_GT(RegisterTranslator::DistanceToTranslationWindow(reg_first),
+ RegisterTranslator::DistanceToTranslationWindow(reg_last));
+ while (RegisterTranslator::DistanceToTranslationWindow(reg_first) > 0 &&
+ RegisterTranslator::DistanceToTranslationWindow(reg_last) <= 0) {
+ auto pos_insert_pair =
+ free_temporaries_.insert(AllocateTemporaryRegister());
+ reg_first = Register(*pos_insert_pair.first);
+ reg_last = Register(reg_first.index() + static_cast<int>(count) - 1);
+ run_length = 0;
+ }
+
+ // Ensure enough registers for run.
+ while (run_length++ < count) {
+ free_temporaries_.insert(AllocateTemporaryRegister());
+ }
+
+ int run_start =
+ last_temporary_register().index() - static_cast<int>(count) + 1;
+ DCHECK(RegisterTranslator::DistanceToTranslationWindow(Register(run_start)) <=
+ 0 ||
+ RegisterTranslator::DistanceToTranslationWindow(
+ Register(run_start + static_cast<int>(count) - 1)) > 0);
+ return run_start;
+}
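+// In short, PrepareForConsecutiveTemporaryRegisters() looks for a run of
+// |count| free registers that does not straddle the register translation
+// window, growing the temporary range as needed, and returns the run's
+// start index.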
+
+bool TemporaryRegisterAllocator::RegisterIsLive(Register reg) const {
+ if (allocation_count_ > 0) {
+ DCHECK(reg >= first_temporary_register() &&
+ reg <= last_temporary_register());
+ return free_temporaries_.find(reg.index()) == free_temporaries_.end();
+ } else {
+ return false;
+ }
+}
+
+void TemporaryRegisterAllocator::BorrowConsecutiveTemporaryRegister(
+ int reg_index) {
+ DCHECK(free_temporaries_.find(reg_index) != free_temporaries_.end());
+ free_temporaries_.erase(reg_index);
+}
+
+void TemporaryRegisterAllocator::ReturnTemporaryRegister(int reg_index) {
+ DCHECK(free_temporaries_.find(reg_index) == free_temporaries_.end());
+ free_temporaries_.insert(reg_index);
+}
+
BytecodeRegisterAllocator::BytecodeRegisterAllocator(
- BytecodeArrayBuilder* builder)
- : builder_(builder),
- allocated_(builder->zone()),
+ Zone* zone, TemporaryRegisterAllocator* allocator)
+ : base_allocator_(allocator),
+ allocated_(zone),
next_consecutive_register_(-1),
next_consecutive_count_(-1) {}
-
BytecodeRegisterAllocator::~BytecodeRegisterAllocator() {
for (auto i = allocated_.rbegin(); i != allocated_.rend(); i++) {
- builder_->ReturnTemporaryRegister(*i);
+ base_allocator()->ReturnTemporaryRegister(*i);
}
allocated_.clear();
}
@@ -29,9 +185,9 @@ BytecodeRegisterAllocator::~BytecodeRegisterAllocator() {
Register BytecodeRegisterAllocator::NewRegister() {
int allocated = -1;
if (next_consecutive_count_ <= 0) {
- allocated = builder_->BorrowTemporaryRegister();
+ allocated = base_allocator()->BorrowTemporaryRegister();
} else {
- allocated = builder_->BorrowTemporaryRegisterNotInRange(
+ allocated = base_allocator()->BorrowTemporaryRegisterNotInRange(
next_consecutive_register_,
next_consecutive_register_ + next_consecutive_count_ - 1);
}
@@ -52,7 +208,7 @@ bool BytecodeRegisterAllocator::RegisterIsAllocatedInThisScope(
void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) {
if (static_cast<int>(count) > next_consecutive_count_) {
next_consecutive_register_ =
- builder_->PrepareForConsecutiveTemporaryRegisters(count);
+ base_allocator()->PrepareForConsecutiveTemporaryRegisters(count);
next_consecutive_count_ = static_cast<int>(count);
}
}
@@ -61,7 +217,8 @@ void BytecodeRegisterAllocator::PrepareForConsecutiveAllocations(size_t count) {
Register BytecodeRegisterAllocator::NextConsecutiveRegister() {
DCHECK_GE(next_consecutive_register_, 0);
DCHECK_GT(next_consecutive_count_, 0);
- builder_->BorrowConsecutiveTemporaryRegister(next_consecutive_register_);
+ base_allocator()->BorrowConsecutiveTemporaryRegister(
+ next_consecutive_register_);
allocated_.push_back(next_consecutive_register_);
next_consecutive_count_--;
return Register(next_consecutive_register_++);
diff --git a/deps/v8/src/interpreter/bytecode-register-allocator.h b/deps/v8/src/interpreter/bytecode-register-allocator.h
index 74ab3a4272..696a3b174a 100644
--- a/deps/v8/src/interpreter/bytecode-register-allocator.h
+++ b/deps/v8/src/interpreter/bytecode-register-allocator.h
@@ -5,6 +5,7 @@
#ifndef V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
#define V8_INTERPRETER_BYTECODE_REGISTER_ALLOCATOR_H_
+#include "src/interpreter/bytecodes.h"
#include "src/zone-containers.h"
namespace v8 {
@@ -14,26 +15,82 @@ namespace interpreter {
class BytecodeArrayBuilder;
class Register;
+class TemporaryRegisterAllocator final {
+ public:
+ TemporaryRegisterAllocator(Zone* zone, int start_index);
+
+ // Borrow a temporary register.
+ int BorrowTemporaryRegister();
+
+  // Borrow a temporary register from outside the register range
+  // |start_index| to |end_index|.
+ int BorrowTemporaryRegisterNotInRange(int start_index, int end_index);
+
+ // Return a temporary register when no longer used.
+ void ReturnTemporaryRegister(int reg_index);
+
+ // Ensure a run of consecutive registers is available. Each register in
+ // the range should be borrowed with BorrowConsecutiveTemporaryRegister().
+ // Returns the start index of the run.
+ int PrepareForConsecutiveTemporaryRegisters(size_t count);
+
+ // Borrow a register from a range prepared with
+ // PrepareForConsecutiveTemporaryRegisters().
+ void BorrowConsecutiveTemporaryRegister(int reg_index);
+
+ // Returns true if |reg| is a temporary register and is currently
+ // borrowed.
+ bool RegisterIsLive(Register reg) const;
+
+ // Returns the first register in the range of temporary registers.
+ Register first_temporary_register() const;
+
+ // Returns the last register in the range of temporary registers.
+ Register last_temporary_register() const;
+
+ // Returns the start index of temporary register allocations.
+ int allocation_base() const { return allocation_base_; }
+
+ // Returns the number of temporary register allocations made.
+ int allocation_count() const { return allocation_count_; }
+
+ private:
+ // Allocate a temporary register.
+ int AllocateTemporaryRegister();
+
+ ZoneSet<int> free_temporaries_;
+ int allocation_base_;
+ int allocation_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(TemporaryRegisterAllocator);
+};
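+
+// A typical borrow/return cycle looks like this (sketch, assuming a suitable
+// zone and start index):
+//
+//   TemporaryRegisterAllocator temporaries(zone, 0);
+//   int index = temporaries.BorrowTemporaryRegister();
+//   DCHECK(temporaries.RegisterIsLive(Register(index)));
+//   temporaries.ReturnTemporaryRegister(index);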
+
// A class that allows the instantiator to allocate temporary registers that
// are cleaned up when the scope is closed.
-class BytecodeRegisterAllocator {
+class BytecodeRegisterAllocator final {
public:
- explicit BytecodeRegisterAllocator(BytecodeArrayBuilder* builder);
+ explicit BytecodeRegisterAllocator(Zone* zone,
+ TemporaryRegisterAllocator* allocator);
~BytecodeRegisterAllocator();
Register NewRegister();
+ // Ensure |count| consecutive allocations are available.
void PrepareForConsecutiveAllocations(size_t count);
+
+ // Get the next consecutive allocation after calling
+ // PrepareForConsecutiveAllocations.
Register NextConsecutiveRegister();
+ // Returns true if |reg| is allocated in this allocator.
bool RegisterIsAllocatedInThisScope(Register reg) const;
+ // Returns true if unused consecutive allocations remain.
bool HasConsecutiveAllocations() const { return next_consecutive_count_ > 0; }
private:
- void* operator new(size_t size);
- void operator delete(void* p);
+ TemporaryRegisterAllocator* base_allocator() const { return base_allocator_; }
- BytecodeArrayBuilder* builder_;
+ TemporaryRegisterAllocator* base_allocator_;
ZoneVector<int> allocated_;
int next_consecutive_register_;
int next_consecutive_count_;
diff --git a/deps/v8/src/interpreter/bytecode-traits.h b/deps/v8/src/interpreter/bytecode-traits.h
index fd778d7c92..b8136051bb 100644
--- a/deps/v8/src/interpreter/bytecode-traits.h
+++ b/deps/v8/src/interpreter/bytecode-traits.h
@@ -28,6 +28,18 @@ struct OperandTraits {};
OPERAND_TYPE_LIST(DECLARE_OPERAND_SIZE)
#undef DECLARE_OPERAND_SIZE
+template <OperandType>
+struct RegisterOperandTraits {
+ static const int kIsRegisterOperand = 0;
+};
+
+#define DECLARE_REGISTER_OPERAND(Name, _) \
+ template <> \
+ struct RegisterOperandTraits<OperandType::k##Name> { \
+ static const int kIsRegisterOperand = 1; \
+ };
+REGISTER_OPERAND_TYPE_LIST(DECLARE_REGISTER_OPERAND)
+#undef DECLARE_REGISTER_OPERAND
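+
+// RegisterOperandTraits<type>::kIsRegisterOperand is 1 exactly for the
+// operand types in REGISTER_OPERAND_TYPE_LIST. The BytecodeTraits
+// specializations below sum and shift these flags to produce
+// kRegisterOperandCount and kRegisterOperandBitmap.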
template <OperandType... Args>
struct BytecodeTraits {};
@@ -63,13 +75,28 @@ struct BytecodeTraits<operand_0, operand_1, operand_2, operand_3,
return kOperandOffsets[i];
}
+ template <OperandType ot>
+ static inline bool HasAnyOperandsOfType() {
+ return operand_0 == ot || operand_1 == ot || operand_2 == ot ||
+ operand_3 == ot;
+ }
+
static const int kOperandCount = 4;
+ static const int kRegisterOperandCount =
+ RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+ RegisterOperandTraits<operand_1>::kIsRegisterOperand +
+ RegisterOperandTraits<operand_2>::kIsRegisterOperand +
+ RegisterOperandTraits<operand_3>::kIsRegisterOperand;
+ static const int kRegisterOperandBitmap =
+ RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+ (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
+ (RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2) +
+ (RegisterOperandTraits<operand_3>::kIsRegisterOperand << 3);
static const int kSize =
1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
OperandTraits<operand_2>::kSize + OperandTraits<operand_3>::kSize;
};
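+// For example, the Call bytecode has operand types (Reg8, Reg8, RegCount8,
+// Idx8); only the first two are register operands, so kRegisterOperandCount
+// is 2 and kRegisterOperandBitmap is 0x3.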
-
template <OperandType operand_0, OperandType operand_1, OperandType operand_2>
struct BytecodeTraits<operand_0, operand_1, operand_2, OPERAND_TERM> {
static inline OperandType GetOperandType(int i) {
@@ -96,7 +123,20 @@ struct BytecodeTraits<operand_0, operand_1, operand_2, OPERAND_TERM> {
return kOperandOffsets[i];
}
+ template <OperandType ot>
+ static inline bool HasAnyOperandsOfType() {
+ return operand_0 == ot || operand_1 == ot || operand_2 == ot;
+ }
+
static const int kOperandCount = 3;
+ static const int kRegisterOperandCount =
+ RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+ RegisterOperandTraits<operand_1>::kIsRegisterOperand +
+ RegisterOperandTraits<operand_2>::kIsRegisterOperand;
+ static const int kRegisterOperandBitmap =
+ RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+ (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1) +
+ (RegisterOperandTraits<operand_2>::kIsRegisterOperand << 2);
static const int kSize =
1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize +
OperandTraits<operand_2>::kSize;
@@ -126,7 +166,18 @@ struct BytecodeTraits<operand_0, operand_1, OPERAND_TERM> {
return kOperandOffsets[i];
}
+ template <OperandType ot>
+ static inline bool HasAnyOperandsOfType() {
+ return operand_0 == ot || operand_1 == ot;
+ }
+
static const int kOperandCount = 2;
+ static const int kRegisterOperandCount =
+ RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+ RegisterOperandTraits<operand_1>::kIsRegisterOperand;
+ static const int kRegisterOperandBitmap =
+ RegisterOperandTraits<operand_0>::kIsRegisterOperand +
+ (RegisterOperandTraits<operand_1>::kIsRegisterOperand << 1);
static const int kSize =
1 + OperandTraits<operand_0>::kSize + OperandTraits<operand_1>::kSize;
};
@@ -148,7 +199,16 @@ struct BytecodeTraits<operand_0, OPERAND_TERM> {
return 1;
}
+ template <OperandType ot>
+ static inline bool HasAnyOperandsOfType() {
+ return operand_0 == ot;
+ }
+
static const int kOperandCount = 1;
+ static const int kRegisterOperandCount =
+ RegisterOperandTraits<operand_0>::kIsRegisterOperand;
+ static const int kRegisterOperandBitmap =
+ RegisterOperandTraits<operand_0>::kIsRegisterOperand;
static const int kSize = 1 + OperandTraits<operand_0>::kSize;
};
@@ -169,7 +229,14 @@ struct BytecodeTraits<OperandType::kNone, OPERAND_TERM> {
return 1;
}
+ template <OperandType ot>
+ static inline bool HasAnyOperandsOfType() {
+ return false;
+ }
+
static const int kOperandCount = 0;
+ static const int kRegisterOperandCount = 0;
+ static const int kRegisterOperandBitmap = 0;
static const int kSize = 1 + OperandTraits<OperandType::kNone>::kSize;
};
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index 2d4406cc1b..c3b17c7b10 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -57,6 +57,7 @@ const char* Bytecodes::OperandSizeToString(OperandSize operand_size) {
// static
uint8_t Bytecodes::ToByte(Bytecode bytecode) {
+ DCHECK(bytecode <= Bytecode::kLast);
return static_cast<uint8_t>(bytecode);
}
@@ -70,6 +71,21 @@ Bytecode Bytecodes::FromByte(uint8_t value) {
// static
+Bytecode Bytecodes::GetDebugBreak(Bytecode bytecode) {
+ switch (Size(bytecode)) {
+#define CASE(Name, ...) \
+ case BytecodeTraits<__VA_ARGS__, OPERAND_TERM>::kSize: \
+ return Bytecode::k##Name;
+ DEBUG_BREAK_BYTECODE_LIST(CASE)
+#undef CASE
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return static_cast<Bytecode>(-1);
+}
+
+// static
int Bytecodes::Size(Bytecode bytecode) {
DCHECK(bytecode <= Bytecode::kLast);
switch (bytecode) {
@@ -100,6 +116,21 @@ int Bytecodes::NumberOfOperands(Bytecode bytecode) {
// static
+int Bytecodes::NumberOfRegisterOperands(Bytecode bytecode) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ typedef BytecodeTraits<__VA_ARGS__, OPERAND_TERM> Name##Trait; \
+ return Name##Trait::kRegisterOperandCount;
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+  return 0;
+}
+
+// static
OperandType Bytecodes::GetOperandType(Bytecode bytecode, int i) {
DCHECK(bytecode <= Bytecode::kLast);
switch (bytecode) {
@@ -130,6 +161,21 @@ OperandSize Bytecodes::GetOperandSize(Bytecode bytecode, int i) {
// static
+int Bytecodes::GetRegisterOperandBitmap(Bytecode bytecode) {
+ DCHECK(bytecode <= Bytecode::kLast);
+ switch (bytecode) {
+#define CASE(Name, ...) \
+ case Bytecode::k##Name: \
+ typedef BytecodeTraits<__VA_ARGS__, OPERAND_TERM> Name##Trait; \
+ return Name##Trait::kRegisterOperandBitmap;
+ BYTECODE_LIST(CASE)
+#undef CASE
+ }
+ UNREACHABLE();
+  return 0;
+}
+
+// static
int Bytecodes::GetOperandOffset(Bytecode bytecode, int i) {
DCHECK(bytecode <= Bytecode::kLast);
switch (bytecode) {
@@ -164,6 +210,7 @@ bool Bytecodes::IsConditionalJumpImmediate(Bytecode bytecode) {
bytecode == Bytecode::kJumpIfFalse ||
bytecode == Bytecode::kJumpIfToBooleanTrue ||
bytecode == Bytecode::kJumpIfToBooleanFalse ||
+ bytecode == Bytecode::kJumpIfNotHole ||
bytecode == Bytecode::kJumpIfNull ||
bytecode == Bytecode::kJumpIfUndefined;
}
@@ -175,6 +222,7 @@ bool Bytecodes::IsConditionalJumpConstant(Bytecode bytecode) {
bytecode == Bytecode::kJumpIfFalseConstant ||
bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
+ bytecode == Bytecode::kJumpIfNotHoleConstant ||
bytecode == Bytecode::kJumpIfNullConstant ||
bytecode == Bytecode::kJumpIfUndefinedConstant;
}
@@ -186,6 +234,7 @@ bool Bytecodes::IsConditionalJumpConstantWide(Bytecode bytecode) {
bytecode == Bytecode::kJumpIfFalseConstantWide ||
bytecode == Bytecode::kJumpIfToBooleanTrueConstantWide ||
bytecode == Bytecode::kJumpIfToBooleanFalseConstantWide ||
+ bytecode == Bytecode::kJumpIfNotHoleConstantWide ||
bytecode == Bytecode::kJumpIfNullConstantWide ||
bytecode == Bytecode::kJumpIfUndefinedConstantWide;
}
@@ -227,10 +276,122 @@ bool Bytecodes::IsJump(Bytecode bytecode) {
// static
+bool Bytecodes::IsCallOrNew(Bytecode bytecode) {
+ return bytecode == Bytecode::kCall || bytecode == Bytecode::kTailCall ||
+ bytecode == Bytecode::kNew || bytecode == Bytecode::kCallWide ||
+ bytecode == Bytecode::kTailCallWide || bytecode == Bytecode::kNewWide;
+}
+
+// static
+bool Bytecodes::IsDebugBreak(Bytecode bytecode) {
+ switch (bytecode) {
+#define CASE(Name, ...) case Bytecode::k##Name:
+ DEBUG_BREAK_BYTECODE_LIST(CASE);
+#undef CASE
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+// static
bool Bytecodes::IsJumpOrReturn(Bytecode bytecode) {
return bytecode == Bytecode::kReturn || IsJump(bytecode);
}
+// static
+bool Bytecodes::IsIndexOperandType(OperandType operand_type) {
+ return operand_type == OperandType::kIdx8 ||
+ operand_type == OperandType::kIdx16;
+}
+
+// static
+bool Bytecodes::IsImmediateOperandType(OperandType operand_type) {
+ return operand_type == OperandType::kImm8;
+}
+
+// static
+bool Bytecodes::IsRegisterCountOperandType(OperandType operand_type) {
+ return (operand_type == OperandType::kRegCount8 ||
+ operand_type == OperandType::kRegCount16);
+}
+
+// static
+bool Bytecodes::IsMaybeRegisterOperandType(OperandType operand_type) {
+ return (operand_type == OperandType::kMaybeReg8 ||
+ operand_type == OperandType::kMaybeReg16);
+}
+
+// static
+bool Bytecodes::IsRegisterOperandType(OperandType operand_type) {
+ switch (operand_type) {
+#define CASE(Name, _) \
+ case OperandType::k##Name: \
+ return true;
+ REGISTER_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+#define CASE(Name, _) \
+ case OperandType::k##Name: \
+ break;
+ NON_REGISTER_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+ }
+ return false;
+}
+
+// static
+bool Bytecodes::IsRegisterInputOperandType(OperandType operand_type) {
+ switch (operand_type) {
+#define CASE(Name, _) \
+ case OperandType::k##Name: \
+ return true;
+ REGISTER_INPUT_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+#define CASE(Name, _) \
+ case OperandType::k##Name: \
+ break;
+ NON_REGISTER_OPERAND_TYPE_LIST(CASE)
+ REGISTER_OUTPUT_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+ }
+ return false;
+}
+
+// static
+bool Bytecodes::IsRegisterOutputOperandType(OperandType operand_type) {
+ switch (operand_type) {
+#define CASE(Name, _) \
+ case OperandType::k##Name: \
+ return true;
+ REGISTER_OUTPUT_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+#define CASE(Name, _) \
+ case OperandType::k##Name: \
+ break;
+ NON_REGISTER_OPERAND_TYPE_LIST(CASE)
+ REGISTER_INPUT_OPERAND_TYPE_LIST(CASE)
+#undef CASE
+ }
+ return false;
+}
+
+namespace {
+static Register DecodeRegister(const uint8_t* operand_start,
+ OperandType operand_type) {
+ switch (Bytecodes::SizeOfOperand(operand_type)) {
+ case OperandSize::kByte:
+ return Register::FromOperand(*operand_start);
+ case OperandSize::kShort:
+ return Register::FromWideOperand(ReadUnalignedUInt16(operand_start));
+ case OperandSize::kNone: {
+ UNREACHABLE();
+ }
+ }
+ return Register();
+}
+} // namespace
+
// static
std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
@@ -251,16 +412,20 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
os << bytecode << " ";
+  // A debug break keeps the operand bytes of the instruction it replaced, so
+  // there is nothing further to decode here.
+ if (IsDebugBreak(bytecode)) return os;
+
int number_of_operands = NumberOfOperands(bytecode);
+ int range = 0;
for (int i = 0; i < number_of_operands; i++) {
OperandType op_type = GetOperandType(bytecode, i);
const uint8_t* operand_start =
&bytecode_start[GetOperandOffset(bytecode, i)];
switch (op_type) {
- case interpreter::OperandType::kCount8:
+ case interpreter::OperandType::kRegCount8:
os << "#" << static_cast<unsigned int>(*operand_start);
break;
- case interpreter::OperandType::kCount16:
+ case interpreter::OperandType::kRegCount16:
os << '#' << ReadUnalignedUInt16(operand_start);
break;
case interpreter::OperandType::kIdx8:
@@ -272,48 +437,28 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
case interpreter::OperandType::kImm8:
os << "#" << static_cast<int>(static_cast<int8_t>(*operand_start));
break;
+ case interpreter::OperandType::kMaybeReg8:
+ case interpreter::OperandType::kMaybeReg16:
case interpreter::OperandType::kReg8:
- case interpreter::OperandType::kMaybeReg8: {
- Register reg = Register::FromOperand(*operand_start);
- if (reg.is_function_context()) {
- os << "<context>";
- } else if (reg.is_function_closure()) {
- os << "<closure>";
- } else if (reg.is_new_target()) {
- os << "<new.target>";
- } else if (reg.is_parameter()) {
- int parameter_index = reg.ToParameterIndex(parameter_count);
- if (parameter_index == 0) {
- os << "<this>";
- } else {
- os << "a" << parameter_index - 1;
- }
- } else {
- os << "r" << reg.index();
- }
- break;
- }
- case interpreter::OperandType::kRegPair8: {
- Register reg = Register::FromOperand(*operand_start);
- if (reg.is_parameter()) {
- int parameter_index = reg.ToParameterIndex(parameter_count);
- DCHECK_NE(parameter_index, 0);
- os << "a" << parameter_index - 1 << "-" << parameter_index;
- } else {
- os << "r" << reg.index() << "-" << reg.index() + 1;
- }
+ case interpreter::OperandType::kReg16:
+ case interpreter::OperandType::kRegOut8:
+ case interpreter::OperandType::kRegOut16: {
+ Register reg = DecodeRegister(operand_start, op_type);
+ os << reg.ToString(parameter_count);
break;
}
- case interpreter::OperandType::kReg16: {
- Register reg =
- Register::FromWideOperand(ReadUnalignedUInt16(operand_start));
- if (reg.is_parameter()) {
- int parameter_index = reg.ToParameterIndex(parameter_count);
- DCHECK_NE(parameter_index, 0);
- os << "a" << parameter_index - 1;
- } else {
- os << "r" << reg.index();
- }
+ case interpreter::OperandType::kRegOutTriple8:
+ case interpreter::OperandType::kRegOutTriple16:
+ range += 1;
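+      // Fall through.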
+ case interpreter::OperandType::kRegOutPair8:
+ case interpreter::OperandType::kRegOutPair16:
+ case interpreter::OperandType::kRegPair8:
+ case interpreter::OperandType::kRegPair16: {
+ range += 1;
+ Register first_reg = DecodeRegister(operand_start, op_type);
+ Register last_reg = Register(first_reg.index() + range);
+ os << first_reg.ToString(parameter_count) << "-"
+ << last_reg.ToString(parameter_count);
break;
}
case interpreter::OperandType::kNone:
@@ -327,7 +472,6 @@ std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
return os;
}
-
std::ostream& operator<<(std::ostream& os, const Bytecode& bytecode) {
return os << Bytecodes::ToString(bytecode);
}
@@ -342,22 +486,33 @@ std::ostream& operator<<(std::ostream& os, const OperandSize& operand_size) {
return os << Bytecodes::OperandSizeToString(operand_size);
}
-
static const int kLastParamRegisterIndex =
-InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
static const int kFunctionClosureRegisterIndex =
-InterpreterFrameConstants::kFunctionFromRegisterPointer / kPointerSize;
-static const int kFunctionContextRegisterIndex =
+static const int kCurrentContextRegisterIndex =
-InterpreterFrameConstants::kContextFromRegisterPointer / kPointerSize;
static const int kNewTargetRegisterIndex =
-InterpreterFrameConstants::kNewTargetFromRegisterPointer / kPointerSize;
+// The register space is a signed 16-bit space. Register operands
+// occupy the range above 0. Parameter indices are biased with the
+// negative value kLastParamRegisterIndex for ease of access in the
+// interpreter.
+static const int kMaxParameterIndex = kMaxInt16 + kLastParamRegisterIndex;
+static const int kMaxRegisterIndex = -kMinInt16;
+static const int kMaxReg8Index = -kMinInt8;
+static const int kMinReg8Index = -kMaxInt8;
+static const int kMaxReg16Index = -kMinInt16;
+static const int kMinReg16Index = -kMaxInt16;
-// Registers occupy range 0-127 in 8-bit value leaving 128 unused values.
-// Parameter indices are biased with the negative value kLastParamRegisterIndex
-// for ease of access in the interpreter.
-static const int kMaxParameterIndex = 128 + kLastParamRegisterIndex;
+bool Register::is_byte_operand() const {
+ return index_ >= kMinReg8Index && index_ <= kMaxReg8Index;
+}
+bool Register::is_short_operand() const {
+ return index_ >= kMinReg16Index && index_ <= kMaxReg16Index;
+}
Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_GE(index, 0);
@@ -365,7 +520,6 @@ Register Register::FromParameterIndex(int index, int parameter_count) {
DCHECK_LE(parameter_count, kMaxParameterIndex + 1);
int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
DCHECK_LT(register_index, 0);
- DCHECK_GE(register_index, kMinInt8);
return Register(register_index);
}
@@ -386,13 +540,13 @@ bool Register::is_function_closure() const {
}
-Register Register::function_context() {
- return Register(kFunctionContextRegisterIndex);
+Register Register::current_context() {
+ return Register(kCurrentContextRegisterIndex);
}
-bool Register::is_function_context() const {
- return index() == kFunctionContextRegisterIndex;
+bool Register::is_current_context() const {
+ return index() == kCurrentContextRegisterIndex;
}
@@ -403,13 +557,14 @@ bool Register::is_new_target() const {
return index() == kNewTargetRegisterIndex;
}
-
int Register::MaxParameterIndex() { return kMaxParameterIndex; }
+int Register::MaxRegisterIndex() { return kMaxRegisterIndex; }
+
+int Register::MaxRegisterIndexForByteOperand() { return kMaxReg8Index; }
uint8_t Register::ToOperand() const {
- DCHECK_GE(index_, kMinInt8);
- DCHECK_LE(index_, kMaxInt8);
+ DCHECK(is_byte_operand());
return static_cast<uint8_t>(-index_);
}
@@ -420,8 +575,7 @@ Register Register::FromOperand(uint8_t operand) {
uint16_t Register::ToWideOperand() const {
- DCHECK_GE(index_, kMinInt16);
- DCHECK_LE(index_, kMaxInt16);
+ DCHECK(is_short_operand());
return static_cast<uint16_t>(-index_);
}
@@ -431,6 +585,16 @@ Register Register::FromWideOperand(uint16_t operand) {
}
+uint32_t Register::ToRawOperand() const {
+ return static_cast<uint32_t>(-index_);
+}
+
+
+Register Register::FromRawOperand(uint32_t operand) {
+ return Register(-static_cast<int32_t>(operand));
+}
+
+
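+// All of the operand encodings above store the negated register index, so
+// the mapping round-trips: for example, Register(3).ToOperand() is 253 (-3
+// as a byte) and Register::FromOperand(253).index() is 3 again.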
bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
Register reg4, Register reg5) {
if (reg1.index() + 1 != reg2.index()) {
@@ -448,6 +612,29 @@ bool Register::AreContiguous(Register reg1, Register reg2, Register reg3,
return true;
}
+std::string Register::ToString(int parameter_count) {
+ if (is_current_context()) {
+ return std::string("<context>");
+ } else if (is_function_closure()) {
+ return std::string("<closure>");
+ } else if (is_new_target()) {
+ return std::string("<new.target>");
+ } else if (is_parameter()) {
+ int parameter_index = ToParameterIndex(parameter_count);
+ if (parameter_index == 0) {
+ return std::string("<this>");
+ } else {
+ std::ostringstream s;
+ s << "a" << parameter_index - 1;
+ return s.str();
+ }
+ } else {
+ std::ostringstream s;
+ s << "r" << index();
+ return s.str();
+ }
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index a9beb6c918..d4863b1662 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -15,24 +15,65 @@ namespace v8 {
namespace internal {
namespace interpreter {
+#define INVALID_OPERAND_TYPE_LIST(V) \
+ V(None, OperandSize::kNone)
+
+#define REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
+ /* Byte operands. */ \
+ V(MaybeReg8, OperandSize::kByte) \
+ V(Reg8, OperandSize::kByte) \
+ V(RegPair8, OperandSize::kByte) \
+ /* Short operands. */ \
+ V(MaybeReg16, OperandSize::kShort) \
+ V(Reg16, OperandSize::kShort) \
+ V(RegPair16, OperandSize::kShort)
+
+#define REGISTER_OUTPUT_OPERAND_TYPE_LIST(V) \
+ /* Byte operands. */ \
+ V(RegOut8, OperandSize::kByte) \
+ V(RegOutPair8, OperandSize::kByte) \
+ V(RegOutTriple8, OperandSize::kByte) \
+ /* Short operands. */ \
+ V(RegOut16, OperandSize::kShort) \
+ V(RegOutPair16, OperandSize::kShort) \
+ V(RegOutTriple16, OperandSize::kShort)
+
+#define SCALAR_OPERAND_TYPE_LIST(V) \
+ /* Byte operands. */ \
+ V(Idx8, OperandSize::kByte) \
+ V(Imm8, OperandSize::kByte) \
+ V(RegCount8, OperandSize::kByte) \
+ /* Short operands. */ \
+ V(Idx16, OperandSize::kShort) \
+ V(RegCount16, OperandSize::kShort)
+
+#define REGISTER_OPERAND_TYPE_LIST(V) \
+ REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
+ REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
+
+#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
+ INVALID_OPERAND_TYPE_LIST(V) \
+ SCALAR_OPERAND_TYPE_LIST(V)
+
// The list of operand types used by bytecodes.
-#define OPERAND_TYPE_LIST(V) \
- \
- /* None operand. */ \
- V(None, OperandSize::kNone) \
- \
- /* Byte operands. */ \
- V(Count8, OperandSize::kByte) \
- V(Imm8, OperandSize::kByte) \
- V(Idx8, OperandSize::kByte) \
- V(MaybeReg8, OperandSize::kByte) \
- V(Reg8, OperandSize::kByte) \
- V(RegPair8, OperandSize::kByte) \
- \
- /* Short operands. */ \
- V(Count16, OperandSize::kShort) \
- V(Idx16, OperandSize::kShort) \
- V(Reg16, OperandSize::kShort)
+#define OPERAND_TYPE_LIST(V) \
+ NON_REGISTER_OPERAND_TYPE_LIST(V) \
+ REGISTER_OPERAND_TYPE_LIST(V)
+
+// Define one debug break bytecode for each possible bytecode size.
+#define DEBUG_BREAK_BYTECODE_LIST(V) \
+ V(DebugBreak0, OperandType::kNone) \
+ V(DebugBreak1, OperandType::kReg8) \
+ V(DebugBreak2, OperandType::kReg16) \
+ V(DebugBreak3, OperandType::kReg16, OperandType::kReg8) \
+ V(DebugBreak4, OperandType::kReg16, OperandType::kReg16) \
+ V(DebugBreak5, OperandType::kReg16, OperandType::kReg16, OperandType::kReg8) \
+ V(DebugBreak6, OperandType::kReg16, OperandType::kReg16, \
+ OperandType::kReg16) \
+ V(DebugBreak7, OperandType::kReg16, OperandType::kReg16, \
+ OperandType::kReg16, OperandType::kReg8) \
+ V(DebugBreak8, OperandType::kReg16, OperandType::kReg16, \
+ OperandType::kReg16, OperandType::kReg16)
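+
+// Each debug break bytecode above has a distinct total size, from 1 byte
+// (DebugBreak0) up to 9 bytes (DebugBreak8), which is what lets
+// Bytecodes::GetDebugBreak() pick a replacement of exactly the same size as
+// the instruction being patched.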
// The list of bytecodes which are interpreted by the interpreter.
#define BYTECODE_LIST(V) \
@@ -49,14 +90,10 @@ namespace interpreter {
V(LdaConstantWide, OperandType::kIdx16) \
\
/* Globals */ \
- V(LdaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8) \
- V(LdaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8) \
- V(LdaGlobalInsideTypeofSloppy, OperandType::kIdx8, OperandType::kIdx8) \
- V(LdaGlobalInsideTypeofStrict, OperandType::kIdx8, OperandType::kIdx8) \
- V(LdaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16) \
- V(LdaGlobalStrictWide, OperandType::kIdx16, OperandType::kIdx16) \
- V(LdaGlobalInsideTypeofSloppyWide, OperandType::kIdx16, OperandType::kIdx16) \
- V(LdaGlobalInsideTypeofStrictWide, OperandType::kIdx16, OperandType::kIdx16) \
+ V(LdaGlobal, OperandType::kIdx8, OperandType::kIdx8) \
+ V(LdaGlobalInsideTypeof, OperandType::kIdx8, OperandType::kIdx8) \
+ V(LdaGlobalWide, OperandType::kIdx16, OperandType::kIdx16) \
+ V(LdaGlobalInsideTypeofWide, OperandType::kIdx16, OperandType::kIdx16) \
V(StaGlobalSloppy, OperandType::kIdx8, OperandType::kIdx8) \
V(StaGlobalStrict, OperandType::kIdx8, OperandType::kIdx8) \
V(StaGlobalSloppyWide, OperandType::kIdx16, OperandType::kIdx16) \
@@ -82,25 +119,17 @@ namespace interpreter {
\
/* Register-accumulator transfers */ \
V(Ldar, OperandType::kReg8) \
- V(Star, OperandType::kReg8) \
+ V(Star, OperandType::kRegOut8) \
\
/* Register-register transfers */ \
- V(Mov, OperandType::kReg8, OperandType::kReg8) \
- V(Exchange, OperandType::kReg8, OperandType::kReg16) \
- V(ExchangeWide, OperandType::kReg16, OperandType::kReg16) \
+ V(Mov, OperandType::kReg8, OperandType::kRegOut8) \
+ V(MovWide, OperandType::kReg16, OperandType::kRegOut16) \
\
/* LoadIC operations */ \
- V(LoadICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
- V(LoadICStrict, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
- V(KeyedLoadICSloppy, OperandType::kReg8, OperandType::kIdx8) \
- V(KeyedLoadICStrict, OperandType::kReg8, OperandType::kIdx8) \
- /* TODO(rmcilroy): Wide register operands too? */ \
- V(LoadICSloppyWide, OperandType::kReg8, OperandType::kIdx16, \
- OperandType::kIdx16) \
- V(LoadICStrictWide, OperandType::kReg8, OperandType::kIdx16, \
- OperandType::kIdx16) \
- V(KeyedLoadICSloppyWide, OperandType::kReg8, OperandType::kIdx16) \
- V(KeyedLoadICStrictWide, OperandType::kReg8, OperandType::kIdx16) \
+ V(LoadIC, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
+ V(KeyedLoadIC, OperandType::kReg8, OperandType::kIdx8) \
+ V(LoadICWide, OperandType::kReg8, OperandType::kIdx16, OperandType::kIdx16) \
+ V(KeyedLoadICWide, OperandType::kReg8, OperandType::kIdx16) \
\
/* StoreIC operations */ \
V(StoreICSloppy, OperandType::kReg8, OperandType::kIdx8, OperandType::kIdx8) \
@@ -109,7 +138,6 @@ namespace interpreter {
OperandType::kIdx8) \
V(KeyedStoreICStrict, OperandType::kReg8, OperandType::kReg8, \
OperandType::kIdx8) \
- /* TODO(rmcilroy): Wide register operands too? */ \
V(StoreICSloppyWide, OperandType::kReg8, OperandType::kIdx16, \
OperandType::kIdx16) \
V(StoreICStrictWide, OperandType::kReg8, OperandType::kIdx16, \
@@ -139,22 +167,33 @@ namespace interpreter {
V(TypeOf, OperandType::kNone) \
V(DeletePropertyStrict, OperandType::kReg8) \
V(DeletePropertySloppy, OperandType::kReg8) \
- V(DeleteLookupSlot, OperandType::kNone) \
\
/* Call operations */ \
- V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kCount8, \
+ V(Call, OperandType::kReg8, OperandType::kReg8, OperandType::kRegCount8, \
OperandType::kIdx8) \
- V(CallWide, OperandType::kReg8, OperandType::kReg8, OperandType::kCount16, \
- OperandType::kIdx16) \
+ V(CallWide, OperandType::kReg16, OperandType::kReg16, \
+ OperandType::kRegCount16, OperandType::kIdx16) \
+ V(TailCall, OperandType::kReg8, OperandType::kReg8, OperandType::kRegCount8, \
+ OperandType::kIdx8) \
+ V(TailCallWide, OperandType::kReg16, OperandType::kReg16, \
+ OperandType::kRegCount16, OperandType::kIdx16) \
V(CallRuntime, OperandType::kIdx16, OperandType::kMaybeReg8, \
- OperandType::kCount8) \
+ OperandType::kRegCount8) \
+ V(CallRuntimeWide, OperandType::kIdx16, OperandType::kMaybeReg16, \
+ OperandType::kRegCount8) \
V(CallRuntimeForPair, OperandType::kIdx16, OperandType::kMaybeReg8, \
- OperandType::kCount8, OperandType::kRegPair8) \
+ OperandType::kRegCount8, OperandType::kRegOutPair8) \
+ V(CallRuntimeForPairWide, OperandType::kIdx16, OperandType::kMaybeReg16, \
+ OperandType::kRegCount8, OperandType::kRegOutPair16) \
V(CallJSRuntime, OperandType::kIdx16, OperandType::kReg8, \
- OperandType::kCount8) \
+ OperandType::kRegCount8) \
+ V(CallJSRuntimeWide, OperandType::kIdx16, OperandType::kReg16, \
+ OperandType::kRegCount16) \
\
/* New operator */ \
- V(New, OperandType::kReg8, OperandType::kMaybeReg8, OperandType::kCount8) \
+ V(New, OperandType::kReg8, OperandType::kMaybeReg8, OperandType::kRegCount8) \
+ V(NewWide, OperandType::kReg16, OperandType::kMaybeReg16, \
+ OperandType::kRegCount16) \
\
/* Test Operators */ \
V(TestEqual, OperandType::kReg8) \
@@ -194,6 +233,7 @@ namespace interpreter {
/* Arguments allocation */ \
V(CreateMappedArguments, OperandType::kNone) \
V(CreateUnmappedArguments, OperandType::kNone) \
+ V(CreateRestParameter, OperandType::kNone) \
\
/* Control Flow */ \
V(Jump, OperandType::kImm8) \
@@ -217,18 +257,30 @@ namespace interpreter {
V(JumpIfUndefined, OperandType::kImm8) \
V(JumpIfUndefinedConstant, OperandType::kIdx8) \
V(JumpIfUndefinedConstantWide, OperandType::kIdx16) \
+ V(JumpIfNotHole, OperandType::kImm8) \
+ V(JumpIfNotHoleConstant, OperandType::kIdx8) \
+ V(JumpIfNotHoleConstantWide, OperandType::kIdx16) \
\
/* Complex flow control For..in */ \
- V(ForInPrepare, OperandType::kReg8, OperandType::kReg8, OperandType::kReg8) \
+ V(ForInPrepare, OperandType::kRegOutTriple8) \
+ V(ForInPrepareWide, OperandType::kRegOutTriple16) \
V(ForInDone, OperandType::kReg8, OperandType::kReg8) \
- V(ForInNext, OperandType::kReg8, OperandType::kReg8, OperandType::kReg8, \
- OperandType::kReg8) \
+ V(ForInNext, OperandType::kReg8, OperandType::kReg8, OperandType::kRegPair8) \
+ V(ForInNextWide, OperandType::kReg16, OperandType::kReg16, \
+ OperandType::kRegPair16) \
V(ForInStep, OperandType::kReg8) \
\
+ /* Perform a stack guard check */ \
+ V(StackCheck, OperandType::kNone) \
+ \
/* Non-local flow control */ \
V(Throw, OperandType::kNone) \
- V(Return, OperandType::kNone)
-
+ V(ReThrow, OperandType::kNone) \
+ V(Return, OperandType::kNone) \
+ \
+ /* Debugger */ \
+ V(Debugger, OperandType::kNone) \
+ DEBUG_BREAK_BYTECODE_LIST(V)
// Enumeration of the size classes of operand types used by bytecodes.
enum class OperandSize : uint8_t {
@@ -268,28 +320,30 @@ enum class Bytecode : uint8_t {
// in its stack-frame. Registers hold parameters, this, and expression values.
class Register {
public:
- Register() : index_(kIllegalIndex) {}
-
- explicit Register(int index) : index_(index) {}
+ explicit Register(int index = kInvalidIndex) : index_(index) {}
- int index() const {
- DCHECK(index_ != kIllegalIndex);
- return index_;
- }
+ int index() const { return index_; }
bool is_parameter() const { return index() < 0; }
- bool is_valid() const { return index_ != kIllegalIndex; }
+ bool is_valid() const { return index_ != kInvalidIndex; }
+ bool is_byte_operand() const;
+ bool is_short_operand() const;
static Register FromParameterIndex(int index, int parameter_count);
int ToParameterIndex(int parameter_count) const;
static int MaxParameterIndex();
+ static int MaxRegisterIndex();
+ static int MaxRegisterIndexForByteOperand();
+
+ // Returns an invalid register.
+ static Register invalid_value() { return Register(); }
// Returns the register for the function's closure object.
static Register function_closure();
bool is_function_closure() const;
- // Returns the register for the function's outer context.
- static Register function_context();
- bool is_function_context() const;
+ // Returns the register which holds the current context object.
+ static Register current_context();
+ bool is_current_context() const;
// Returns the register for the incoming new target value.
static Register new_target();
@@ -301,11 +355,16 @@ class Register {
static Register FromWideOperand(uint16_t operand);
uint16_t ToWideOperand() const;
+ static Register FromRawOperand(uint32_t raw_operand);
+ uint32_t ToRawOperand() const;
+
static bool AreContiguous(Register reg1, Register reg2,
Register reg3 = Register(),
Register reg4 = Register(),
Register reg5 = Register());
+ std::string ToString(int parameter_count);
+
bool operator==(const Register& other) const {
return index() == other.index();
}
@@ -318,9 +377,15 @@ class Register {
bool operator<=(const Register& other) const {
return index() <= other.index();
}
+ bool operator>(const Register& other) const {
+ return index() > other.index();
+ }
+ bool operator>=(const Register& other) const {
+ return index() >= other.index();
+ }
private:
- static const int kIllegalIndex = kMaxInt;
+ static const int kInvalidIndex = kMaxInt;
void* operator new(size_t size);
void operator delete(void* p);
@@ -349,57 +414,96 @@ class Bytecodes {
// Returns the number of operands expected by |bytecode|.
static int NumberOfOperands(Bytecode bytecode);
- // Return the i-th operand of |bytecode|.
+ // Returns the number of register operands expected by |bytecode|.
+ static int NumberOfRegisterOperands(Bytecode bytecode);
+
+ // Returns the i-th operand of |bytecode|.
static OperandType GetOperandType(Bytecode bytecode, int i);
- // Return the size of the i-th operand of |bytecode|.
+ // Returns the size of the i-th operand of |bytecode|.
static OperandSize GetOperandSize(Bytecode bytecode, int i);
// Returns the offset of the i-th operand of |bytecode| relative to the start
// of the bytecode.
static int GetOperandOffset(Bytecode bytecode, int i);
+ // Returns a zero-based bitmap of the register operand positions of
+ // |bytecode|.
+ static int GetRegisterOperandBitmap(Bytecode bytecode);
+
+ // Returns a debug break bytecode with a matching operand size.
+ static Bytecode GetDebugBreak(Bytecode bytecode);
+
// Returns the size of the bytecode including its operands.
static int Size(Bytecode bytecode);
// Returns the size of |operand|.
static OperandSize SizeOfOperand(OperandType operand);
- // Return true if the bytecode is a conditional jump taking
+ // Returns true if the bytecode is a conditional jump taking
// an immediate byte operand (OperandType::kImm8).
static bool IsConditionalJumpImmediate(Bytecode bytecode);
- // Return true if the bytecode is a conditional jump taking
+ // Returns true if the bytecode is a conditional jump taking
// a constant pool entry (OperandType::kIdx8).
static bool IsConditionalJumpConstant(Bytecode bytecode);
- // Return true if the bytecode is a conditional jump taking
+ // Returns true if the bytecode is a conditional jump taking
// a constant pool entry (OperandType::kIdx16).
static bool IsConditionalJumpConstantWide(Bytecode bytecode);
- // Return true if the bytecode is a conditional jump taking
+ // Returns true if the bytecode is a conditional jump taking
// any kind of operand.
static bool IsConditionalJump(Bytecode bytecode);
- // Return true if the bytecode is a jump or a conditional jump taking
+ // Returns true if the bytecode is a jump or a conditional jump taking
// an immediate byte operand (OperandType::kImm8).
static bool IsJumpImmediate(Bytecode bytecode);
- // Return true if the bytecode is a jump or conditional jump taking a
+ // Returns true if the bytecode is a jump or conditional jump taking a
// constant pool entry (OperandType::kIdx8).
static bool IsJumpConstant(Bytecode bytecode);
- // Return true if the bytecode is a jump or conditional jump taking a
+ // Returns true if the bytecode is a jump or conditional jump taking a
// constant pool entry (OperandType::kIdx16).
static bool IsJumpConstantWide(Bytecode bytecode);
- // Return true if the bytecode is a jump or conditional jump taking
+ // Returns true if the bytecode is a jump or conditional jump taking
// any kind of operand.
static bool IsJump(Bytecode bytecode);
- // Return true if the bytecode is a conditional jump, a jump, or a return.
+ // Returns true if the bytecode is a conditional jump, a jump, or a return.
static bool IsJumpOrReturn(Bytecode bytecode);
+ // Returns true if the bytecode is a call or a constructor call.
+ static bool IsCallOrNew(Bytecode bytecode);
+
+ // Returns true if the bytecode is a debug break.
+ static bool IsDebugBreak(Bytecode bytecode);
+
+ // Returns true if |operand_type| is an index operand (kIdx8/kIdx16).
+ static bool IsIndexOperandType(OperandType operand_type);
+
+ // Returns true if |operand_type| represents an immediate.
+ static bool IsImmediateOperandType(OperandType operand_type);
+
+ // Returns true if |operand_type| is a register count operand
+ // (kRegCount8/kRegCount16).
+ static bool IsRegisterCountOperandType(OperandType operand_type);
+
+ // Returns true if |operand_type| is any type of register operand.
+ static bool IsRegisterOperandType(OperandType operand_type);
+
+ // Returns true if |operand_type| represents a register used as an input.
+ static bool IsRegisterInputOperandType(OperandType operand_type);
+
+ // Returns true if |operand_type| represents a register used as an output.
+ static bool IsRegisterOutputOperandType(OperandType operand_type);
+
+ // Returns true if |operand_type| is a maybe register operand
+ // (kMaybeReg8/kMaybeReg16).
+ static bool IsMaybeRegisterOperandType(OperandType operand_type);
+
// Decode a single bytecode and operands to |os|.
static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
int number_of_parameters);
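
GetRegisterOperandBitmap() above packs the register-operand positions of a bytecode into an int. A standalone sketch of how such a zero-based bitmap can be walked (the bitmap value here is invented for illustration):

    #include <cstdio>

    int main() {
      int bitmap = 0x5;  // pretend operands 0 and 2 are register operands
      for (int i = 0; bitmap != 0; i++, bitmap >>= 1) {
        if (bitmap & 1) std::printf("operand %d is a register\n", i);
      }
      return 0;
    }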
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index 2586e1ff4d..e8b1281b5a 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -85,19 +85,19 @@ Handle<Object> ConstantArrayBuilder::At(size_t index) const {
}
}
-
-Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Factory* factory) const {
- Handle<FixedArray> fixed_array =
- factory->NewFixedArray(static_cast<int>(size()), PretenureFlag::TENURED);
+Handle<FixedArray> ConstantArrayBuilder::ToFixedArray() {
+ Handle<FixedArray> fixed_array = isolate_->factory()->NewFixedArray(
+ static_cast<int>(size()), PretenureFlag::TENURED);
for (int i = 0; i < fixed_array->length(); i++) {
fixed_array->set(i, *At(static_cast<size_t>(i)));
}
+ constants_map()->Clear();
return fixed_array;
}
size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
- index_t* entry = constants_map_.Find(object);
+ index_t* entry = constants_map()->Find(object);
return (entry == nullptr) ? AllocateEntry(object) : *entry;
}
@@ -106,7 +106,7 @@ ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateEntry(
Handle<Object> object) {
DCHECK(!object->IsOddball());
size_t index;
- index_t* entry = constants_map_.Get(object);
+ index_t* entry = constants_map()->Get(object);
if (idx8_slice_.available() > 0) {
index = idx8_slice_.Allocate(object);
} else {
@@ -136,7 +136,7 @@ size_t ConstantArrayBuilder::CommitReservedEntry(OperandSize operand_size,
Handle<Object> object) {
DiscardReservedEntry(operand_size);
size_t index;
- index_t* entry = constants_map_.Find(object);
+ index_t* entry = constants_map()->Find(object);
if (nullptr == entry) {
index = AllocateEntry(object);
} else {
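
Insert() above deduplicates constants through a map from object to pool index, allocating a new slot only on a miss. The same pattern in a self-contained sketch, with std::map standing in for V8's IdentityMap and std::string for Handle<Object>:

    #include <map>
    #include <string>
    #include <vector>

    struct DemoConstantPool {
      std::vector<std::string> entries;        // the eventual FixedArray
      std::map<std::string, size_t> index_of;  // stand-in for IdentityMap

      size_t Insert(const std::string& value) {
        auto it = index_of.find(value);
        if (it != index_of.end()) return it->second;  // reuse existing slot
        index_of[value] = entries.size();
        entries.push_back(value);
        return entries.size() - 1;
      }
    };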
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index c882b1d540..d7e41e3771 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -12,13 +12,15 @@
namespace v8 {
namespace internal {
-class Factory;
class Isolate;
namespace interpreter {
-// A helper class for constructing constant arrays for the interpreter.
-class ConstantArrayBuilder final : public ZoneObject {
+// A helper class for constructing constant arrays for the
+// interpreter. Each instance of this class is intended to be used to
+// generate exactly one FixedArray of constants via the ToFixedArray
+// method.
+class ConstantArrayBuilder final BASE_EMBEDDED {
public:
// Capacity of the 8-bit operand slice.
static const size_t kLowCapacity = 1u << kBitsPerByte;
@@ -32,7 +34,7 @@ class ConstantArrayBuilder final : public ZoneObject {
ConstantArrayBuilder(Isolate* isolate, Zone* zone);
// Generate a fixed array of constants based on inserted objects.
- Handle<FixedArray> ToFixedArray(Factory* factory) const;
+ Handle<FixedArray> ToFixedArray();
// Returns the object in the constant pool array at index |index|.
@@ -84,6 +86,8 @@ class ConstantArrayBuilder final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
};
+ IdentityMap<index_t>* constants_map() { return &constants_map_; }
+
Isolate* isolate_;
ConstantArraySlice idx8_slice_;
ConstantArraySlice idx16_slice_;
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index 99066e8c7e..6510aa443a 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -137,6 +137,57 @@ void SwitchBuilder::SetCaseTarget(int index) {
builder()->Bind(&site);
}
+
+void TryCatchBuilder::BeginTry(Register context) {
+ builder()->MarkTryBegin(handler_id_, context);
+}
+
+
+void TryCatchBuilder::EndTry() {
+ builder()->MarkTryEnd(handler_id_);
+ builder()->Jump(&exit_);
+ builder()->Bind(&handler_);
+ builder()->MarkHandler(handler_id_, true);
+}
+
+
+void TryCatchBuilder::EndCatch() { builder()->Bind(&exit_); }
+
+
+void TryFinallyBuilder::BeginTry(Register context) {
+ builder()->MarkTryBegin(handler_id_, context);
+}
+
+
+void TryFinallyBuilder::LeaveTry() {
+ finalization_sites_.push_back(BytecodeLabel());
+ builder()->Jump(&finalization_sites_.back());
+}
+
+
+void TryFinallyBuilder::EndTry() {
+ builder()->MarkTryEnd(handler_id_);
+}
+
+
+void TryFinallyBuilder::BeginHandler() {
+ builder()->Bind(&handler_);
+ builder()->MarkHandler(handler_id_, will_catch_);
+}
+
+
+void TryFinallyBuilder::BeginFinally() {
+ for (size_t i = 0; i < finalization_sites_.size(); i++) {
+ BytecodeLabel& site = finalization_sites_.at(i);
+ builder()->Bind(&site);
+ }
+}
+
+
+void TryFinallyBuilder::EndFinally() {
+ // Nothing to be done here.
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
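
TryFinallyBuilder records one unbound BytecodeLabel per LeaveTry() and binds them all when BeginFinally() runs. A standalone sketch of that collect-then-patch pattern, with raw offsets standing in for labels and invented bytecode values:

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<size_t> finalization_sites;  // offsets of unpatched jumps
      std::vector<int> code;

      auto leave_try = [&]() {     // emit a jump whose target is unknown yet
        finalization_sites.push_back(code.size());
        code.push_back(-1);        // placeholder operand
      };
      leave_try();
      code.push_back(42);          // some other bytecode in the try block
      leave_try();

      int finally_offset = static_cast<int>(code.size());
      for (size_t site : finalization_sites)  // BeginFinally(): bind them all
        code[site] = finally_offset;
      std::printf("patched %zu jumps to offset %d\n",
                  finalization_sites.size(), finally_offset);
      return 0;
    }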
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index 24a7dfe3e5..e4d376b9b2 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -144,6 +144,53 @@ class SwitchBuilder final : public BreakableControlFlowBuilder {
ZoneVector<BytecodeLabel> case_sites_;
};
+
+// A class to help with co-ordinating control flow in try-catch statements.
+class TryCatchBuilder final : public ControlFlowBuilder {
+ public:
+ explicit TryCatchBuilder(BytecodeArrayBuilder* builder)
+ : ControlFlowBuilder(builder), handler_id_(builder->NewHandlerEntry()) {}
+
+ void BeginTry(Register context);
+ void EndTry();
+ void EndCatch();
+
+ private:
+ int handler_id_;
+ BytecodeLabel handler_;
+ BytecodeLabel exit_;
+};
+
+
+// A class to help with co-ordinating control flow in try-finally statements.
+class TryFinallyBuilder final : public ControlFlowBuilder {
+ public:
+ explicit TryFinallyBuilder(BytecodeArrayBuilder* builder, bool will_catch)
+ : ControlFlowBuilder(builder),
+ handler_id_(builder->NewHandlerEntry()),
+ finalization_sites_(builder->zone()),
+ will_catch_(will_catch) {}
+
+ void BeginTry(Register context);
+ void LeaveTry();
+ void EndTry();
+ void BeginHandler();
+ void BeginFinally();
+ void EndFinally();
+
+ private:
+ int handler_id_;
+ BytecodeLabel handler_;
+
+ // Unbound labels that identify jumps to the finally block in the code.
+ ZoneVector<BytecodeLabel> finalization_sites_;
+
+ // Conservative prediction of whether exceptions thrown into the handler for
+ // this finally block will be caught. Note that such a prediction depends on
+ // whether this try-finally is nested inside a surrounding try-catch.
+ bool will_catch_;
+};
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/handler-table-builder.cc b/deps/v8/src/interpreter/handler-table-builder.cc
new file mode 100644
index 0000000000..374089bdc3
--- /dev/null
+++ b/deps/v8/src/interpreter/handler-table-builder.cc
@@ -0,0 +1,73 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/handler-table-builder.h"
+
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+HandlerTableBuilder::HandlerTableBuilder(Isolate* isolate, Zone* zone)
+ : isolate_(isolate), entries_(zone) {}
+
+Handle<HandlerTable> HandlerTableBuilder::ToHandlerTable() {
+ int handler_table_size = static_cast<int>(entries_.size());
+ Handle<HandlerTable> table =
+ Handle<HandlerTable>::cast(isolate_->factory()->NewFixedArray(
+ HandlerTable::LengthForRange(handler_table_size), TENURED));
+ for (int i = 0; i < handler_table_size; ++i) {
+ Entry& entry = entries_[i];
+ HandlerTable::CatchPrediction pred =
+ entry.will_catch ? HandlerTable::CAUGHT : HandlerTable::UNCAUGHT;
+ table->SetRangeStart(i, static_cast<int>(entry.offset_start));
+ table->SetRangeEnd(i, static_cast<int>(entry.offset_end));
+ table->SetRangeHandler(i, static_cast<int>(entry.offset_target), pred);
+ table->SetRangeData(i, entry.context.index());
+ }
+ return table;
+}
+
+
+int HandlerTableBuilder::NewHandlerEntry() {
+ int handler_id = static_cast<int>(entries_.size());
+ Entry entry = {0, 0, 0, Register(), false};
+ entries_.push_back(entry);
+ return handler_id;
+}
+
+
+void HandlerTableBuilder::SetTryRegionStart(int handler_id, size_t offset) {
+ DCHECK(Smi::IsValid(offset)); // Encoding of handler table requires this.
+ entries_[handler_id].offset_start = offset;
+}
+
+
+void HandlerTableBuilder::SetTryRegionEnd(int handler_id, size_t offset) {
+ DCHECK(Smi::IsValid(offset)); // Encoding of handler table requires this.
+ entries_[handler_id].offset_end = offset;
+}
+
+
+void HandlerTableBuilder::SetHandlerTarget(int handler_id, size_t offset) {
+ DCHECK(Smi::IsValid(offset)); // Encoding of handler table requires this.
+ entries_[handler_id].offset_target = offset;
+}
+
+
+void HandlerTableBuilder::SetPrediction(int handler_id, bool will_catch) {
+ entries_[handler_id].will_catch = will_catch;
+}
+
+
+void HandlerTableBuilder::SetContextRegister(int handler_id, Register reg) {
+ entries_[handler_id].context = reg;
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
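
The builder above follows a fixed protocol: NewHandlerEntry() hands out an id, the setters fill the entry in, and ToHandlerTable() serializes everything into a heap object. A hedged standalone analogue with plain structs instead of V8 heap objects; all offsets are invented:

    #include <cstdio>
    #include <vector>

    struct DemoEntry {
      size_t start = 0, end = 0, target = 0;
      bool will_catch = false;
    };

    struct DemoHandlerTableBuilder {
      std::vector<DemoEntry> entries;
      int NewHandlerEntry() {
        entries.push_back(DemoEntry());
        return static_cast<int>(entries.size()) - 1;
      }
      void SetTryRegionStart(int id, size_t off) { entries[id].start = off; }
      void SetTryRegionEnd(int id, size_t off) { entries[id].end = off; }
      void SetHandlerTarget(int id, size_t off) { entries[id].target = off; }
    };

    int main() {
      DemoHandlerTableBuilder b;
      int id = b.NewHandlerEntry();
      b.SetTryRegionStart(id, 4);  // try region begins at bytecode offset 4
      b.SetTryRegionEnd(id, 20);   // and ends at offset 20
      b.SetHandlerTarget(id, 24);  // handler body starts at offset 24
      std::printf("%zu entries\n", b.entries.size());
      return 0;
    }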
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
new file mode 100644
index 0000000000..7356e37767
--- /dev/null
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -0,0 +1,61 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
+#define V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
+
+#include "src/handles.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class HandlerTable;
+class Isolate;
+
+namespace interpreter {
+
+// A helper class for constructing exception handler tables for the interpreter.
+class HandlerTableBuilder final BASE_EMBEDDED {
+ public:
+ HandlerTableBuilder(Isolate* isolate, Zone* zone);
+
+ // Builds the actual handler table by copying the current values into a heap
+ // object. Any further mutations to the builder won't be reflected.
+ Handle<HandlerTable> ToHandlerTable();
+
+ // Creates a new handler table entry and returns a {handler_id} identifying
+ // the entry, so that it can be referenced by the setter functions below.
+ int NewHandlerEntry();
+
+ // Setter functions that modify certain values within the handler table entry
+ // being referenced by the given {handler_id}. All values will be encoded by
+ // the resulting {HandlerTable} class when copied into the heap.
+ void SetTryRegionStart(int handler_id, size_t offset);
+ void SetTryRegionEnd(int handler_id, size_t offset);
+ void SetHandlerTarget(int handler_id, size_t offset);
+ void SetPrediction(int handler_id, bool will_catch);
+ void SetContextRegister(int handler_id, Register reg);
+
+ private:
+ struct Entry {
+ size_t offset_start; // Bytecode offset starting try-region.
+ size_t offset_end; // Bytecode offset ending try-region.
+ size_t offset_target; // Bytecode offset of handler target.
+ Register context; // Register holding context for handler.
+ bool will_catch; // Optimistic prediction for handler.
+ };
+
+ Isolate* isolate_;
+ ZoneVector<Entry> entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HandlerTableBuilder);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
new file mode 100644
index 0000000000..440e879c48
--- /dev/null
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -0,0 +1,546 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/interpreter-assembler.h"
+
+#include <ostream>
+
+#include "src/code-factory.h"
+#include "src/frames.h"
+#include "src/interface-descriptors.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter.h"
+#include "src/machine-type.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+using compiler::Node;
+
+InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
+ Bytecode bytecode)
+ : compiler::CodeStubAssembler(
+ isolate, zone, InterpreterDispatchDescriptor(isolate),
+ Code::ComputeFlags(Code::STUB), Bytecodes::ToString(bytecode), 0),
+ bytecode_(bytecode),
+ accumulator_(this, MachineRepresentation::kTagged),
+ context_(this, MachineRepresentation::kTagged),
+ bytecode_array_(this, MachineRepresentation::kTagged),
+ disable_stack_check_across_call_(false),
+ stack_pointer_before_call_(nullptr) {
+ accumulator_.Bind(
+ Parameter(InterpreterDispatchDescriptor::kAccumulatorParameter));
+ context_.Bind(Parameter(InterpreterDispatchDescriptor::kContextParameter));
+ bytecode_array_.Bind(
+ Parameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter));
+ if (FLAG_trace_ignition) {
+ TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
+ }
+}
+
+InterpreterAssembler::~InterpreterAssembler() {}
+
+Node* InterpreterAssembler::GetAccumulator() { return accumulator_.value(); }
+
+void InterpreterAssembler::SetAccumulator(Node* value) {
+ accumulator_.Bind(value);
+}
+
+Node* InterpreterAssembler::GetContext() { return context_.value(); }
+
+void InterpreterAssembler::SetContext(Node* value) {
+ StoreRegister(value, Register::current_context());
+ context_.Bind(value);
+}
+
+Node* InterpreterAssembler::BytecodeOffset() {
+ return Parameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter);
+}
+
+Node* InterpreterAssembler::RegisterFileRawPointer() {
+ return Parameter(InterpreterDispatchDescriptor::kRegisterFileParameter);
+}
+
+Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
+ return bytecode_array_.value();
+}
+
+Node* InterpreterAssembler::DispatchTableRawPointer() {
+ return Parameter(InterpreterDispatchDescriptor::kDispatchTableParameter);
+}
+
+Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
+ return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
+}
+
+Node* InterpreterAssembler::LoadRegister(int offset) {
+ return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
+ Int32Constant(offset));
+}
+
+Node* InterpreterAssembler::LoadRegister(Register reg) {
+ return LoadRegister(reg.ToOperand() << kPointerSizeLog2);
+}
+
+Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
+ return WordShl(index, kPointerSizeLog2);
+}
+
+Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
+ return Load(MachineType::AnyTagged(), RegisterFileRawPointer(),
+ RegisterFrameOffset(reg_index));
+}
+
+Node* InterpreterAssembler::StoreRegister(Node* value, int offset) {
+ return StoreNoWriteBarrier(MachineRepresentation::kTagged,
+ RegisterFileRawPointer(), Int32Constant(offset),
+ value);
+}
+
+Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
+ return StoreRegister(value, reg.ToOperand() << kPointerSizeLog2);
+}
+
+Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
+ return StoreNoWriteBarrier(MachineRepresentation::kTagged,
+ RegisterFileRawPointer(),
+ RegisterFrameOffset(reg_index), value);
+}
+
+Node* InterpreterAssembler::NextRegister(Node* reg_index) {
+ // Register indexes are negative, so the next index is minus one.
+ return IntPtrAdd(reg_index, Int32Constant(-1));
+}
+
+Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(OperandSize::kByte,
+ Bytecodes::GetOperandSize(bytecode_, operand_index));
+ return Load(
+ MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
+ bytecode_, operand_index))));
+}
+
+Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(OperandSize::kByte,
+ Bytecodes::GetOperandSize(bytecode_, operand_index));
+ Node* load = Load(
+ MachineType::Int8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
+ bytecode_, operand_index))));
+ // Ensure that we sign extend to full pointer size
+ if (kPointerSize == 8) {
+ load = ChangeInt32ToInt64(load);
+ }
+ return load;
+}
+
+Node* InterpreterAssembler::BytecodeOperandShort(int operand_index) {
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(OperandSize::kShort,
+ Bytecodes::GetOperandSize(bytecode_, operand_index));
+ if (TargetSupportsUnalignedAccess()) {
+ return Load(
+ MachineType::Uint16(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(Bytecodes::GetOperandOffset(
+ bytecode_, operand_index))));
+ } else {
+ int offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
+ Node* first_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(offset)));
+ Node* second_byte =
+ Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(offset + 1)));
+#if V8_TARGET_LITTLE_ENDIAN
+ return WordOr(WordShl(second_byte, kBitsPerByte), first_byte);
+#elif V8_TARGET_BIG_ENDIAN
+ return WordOr(WordShl(first_byte, kBitsPerByte), second_byte);
+#else
+#error "Unknown Architecture"
+#endif
+ }
+}
+
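
On targets without unaligned access, the short operand above is assembled from two byte loads, with the shift order chosen per endianness. The little-endian case in a standalone sketch (the operand bytes are invented):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint8_t operand_bytes[] = {0x34, 0x12};  // as laid out in memory
      uint16_t first = operand_bytes[0];
      uint16_t second = operand_bytes[1];
      // Little-endian: the second byte holds the high bits.
      uint16_t value = static_cast<uint16_t>((second << 8) | first);
      std::printf("0x%04x\n", value);  // prints 0x1234
      return 0;
    }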
+Node* InterpreterAssembler::BytecodeOperandShortSignExtended(
+ int operand_index) {
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
+ DCHECK_EQ(OperandSize::kShort,
+ Bytecodes::GetOperandSize(bytecode_, operand_index));
+ int operand_offset = Bytecodes::GetOperandOffset(bytecode_, operand_index);
+ Node* load;
+ if (TargetSupportsUnalignedAccess()) {
+ load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), Int32Constant(operand_offset)));
+ } else {
+#if V8_TARGET_LITTLE_ENDIAN
+ Node* hi_byte_offset = Int32Constant(operand_offset + 1);
+ Node* lo_byte_offset = Int32Constant(operand_offset);
+#elif V8_TARGET_BIG_ENDIAN
+ Node* hi_byte_offset = Int32Constant(operand_offset);
+ Node* lo_byte_offset = Int32Constant(operand_offset + 1);
+#else
+#error "Unknown Architecture"
+#endif
+ Node* hi_byte = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), hi_byte_offset));
+ Node* lo_byte = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), lo_byte_offset));
+ hi_byte = Word32Shl(hi_byte, Int32Constant(kBitsPerByte));
+ load = Word32Or(hi_byte, lo_byte);
+ }
+
+ // Ensure that we sign extend to full pointer size
+ if (kPointerSize == 8) {
+ load = ChangeInt32ToInt64(load);
+ }
+ return load;
+}
+
+Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
+ switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+ case OperandSize::kByte:
+ DCHECK_EQ(OperandType::kRegCount8,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperand(operand_index);
+ case OperandSize::kShort:
+ DCHECK_EQ(OperandType::kRegCount16,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperandShort(operand_index);
+ case OperandSize::kNone:
+ UNREACHABLE();
+ }
+ return nullptr;
+}
+
+Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
+ DCHECK_EQ(OperandType::kImm8,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperandSignExtended(operand_index);
+}
+
+Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
+ switch (Bytecodes::GetOperandSize(bytecode_, operand_index)) {
+ case OperandSize::kByte:
+ DCHECK_EQ(OperandType::kIdx8,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperand(operand_index);
+ case OperandSize::kShort:
+ DCHECK_EQ(OperandType::kIdx16,
+ Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperandShort(operand_index);
+ case OperandSize::kNone:
+ UNREACHABLE();
+ }
+ return nullptr;
+}
+
+Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(bytecode_, operand_index);
+ if (Bytecodes::IsRegisterOperandType(operand_type)) {
+ OperandSize operand_size = Bytecodes::SizeOfOperand(operand_type);
+ if (operand_size == OperandSize::kByte) {
+ return BytecodeOperandSignExtended(operand_index);
+ } else if (operand_size == OperandSize::kShort) {
+ return BytecodeOperandShortSignExtended(operand_index);
+ }
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
+Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
+ Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
+ BytecodeArray::kConstantPoolOffset);
+ Node* entry_offset =
+ IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+ WordShl(index, kPointerSizeLog2));
+ return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
+}
+
+Node* InterpreterAssembler::LoadFixedArrayElement(Node* fixed_array,
+ int index) {
+ Node* entry_offset =
+ IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+ WordShl(Int32Constant(index), kPointerSizeLog2));
+ return Load(MachineType::AnyTagged(), fixed_array, entry_offset);
+}
+
+Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
+ return Load(MachineType::AnyTagged(), object,
+ IntPtrConstant(offset - kHeapObjectTag));
+}
+
+Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
+ return Load(MachineType::AnyTagged(), context,
+ IntPtrConstant(Context::SlotOffset(slot_index)));
+}
+
+Node* InterpreterAssembler::LoadContextSlot(Node* context, Node* slot_index) {
+ Node* offset =
+ IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+ Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+ return Load(MachineType::AnyTagged(), context, offset);
+}
+
+Node* InterpreterAssembler::StoreContextSlot(Node* context, Node* slot_index,
+ Node* value) {
+ Node* offset =
+ IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+ Int32Constant(Context::kHeaderSize - kHeapObjectTag));
+ return Store(MachineRepresentation::kTagged, context, offset, value);
+}
+
+Node* InterpreterAssembler::LoadTypeFeedbackVector() {
+ Node* function = Load(
+ MachineType::AnyTagged(), RegisterFileRawPointer(),
+ IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ Node* shared_info =
+ LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
+ Node* vector =
+ LoadObjectField(shared_info, SharedFunctionInfo::kFeedbackVectorOffset);
+ return vector;
+}
+
+void InterpreterAssembler::CallPrologue() {
+ StoreRegister(SmiTag(BytecodeOffset()),
+ InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer);
+ StoreRegister(BytecodeArrayTaggedPointer(),
+ InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer);
+
+ if (FLAG_debug_code && !disable_stack_check_across_call_) {
+ DCHECK(stack_pointer_before_call_ == nullptr);
+ stack_pointer_before_call_ = LoadStackPointer();
+ }
+}
+
+void InterpreterAssembler::CallEpilogue() {
+ if (FLAG_debug_code && !disable_stack_check_across_call_) {
+ Node* stack_pointer_after_call = LoadStackPointer();
+ Node* stack_pointer_before_call = stack_pointer_before_call_;
+ stack_pointer_before_call_ = nullptr;
+ AbortIfWordNotEqual(stack_pointer_before_call, stack_pointer_after_call,
+ kUnexpectedStackPointer);
+ }
+
+ // Restore bytecode array from stack frame in case the debugger has swapped us
+ // to the patched debugger bytecode array.
+ bytecode_array_.Bind(LoadRegister(
+ InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
+}
+
+Node* InterpreterAssembler::CallJS(Node* function, Node* context,
+ Node* first_arg, Node* arg_count,
+ TailCallMode tail_call_mode) {
+ Callable callable =
+ CodeFactory::InterpreterPushArgsAndCall(isolate(), tail_call_mode);
+ Node* code_target = HeapConstant(callable.code());
+ return CallStub(callable.descriptor(), code_target, context, arg_count,
+ first_arg, function);
+}
+
+Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
+ Node* new_target, Node* first_arg,
+ Node* arg_count) {
+ Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(isolate());
+ Node* code_target = HeapConstant(callable.code());
+ return CallStub(callable.descriptor(), code_target, context, arg_count,
+ new_target, constructor, first_arg);
+}
+
+Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
+ Node* first_arg, Node* arg_count,
+ int result_size) {
+ Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
+ Node* code_target = HeapConstant(callable.code());
+
+ // Get the function entry from the function id.
+ Node* function_table = ExternalConstant(
+ ExternalReference::runtime_function_table_address(isolate()));
+ Node* function_offset =
+ Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
+ Node* function = IntPtrAdd(function_table, function_offset);
+ Node* function_entry =
+ Load(MachineType::Pointer(), function,
+ Int32Constant(offsetof(Runtime::Function, entry)));
+
+ return CallStub(callable.descriptor(), code_target, context, arg_count,
+ first_arg, function_entry, result_size);
+}
+
+void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
+ CodeStubAssembler::Label ok(this);
+ CodeStubAssembler::Label interrupt_check(this);
+ CodeStubAssembler::Label end(this);
+ Node* budget_offset =
+ IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
+
+ // Update budget by |weight| and check if it reaches zero.
+ Node* old_budget =
+ Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
+ Node* new_budget = Int32Add(old_budget, weight);
+ Node* condition = Int32GreaterThanOrEqual(new_budget, Int32Constant(0));
+ Branch(condition, &ok, &interrupt_check);
+
+ // Perform interrupt and reset budget.
+ Bind(&interrupt_check);
+ CallRuntime(Runtime::kInterrupt, GetContext());
+ StoreNoWriteBarrier(MachineRepresentation::kWord32,
+ BytecodeArrayTaggedPointer(), budget_offset,
+ Int32Constant(Interpreter::InterruptBudget()));
+ Goto(&end);
+
+ // Update budget.
+ Bind(&ok);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32,
+ BytecodeArrayTaggedPointer(), budget_offset, new_budget);
+ Goto(&end);
+ Bind(&end);
+}
+
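
UpdateInterruptBudget() above adds a (typically negative) weight to the per-function budget and calls Runtime::kInterrupt once the budget goes negative, then resets it. The arithmetic in a standalone sketch; the reset value here merely stands in for Interpreter::InterruptBudget():

    #include <cstdio>

    int main() {
      int budget = 100;   // pretend initial interrupt budget
      int weight = -120;  // e.g. a backwards jump over 120 bytecodes
      budget += weight;
      if (budget < 0) {
        std::printf("interrupt\n");  // Runtime::kInterrupt in the real code
        budget = 100;                // reset to InterruptBudget()
      }
      return 0;
    }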
+Node* InterpreterAssembler::Advance(int delta) {
+ return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
+}
+
+Node* InterpreterAssembler::Advance(Node* delta) {
+ return IntPtrAdd(BytecodeOffset(), delta);
+}
+
+void InterpreterAssembler::Jump(Node* delta) {
+ UpdateInterruptBudget(delta);
+ DispatchTo(Advance(delta));
+}
+
+void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
+ CodeStubAssembler::Label match(this);
+ CodeStubAssembler::Label no_match(this);
+
+ Branch(condition, &match, &no_match);
+ Bind(&match);
+ Jump(delta);
+ Bind(&no_match);
+ Dispatch();
+}
+
+void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
+ JumpConditional(WordEqual(lhs, rhs), delta);
+}
+
+void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
+ Node* delta) {
+ JumpConditional(WordNotEqual(lhs, rhs), delta);
+}
+
+void InterpreterAssembler::Dispatch() {
+ DispatchTo(Advance(Bytecodes::Size(bytecode_)));
+}
+
+void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
+ Node* target_bytecode = Load(
+ MachineType::Uint8(), BytecodeArrayTaggedPointer(), new_bytecode_offset);
+
+ // TODO(rmcilroy): Create a code target dispatch table to avoid conversion
+ // from code object on every dispatch.
+ Node* target_code_object =
+ Load(MachineType::Pointer(), DispatchTableRawPointer(),
+ Word32Shl(target_bytecode, Int32Constant(kPointerSizeLog2)));
+
+ DispatchToBytecodeHandler(target_code_object, new_bytecode_offset);
+}
+
+void InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
+ Node* bytecode_offset) {
+ if (FLAG_trace_ignition) {
+ TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
+ }
+
+ InterpreterDispatchDescriptor descriptor(isolate());
+ Node* args[] = {GetAccumulator(), RegisterFileRawPointer(),
+ bytecode_offset, BytecodeArrayTaggedPointer(),
+ DispatchTableRawPointer(), GetContext()};
+ TailCall(descriptor, handler, args, 0);
+}
+
+void InterpreterAssembler::InterpreterReturn() {
+ // TODO(rmcilroy): Investigate whether it is worth supporting self
+ // optimization of primitive functions like FullCodegen.
+
+ // Update profiling count by -BytecodeOffset to simulate backedge to start of
+ // function.
+ Node* profiling_weight =
+ Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize),
+ BytecodeOffset());
+ UpdateInterruptBudget(profiling_weight);
+
+ Node* exit_trampoline_code_object =
+ HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
+ DispatchToBytecodeHandler(exit_trampoline_code_object);
+}
+
+void InterpreterAssembler::StackCheck() {
+ CodeStubAssembler::Label end(this);
+ CodeStubAssembler::Label ok(this);
+ CodeStubAssembler::Label stack_guard(this);
+
+ Node* sp = LoadStackPointer();
+ Node* stack_limit = Load(
+ MachineType::Pointer(),
+ ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
+ Node* condition = UintPtrGreaterThanOrEqual(sp, stack_limit);
+ Branch(condition, &ok, &stack_guard);
+ Bind(&stack_guard);
+ CallRuntime(Runtime::kStackGuard, GetContext());
+ Goto(&end);
+ Bind(&ok);
+ Goto(&end);
+ Bind(&end);
+}
+
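
StackCheck() above compares the stack pointer against the isolate's limit and only calls into the runtime when the stack has grown past it. The comparison in a standalone sketch (both addresses are invented; stacks grow downwards):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uintptr_t sp = 0x7000;           // pretend current stack pointer
      uintptr_t stack_limit = 0x8000;  // pretend limit from the isolate
      if (sp < stack_limit) {          // limit crossed: guard must run
        std::printf("call Runtime::kStackGuard\n");
      }
      return 0;
    }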
+void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
+ disable_stack_check_across_call_ = true;
+ Node* abort_id = SmiTag(Int32Constant(bailout_reason));
+ Node* ret_value = CallRuntime(Runtime::kAbort, GetContext(), abort_id);
+ disable_stack_check_across_call_ = false;
+ // Unreached, but keeps turbofan happy.
+ Return(ret_value);
+}
+
+void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
+ BailoutReason bailout_reason) {
+ CodeStubAssembler::Label match(this);
+ CodeStubAssembler::Label no_match(this);
+
+ Node* condition = WordEqual(lhs, rhs);
+ Branch(condition, &match, &no_match);
+ Bind(&no_match);
+ Abort(bailout_reason);
+ Bind(&match);
+}
+
+void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
+ CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
+ SmiTag(BytecodeOffset()), GetAccumulator());
+}
+
+// static
+bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
+#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ return false;
+#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC
+ return CpuFeatures::IsSupported(UNALIGNED_ACCESSES);
+#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_X87
+ return true;
+#else
+#error "Unknown Architecture"
+#endif
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
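
DispatchTo() above loads the next opcode byte and tail-calls the handler stored at that index in the dispatch table. A standalone sketch with function pointers standing in for V8 code objects and an invented two-opcode instruction set:

    #include <cstdint>
    #include <cstdio>

    using Handler = void (*)(const uint8_t* pc);

    void Dispatch(const uint8_t* pc);

    void DoNop(const uint8_t* pc) { Dispatch(pc + 1); }  // advance and go on
    void DoReturn(const uint8_t*) { std::printf("return\n"); }

    Handler dispatch_table[] = {DoNop, DoReturn};  // indexed by opcode

    void Dispatch(const uint8_t* pc) { dispatch_table[*pc](pc); }

    int main() {
      const uint8_t bytecode[] = {0, 0, 1};  // nop, nop, return
      Dispatch(bytecode);
      return 0;
    }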
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
new file mode 100644
index 0000000000..9600dfb6c5
--- /dev/null
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -0,0 +1,205 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
+#define V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
+
+#include "src/allocation.h"
+#include "src/base/smart-pointers.h"
+#include "src/builtins.h"
+#include "src/compiler/code-stub-assembler.h"
+#include "src/frames.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class InterpreterAssembler : public compiler::CodeStubAssembler {
+ public:
+ InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode);
+ virtual ~InterpreterAssembler();
+
+ // Returns the count immediate for bytecode operand |operand_index| in the
+ // current bytecode.
+ compiler::Node* BytecodeOperandCount(int operand_index);
+ // Returns the index immediate for bytecode operand |operand_index| in the
+ // current bytecode.
+ compiler::Node* BytecodeOperandIdx(int operand_index);
+ // Returns the Imm8 immediate for bytecode operand |operand_index| in the
+ // current bytecode.
+ compiler::Node* BytecodeOperandImm(int operand_index);
+ // Returns the register index for bytecode operand |operand_index| in the
+ // current bytecode.
+ compiler::Node* BytecodeOperandReg(int operand_index);
+
+ // Accumulator.
+ compiler::Node* GetAccumulator();
+ void SetAccumulator(compiler::Node* value);
+
+ // Context.
+ compiler::Node* GetContext();
+ void SetContext(compiler::Node* value);
+
+ // Loads from and stores to the interpreter register file.
+ compiler::Node* LoadRegister(int offset);
+ compiler::Node* LoadRegister(Register reg);
+ compiler::Node* LoadRegister(compiler::Node* reg_index);
+ compiler::Node* StoreRegister(compiler::Node* value, int offset);
+ compiler::Node* StoreRegister(compiler::Node* value, Register reg);
+ compiler::Node* StoreRegister(compiler::Node* value,
+ compiler::Node* reg_index);
+
+ // Returns the next consecutive register.
+ compiler::Node* NextRegister(compiler::Node* reg_index);
+
+ // Returns the location in memory of the register |reg_index| in the
+ // interpreter register file.
+ compiler::Node* RegisterLocation(compiler::Node* reg_index);
+
+ // Load constant at |index| in the constant pool.
+ compiler::Node* LoadConstantPoolEntry(compiler::Node* index);
+
+ // Load an element from a fixed array on the heap.
+ compiler::Node* LoadFixedArrayElement(compiler::Node* fixed_array, int index);
+
+ // Load a field from an object on the heap.
+ compiler::Node* LoadObjectField(compiler::Node* object, int offset);
+
+ // Load |slot_index| from |context|.
+ compiler::Node* LoadContextSlot(compiler::Node* context, int slot_index);
+ compiler::Node* LoadContextSlot(compiler::Node* context,
+ compiler::Node* slot_index);
+ // Stores |value| into |slot_index| of |context|.
+ compiler::Node* StoreContextSlot(compiler::Node* context,
+ compiler::Node* slot_index,
+ compiler::Node* value);
+
+ // Load the TypeFeedbackVector for the current function.
+ compiler::Node* LoadTypeFeedbackVector();
+
+ // Call JSFunction or Callable |function| with |arg_count|
+ // arguments (not including receiver) and the first argument
+ // located at |first_arg|.
+ compiler::Node* CallJS(compiler::Node* function, compiler::Node* context,
+ compiler::Node* first_arg, compiler::Node* arg_count,
+ TailCallMode tail_call_mode);
+
+ // Call constructor |constructor| with |arg_count| arguments (not
+ // including receiver) and the first argument located at
+ // |first_arg|. The |new_target| is the same as the
+ // |constructor| for the new keyword, but differs for the super
+ // keyword.
+ compiler::Node* CallConstruct(compiler::Node* constructor,
+ compiler::Node* context,
+ compiler::Node* new_target,
+ compiler::Node* first_arg,
+ compiler::Node* arg_count);
+
+ // Call runtime function with |arg_count| arguments and the first argument
+ // located at |first_arg|.
+ compiler::Node* CallRuntimeN(compiler::Node* function_id,
+ compiler::Node* context,
+ compiler::Node* first_arg,
+ compiler::Node* arg_count, int result_size = 1);
+
+ // Jump relative to the current bytecode by |jump_offset|.
+ void Jump(compiler::Node* jump_offset);
+
+ // Jump relative to the current bytecode by |jump_offset| if the
+ // |condition| is true. Helper function for JumpIfWordEqual and
+ // JumpIfWordNotEqual.
+ void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
+
+ // Jump relative to the current bytecode by |jump_offset| if the
+ // word values |lhs| and |rhs| are equal.
+ void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* jump_offset);
+
+ // Jump relative to the current bytecode by |jump_offset| if the
+ // word values |lhs| and |rhs| are not equal.
+ void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
+ compiler::Node* jump_offset);
+
+ // Perform a stack guard check.
+ void StackCheck();
+
+ // Returns from the function.
+ void InterpreterReturn();
+
+ // Dispatch to the bytecode.
+ void Dispatch();
+
+ // Dispatch to bytecode handler.
+ void DispatchToBytecodeHandler(compiler::Node* handler,
+ compiler::Node* bytecode_offset);
+ void DispatchToBytecodeHandler(compiler::Node* handler) {
+ DispatchToBytecodeHandler(handler, BytecodeOffset());
+ }
+
+ // Abort with the given bailout reason.
+ void Abort(BailoutReason bailout_reason);
+
+ protected:
+ static bool TargetSupportsUnalignedAccess();
+
+ private:
+ // Returns a raw pointer to start of the register file on the stack.
+ compiler::Node* RegisterFileRawPointer();
+ // Returns a tagged pointer to the current function's BytecodeArray object.
+ compiler::Node* BytecodeArrayTaggedPointer();
+ // Returns the offset from the BytecodeArrayPointer of the current bytecode.
+ compiler::Node* BytecodeOffset();
+ // Returns a raw pointer to first entry in the interpreter dispatch table.
+ compiler::Node* DispatchTableRawPointer();
+
+ // Saves and restores the interpreter bytecode offset in the interpreter
+ // stack frame when performing a call.
+ void CallPrologue() override;
+ void CallEpilogue() override;
+
+ // Traces the current bytecode by calling |function_id|.
+ void TraceBytecode(Runtime::FunctionId function_id);
+
+ // Updates the bytecode array's interrupt budget by |weight| and calls
+ // Runtime::kInterrupt if the budget drops below zero.
+ void UpdateInterruptBudget(compiler::Node* weight);
+
+ // Returns the offset of register |index| relative to
+ // RegisterFileRawPointer().
+ compiler::Node* RegisterFrameOffset(compiler::Node* index);
+
+ compiler::Node* BytecodeOperand(int operand_index);
+ compiler::Node* BytecodeOperandSignExtended(int operand_index);
+ compiler::Node* BytecodeOperandShort(int operand_index);
+ compiler::Node* BytecodeOperandShortSignExtended(int operand_index);
+
+ // Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
+ // update BytecodeOffset() itself.
+ compiler::Node* Advance(int delta);
+ compiler::Node* Advance(compiler::Node* delta);
+
+ // Starts next instruction dispatch at |new_bytecode_offset|.
+ void DispatchTo(compiler::Node* new_bytecode_offset);
+
+ // Abort operations for debug code.
+ void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
+ BailoutReason bailout_reason);
+
+ Bytecode bytecode_;
+ CodeStubAssembler::Variable accumulator_;
+ CodeStubAssembler::Variable context_;
+ CodeStubAssembler::Variable bytecode_array_;
+
+ bool disable_stack_check_across_call_;
+ compiler::Node* stack_pointer_before_call_;
+
+ DISALLOW_COPY_AND_ASSIGN(InterpreterAssembler);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_INTERPRETER_ASSEMBLER_H_
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 574602b0ed..43a7ead281 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -4,12 +4,13 @@
#include "src/interpreter/interpreter.h"
+#include "src/ast/prettyprinter.h"
#include "src/code-factory.h"
#include "src/compiler.h"
-#include "src/compiler/interpreter-assembler.h"
#include "src/factory.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter-assembler.h"
#include "src/zone.h"
namespace v8 {
@@ -20,52 +21,77 @@ using compiler::Node;
#define __ assembler->
+Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
+ memset(&dispatch_table_, 0, sizeof(dispatch_table_));
+}
+
+void Interpreter::Initialize() {
+ DCHECK(FLAG_ignition);
+ if (IsDispatchTableInitialized()) return;
+ Zone zone;
+ HandleScope scope(isolate_);
+
+#define GENERATE_CODE(Name, ...) \
+ { \
+ InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name); \
+ Do##Name(&assembler); \
+ Handle<Code> code = assembler.GenerateCode(); \
+ TraceCodegen(code, #Name); \
+ dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] = *code; \
+ }
+ BYTECODE_LIST(GENERATE_CODE)
+#undef GENERATE_CODE
+}
-Interpreter::Interpreter(Isolate* isolate)
- : isolate_(isolate) {}
+Code* Interpreter::GetBytecodeHandler(Bytecode bytecode) {
+ DCHECK(IsDispatchTableInitialized());
+ return dispatch_table_[Bytecodes::ToByte(bytecode)];
+}
+void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
+ v->VisitPointers(
+ reinterpret_cast<Object**>(&dispatch_table_[0]),
+ reinterpret_cast<Object**>(&dispatch_table_[0] + kDispatchTableSize));
+}
// static
-Handle<FixedArray> Interpreter::CreateUninitializedInterpreterTable(
- Isolate* isolate) {
- Handle<FixedArray> handler_table = isolate->factory()->NewFixedArray(
- static_cast<int>(Bytecode::kLast) + 1, TENURED);
- // We rely on the interpreter handler table being immovable, so check that
- // it was allocated on the first page (which is always immovable).
- DCHECK(isolate->heap()->old_space()->FirstPage()->Contains(
- handler_table->address()));
- return handler_table;
+int Interpreter::InterruptBudget() {
+ // TODO(ignition): Tune code size multiplier.
+ const int kCodeSizeMultiplier = 32;
+ return FLAG_interrupt_budget * kCodeSizeMultiplier;
}
+bool Interpreter::MakeBytecode(CompilationInfo* info) {
+ if (FLAG_print_bytecode || FLAG_print_source || FLAG_print_ast) {
+ OFStream os(stdout);
+ base::SmartArrayPointer<char> name = info->GetDebugName();
+ os << "[generating bytecode for function: " << info->GetDebugName().get()
+ << "]" << std::endl
+ << std::flush;
+ }
-void Interpreter::Initialize() {
- DCHECK(FLAG_ignition);
- Handle<FixedArray> handler_table = isolate_->factory()->interpreter_table();
- if (!IsInterpreterTableInitialized(handler_table)) {
- Zone zone;
- HandleScope scope(isolate_);
-
-#define GENERATE_CODE(Name, ...) \
- { \
- compiler::InterpreterAssembler assembler(isolate_, &zone, \
- Bytecode::k##Name); \
- Do##Name(&assembler); \
- Handle<Code> code = assembler.GenerateCode(); \
- handler_table->set(static_cast<int>(Bytecode::k##Name), *code); \
- }
- BYTECODE_LIST(GENERATE_CODE)
-#undef GENERATE_CODE
+#ifdef DEBUG
+ if (info->parse_info() && FLAG_print_source) {
+ OFStream os(stdout);
+ os << "--- Source from AST ---" << std::endl
+ << PrettyPrinter(info->isolate()).PrintProgram(info->literal())
+ << std::endl
+ << std::flush;
}
-}
+ if (info->parse_info() && FLAG_print_ast) {
+ OFStream os(stdout);
+ os << "--- AST ---" << std::endl
+ << AstPrinter(info->isolate()).PrintProgram(info->literal()) << std::endl
+ << std::flush;
+ }
+#endif // DEBUG
-bool Interpreter::MakeBytecode(CompilationInfo* info) {
BytecodeGenerator generator(info->isolate(), info->zone());
info->EnsureFeedbackVector();
Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
if (FLAG_print_bytecode) {
OFStream os(stdout);
- os << "Function: " << info->GetDebugName().get() << std::endl;
bytecodes->Print(os);
os << std::flush;
}
@@ -75,18 +101,28 @@ bool Interpreter::MakeBytecode(CompilationInfo* info) {
return true;
}
-
-bool Interpreter::IsInterpreterTableInitialized(
- Handle<FixedArray> handler_table) {
- DCHECK(handler_table->length() == static_cast<int>(Bytecode::kLast) + 1);
- return handler_table->get(0) != isolate_->heap()->undefined_value();
+bool Interpreter::IsDispatchTableInitialized() {
+ if (FLAG_trace_ignition) {
+ // Regenerate table to add bytecode tracing operations.
+ return false;
+ }
+ return dispatch_table_[0] != nullptr;
}
+void Interpreter::TraceCodegen(Handle<Code> code, const char* name) {
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_trace_ignition_codegen) {
+ OFStream os(stdout);
+ code->Disassemble(name, os);
+ os << std::flush;
+ }
+#endif // ENABLE_DISASSEMBLER
+}
// LdaZero
//
// Load literal '0' into the accumulator.
-void Interpreter::DoLdaZero(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
Node* zero_value = __ NumberConstant(0.0);
__ SetAccumulator(zero_value);
__ Dispatch();
@@ -96,15 +132,14 @@ void Interpreter::DoLdaZero(compiler::InterpreterAssembler* assembler) {
// LdaSmi8 <imm8>
//
// Load an 8-bit integer literal into the accumulator as a Smi.
-void Interpreter::DoLdaSmi8(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaSmi8(InterpreterAssembler* assembler) {
Node* raw_int = __ BytecodeOperandImm(0);
Node* smi_int = __ SmiTag(raw_int);
__ SetAccumulator(smi_int);
__ Dispatch();
}
-
-void Interpreter::DoLoadConstant(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLoadConstant(InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
__ SetAccumulator(constant);
@@ -115,7 +150,7 @@ void Interpreter::DoLoadConstant(compiler::InterpreterAssembler* assembler) {
// LdaConstant <idx>
//
// Load constant literal at |idx| in the constant pool into the accumulator.
-void Interpreter::DoLdaConstant(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
DoLoadConstant(assembler);
}
@@ -123,7 +158,7 @@ void Interpreter::DoLdaConstant(compiler::InterpreterAssembler* assembler) {
// LdaConstantWide <idx>
//
// Load constant literal at |idx| in the constant pool into the accumulator.
-void Interpreter::DoLdaConstantWide(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaConstantWide(InterpreterAssembler* assembler) {
DoLoadConstant(assembler);
}
@@ -131,7 +166,7 @@ void Interpreter::DoLdaConstantWide(compiler::InterpreterAssembler* assembler) {
// LdaUndefined
//
// Load Undefined into the accumulator.
-void Interpreter::DoLdaUndefined(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
Node* undefined_value =
__ HeapConstant(isolate_->factory()->undefined_value());
__ SetAccumulator(undefined_value);
@@ -142,7 +177,7 @@ void Interpreter::DoLdaUndefined(compiler::InterpreterAssembler* assembler) {
// LdaNull
//
// Load Null into the accumulator.
-void Interpreter::DoLdaNull(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaNull(InterpreterAssembler* assembler) {
Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
__ SetAccumulator(null_value);
__ Dispatch();
@@ -152,7 +187,7 @@ void Interpreter::DoLdaNull(compiler::InterpreterAssembler* assembler) {
// LdaTheHole
//
// Load TheHole into the accumulator.
-void Interpreter::DoLdaTheHole(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) {
Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
__ SetAccumulator(the_hole_value);
__ Dispatch();
@@ -162,7 +197,7 @@ void Interpreter::DoLdaTheHole(compiler::InterpreterAssembler* assembler) {
// LdaTrue
//
// Load True into the accumulator.
-void Interpreter::DoLdaTrue(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) {
Node* true_value = __ HeapConstant(isolate_->factory()->true_value());
__ SetAccumulator(true_value);
__ Dispatch();
@@ -172,7 +207,7 @@ void Interpreter::DoLdaTrue(compiler::InterpreterAssembler* assembler) {
// LdaFalse
//
// Load False into the accumulator.
-void Interpreter::DoLdaFalse(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) {
Node* false_value = __ HeapConstant(isolate_->factory()->false_value());
__ SetAccumulator(false_value);
__ Dispatch();
@@ -182,7 +217,7 @@ void Interpreter::DoLdaFalse(compiler::InterpreterAssembler* assembler) {
// Ldar <src>
//
// Load accumulator with value from register <src>.
-void Interpreter::DoLdar(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdar(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
Node* value = __ LoadRegister(reg_index);
__ SetAccumulator(value);
@@ -193,7 +228,7 @@ void Interpreter::DoLdar(compiler::InterpreterAssembler* assembler) {
// Star <dst>
//
// Store accumulator to register <dst>.
-void Interpreter::DoStar(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStar(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
Node* accumulator = __ GetAccumulator();
__ StoreRegister(accumulator, reg_index);
@@ -201,32 +236,10 @@ void Interpreter::DoStar(compiler::InterpreterAssembler* assembler) {
}
-// Exchange <reg8> <reg16>
-//
-// Exchange two registers.
-void Interpreter::DoExchange(compiler::InterpreterAssembler* assembler) {
- Node* reg0_index = __ BytecodeOperandReg(0);
- Node* reg1_index = __ BytecodeOperandReg(1);
- Node* reg0_value = __ LoadRegister(reg0_index);
- Node* reg1_value = __ LoadRegister(reg1_index);
- __ StoreRegister(reg1_value, reg0_index);
- __ StoreRegister(reg0_value, reg1_index);
- __ Dispatch();
-}
-
-
-// ExchangeWide <reg16> <reg16>
-//
-// Exchange two registers.
-void Interpreter::DoExchangeWide(compiler::InterpreterAssembler* assembler) {
- return DoExchange(assembler);
-}
-
-
// Mov <src> <dst>
//
// Stores the value of register <src> to register <dst>.
-void Interpreter::DoMov(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoMov(InterpreterAssembler* assembler) {
Node* src_index = __ BytecodeOperandReg(0);
Node* src_value = __ LoadRegister(src_index);
Node* dst_index = __ BytecodeOperandReg(1);
@@ -235,8 +248,14 @@ void Interpreter::DoMov(compiler::InterpreterAssembler* assembler) {
}
-void Interpreter::DoLoadGlobal(Callable ic,
- compiler::InterpreterAssembler* assembler) {
+// MovWide <src> <dst>
+//
+// Stores the value of register <src> to register <dst>.
+void Interpreter::DoMovWide(InterpreterAssembler* assembler) {
+ DoMov(assembler);
+}
+
+void Interpreter::DoLoadGlobal(Callable ic, InterpreterAssembler* assembler) {
// Get the global object.
Node* context = __ GetContext();
Node* native_context =
@@ -250,109 +269,54 @@ void Interpreter::DoLoadGlobal(Callable ic,
Node* raw_slot = __ BytecodeOperandIdx(1);
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Node* result = __ CallIC(ic.descriptor(), code_target, global, name, smi_slot,
- type_feedback_vector);
+ Node* result = __ CallStub(ic.descriptor(), code_target, context, global,
+ name, smi_slot, type_feedback_vector);
__ SetAccumulator(result);
__ Dispatch();
}
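// The CallIC -> CallStub change above threads the context through as an
// explicit argument. A hedged summary of the new call shape, argument names
// as in the code:
//
//   CallStub(ic.descriptor(), code_target, context,
//            global, name, smi_slot, type_feedback_vector)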
-
-// LdaGlobalSloppy <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
- SLOPPY, UNINITIALIZED);
- DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalSloppy <name_index> <slot>
+// LdaGlobal <name_index> <slot>
//
// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalStrict(compiler::InterpreterAssembler* assembler) {
+// accumulator using FeedbackVector slot <slot> outside of a typeof.
+void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
- STRICT, UNINITIALIZED);
- DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalInsideTypeofSloppy <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalInsideTypeofSloppy(
- compiler::InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
- SLOPPY, UNINITIALIZED);
+ UNINITIALIZED);
DoLoadGlobal(ic, assembler);
}
-
-// LdaGlobalInsideTypeofStrict <name_index> <slot>
+// LdaGlobalInsideTypeof <name_index> <slot>
//
// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalInsideTypeofStrict(
- compiler::InterpreterAssembler* assembler) {
+// accumulator using FeedbackVector slot <slot> inside of a typeof.
+void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
- STRICT, UNINITIALIZED);
+ UNINITIALIZED);
DoLoadGlobal(ic, assembler);
}
-
-// LdaGlobalSloppyWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalSloppyWide(
- compiler::InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
- SLOPPY, UNINITIALIZED);
- DoLoadGlobal(ic, assembler);
-}
-
-
-// LdaGlobalSloppyWide <name_index> <slot>
+// LdaGlobalWide <name_index> <slot>
//
// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalStrictWide(
- compiler::InterpreterAssembler* assembler) {
+// accumulator using FeedbackVector slot <slot> outside of a typeof.
+void Interpreter::DoLdaGlobalWide(InterpreterAssembler* assembler) {
Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
- STRICT, UNINITIALIZED);
+ UNINITIALIZED);
DoLoadGlobal(ic, assembler);
}
-
-// LdaGlobalInsideTypeofSloppyWide <name_index> <slot>
+// LdaGlobalInsideTypeofWide <name_index> <slot>
//
// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in sloppy mode.
-void Interpreter::DoLdaGlobalInsideTypeofSloppyWide(
- compiler::InterpreterAssembler* assembler) {
+// accumulator using FeedbackVector slot <slot> inside of a typeof.
+void Interpreter::DoLdaGlobalInsideTypeofWide(InterpreterAssembler* assembler) {
Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
- SLOPPY, UNINITIALIZED);
+ UNINITIALIZED);
DoLoadGlobal(ic, assembler);
}
-// LdaGlobalInsideTypeofSloppyWide <name_index> <slot>
-//
-// Load the global with name in constant pool entry <name_index> into the
-// accumulator using FeedBackVector slot <slot> in strict mode.
-void Interpreter::DoLdaGlobalInsideTypeofStrictWide(
- compiler::InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, INSIDE_TYPEOF,
- STRICT, UNINITIALIZED);
- DoLoadGlobal(ic, assembler);
-}
-
-
-void Interpreter::DoStoreGlobal(Callable ic,
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreGlobal(Callable ic, InterpreterAssembler* assembler) {
// Get the global object.
Node* context = __ GetContext();
Node* native_context =
@@ -367,8 +331,8 @@ void Interpreter::DoStoreGlobal(Callable ic,
Node* raw_slot = __ BytecodeOperandIdx(1);
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- __ CallIC(ic.descriptor(), code_target, global, name, value, smi_slot,
- type_feedback_vector);
+ __ CallStub(ic.descriptor(), code_target, context, global, name, value,
+ smi_slot, type_feedback_vector);
__ Dispatch();
}
@@ -378,7 +342,7 @@ void Interpreter::DoStoreGlobal(Callable ic,
//
// Store the value in the accumulator into the global with name in constant pool
// entry <name_index> using FeedbackVector slot <slot> in sloppy mode.
-void Interpreter::DoStaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
DoStoreGlobal(ic, assembler);
@@ -389,7 +353,7 @@ void Interpreter::DoStaGlobalSloppy(compiler::InterpreterAssembler* assembler) {
//
// Store the value in the accumulator into the global with name in constant pool
// entry <name_index> using FeedbackVector slot <slot> in strict mode.
-void Interpreter::DoStaGlobalStrict(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
DoStoreGlobal(ic, assembler);
@@ -400,8 +364,7 @@ void Interpreter::DoStaGlobalStrict(compiler::InterpreterAssembler* assembler) {
//
// Store the value in the accumulator into the global with name in constant pool
// entry <name_index> using FeedbackVector slot <slot> in sloppy mode.
-void Interpreter::DoStaGlobalSloppyWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalSloppyWide(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
DoStoreGlobal(ic, assembler);
@@ -412,8 +375,7 @@ void Interpreter::DoStaGlobalSloppyWide(
//
// Store the value in the accumulator into the global with name in constant pool
// entry <name_index> using FeedbackVector slot <slot> in strict mode.
-void Interpreter::DoStaGlobalStrictWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaGlobalStrictWide(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
DoStoreGlobal(ic, assembler);
@@ -423,7 +385,7 @@ void Interpreter::DoStaGlobalStrictWide(
// LdaContextSlot <context> <slot_index>
//
// Load the object in |slot_index| of |context| into the accumulator.
-void Interpreter::DoLdaContextSlot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
Node* context = __ LoadRegister(reg_index);
Node* slot_index = __ BytecodeOperandIdx(1);
@@ -436,8 +398,7 @@ void Interpreter::DoLdaContextSlot(compiler::InterpreterAssembler* assembler) {
// LdaContextSlotWide <context> <slot_index>
//
// Load the object in |slot_index| of |context| into the accumulator.
-void Interpreter::DoLdaContextSlotWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaContextSlotWide(InterpreterAssembler* assembler) {
DoLdaContextSlot(assembler);
}
@@ -445,7 +406,7 @@ void Interpreter::DoLdaContextSlotWide(
// StaContextSlot <context> <slot_index>
//
// Stores the object in the accumulator into |slot_index| of |context|.
-void Interpreter::DoStaContextSlot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
Node* reg_index = __ BytecodeOperandReg(0);
Node* context = __ LoadRegister(reg_index);
@@ -458,19 +419,16 @@ void Interpreter::DoStaContextSlot(compiler::InterpreterAssembler* assembler) {
// StaContextSlot <context> <slot_index>
//
// Stores the object in the accumulator into |slot_index| of |context|.
-void Interpreter::DoStaContextSlotWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaContextSlotWide(InterpreterAssembler* assembler) {
DoStaContextSlot(assembler);
}
-
void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
- compiler::InterpreterAssembler* assembler) {
+ InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* name = __ LoadConstantPoolEntry(index);
Node* context = __ GetContext();
- Node* result_pair = __ CallRuntime(function_id, context, name);
- Node* result = __ Projection(0, result_pair);
+ Node* result = __ CallRuntime(function_id, context, name);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -480,7 +438,7 @@ void Interpreter::DoLoadLookupSlot(Runtime::FunctionId function_id,
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
-void Interpreter::DoLdaLookupSlot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
DoLoadLookupSlot(Runtime::kLoadLookupSlot, assembler);
}
@@ -489,9 +447,8 @@ void Interpreter::DoLdaLookupSlot(compiler::InterpreterAssembler* assembler) {
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a ReferenceError.
-void Interpreter::DoLdaLookupSlotInsideTypeof(
- compiler::InterpreterAssembler* assembler) {
- DoLoadLookupSlot(Runtime::kLoadLookupSlotNoReferenceError, assembler);
+void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
+ DoLoadLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
}
@@ -499,8 +456,7 @@ void Interpreter::DoLdaLookupSlotInsideTypeof(
//
// Lookup the object with the name in constant pool entry |name_index|
// dynamically.
-void Interpreter::DoLdaLookupSlotWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLdaLookupSlotWide(InterpreterAssembler* assembler) {
DoLdaLookupSlot(assembler);
}
@@ -510,20 +466,20 @@ void Interpreter::DoLdaLookupSlotWide(
// Lookup the object with the name in constant pool entry |name_index|
// dynamically without causing a ReferenceError.
void Interpreter::DoLdaLookupSlotInsideTypeofWide(
- compiler::InterpreterAssembler* assembler) {
+ InterpreterAssembler* assembler) {
DoLdaLookupSlotInsideTypeof(assembler);
}
-
void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
- compiler::InterpreterAssembler* assembler) {
+ InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
Node* name = __ LoadConstantPoolEntry(index);
Node* context = __ GetContext();
- Node* language_mode_node = __ NumberConstant(language_mode);
- Node* result = __ CallRuntime(Runtime::kStoreLookupSlot, value, context, name,
- language_mode_node);
+ Node* result = __ CallRuntime(is_strict(language_mode)
+ ? Runtime::kStoreLookupSlot_Strict
+ : Runtime::kStoreLookupSlot_Sloppy,
+ context, name, value);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -533,8 +489,7 @@ void Interpreter::DoStoreLookupSlot(LanguageMode language_mode,
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in sloppy mode.
-void Interpreter::DoStaLookupSlotSloppy(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
DoStoreLookupSlot(LanguageMode::SLOPPY, assembler);
}
@@ -543,8 +498,7 @@ void Interpreter::DoStaLookupSlotSloppy(
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in strict mode.
-void Interpreter::DoStaLookupSlotStrict(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
DoStoreLookupSlot(LanguageMode::STRICT, assembler);
}
@@ -553,8 +507,7 @@ void Interpreter::DoStaLookupSlotStrict(
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in sloppy mode.
-void Interpreter::DoStaLookupSlotSloppyWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotSloppyWide(InterpreterAssembler* assembler) {
DoStaLookupSlotSloppy(assembler);
}
@@ -563,14 +516,11 @@ void Interpreter::DoStaLookupSlotSloppyWide(
//
// Store the object in accumulator to the object with the name in constant
// pool entry |name_index| in strict mode.
-void Interpreter::DoStaLookupSlotStrictWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStaLookupSlotStrictWide(InterpreterAssembler* assembler) {
DoStaLookupSlotStrict(assembler);
}
-
-void Interpreter::DoLoadIC(Callable ic,
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLoadIC(Callable ic, InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
Node* register_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(register_index);
@@ -579,61 +529,35 @@ void Interpreter::DoLoadIC(Callable ic,
Node* raw_slot = __ BytecodeOperandIdx(2);
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Node* result = __ CallIC(ic.descriptor(), code_target, object, name, smi_slot,
- type_feedback_vector);
+ Node* context = __ GetContext();
+ Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
+ name, smi_slot, type_feedback_vector);
__ SetAccumulator(result);
__ Dispatch();
}
-
-// LoadICSloppy <object> <name_index> <slot>
+// LoadIC <object> <name_index> <slot>
//
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICSloppy(compiler::InterpreterAssembler* assembler) {
+// Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLoadIC(InterpreterAssembler* assembler) {
Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
- SLOPPY, UNINITIALIZED);
+ UNINITIALIZED);
DoLoadIC(ic, assembler);
}
-
-// LoadICStrict <object> <name_index> <slot>
+// LoadICWide <object> <name_index> <slot>
//
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICStrict(compiler::InterpreterAssembler* assembler) {
+// Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
+// constant pool entry <name_index>.
+void Interpreter::DoLoadICWide(InterpreterAssembler* assembler) {
Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
- STRICT, UNINITIALIZED);
+ UNINITIALIZED);
DoLoadIC(ic, assembler);
}
-// LoadICSloppyWide <object> <name_index> <slot>
-//
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICSloppyWide(
- compiler::InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
- SLOPPY, UNINITIALIZED);
- DoLoadIC(ic, assembler);
-}
-
-
-// LoadICStrictWide <object> <name_index> <slot>
-//
-// Calls the sloppy mode LoadIC at FeedBackVector slot <slot> for <object> and
-// the name at constant pool entry <name_index>.
-void Interpreter::DoLoadICStrictWide(
- compiler::InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
- STRICT, UNINITIALIZED);
- DoLoadIC(ic, assembler);
-}
-
-
-void Interpreter::DoKeyedLoadIC(Callable ic,
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
Node* reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(reg_index);
@@ -641,63 +565,35 @@ void Interpreter::DoKeyedLoadIC(Callable ic,
Node* raw_slot = __ BytecodeOperandIdx(1);
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Node* result = __ CallIC(ic.descriptor(), code_target, object, name, smi_slot,
- type_feedback_vector);
+ Node* context = __ GetContext();
+ Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
+ name, smi_slot, type_feedback_vector);
__ SetAccumulator(result);
__ Dispatch();
}
-
-// KeyedLoadICSloppy <object> <slot>
+// KeyedLoadIC <object> <slot>
//
-// Calls the sloppy mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICSloppy(
- compiler::InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
- DoKeyedLoadIC(ic, assembler);
-}
-
-
-// KeyedLoadICStrict <object> <slot>
-//
-// Calls the strict mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICStrict(
- compiler::InterpreterAssembler* assembler) {
- Callable ic =
- CodeFactory::KeyedLoadICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
- DoKeyedLoadIC(ic, assembler);
-}
-
-
-// KeyedLoadICSloppyWide <object> <slot>
-//
-// Calls the sloppy mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICSloppyWide(
- compiler::InterpreterAssembler* assembler) {
+// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
+// in the accumulator.
+void Interpreter::DoKeyedLoadIC(InterpreterAssembler* assembler) {
Callable ic =
- CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ CodeFactory::KeyedLoadICInOptimizedCode(isolate_, UNINITIALIZED);
DoKeyedLoadIC(ic, assembler);
}
-
-// KeyedLoadICStrictWide <object> <slot>
+// KeyedLoadICWide <object> <slot>
//
-// Calls the strict mode KeyedLoadIC at FeedBackVector slot <slot> for <object>
-// and the key in the accumulator.
-void Interpreter::DoKeyedLoadICStrictWide(
- compiler::InterpreterAssembler* assembler) {
+// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
+// in the accumulator.
+void Interpreter::DoKeyedLoadICWide(InterpreterAssembler* assembler) {
Callable ic =
- CodeFactory::KeyedLoadICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
+ CodeFactory::KeyedLoadICInOptimizedCode(isolate_, UNINITIALIZED);
DoKeyedLoadIC(ic, assembler);
}
-void Interpreter::DoStoreIC(Callable ic,
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(object_reg_index);
@@ -707,8 +603,9 @@ void Interpreter::DoStoreIC(Callable ic,
Node* raw_slot = __ BytecodeOperandIdx(2);
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- __ CallIC(ic.descriptor(), code_target, object, name, value, smi_slot,
- type_feedback_vector);
+ Node* context = __ GetContext();
+ __ CallStub(ic.descriptor(), code_target, context, object, name, value,
+ smi_slot, type_feedback_vector);
__ Dispatch();
}
@@ -718,7 +615,7 @@ void Interpreter::DoStoreIC(Callable ic,
// Calls the sloppy mode StoreIC at FeedbackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
-void Interpreter::DoStoreICSloppy(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICSloppy(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
DoStoreIC(ic, assembler);
@@ -730,7 +627,7 @@ void Interpreter::DoStoreICSloppy(compiler::InterpreterAssembler* assembler) {
// Calls the strict mode StoreIC at FeedbackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
-void Interpreter::DoStoreICStrict(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICStrict(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
DoStoreIC(ic, assembler);
@@ -742,8 +639,7 @@ void Interpreter::DoStoreICStrict(compiler::InterpreterAssembler* assembler) {
// Calls the sloppy mode StoreIC at FeedbackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
-void Interpreter::DoStoreICSloppyWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICSloppyWide(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
DoStoreIC(ic, assembler);
@@ -755,16 +651,13 @@ void Interpreter::DoStoreICSloppyWide(
// Calls the strict mode StoreIC at FeedbackVector slot <slot> for <object> and
// the name in constant pool entry <name_index> with the value in the
// accumulator.
-void Interpreter::DoStoreICStrictWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoStoreICStrictWide(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::StoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
DoStoreIC(ic, assembler);
}
-
-void Interpreter::DoKeyedStoreIC(Callable ic,
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(object_reg_index);
@@ -774,8 +667,9 @@ void Interpreter::DoKeyedStoreIC(Callable ic,
Node* raw_slot = __ BytecodeOperandIdx(2);
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- __ CallIC(ic.descriptor(), code_target, object, name, value, smi_slot,
- type_feedback_vector);
+ Node* context = __ GetContext();
+ __ CallStub(ic.descriptor(), code_target, context, object, name, value,
+ smi_slot, type_feedback_vector);
__ Dispatch();
}
@@ -784,8 +678,7 @@ void Interpreter::DoKeyedStoreIC(Callable ic,
//
// Calls the sloppy mode KeyedStoreIC at FeedbackVector slot <slot> for <object>
// and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICSloppy(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICSloppy(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
DoKeyedStoreIC(ic, assembler);
@@ -796,8 +689,7 @@ void Interpreter::DoKeyedStoreICSloppy(
//
// Calls the strict mode KeyedStoreIC at FeedbackVector slot <slot> for <object>
// and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICStrict(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICStrict(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
DoKeyedStoreIC(ic, assembler);
@@ -808,8 +700,7 @@ void Interpreter::DoKeyedStoreICStrict(
//
// Calls the sloppy mode KeyedStoreIC at FeedbackVector slot <slot> for <object>
// and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICSloppyWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICSloppyWide(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
DoKeyedStoreIC(ic, assembler);
@@ -820,22 +711,22 @@ void Interpreter::DoKeyedStoreICSloppyWide(
//
// Calls the strict mode KeyedStoreIC at FeedbackVector slot <slot> for <object>
// and the key <key> with the value in the accumulator.
-void Interpreter::DoKeyedStoreICStrictWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoKeyedStoreICStrictWide(InterpreterAssembler* assembler) {
Callable ic =
CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT, UNINITIALIZED);
DoKeyedStoreIC(ic, assembler);
}
-
// PushContext <context>
//
-// Pushes the accumulator as the current context, and saves it in <context>
-void Interpreter::DoPushContext(compiler::InterpreterAssembler* assembler) {
+// Saves the current context in <context>, and pushes the accumulator as the
+// new current context.
+void Interpreter::DoPushContext(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
- Node* context = __ GetAccumulator();
- __ SetContext(context);
- __ StoreRegister(context, reg_index);
+ Node* new_context = __ GetAccumulator();
+ Node* old_context = __ GetContext();
+ __ StoreRegister(old_context, reg_index);
+ __ SetContext(new_context);
__ Dispatch();
}
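// A worked illustration of the corrected PushContext semantics above: the
// register receives the outgoing context so that a later PopContext can
// restore it. With the current context C0 and a fresh context C1 in the
// accumulator:
//
//   PushContext r0   // r0 = C0; current context becomes C1
//   ...              // bytecodes executing in C1
//   PopContext r0    // current context restored to C0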
@@ -843,22 +734,22 @@ void Interpreter::DoPushContext(compiler::InterpreterAssembler* assembler) {
// PopContext <context>
//
// Pops the current context and sets <context> as the new context.
-void Interpreter::DoPopContext(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
Node* context = __ LoadRegister(reg_index);
__ SetContext(context);
__ Dispatch();
}
-
void Interpreter::DoBinaryOp(Runtime::FunctionId function_id,
- compiler::InterpreterAssembler* assembler) {
+ InterpreterAssembler* assembler) {
// TODO(rmcilroy): Call ICs which back-patch bytecode with type specialized
// operations, instead of calling builtins directly.
Node* reg_index = __ BytecodeOperandReg(0);
Node* lhs = __ LoadRegister(reg_index);
Node* rhs = __ GetAccumulator();
- Node* result = __ CallRuntime(function_id, lhs, rhs);
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntime(function_id, context, lhs, rhs);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -867,7 +758,7 @@ void Interpreter::DoBinaryOp(Runtime::FunctionId function_id,
// Add <src>
//
// Add register <src> to accumulator.
-void Interpreter::DoAdd(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoAdd(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kAdd, assembler);
}
@@ -875,7 +766,7 @@ void Interpreter::DoAdd(compiler::InterpreterAssembler* assembler) {
// Sub <src>
//
// Subtract register <src> from accumulator.
-void Interpreter::DoSub(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoSub(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kSubtract, assembler);
}
@@ -883,7 +774,7 @@ void Interpreter::DoSub(compiler::InterpreterAssembler* assembler) {
// Mul <src>
//
// Multiply accumulator by register <src>.
-void Interpreter::DoMul(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoMul(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kMultiply, assembler);
}
@@ -891,7 +782,7 @@ void Interpreter::DoMul(compiler::InterpreterAssembler* assembler) {
// Div <src>
//
// Divide register <src> by accumulator.
-void Interpreter::DoDiv(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDiv(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kDivide, assembler);
}
@@ -899,7 +790,7 @@ void Interpreter::DoDiv(compiler::InterpreterAssembler* assembler) {
// Mod <src>
//
// Modulo register <src> by accumulator.
-void Interpreter::DoMod(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoMod(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kModulus, assembler);
}
@@ -907,7 +798,7 @@ void Interpreter::DoMod(compiler::InterpreterAssembler* assembler) {
// BitwiseOr <src>
//
// BitwiseOr register <src> to accumulator.
-void Interpreter::DoBitwiseOr(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kBitwiseOr, assembler);
}
@@ -915,7 +806,7 @@ void Interpreter::DoBitwiseOr(compiler::InterpreterAssembler* assembler) {
// BitwiseXor <src>
//
// BitwiseXor register <src> to accumulator.
-void Interpreter::DoBitwiseXor(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kBitwiseXor, assembler);
}
@@ -923,7 +814,7 @@ void Interpreter::DoBitwiseXor(compiler::InterpreterAssembler* assembler) {
// BitwiseAnd <src>
//
// BitwiseAnd register <src> to accumulator.
-void Interpreter::DoBitwiseAnd(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kBitwiseAnd, assembler);
}
@@ -934,7 +825,7 @@ void Interpreter::DoBitwiseAnd(compiler::InterpreterAssembler* assembler) {
// Register <src> is converted to an int32 and the accumulator to uint32
// before the operation. The 5 least-significant bits of the accumulator are
// used as the count, i.e. <src> << (accumulator & 0x1F).
-void Interpreter::DoShiftLeft(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kShiftLeft, assembler);
}
@@ -945,7 +836,7 @@ void Interpreter::DoShiftLeft(compiler::InterpreterAssembler* assembler) {
// Result is sign extended. Register <src> is converted to an int32 and the
// accumulator to uint32 before the operation. The 5 least-significant bits of
// the accumulator are used as the count, i.e. <src> >> (accumulator & 0x1F).
-void Interpreter::DoShiftRight(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kShiftRight, assembler);
}
@@ -956,17 +847,16 @@ void Interpreter::DoShiftRight(compiler::InterpreterAssembler* assembler) {
// Result is zero-filled. The accumulator and register <src> are converted to
// uint32 before the operation. The 5 least-significant bits of the
// accumulator are used as the count, i.e. <src> >>> (accumulator & 0x1F).
-void Interpreter::DoShiftRightLogical(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kShiftRightLogical, assembler);
}
-
void Interpreter::DoCountOp(Runtime::FunctionId function_id,
- compiler::InterpreterAssembler* assembler) {
+ InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
Node* one = __ NumberConstant(1);
- Node* result = __ CallRuntime(function_id, value, one);
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntime(function_id, context, value, one);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -975,7 +865,7 @@ void Interpreter::DoCountOp(Runtime::FunctionId function_id,
// Inc
//
// Increments value in the accumulator by one.
-void Interpreter::DoInc(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoInc(InterpreterAssembler* assembler) {
DoCountOp(Runtime::kAdd, assembler);
}
@@ -983,7 +873,7 @@ void Interpreter::DoInc(compiler::InterpreterAssembler* assembler) {
// Dec
//
// Decrements value in the accumulator by one.
-void Interpreter::DoDec(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDec(InterpreterAssembler* assembler) {
DoCountOp(Runtime::kSubtract, assembler);
}
@@ -992,9 +882,11 @@ void Interpreter::DoDec(compiler::InterpreterAssembler* assembler) {
//
// Perform logical-not on the accumulator, first casting the
// accumulator to a boolean value if required.
-void Interpreter::DoLogicalNot(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* result = __ CallRuntime(Runtime::kInterpreterLogicalNot, accumulator);
+ Node* context = __ GetContext();
+ Node* result =
+ __ CallRuntime(Runtime::kInterpreterLogicalNot, context, accumulator);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1004,20 +896,22 @@ void Interpreter::DoLogicalNot(compiler::InterpreterAssembler* assembler) {
//
// Load the accumulator with the string representing the type of the
// object in the accumulator.
-void Interpreter::DoTypeOf(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* result = __ CallRuntime(Runtime::kInterpreterTypeOf, accumulator);
+ Node* context = __ GetContext();
+ Node* result =
+ __ CallRuntime(Runtime::kInterpreterTypeOf, context, accumulator);
__ SetAccumulator(result);
__ Dispatch();
}
-
void Interpreter::DoDelete(Runtime::FunctionId function_id,
- compiler::InterpreterAssembler* assembler) {
+ InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(reg_index);
Node* key = __ GetAccumulator();
- Node* result = __ CallRuntime(function_id, object, key);
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntime(function_id, context, object, key);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1027,8 +921,7 @@ void Interpreter::DoDelete(Runtime::FunctionId function_id,
//
// Delete the property specified in the accumulator from the object
// referenced by the register operand following strict mode semantics.
-void Interpreter::DoDeletePropertyStrict(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) {
DoDelete(Runtime::kDeleteProperty_Strict, assembler);
}
@@ -1037,34 +930,23 @@ void Interpreter::DoDeletePropertyStrict(
//
// Delete the property specified in the accumulator from the object
// referenced by the register operand following sloppy mode semantics.
-void Interpreter::DoDeletePropertySloppy(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) {
DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
}
-
-// DeleteLookupSlot
-//
-// Delete the variable with the name specified in the accumulator by dynamically
-// looking it up.
-void Interpreter::DoDeleteLookupSlot(
- compiler::InterpreterAssembler* assembler) {
- Node* name = __ GetAccumulator();
- Node* context = __ GetContext();
- Node* result = __ CallRuntime(Runtime::kDeleteLookupSlot, context, name);
- __ SetAccumulator(result);
- __ Dispatch();
-}
-
-
-void Interpreter::DoJSCall(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJSCall(InterpreterAssembler* assembler,
+ TailCallMode tail_call_mode) {
Node* function_reg = __ BytecodeOperandReg(0);
Node* function = __ LoadRegister(function_reg);
Node* receiver_reg = __ BytecodeOperandReg(1);
- Node* first_arg = __ RegisterLocation(receiver_reg);
- Node* args_count = __ BytecodeOperandCount(2);
- // TODO(rmcilroy): Use the call type feedback slot to call via CallIC.
- Node* result = __ CallJS(function, first_arg, args_count);
+ Node* receiver_arg = __ RegisterLocation(receiver_reg);
+ Node* receiver_args_count = __ BytecodeOperandCount(2);
+ Node* receiver_count = __ Int32Constant(1);
+ Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
+ Node* context = __ GetContext();
+ // TODO(rmcilroy): Use the call type feedback slot to call via CallStub.
+ Node* result =
+ __ CallJS(function, context, receiver_arg, args_count, tail_call_mode);
__ SetAccumulator(result);
__ Dispatch();
}
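// Sketch of the operand arithmetic above: the count operand includes the
// receiver, so for a call f(a, b) the bytecode carries a register count of 3
// (receiver plus two arguments) while CallJS itself receives args_count == 2.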
@@ -1074,8 +956,8 @@ void Interpreter::DoJSCall(compiler::InterpreterAssembler* assembler) {
//
// Call a JSFunction or Callable in |callable| with the |receiver| and
// |arg_count| arguments in subsequent registers.
-void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
- DoJSCall(assembler);
+void Interpreter::DoCall(InterpreterAssembler* assembler) {
+ DoJSCall(assembler, TailCallMode::kDisallow);
}
@@ -1083,41 +965,66 @@ void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
//
// Call a JSFunction or Callable in |callable| with the |receiver| and
// |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallWide(compiler::InterpreterAssembler* assembler) {
- DoJSCall(assembler);
+void Interpreter::DoCallWide(InterpreterAssembler* assembler) {
+ DoJSCall(assembler, TailCallMode::kDisallow);
}
+// TailCall <callable> <receiver> <arg_count>
+//
+// Tail call a JSFunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
+ DoJSCall(assembler, TailCallMode::kAllow);
+}
-// CallRuntime <function_id> <first_arg> <arg_count>
+// TailCallWide <callable> <receiver> <arg_count>
//
-// Call the runtime function |function_id| with the first argument in
-// register |first_arg| and |arg_count| arguments in subsequent
-// registers.
-void Interpreter::DoCallRuntime(compiler::InterpreterAssembler* assembler) {
+// Tail call a JSFunction or Callable in |callable| with the |receiver| and
+// |arg_count| arguments in subsequent registers.
+void Interpreter::DoTailCallWide(InterpreterAssembler* assembler) {
+ DoJSCall(assembler, TailCallMode::kAllow);
+}
+
+void Interpreter::DoCallRuntimeCommon(InterpreterAssembler* assembler) {
Node* function_id = __ BytecodeOperandIdx(0);
Node* first_arg_reg = __ BytecodeOperandReg(1);
Node* first_arg = __ RegisterLocation(first_arg_reg);
Node* args_count = __ BytecodeOperandCount(2);
- Node* result = __ CallRuntime(function_id, first_arg, args_count);
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count);
__ SetAccumulator(result);
__ Dispatch();
}
-// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
+// CallRuntime <function_id> <first_arg> <arg_count>
//
-// Call the runtime function |function_id| which returns a pair, with the
-// first argument in register |first_arg| and |arg_count| arguments in
-// subsequent registers. Returns the result in <first_return> and
-// <first_return + 1>
-void Interpreter::DoCallRuntimeForPair(
- compiler::InterpreterAssembler* assembler) {
+// Call the runtime function |function_id| with the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers.
+void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
+ DoCallRuntimeCommon(assembler);
+}
+
+
+// CallRuntimeWide <function_id> <first_arg> <arg_count>
+//
+// Call the runtime function |function_id| with the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers.
+void Interpreter::DoCallRuntimeWide(InterpreterAssembler* assembler) {
+ DoCallRuntimeCommon(assembler);
+}
+
+void Interpreter::DoCallRuntimeForPairCommon(InterpreterAssembler* assembler) {
// Call the runtime function.
Node* function_id = __ BytecodeOperandIdx(0);
Node* first_arg_reg = __ BytecodeOperandReg(1);
Node* first_arg = __ RegisterLocation(first_arg_reg);
Node* args_count = __ BytecodeOperandCount(2);
- Node* result_pair = __ CallRuntime(function_id, first_arg, args_count, 2);
+ Node* context = __ GetContext();
+ Node* result_pair =
+ __ CallRuntimeN(function_id, context, first_arg, args_count, 2);
// Store the results in <first_return> and <first_return + 1>
Node* first_return_reg = __ BytecodeOperandReg(3);
@@ -1126,20 +1033,38 @@ void Interpreter::DoCallRuntimeForPair(
Node* result1 = __ Projection(1, result_pair);
__ StoreRegister(result0, first_return_reg);
__ StoreRegister(result1, second_return_reg);
-
__ Dispatch();
}
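// Sketch of the pair plumbing above: a runtime function returning two values
// yields a (result0, result1) pair, Projection(i, pair) selects element i,
// and the results land in consecutive registers:
//
//   first_return_reg      <-  Projection(0, result_pair)
//   first_return_reg + 1  <-  Projection(1, result_pair)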
-// CallJSRuntime <context_index> <receiver> <arg_count>
+// CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
//
-// Call the JS runtime function that has the |context_index| with the receiver
-// in register |receiver| and |arg_count| arguments in subsequent registers.
-void Interpreter::DoCallJSRuntime(compiler::InterpreterAssembler* assembler) {
+// Call the runtime function |function_id| which returns a pair, with the
+// first argument in register |first_arg| and |arg_count| arguments in
+// subsequent registers. Returns the result in <first_return> and
+// <first_return + 1>
+void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
+ DoCallRuntimeForPairCommon(assembler);
+}
+
+
+// CallRuntimeForPairWide <function_id> <first_arg> <arg_count> <first_return>
+//
+// Call the runtime function |function_id| which returns a pair, with the
+// first argument in register |first_arg| and |arg_count| arguments in
+// subsequent registers. Returns the result in <first_return> and
+// <first_return + 1>
+void Interpreter::DoCallRuntimeForPairWide(InterpreterAssembler* assembler) {
+ DoCallRuntimeForPairCommon(assembler);
+}
+
+void Interpreter::DoCallJSRuntimeCommon(InterpreterAssembler* assembler) {
Node* context_index = __ BytecodeOperandIdx(0);
Node* receiver_reg = __ BytecodeOperandReg(1);
Node* first_arg = __ RegisterLocation(receiver_reg);
- Node* args_count = __ BytecodeOperandCount(2);
+ Node* receiver_args_count = __ BytecodeOperandCount(2);
+ Node* receiver_count = __ Int32Constant(1);
+ Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
// Get the function to call from the native context.
Node* context = __ GetContext();
@@ -1148,35 +1073,72 @@ void Interpreter::DoCallJSRuntime(compiler::InterpreterAssembler* assembler) {
Node* function = __ LoadContextSlot(native_context, context_index);
// Call the function.
- Node* result = __ CallJS(function, first_arg, args_count);
+ Node* result = __ CallJS(function, context, first_arg, args_count,
+ TailCallMode::kDisallow);
__ SetAccumulator(result);
__ Dispatch();
}
-// New <constructor> <first_arg> <arg_count>
+// CallJSRuntime <context_index> <receiver> <arg_count>
//
-// Call operator new with |constructor| and the first argument in
-// register |first_arg| and |arg_count| arguments in subsequent
+// Call the JS runtime function that has the |context_index| with the receiver
+// in register |receiver| and |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
+ DoCallJSRuntimeCommon(assembler);
+}
+
+
+// CallJSRuntimeWide <context_index> <receiver> <arg_count>
//
-void Interpreter::DoNew(compiler::InterpreterAssembler* assembler) {
+// Call the JS runtime function that has the |context_index| with the receiver
+// in register |receiver| and |arg_count| arguments in subsequent registers.
+void Interpreter::DoCallJSRuntimeWide(InterpreterAssembler* assembler) {
+ DoCallJSRuntimeCommon(assembler);
+}
+
+void Interpreter::DoCallConstruct(InterpreterAssembler* assembler) {
Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
+ Node* new_target = __ GetAccumulator();
Node* constructor_reg = __ BytecodeOperandReg(0);
Node* constructor = __ LoadRegister(constructor_reg);
Node* first_arg_reg = __ BytecodeOperandReg(1);
Node* first_arg = __ RegisterLocation(first_arg_reg);
Node* args_count = __ BytecodeOperandCount(2);
+ Node* context = __ GetContext();
Node* result =
- __ CallConstruct(constructor, constructor, first_arg, args_count);
+ __ CallConstruct(constructor, context, new_target, first_arg, args_count);
__ SetAccumulator(result);
__ Dispatch();
}
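// Illustration of the construct calling convention implemented above, per
// the New bytecode comment below: for `new C(x, y)` the constructor C sits
// in a register, new.target (here C itself) comes from the accumulator, and
// x and y occupy consecutive argument registers.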
+// New <constructor> <first_arg> <arg_count>
+//
+// Call operator new with |constructor| and the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers. The new.target is in the accumulator.
+//
+void Interpreter::DoNew(InterpreterAssembler* assembler) {
+ DoCallConstruct(assembler);
+}
+
+
+// NewWide <constructor> <first_arg> <arg_count>
+//
+// Call operator new with |constructor| and the first argument in
+// register |first_arg| and |arg_count| arguments in subsequent
+// registers. The new.target is in the accumulator.
+//
+void Interpreter::DoNewWide(InterpreterAssembler* assembler) {
+ DoCallConstruct(assembler);
+}
+
+
// TestEqual <src>
//
// Test if the value in the <src> register equals the accumulator.
-void Interpreter::DoTestEqual(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kInterpreterEquals, assembler);
}
@@ -1184,7 +1146,7 @@ void Interpreter::DoTestEqual(compiler::InterpreterAssembler* assembler) {
// TestNotEqual <src>
//
// Test if the value in the <src> register is not equal to the accumulator.
-void Interpreter::DoTestNotEqual(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestNotEqual(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kInterpreterNotEquals, assembler);
}
@@ -1192,7 +1154,7 @@ void Interpreter::DoTestNotEqual(compiler::InterpreterAssembler* assembler) {
// TestEqualStrict <src>
//
// Test if the value in the <src> register is strictly equal to the accumulator.
-void Interpreter::DoTestEqualStrict(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kInterpreterStrictEquals, assembler);
}
@@ -1201,8 +1163,7 @@ void Interpreter::DoTestEqualStrict(compiler::InterpreterAssembler* assembler) {
//
// Test if the value in the <src> register is not strictly equal to the
// accumulator.
-void Interpreter::DoTestNotEqualStrict(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestNotEqualStrict(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kInterpreterStrictNotEquals, assembler);
}
@@ -1210,7 +1171,7 @@ void Interpreter::DoTestNotEqualStrict(
// TestLessThan <src>
//
// Test if the value in the <src> register is less than the accumulator.
-void Interpreter::DoTestLessThan(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kInterpreterLessThan, assembler);
}
@@ -1218,7 +1179,7 @@ void Interpreter::DoTestLessThan(compiler::InterpreterAssembler* assembler) {
// TestGreaterThan <src>
//
// Test if the value in the <src> register is greater than the accumulator.
-void Interpreter::DoTestGreaterThan(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kInterpreterGreaterThan, assembler);
}
@@ -1227,8 +1188,7 @@ void Interpreter::DoTestGreaterThan(compiler::InterpreterAssembler* assembler) {
//
// Test if the value in the <src> register is less than or equal to the
// accumulator.
-void Interpreter::DoTestLessThanOrEqual(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kInterpreterLessThanOrEqual, assembler);
}
@@ -1237,8 +1197,7 @@ void Interpreter::DoTestLessThanOrEqual(
//
// Test if the value in the <src> register is greater than or equal to the
// accumulator.
-void Interpreter::DoTestGreaterThanOrEqual(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kInterpreterGreaterThanOrEqual, assembler);
}
@@ -1247,7 +1206,7 @@ void Interpreter::DoTestGreaterThanOrEqual(
//
// Test if the object referenced by the register operand is a property of the
// object referenced by the accumulator.
-void Interpreter::DoTestIn(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kHasProperty, assembler);
}
@@ -1256,7 +1215,7 @@ void Interpreter::DoTestIn(compiler::InterpreterAssembler* assembler) {
//
// Test if the object referenced by the <src> register is an instance of the type
// referenced by the accumulator.
-void Interpreter::DoTestInstanceOf(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
DoBinaryOp(Runtime::kInstanceOf, assembler);
}
@@ -1264,9 +1223,10 @@ void Interpreter::DoTestInstanceOf(compiler::InterpreterAssembler* assembler) {
// ToName
//
// Cast the object referenced by the accumulator to a name.
-void Interpreter::DoToName(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoToName(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* result = __ CallRuntime(Runtime::kToName, accumulator);
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntime(Runtime::kToName, context, accumulator);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1275,9 +1235,10 @@ void Interpreter::DoToName(compiler::InterpreterAssembler* assembler) {
// ToNumber
//
// Cast the object referenced by the accumulator to a number.
-void Interpreter::DoToNumber(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* result = __ CallRuntime(Runtime::kToNumber, accumulator);
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntime(Runtime::kToNumber, context, accumulator);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1286,9 +1247,10 @@ void Interpreter::DoToNumber(compiler::InterpreterAssembler* assembler) {
// ToObject
//
// Cast the object referenced by the accumulator to a JSObject.
-void Interpreter::DoToObject(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoToObject(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* result = __ CallRuntime(Runtime::kToObject, accumulator);
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntime(Runtime::kToObject, context, accumulator);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1297,7 +1259,7 @@ void Interpreter::DoToObject(compiler::InterpreterAssembler* assembler) {
// Jump <imm8>
//
// Jump by number of bytes represented by the immediate operand |imm8|.
-void Interpreter::DoJump(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJump(InterpreterAssembler* assembler) {
Node* relative_jump = __ BytecodeOperandImm(0);
__ Jump(relative_jump);
}
@@ -1306,7 +1268,7 @@ void Interpreter::DoJump(compiler::InterpreterAssembler* assembler) {
// JumpConstant <idx8>
//
// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool.
-void Interpreter::DoJumpConstant(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
Node* relative_jump = __ SmiUntag(constant);
@@ -1318,8 +1280,7 @@ void Interpreter::DoJumpConstant(compiler::InterpreterAssembler* assembler) {
//
// Jump by number of bytes in the Smi in the |idx16| entry in the
// constant pool.
-void Interpreter::DoJumpConstantWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpConstantWide(InterpreterAssembler* assembler) {
DoJumpConstant(assembler);
}
@@ -1328,7 +1289,7 @@ void Interpreter::DoJumpConstantWide(
//
// Jump by number of bytes represented by an immediate operand if the
// accumulator contains true.
-void Interpreter::DoJumpIfTrue(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* relative_jump = __ BytecodeOperandImm(0);
Node* true_value = __ BooleanConstant(true);
@@ -1340,8 +1301,7 @@ void Interpreter::DoJumpIfTrue(compiler::InterpreterAssembler* assembler) {
//
// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the accumulator contains true.
-void Interpreter::DoJumpIfTrueConstant(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
@@ -1355,8 +1315,7 @@ void Interpreter::DoJumpIfTrueConstant(
//
// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
// if the accumulator contains true.
-void Interpreter::DoJumpIfTrueConstantWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfTrueConstantWide(InterpreterAssembler* assembler) {
DoJumpIfTrueConstant(assembler);
}
@@ -1365,7 +1324,7 @@ void Interpreter::DoJumpIfTrueConstantWide(
//
// Jump by number of bytes represented by an immediate operand if the
// accumulator contains false.
-void Interpreter::DoJumpIfFalse(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* relative_jump = __ BytecodeOperandImm(0);
Node* false_value = __ BooleanConstant(false);
@@ -1377,8 +1336,7 @@ void Interpreter::DoJumpIfFalse(compiler::InterpreterAssembler* assembler) {
//
// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the accumulator contains false.
-void Interpreter::DoJumpIfFalseConstant(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
@@ -1392,8 +1350,7 @@ void Interpreter::DoJumpIfFalseConstant(
//
// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
// if the accumulator contains false.
-void Interpreter::DoJumpIfFalseConstantWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfFalseConstantWide(InterpreterAssembler* assembler) {
DoJumpIfFalseConstant(assembler);
}
@@ -1402,11 +1359,11 @@ void Interpreter::DoJumpIfFalseConstantWide(
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is true when the object is cast to boolean.
-void Interpreter::DoJumpIfToBooleanTrue(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
+ Node* context = __ GetContext();
Node* to_boolean_value =
- __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
Node* relative_jump = __ BytecodeOperandImm(0);
Node* true_value = __ BooleanConstant(true);
__ JumpIfWordEqual(to_boolean_value, true_value, relative_jump);
@@ -1419,10 +1376,11 @@ void Interpreter::DoJumpIfToBooleanTrue(
// if the object referenced by the accumulator is true when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanTrueConstant(
- compiler::InterpreterAssembler* assembler) {
+ InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
+ Node* context = __ GetContext();
Node* to_boolean_value =
- __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
Node* relative_jump = __ SmiUntag(constant);
@@ -1437,7 +1395,7 @@ void Interpreter::DoJumpIfToBooleanTrueConstant(
// if the object referenced by the accumulator is true when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanTrueConstantWide(
- compiler::InterpreterAssembler* assembler) {
+ InterpreterAssembler* assembler) {
DoJumpIfToBooleanTrueConstant(assembler);
}
@@ -1446,11 +1404,11 @@ void Interpreter::DoJumpIfToBooleanTrueConstantWide(
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is false when the object is cast to boolean.
-void Interpreter::DoJumpIfToBooleanFalse(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
+ Node* context = __ GetContext();
Node* to_boolean_value =
- __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
Node* relative_jump = __ BytecodeOperandImm(0);
Node* false_value = __ BooleanConstant(false);
__ JumpIfWordEqual(to_boolean_value, false_value, relative_jump);
@@ -1463,10 +1421,11 @@ void Interpreter::DoJumpIfToBooleanFalse(
// if the object referenced by the accumulator is false when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanFalseConstant(
- compiler::InterpreterAssembler* assembler) {
+ InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
+ Node* context = __ GetContext();
Node* to_boolean_value =
- __ CallRuntime(Runtime::kInterpreterToBoolean, accumulator);
+ __ CallRuntime(Runtime::kInterpreterToBoolean, context, accumulator);
Node* index = __ BytecodeOperandIdx(0);
Node* constant = __ LoadConstantPoolEntry(index);
Node* relative_jump = __ SmiUntag(constant);
@@ -1481,7 +1440,7 @@ void Interpreter::DoJumpIfToBooleanFalseConstant(
// if the object referenced by the accumulator is false when the object is cast
// to boolean.
void Interpreter::DoJumpIfToBooleanFalseConstantWide(
- compiler::InterpreterAssembler* assembler) {
+ InterpreterAssembler* assembler) {
DoJumpIfToBooleanFalseConstant(assembler);
}
@@ -1490,7 +1449,7 @@ void Interpreter::DoJumpIfToBooleanFalseConstantWide(
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNull(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
Node* relative_jump = __ BytecodeOperandImm(0);
@@ -1502,8 +1461,7 @@ void Interpreter::DoJumpIfNull(compiler::InterpreterAssembler* assembler) {
//
// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the object referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNullConstant(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
Node* index = __ BytecodeOperandIdx(0);
@@ -1517,17 +1475,15 @@ void Interpreter::DoJumpIfNullConstant(
//
// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
// if the object referenced by the accumulator is the null constant.
-void Interpreter::DoJumpIfNullConstantWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfNullConstantWide(InterpreterAssembler* assembler) {
DoJumpIfNullConstant(assembler);
}
-
-// jumpifundefined <imm8>
+// JumpIfUndefined <imm8>
//
// Jump by number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the undefined constant.
-void Interpreter::DoJumpIfUndefined(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* undefined_value =
__ HeapConstant(isolate_->factory()->undefined_value());
@@ -1540,8 +1496,7 @@ void Interpreter::DoJumpIfUndefined(compiler::InterpreterAssembler* assembler) {
//
// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
// if the object referenced by the accumulator is the undefined constant.
-void Interpreter::DoJumpIfUndefinedConstant(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* undefined_value =
__ HeapConstant(isolate_->factory()->undefined_value());
@@ -1557,13 +1512,44 @@ void Interpreter::DoJumpIfUndefinedConstant(
// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
// if the object referenced by the accumulator is the undefined constant.
void Interpreter::DoJumpIfUndefinedConstantWide(
- compiler::InterpreterAssembler* assembler) {
+ InterpreterAssembler* assembler) {
DoJumpIfUndefinedConstant(assembler);
}
+// JumpIfNotHole <imm8>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is not the hole constant.
+void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
+ Node* relative_jump = __ BytecodeOperandImm(0);
+ __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
+}
+
+// JumpIfNotHoleConstant <idx8>
+//
+// Jump by number of bytes in the Smi in the |idx8| entry in the constant pool
+// if the object referenced by the accumulator is not the hole constant.
+void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant = __ LoadConstantPoolEntry(index);
+ Node* relative_jump = __ SmiUntag(constant);
+ __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
+}
+
+// JumpIfNotHoleConstantWide <idx16>
+//
+// Jump by number of bytes in the Smi in the |idx16| entry in the constant pool
+// if the object referenced by the accumulator is not the hole constant.
+void Interpreter::DoJumpIfNotHoleConstantWide(InterpreterAssembler* assembler) {
+ DoJumpIfNotHoleConstant(assembler);
+}
void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
- compiler::InterpreterAssembler* assembler) {
+ InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* constant_elements = __ LoadConstantPoolEntry(index);
Node* literal_index_raw = __ BytecodeOperandIdx(1);
@@ -1571,7 +1557,8 @@ void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
Node* flags_raw = __ BytecodeOperandImm(2);
Node* flags = __ SmiTag(flags_raw);
Node* closure = __ LoadRegister(Register::function_closure());
- Node* result = __ CallRuntime(function_id, closure, literal_index,
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntime(function_id, context, closure, literal_index,
constant_elements, flags);
__ SetAccumulator(result);
__ Dispatch();
@@ -1582,8 +1569,7 @@ void Interpreter::DoCreateLiteral(Runtime::FunctionId function_id,
//
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
-void Interpreter::DoCreateRegExpLiteral(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
}
@@ -1592,8 +1578,7 @@ void Interpreter::DoCreateRegExpLiteral(
//
// Creates a regular expression literal for literal index <literal_idx> with
// <flags> and the pattern in <pattern_idx>.
-void Interpreter::DoCreateRegExpLiteralWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateRegExpLiteralWide(InterpreterAssembler* assembler) {
DoCreateLiteral(Runtime::kCreateRegExpLiteral, assembler);
}
@@ -1602,8 +1587,7 @@ void Interpreter::DoCreateRegExpLiteralWide(
//
// Creates an array literal for literal index <literal_idx> with flags <flags>
// and constant elements in <element_idx>.
-void Interpreter::DoCreateArrayLiteral(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
}
@@ -1612,8 +1596,7 @@ void Interpreter::DoCreateArrayLiteral(
//
// Creates an array literal for literal index <literal_idx> with flags <flags>
// and constant elements in <element_idx>.
-void Interpreter::DoCreateArrayLiteralWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateArrayLiteralWide(InterpreterAssembler* assembler) {
DoCreateLiteral(Runtime::kCreateArrayLiteral, assembler);
}
@@ -1622,8 +1605,7 @@ void Interpreter::DoCreateArrayLiteralWide(
//
// Creates an object literal for literal index <literal_idx> with flags <flags>
// and constant elements in <element_idx>.
-void Interpreter::DoCreateObjectLiteral(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
}
@@ -1632,8 +1614,7 @@ void Interpreter::DoCreateObjectLiteral(
//
// Creates an object literal for literal index <literal_idx> with flags <flags>
// and constant elements in <element_idx>.
-void Interpreter::DoCreateObjectLiteralWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateObjectLiteralWide(InterpreterAssembler* assembler) {
DoCreateLiteral(Runtime::kCreateObjectLiteral, assembler);
}
@@ -1642,15 +1623,16 @@ void Interpreter::DoCreateObjectLiteralWide(
//
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with the PretenureFlag <tenured>.
-void Interpreter::DoCreateClosure(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
// TODO(rmcilroy): Possibly call FastNewClosureStub when possible instead of
// calling into the runtime.
Node* index = __ BytecodeOperandIdx(0);
Node* shared = __ LoadConstantPoolEntry(index);
Node* tenured_raw = __ BytecodeOperandImm(1);
Node* tenured = __ SmiTag(tenured_raw);
+ Node* context = __ GetContext();
Node* result =
- __ CallRuntime(Runtime::kInterpreterNewClosure, shared, tenured);
+ __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared, tenured);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1660,8 +1642,7 @@ void Interpreter::DoCreateClosure(compiler::InterpreterAssembler* assembler) {
//
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with the PretenureFlag <tenured>.
-void Interpreter::DoCreateClosureWide(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateClosureWide(InterpreterAssembler* assembler) {
return DoCreateClosure(assembler);
}
@@ -1669,10 +1650,11 @@ void Interpreter::DoCreateClosureWide(
// CreateMappedArguments
//
// Creates a new mapped arguments object.
-void Interpreter::DoCreateMappedArguments(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
Node* closure = __ LoadRegister(Register::function_closure());
- Node* result = __ CallRuntime(Runtime::kNewSloppyArguments_Generic, closure);
+ Node* context = __ GetContext();
+ Node* result =
+ __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1681,21 +1663,56 @@ void Interpreter::DoCreateMappedArguments(
// CreateUnmappedArguments
//
// Creates a new unmapped arguments object.
-void Interpreter::DoCreateUnmappedArguments(
- compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
+ Callable callable = CodeFactory::FastNewStrictArguments(isolate_);
+ Node* target = __ HeapConstant(callable.code());
+ Node* context = __ GetContext();
Node* closure = __ LoadRegister(Register::function_closure());
- Node* result = __ CallRuntime(Runtime::kNewStrictArguments_Generic, closure);
+ Node* result = __ CallStub(callable.descriptor(), target, context, closure);
__ SetAccumulator(result);
__ Dispatch();
}
+// CreateRestParameter
+//
+// Creates a new rest parameter array.
+void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
+ Callable callable = CodeFactory::FastNewRestParameter(isolate_);
+ Node* target = __ HeapConstant(callable.code());
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* context = __ GetContext();
+ Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
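
DoCreateUnmappedArguments and DoCreateRestParameter switch from a runtime call to a stub call: CodeFactory hands back a Callable pairing a code object with its call descriptor, and CallStub invokes it as (descriptor, target, context, args...). A toy stand-in for that shape, assuming nothing about the real descriptor contents:

#include <cstdio>

struct Descriptor { int parameter_count; };  // calling-convention info
struct Callable { void (*code)(int); Descriptor descriptor; };

void FastNewRestParameterStub(int closure) {
  std::printf("building rest array for closure %d\n", closure);
}

// In V8 the descriptor drives register assignment; here it is just carried.
void CallStub(const Descriptor& descriptor, void (*target)(int), int closure) {
  (void)descriptor;
  target(closure);
}

int main() {
  Callable callable{FastNewRestParameterStub, {1}};
  CallStub(callable.descriptor, callable.code, 42);
}
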
+
+// StackCheck
+//
+// Performs a stack guard check.
+void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
+ __ StackCheck();
+ __ Dispatch();
+}
// Throw
//
// Throws the exception in the accumulator.
-void Interpreter::DoThrow(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoThrow(InterpreterAssembler* assembler) {
+ Node* exception = __ GetAccumulator();
+ Node* context = __ GetContext();
+ __ CallRuntime(Runtime::kThrow, context, exception);
+ // We shouldn't ever return from a throw.
+ __ Abort(kUnexpectedReturnFromThrow);
+}
+
+
+// ReThrow
+//
+// Re-throws the exception in the accumulator.
+void Interpreter::DoReThrow(InterpreterAssembler* assembler) {
Node* exception = __ GetAccumulator();
- __ CallRuntime(Runtime::kThrow, exception);
+ Node* context = __ GetContext();
+ __ CallRuntime(Runtime::kReThrow, context, exception);
// We shouldn't ever return from a throw.
__ Abort(kUnexpectedReturnFromThrow);
}
@@ -1704,59 +1721,105 @@ void Interpreter::DoThrow(compiler::InterpreterAssembler* assembler) {
// Return
//
// Return the value in the accumulator.
-void Interpreter::DoReturn(compiler::InterpreterAssembler* assembler) {
- __ Return();
+void Interpreter::DoReturn(InterpreterAssembler* assembler) {
+ __ InterpreterReturn();
}
+// Debugger
+//
+// Call runtime to handle debugger statement.
+void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
+ Node* context = __ GetContext();
+ __ CallRuntime(Runtime::kHandleDebuggerStatement, context);
+ __ Dispatch();
+}
-// ForInPrepare <cache_type> <cache_array> <cache_length>
+// DebugBreak
+//
+// Call runtime to handle a debug break.
+#define DEBUG_BREAK(Name, ...) \
+ void Interpreter::Do##Name(InterpreterAssembler* assembler) { \
+ Node* context = __ GetContext(); \
+ Node* original_handler = __ CallRuntime(Runtime::kDebugBreak, context); \
+ __ DispatchToBytecodeHandler(original_handler); \
+ }
+DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
+#undef DEBUG_BREAK
+
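
The DEBUG_BREAK definition above is an X-macro: DEBUG_BREAK_BYTECODE_LIST carries the list of names and applies DEBUG_BREAK to each one, stamping out one handler per debug-break bytecode. A stand-alone illustration of the idiom (the list entries below are made up, not the real bytecode list):

#include <cstdio>

// One macro carries the list; another is applied to every entry.
#define MY_BYTECODE_LIST(V) \
  V(DebugBreak0)            \
  V(DebugBreak1)

#define DEFINE_HANDLER(Name) \
  void Do##Name() { std::printf("handler for " #Name "\n"); }
MY_BYTECODE_LIST(DEFINE_HANDLER)
#undef DEFINE_HANDLER

int main() {
  DoDebugBreak0();  // both handlers were generated by the expansion
  DoDebugBreak1();
}
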
+// ForInPrepare <cache_info_triple>
//
// Returns state for for..in loop execution based on the object in the
-// accumulator. The registers |cache_type|, |cache_array|, and
-// |cache_length| represent output parameters.
-void Interpreter::DoForInPrepare(compiler::InterpreterAssembler* assembler) {
+// accumulator. The result is output in registers |cache_info_triple| to
+// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
+// and cache_length respectively.
+void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
Node* object = __ GetAccumulator();
- Node* result = __ CallRuntime(Runtime::kInterpreterForInPrepare, object);
+ Node* context = __ GetContext();
+ Node* result_triple = __ CallRuntime(Runtime::kForInPrepare, context, object);
+
+ // Set output registers:
+ // 0 == cache_type, 1 == cache_array, 2 == cache_length
+ Node* output_register = __ BytecodeOperandReg(0);
for (int i = 0; i < 3; i++) {
- // 0 == cache_type, 1 == cache_array, 2 == cache_length
- Node* cache_info = __ LoadFixedArrayElement(result, i);
- Node* cache_info_reg = __ BytecodeOperandReg(i);
- __ StoreRegister(cache_info, cache_info_reg);
+ Node* cache_info = __ Projection(i, result_triple);
+ __ StoreRegister(cache_info, output_register);
+ output_register = __ NextRegister(output_register);
}
- __ SetAccumulator(result);
__ Dispatch();
}
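
The loop above writes the three projections of the runtime result into consecutive registers, starting at the register named by operand 0 and stepping with NextRegister. A sketch of that output convention, with a plain array standing in for the register file:

#include <cassert>

int main() {
  int register_file[16] = {0};          // stand-in for interpreter registers
  int cache_info_triple = 4;            // register named by operand 0
  int projections[3] = {100, 200, 3};   // cache_type, cache_array, cache_length
  for (int i = 0; i < 3; i++) {
    // StoreRegister followed by NextRegister in the handler above.
    register_file[cache_info_triple + i] = projections[i];
  }
  assert(register_file[4] == 100);
  assert(register_file[5] == 200);
  assert(register_file[6] == 3);
}
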
-// ForInNext <receiver> <cache_type> <cache_array> <index>
+// ForInPrepareWide <cache_info_triple>
+//
+// Returns state for for..in loop execution based on the object in the
+// accumulator. The result is output in registers |cache_info_triple| to
+// |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
+// and cache_length respectively.
+void Interpreter::DoForInPrepareWide(InterpreterAssembler* assembler) {
+ DoForInPrepare(assembler);
+}
+
+
+// ForInNext <receiver> <index> <cache_info_pair>
//
// Returns the next enumerable property in the accumulator.
-void Interpreter::DoForInNext(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
Node* receiver_reg = __ BytecodeOperandReg(0);
Node* receiver = __ LoadRegister(receiver_reg);
- Node* cache_type_reg = __ BytecodeOperandReg(1);
+ Node* index_reg = __ BytecodeOperandReg(1);
+ Node* index = __ LoadRegister(index_reg);
+ Node* cache_type_reg = __ BytecodeOperandReg(2);
Node* cache_type = __ LoadRegister(cache_type_reg);
- Node* cache_array_reg = __ BytecodeOperandReg(2);
+ Node* cache_array_reg = __ NextRegister(cache_type_reg);
Node* cache_array = __ LoadRegister(cache_array_reg);
- Node* index_reg = __ BytecodeOperandReg(3);
- Node* index = __ LoadRegister(index_reg);
- Node* result = __ CallRuntime(Runtime::kForInNext, receiver, cache_array,
- cache_type, index);
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntime(Runtime::kForInNext, context, receiver,
+ cache_array, cache_type, index);
__ SetAccumulator(result);
__ Dispatch();
}
+// ForInNextWide <receiver> <index> <cache_info_pair>
+//
+// Returns the next enumerable property in the accumulator.
+void Interpreter::DoForInNextWide(InterpreterAssembler* assembler) {
+ return DoForInNext(assembler);
+}
+
+
// ForInDone <index> <cache_length>
//
// Returns true if the end of the enumerable properties has been reached.
-void Interpreter::DoForInDone(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoForInDone(InterpreterAssembler* assembler) {
// TODO(oth): Implement directly rather than making a runtime call.
Node* index_reg = __ BytecodeOperandReg(0);
Node* index = __ LoadRegister(index_reg);
Node* cache_length_reg = __ BytecodeOperandReg(1);
Node* cache_length = __ LoadRegister(cache_length_reg);
- Node* result = __ CallRuntime(Runtime::kForInDone, index, cache_length);
+ Node* context = __ GetContext();
+ Node* result =
+ __ CallRuntime(Runtime::kForInDone, context, index, cache_length);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1766,11 +1829,12 @@ void Interpreter::DoForInDone(compiler::InterpreterAssembler* assembler) {
//
// Increments the loop counter in register |index| and stores the result
// in the accumulator.
-void Interpreter::DoForInStep(compiler::InterpreterAssembler* assembler) {
+void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
// TODO(oth): Implement directly rather than making a runtime call.
Node* index_reg = __ BytecodeOperandReg(0);
Node* index = __ LoadRegister(index_reg);
- Node* result = __ CallRuntime(Runtime::kForInStep, index);
+ Node* context = __ GetContext();
+ Node* result = __ CallRuntime(Runtime::kForInStep, context, index);
__ SetAccumulator(result);
__ Dispatch();
}
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index ef9b5d1fe3..e02e9142b3 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -21,91 +21,113 @@ class Isolate;
class Callable;
class CompilationInfo;
-namespace compiler {
-class InterpreterAssembler;
-}
-
namespace interpreter {
+class InterpreterAssembler;
+
class Interpreter {
public:
explicit Interpreter(Isolate* isolate);
virtual ~Interpreter() {}
- // Creates an uninitialized interpreter handler table, where each handler
- // points to the Illegal builtin.
- static Handle<FixedArray> CreateUninitializedInterpreterTable(
- Isolate* isolate);
-
- // Initializes the interpreter.
+ // Initializes the interpreter dispatch table.
void Initialize();
+ // Returns the interrupt budget which should be used for the profiler counter.
+ static int InterruptBudget();
+
// Generate bytecode for |info|.
static bool MakeBytecode(CompilationInfo* info);
+ // Return bytecode handler for |bytecode|.
+ Code* GetBytecodeHandler(Bytecode bytecode);
+
+ // GC support.
+ void IterateDispatchTable(ObjectVisitor* v);
+
+ void TraceCodegen(Handle<Code> code, const char* name);
+
+ Address dispatch_table_address() {
+ return reinterpret_cast<Address>(&dispatch_table_[0]);
+ }
+
private:
// Bytecode handler generator functions.
#define DECLARE_BYTECODE_HANDLER_GENERATOR(Name, ...) \
- void Do##Name(compiler::InterpreterAssembler* assembler);
+ void Do##Name(InterpreterAssembler* assembler);
BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
#undef DECLARE_BYTECODE_HANDLER_GENERATOR
// Generates code to perform the binary operations via |function_id|.
void DoBinaryOp(Runtime::FunctionId function_id,
- compiler::InterpreterAssembler* assembler);
+ InterpreterAssembler* assembler);
// Generates code to perform the count operations via |function_id|.
void DoCountOp(Runtime::FunctionId function_id,
- compiler::InterpreterAssembler* assembler);
+ InterpreterAssembler* assembler);
// Generates code to perform the comparison operation associated with
// |compare_op|.
- void DoCompareOp(Token::Value compare_op,
- compiler::InterpreterAssembler* assembler);
+ void DoCompareOp(Token::Value compare_op, InterpreterAssembler* assembler);
// Generates code to load a constant from the constant pool.
- void DoLoadConstant(compiler::InterpreterAssembler* assembler);
+ void DoLoadConstant(InterpreterAssembler* assembler);
// Generates code to perform a global load via |ic|.
- void DoLoadGlobal(Callable ic, compiler::InterpreterAssembler* assembler);
+ void DoLoadGlobal(Callable ic, InterpreterAssembler* assembler);
// Generates code to perform a global store via |ic|.
- void DoStoreGlobal(Callable ic, compiler::InterpreterAssembler* assembler);
+ void DoStoreGlobal(Callable ic, InterpreterAssembler* assembler);
// Generates code to perform a named property load via |ic|.
- void DoLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
+ void DoLoadIC(Callable ic, InterpreterAssembler* assembler);
// Generates code to perform a keyed property load via |ic|.
- void DoKeyedLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
+ void DoKeyedLoadIC(Callable ic, InterpreterAssembler* assembler);
// Generates code to perform a named property store via |ic|.
- void DoStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);
+ void DoStoreIC(Callable ic, InterpreterAssembler* assembler);
// Generates code to perform a keyed property store via |ic|.
- void DoKeyedStoreIC(Callable ic, compiler::InterpreterAssembler* assembler);
+ void DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler);
// Generates code to perform a JS call.
- void DoJSCall(compiler::InterpreterAssembler* assembler);
+ void DoJSCall(InterpreterAssembler* assembler, TailCallMode tail_call_mode);
+
+ // Generates code to perform a runtime call.
+ void DoCallRuntimeCommon(InterpreterAssembler* assembler);
+
+ // Generates code to perform a runtime call returning a pair.
+ void DoCallRuntimeForPairCommon(InterpreterAssembler* assembler);
+
+ // Generates code to perform a JS runtime call.
+ void DoCallJSRuntimeCommon(InterpreterAssembler* assembler);
+
+ // Generates code to perform a constructor call.
+ void DoCallConstruct(InterpreterAssembler* assembler);
// Generates code to create a literal via |function_id|.
void DoCreateLiteral(Runtime::FunctionId function_id,
- compiler::InterpreterAssembler* assembler);
+ InterpreterAssembler* assembler);
// Generates code to perform delete via function_id.
void DoDelete(Runtime::FunctionId function_id,
- compiler::InterpreterAssembler* assembler);
+ InterpreterAssembler* assembler);
// Generates code to perform a lookup slot load via |function_id|.
void DoLoadLookupSlot(Runtime::FunctionId function_id,
- compiler::InterpreterAssembler* assembler);
+ InterpreterAssembler* assembler);
// Generates code to perform a lookup slot store depending on |language_mode|.
void DoStoreLookupSlot(LanguageMode language_mode,
- compiler::InterpreterAssembler* assembler);
+ InterpreterAssembler* assembler);
+
+ bool IsDispatchTableInitialized();
- bool IsInterpreterTableInitialized(Handle<FixedArray> handler_table);
+ static const int kDispatchTableSize = static_cast<int>(Bytecode::kLast) + 1;
Isolate* isolate_;
+ Code* dispatch_table_[kDispatchTableSize];
DISALLOW_COPY_AND_ASSIGN(Interpreter);
};
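
The replaced FixedArray handler table becomes a fixed-size C array of Code*, sized one slot past Bytecode::kLast and indexed by the bytecode's integer value; GetBytecodeHandler, IterateDispatchTable, and dispatch_table_address all operate on it. A reduced sketch of the indexing scheme (toy types, not V8's):

#include <cassert>

// Reduced bytecode set; kLast aliases the final entry as in the real enum.
enum class Bytecode : int { kJump, kReturn, kLast = kReturn };
using Handler = int;  // stands in for Code*

constexpr int kDispatchTableSize = static_cast<int>(Bytecode::kLast) + 1;
Handler dispatch_table[kDispatchTableSize] = {};

Handler GetBytecodeHandler(Bytecode bytecode) {
  return dispatch_table[static_cast<int>(bytecode)];
}

int main() {
  dispatch_table[static_cast<int>(Bytecode::kReturn)] = 7;  // install handler
  assert(GetBytecodeHandler(Bytecode::kReturn) == 7);
}
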
diff --git a/deps/v8/src/interpreter/register-translator.cc b/deps/v8/src/interpreter/register-translator.cc
new file mode 100644
index 0000000000..3eba42f0dc
--- /dev/null
+++ b/deps/v8/src/interpreter/register-translator.cc
@@ -0,0 +1,173 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/register-translator.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+RegisterTranslator::RegisterTranslator(RegisterMover* mover)
+ : mover_(mover),
+ emitting_moves_(false),
+ window_registers_count_(0),
+ output_moves_count_(0) {}
+
+void RegisterTranslator::TranslateInputRegisters(Bytecode bytecode,
+ uint32_t* raw_operands,
+ int raw_operand_count) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), raw_operand_count);
+ if (!emitting_moves_) {
+ emitting_moves_ = true;
+ DCHECK_EQ(window_registers_count_, 0);
+ int register_bitmap = Bytecodes::GetRegisterOperandBitmap(bytecode);
+ for (int i = 0; i < raw_operand_count; i++) {
+ if ((register_bitmap & (1 << i)) == 0) {
+ continue;
+ }
+ Register in_reg = Register::FromRawOperand(raw_operands[i]);
+ Register out_reg = TranslateAndMove(bytecode, i, in_reg);
+ raw_operands[i] = out_reg.ToRawOperand();
+ }
+ window_registers_count_ = 0;
+ emitting_moves_ = false;
+ } else {
+ // When the register translator is translating registers, it will
+ // cause the bytecode generator to emit moves on its behalf. This
+ // path is reached by these moves.
+ DCHECK(bytecode == Bytecode::kMovWide && raw_operand_count == 2 &&
+ Register::FromRawOperand(raw_operands[0]).is_valid() &&
+ Register::FromRawOperand(raw_operands[1]).is_valid());
+ }
+}
+
+Register RegisterTranslator::TranslateAndMove(Bytecode bytecode,
+ int operand_index, Register reg) {
+ if (FitsInReg8Operand(reg)) {
+ return reg;
+ }
+
+ OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
+ OperandSize operand_size = Bytecodes::SizeOfOperand(operand_type);
+ if (operand_size == OperandSize::kShort) {
+ CHECK(FitsInReg16Operand(reg));
+ return Translate(reg);
+ }
+
+ CHECK((operand_type == OperandType::kReg8 ||
+ operand_type == OperandType::kRegOut8) &&
+ RegisterIsMovableToWindow(bytecode, operand_index));
+ Register translated_reg = Translate(reg);
+ Register window_reg(kTranslationWindowStart + window_registers_count_);
+ window_registers_count_ += 1;
+ if (Bytecodes::IsRegisterInputOperandType(operand_type)) {
+ DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_type));
+ mover()->MoveRegisterUntranslated(translated_reg, window_reg);
+ } else if (Bytecodes::IsRegisterOutputOperandType(operand_type)) {
+ DCHECK_LT(output_moves_count_, kTranslationWindowLength);
+ output_moves_[output_moves_count_] =
+ std::make_pair(window_reg, translated_reg);
+ output_moves_count_ += 1;
+ } else {
+ UNREACHABLE();
+ }
+ return window_reg;
+}
+
+// static
+bool RegisterTranslator::RegisterIsMovableToWindow(Bytecode bytecode,
+ int operand_index) {
+ // By design, we only support moving individual registers. There
+ // should be wide variants of such bytecodes instead to avoid the
+ // need for a large translation window.
+ OperandType operand_type = Bytecodes::GetOperandType(bytecode, operand_index);
+ if (operand_type != OperandType::kReg8 &&
+ operand_type != OperandType::kRegOut8) {
+ return false;
+ } else if (operand_index + 1 == Bytecodes::NumberOfOperands(bytecode)) {
+ return true;
+ } else {
+ OperandType next_operand_type =
+ Bytecodes::GetOperandType(bytecode, operand_index + 1);
+ return (next_operand_type != OperandType::kRegCount8 &&
+ next_operand_type != OperandType::kRegCount16);
+ }
+}
+
+void RegisterTranslator::TranslateOutputRegisters() {
+ if (!emitting_moves_) {
+ emitting_moves_ = true;
+ while (output_moves_count_ > 0) {
+ output_moves_count_ -= 1;
+ mover()->MoveRegisterUntranslated(
+ output_moves_[output_moves_count_].first,
+ output_moves_[output_moves_count_].second);
+ }
+ emitting_moves_ = false;
+ }
+}
+
+// static
+Register RegisterTranslator::Translate(Register reg) {
+ if (reg.index() >= kTranslationWindowStart) {
+ return Register(reg.index() + kTranslationWindowLength);
+ } else {
+ return reg;
+ }
+}
+
+// static
+bool RegisterTranslator::InTranslationWindow(Register reg) {
+ return (reg.index() >= kTranslationWindowStart &&
+ reg.index() <= kTranslationWindowLimit);
+}
+
+// static
+Register RegisterTranslator::UntranslateRegister(Register reg) {
+ if (reg.index() >= kTranslationWindowStart) {
+ return Register(reg.index() - kTranslationWindowLength);
+ } else {
+ return reg;
+ }
+}
+
+// static
+int RegisterTranslator::DistanceToTranslationWindow(Register reg) {
+ return kTranslationWindowStart - reg.index();
+}
+
+// static
+bool RegisterTranslator::FitsInReg8Operand(Register reg) {
+ return reg.is_byte_operand() && reg.index() < kTranslationWindowStart;
+}
+
+// static
+bool RegisterTranslator::FitsInReg16Operand(Register reg) {
+ int max_index = Register::MaxRegisterIndex() - kTranslationWindowLength + 1;
+ return reg.is_short_operand() && reg.index() < max_index;
+}
+
+// static
+int RegisterTranslator::RegisterCountAdjustment(int register_count,
+ int parameter_count) {
+ if (register_count > kTranslationWindowStart) {
+ return kTranslationWindowLength;
+ } else if (parameter_count > 0) {
+ Register param0 = Register::FromParameterIndex(0, parameter_count);
+ if (!param0.is_byte_operand()) {
+ // TODO(oth): Number of parameters means translation is
+ // required, but the translation window location is such that
+ // some space is wasted. Hopefully a rare corner case, but could
+ // relocate window to limit waste.
+ return kTranslationWindowLimit + 1 - register_count;
+ }
+ }
+ return 0;
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/register-translator.h b/deps/v8/src/interpreter/register-translator.h
new file mode 100644
index 0000000000..b683a899e2
--- /dev/null
+++ b/deps/v8/src/interpreter/register-translator.h
@@ -0,0 +1,119 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_REGISTER_TRANSLATOR_H_
+#define V8_INTERPRETER_REGISTER_TRANSLATOR_H_
+
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class RegisterMover;
+
+// A class that enables bytecodes having only byte-sized register operands
+// to access all registers in the two-byte space. Most bytecodes use few
+// registers, so space can be saved if most bytecodes with register operands
+// just take byte operands.
+//
+// To reach the wider register space, a translation window is reserved in
+// the byte addressable space specifically for copying registers into and
+// out of before a bytecode is emitted. The translation window occupies
+// the last register slots at the top of the byte addressable range.
+//
+// Because of the translation window, any registers which naturally lie
+// at or above the start of the translation window have to have their
+// register index incremented by the window width before they are emitted.
+//
+// This class does not support moving ranges of registers to and from
+// the translation window. It would be straightforward to add support
+// for constrained ranges, e.g. kRegPair8, kRegTriple8 operands, but
+// these would have two negative effects. The translation window would
+// need to be wider, further limiting the space for byte operands. And
+// every register in a range would need to be moved consuming more
+// space in the bytecode array.
+class RegisterTranslator final {
+ public:
+ explicit RegisterTranslator(RegisterMover* mover);
+
+ // Translate and re-write the register operands that are inputs
+ // to |bytecode| when it is about to be emitted.
+ void TranslateInputRegisters(Bytecode bytecode, uint32_t* raw_operands,
+ int raw_operand_count);
+
+ // Translate and re-write the register operands that are outputs
+ // from |bytecode| when it has just been output.
+ void TranslateOutputRegisters();
+
+ // Returns true if |reg| is in the translation window.
+ static bool InTranslationWindow(Register reg);
+
+ // Return register value as if it had been translated.
+ static Register UntranslateRegister(Register reg);
+
+ // Returns the distance in registers between the translation window
+ // start and |reg|. The result is negative when |reg| is above the
+ // start of the translation window.
+ static int DistanceToTranslationWindow(Register reg);
+
+ // Returns true if |reg| can be represented as an 8-bit operand
+ // after translation.
+ static bool FitsInReg8Operand(Register reg);
+
+ // Returns true if |reg| can be represented as a 16-bit operand
+ // after translation.
+ static bool FitsInReg16Operand(Register reg);
+
+ // Returns the increment to the register count necessary if the
+ // value indicates the translation window is required.
+ static int RegisterCountAdjustment(int register_count, int parameter_count);
+
+ private:
+ static const int kTranslationWindowLength = 4;
+ static const int kTranslationWindowLimit = -kMinInt8;
+ static const int kTranslationWindowStart =
+ kTranslationWindowLimit - kTranslationWindowLength + 1;
+
+ Register TranslateAndMove(Bytecode bytecode, int operand_index, Register reg);
+ static bool RegisterIsMovableToWindow(Bytecode bytecode, int operand_index);
+
+ static Register Translate(Register reg);
+
+ RegisterMover* mover() const { return mover_; }
+
+ // Entity to perform register moves necessary to translate registers
+ // and ensure reachability.
+ RegisterMover* mover_;
+
+ // Flag to avoid re-entrancy when emitting move bytecodes for
+ // translation.
+ bool emitting_moves_;
+
+ // Number of window registers in use.
+ int window_registers_count_;
+
+ // Pending moves that TranslateOutputRegisters emits to restore outputs.
+ std::pair<Register, Register> output_moves_[kTranslationWindowLength];
+ int output_moves_count_;
+};
+
+// Interface for RegisterTranslator helper class that will emit
+// register move bytecodes at the translator's behest.
+class RegisterMover {
+ public:
+ virtual ~RegisterMover() {}
+
+ // Move register |from| to register |to| with no translation.
+ // Implementations of this method must be aware that register moves
+ // with bad register values are a security hole.
+ virtual void MoveRegisterUntranslated(Register from, Register to) = 0;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_REGISTER_TRANSLATOR_H_
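
With the constants above, and assuming kMinInt8 is -128, the window occupies byte-operand indices 125 through 128, and Translate shifts any register at or above the window start up by the window length so it remains reachable. A worked stand-alone example of that arithmetic:

#include <cassert>

constexpr int kTranslationWindowLength = 4;
constexpr int kTranslationWindowLimit = 128;  // -kMinInt8 (assumed -128)
constexpr int kTranslationWindowStart =
    kTranslationWindowLimit - kTranslationWindowLength + 1;  // 125

int Translate(int reg_index) {
  return reg_index >= kTranslationWindowStart
             ? reg_index + kTranslationWindowLength
             : reg_index;
}

int main() {
  assert(Translate(10) == 10);    // below the window: unchanged
  assert(Translate(125) == 129);  // at the window start: shifted past it
  assert(Translate(200) == 204);  // above the window: shifted by its length
}
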
diff --git a/deps/v8/src/interpreter/source-position-table.cc b/deps/v8/src/interpreter/source-position-table.cc
new file mode 100644
index 0000000000..0b7c44e2d9
--- /dev/null
+++ b/deps/v8/src/interpreter/source-position-table.cc
@@ -0,0 +1,84 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/source-position-table.h"
+
+#include "src/assembler.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class IsStatementField : public BitField<bool, 0, 1> {};
+class SourcePositionField : public BitField<int, 1, 30> {};
+
+void SourcePositionTableBuilder::AddStatementPosition(size_t bytecode_offset,
+ int source_position) {
+ int offset = static_cast<int>(bytecode_offset);
+ // If a position has already been assigned to this bytecode offset,
+ // do not reassign a new statement position.
+ if (CodeOffsetHasPosition(offset)) return;
+ uint32_t encoded = IsStatementField::encode(true) |
+ SourcePositionField::encode(source_position);
+ entries_.push_back({offset, encoded});
+}
+
+void SourcePositionTableBuilder::AddExpressionPosition(size_t bytecode_offset,
+ int source_position) {
+ int offset = static_cast<int>(bytecode_offset);
+ // If a position has already been assigned to this bytecode offset,
+ // do not reassign a new expression position.
+ if (CodeOffsetHasPosition(offset)) return;
+ uint32_t encoded = IsStatementField::encode(false) |
+ SourcePositionField::encode(source_position);
+ entries_.push_back({offset, encoded});
+}
+
+void SourcePositionTableBuilder::RevertPosition(size_t bytecode_offset) {
+ int offset = static_cast<int>(bytecode_offset);
+ // If we already added a source position table entry, but the bytecode array
+ // builder ended up not outputting a bytecode for the corresponding bytecode
+ // offset, we have to remove that entry.
+ if (CodeOffsetHasPosition(offset)) entries_.pop_back();
+}
+
+Handle<FixedArray> SourcePositionTableBuilder::ToFixedArray() {
+ int length = static_cast<int>(entries_.size());
+ Handle<FixedArray> table =
+ isolate_->factory()->NewFixedArray(length * 2, TENURED);
+ for (int i = 0; i < length; i++) {
+ table->set(i * 2, Smi::FromInt(entries_[i].bytecode_offset));
+ table->set(i * 2 + 1, Smi::FromInt(entries_[i].source_position_and_type));
+ }
+ return table;
+}
+
+SourcePositionTableIterator::SourcePositionTableIterator(
+ BytecodeArray* bytecode_array)
+ : table_(bytecode_array->source_position_table()),
+ index_(0),
+ length_(table_->length()) {
+ DCHECK(table_->length() % 2 == 0);
+ Advance();
+}
+
+void SourcePositionTableIterator::Advance() {
+ if (index_ < length_) {
+ int new_bytecode_offset = Smi::cast(table_->get(index_))->value();
+ // Bytecode offsets are in ascending order.
+ DCHECK(bytecode_offset_ < new_bytecode_offset || index_ == 0);
+ bytecode_offset_ = new_bytecode_offset;
+ uint32_t source_position_and_type =
+ static_cast<uint32_t>(Smi::cast(table_->get(index_ + 1))->value());
+ is_statement_ = IsStatementField::decode(source_position_and_type);
+ source_position_ = SourcePositionField::decode(source_position_and_type);
+ }
+ index_ += 2;
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
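
IsStatementField and SourcePositionField pack each entry into a single 32-bit value: bit 0 is the is-statement flag and bits 1 through 30 hold the source position. A minimal sketch of the same encode/decode without the BitField helpers:

#include <cassert>
#include <cstdint>

// Bit 0: is-statement flag; bits 1..30: source position.
uint32_t Encode(bool is_statement, int source_position) {
  return (is_statement ? 1u : 0u) |
         (static_cast<uint32_t>(source_position) << 1);
}

int main() {
  uint32_t packed = Encode(true, 42);
  bool is_statement = (packed & 1u) != 0;  // IsStatementField::decode
  uint32_t position = packed >> 1;         // SourcePositionField::decode
  assert(is_statement && position == 42);
}
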
diff --git a/deps/v8/src/interpreter/source-position-table.h b/deps/v8/src/interpreter/source-position-table.h
new file mode 100644
index 0000000000..336cf42bc2
--- /dev/null
+++ b/deps/v8/src/interpreter/source-position-table.h
@@ -0,0 +1,82 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
+#define V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
+
+#include "src/assert-scope.h"
+#include "src/handles.h"
+#include "src/zone.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+class FixedArray;
+class Isolate;
+
+namespace interpreter {
+
+class SourcePositionTableBuilder {
+ public:
+ explicit SourcePositionTableBuilder(Isolate* isolate, Zone* zone)
+ : isolate_(isolate), entries_(zone) {}
+
+ void AddStatementPosition(size_t bytecode_offset, int source_position);
+ void AddExpressionPosition(size_t bytecode_offset, int source_position);
+ void RevertPosition(size_t bytecode_offset);
+ Handle<FixedArray> ToFixedArray();
+
+ private:
+ struct Entry {
+ int bytecode_offset;
+ uint32_t source_position_and_type;
+ };
+
+ bool CodeOffsetHasPosition(int bytecode_offset) {
+ // Return whether bytecode offset already has a position assigned.
+ return entries_.size() > 0 &&
+ entries_.back().bytecode_offset == bytecode_offset;
+ }
+
+ Isolate* isolate_;
+ ZoneVector<Entry> entries_;
+};
+
+class SourcePositionTableIterator {
+ public:
+ explicit SourcePositionTableIterator(BytecodeArray* bytecode_array);
+
+ void Advance();
+
+ int bytecode_offset() const {
+ DCHECK(!done());
+ return bytecode_offset_;
+ }
+ int source_position() const {
+ DCHECK(!done());
+ return source_position_;
+ }
+ bool is_statement() const {
+ DCHECK(!done());
+ return is_statement_;
+ }
+ bool done() const { return index_ > length_; }
+
+ private:
+ FixedArray* table_;
+ int index_;
+ int length_;
+ bool is_statement_;
+ int bytecode_offset_;
+ int source_position_;
+ DisallowHeapAllocation no_gc;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_SOURCE_POSITION_TABLE_H_
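
ToFixedArray flattens the entries into (bytecode offset, encoded position) pairs, and the iterator consumes the table two slots at a time. A sketch of that walk over a hand-built table (the values are illustrative):

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  // Two entries, flattened to pairs: offset, (position << 1) | is_statement.
  std::vector<int> table = {0, (42 << 1) | 1,   // offset 0: statement at 42
                            6, (50 << 1) | 0};  // offset 6: expression at 50
  for (std::size_t index = 0; index + 1 < table.size(); index += 2) {
    int bytecode_offset = table[index];
    unsigned encoded = static_cast<unsigned>(table[index + 1]);
    std::printf("offset %d -> position %u (%s)\n", bytecode_offset,
                encoded >> 1, (encoded & 1u) ? "statement" : "expression");
  }
}
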
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 4e42b436b1..8eb8b71b39 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -390,8 +390,9 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
}
DCHECK(cursor + 4 <= elements->length());
- Handle<Code> code = frames[i].code();
- Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this);
+ Handle<AbstractCode> abstract_code = frames[i].abstract_code();
+
+ Handle<Smi> offset(Smi::FromInt(frames[i].code_offset()), this);
// The stack trace API should not expose receivers and function
// objects on frames deeper than the top-most one with a strict
// mode function. The number of sloppy frames is stored as
@@ -405,7 +406,7 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
}
elements->set(cursor++, *recv);
elements->set(cursor++, *fun);
- elements->set(cursor++, *code);
+ elements->set(cursor++, *abstract_code);
elements->set(cursor++, *offset);
frames_seen++;
}
@@ -594,9 +595,9 @@ int PositionFromStackTrace(Handle<FixedArray> elements, int index) {
if (maybe_code->IsSmi()) {
return Smi::cast(maybe_code)->value();
} else {
- Code* code = Code::cast(maybe_code);
- Address pc = code->address() + Smi::cast(elements->get(index + 3))->value();
- return code->SourcePosition(pc);
+ AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
+ int code_offset = Smi::cast(elements->get(index + 3))->value();
+ return abstract_code->SourcePosition(code_offset);
}
}
@@ -661,7 +662,8 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
// Filter frames from other security contexts.
if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
!this->context()->HasSameSecurityTokenAs(fun->context())) continue;
- int position = frames[i].code()->SourcePosition(frames[i].pc());
+ int position =
+ frames[i].abstract_code()->SourcePosition(frames[i].code_offset());
Handle<JSObject> stack_frame =
helper.NewStackFrameObject(fun, position, frames[i].is_constructor());
@@ -780,14 +782,6 @@ void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver) {
}
-bool Isolate::IsInternallyUsedPropertyName(Handle<Object> name) {
- if (name->IsSymbol()) {
- return Handle<Symbol>::cast(name)->is_private();
- }
- return name.is_identical_to(factory()->hidden_string());
-}
-
-
bool Isolate::MayAccess(Handle<Context> accessing_context,
Handle<JSObject> receiver) {
DCHECK(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
@@ -826,11 +820,11 @@ bool Isolate::MayAccess(Handle<Context> accessing_context,
if (!access_check_info) return false;
Object* fun_obj = access_check_info->callback();
callback = v8::ToCData<v8::AccessCheckCallback>(fun_obj);
+ data = handle(access_check_info->data(), this);
if (!callback) {
fun_obj = access_check_info->named_callback();
named_callback = v8::ToCData<v8::NamedSecurityCallback>(fun_obj);
if (!named_callback) return false;
- data = handle(access_check_info->data(), this);
}
}
@@ -841,7 +835,7 @@ bool Isolate::MayAccess(Handle<Context> accessing_context,
VMState<EXTERNAL> state(this);
if (callback) {
return callback(v8::Utils::ToLocal(accessing_context),
- v8::Utils::ToLocal(receiver));
+ v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(data));
}
Handle<Object> key = factory()->undefined_value();
return named_callback(v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(key),
@@ -1100,34 +1094,63 @@ Object* Isolate::UnwindAndFindHandler() {
if (frame->is_optimized() && catchable_by_js) {
OptimizedFrame* js_frame = static_cast<OptimizedFrame*>(frame);
int stack_slots = 0; // Will contain stack slot count of frame.
- offset = js_frame->LookupExceptionHandlerInTable(&stack_slots, NULL);
+ offset = js_frame->LookupExceptionHandlerInTable(&stack_slots, nullptr);
if (offset >= 0) {
// Compute the stack pointer from the frame pointer. This ensures that
// argument slots on the stack are dropped as returning would.
- Address return_sp = frame->fp() -
- StandardFrameConstants::kFixedFrameSizeFromFp -
+ Address return_sp = frame->fp() +
+ StandardFrameConstants::kFixedFrameSizeAboveFp -
stack_slots * kPointerSize;
// Gather information from the frame.
code = frame->LookupCode();
+ if (code->marked_for_deoptimization()) {
+ // If the target code is lazy deoptimized, we jump to the original
+ // return address, but we make a note that we are throwing, so that
+ // the deoptimizer can do the right thing.
+ offset = static_cast<int>(frame->pc() - code->entry());
+ set_deoptimizer_lazy_throw(true);
+ }
handler_sp = return_sp;
handler_fp = frame->fp();
break;
}
}
+ // For interpreted frames we perform a range lookup in the handler table.
+ if (frame->is_interpreted() && catchable_by_js) {
+ InterpretedFrame* js_frame = static_cast<InterpretedFrame*>(frame);
+ int context_reg = 0; // Will contain register index holding context.
+ offset = js_frame->LookupExceptionHandlerInTable(&context_reg, nullptr);
+ if (offset >= 0) {
+ // Patch the bytecode offset in the interpreted frame to reflect the
+ // position of the exception handler. The special builtin below will
+ // take care of continuing to dispatch at that position. Also restore
+ // the correct context for the handler from the interpreter register.
+ context = Context::cast(js_frame->GetInterpreterRegister(context_reg));
+ js_frame->PatchBytecodeOffset(static_cast<int>(offset));
+ offset = 0;
+
+ // Gather information from the frame.
+ code = *builtins()->InterpreterEnterBytecodeDispatch();
+ handler_sp = frame->sp();
+ handler_fp = frame->fp();
+ break;
+ }
+ }
+
// For JavaScript frames we perform a range lookup in the handler table.
if (frame->is_java_script() && catchable_by_js) {
JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
- int stack_slots = 0; // Will contain operand stack depth of handler.
- offset = js_frame->LookupExceptionHandlerInTable(&stack_slots, NULL);
+ int stack_depth = 0; // Will contain operand stack depth of handler.
+ offset = js_frame->LookupExceptionHandlerInTable(&stack_depth, nullptr);
if (offset >= 0) {
// Compute the stack pointer from the frame pointer. This ensures that
// operand stack slots are dropped for nested statements. Also restore
// correct context for the handler which is pushed within the try-block.
Address return_sp = frame->fp() -
StandardFrameConstants::kFixedFrameSizeFromFp -
- stack_slots * kPointerSize;
+ stack_depth * kPointerSize;
STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
context = Context::cast(Memory::Object_at(return_sp - kPointerSize));
@@ -1175,10 +1198,8 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
// For JavaScript frames we perform a lookup in the handler table.
if (frame->is_java_script()) {
JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
- int stack_slots = 0; // The computed stack slot count is not used.
HandlerTable::CatchPrediction prediction;
- if (js_frame->LookupExceptionHandlerInTable(&stack_slots, &prediction) >
- 0) {
+ if (js_frame->LookupExceptionHandlerInTable(nullptr, &prediction) > 0) {
// We are conservative with our prediction: try-finally is considered
// to always rethrow, to meet the expectation of the debugger.
if (prediction == HandlerTable::CAUGHT) return CAUGHT_BY_JAVASCRIPT;
@@ -1263,7 +1284,9 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
HandleScope scope(this);
// Find code position if recorded in relocation info.
JavaScriptFrame* frame = it.frame();
- int pos = frame->LookupCode()->SourcePosition(frame->pc());
+ Code* code = frame->LookupCode();
+ int offset = static_cast<int>(frame->pc() - code->instruction_start());
+ int pos = frame->LookupCode()->SourcePosition(offset);
Handle<Object> pos_obj(Smi::FromInt(pos), this);
// Fetch function and receiver.
Handle<JSFunction> fun(frame->function());
@@ -1298,7 +1321,7 @@ bool Isolate::ComputeLocation(MessageLocation* target) {
List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
it.frame()->Summarize(&frames);
FrameSummary& summary = frames.last();
- int pos = summary.code()->SourcePosition(summary.pc());
+ int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
*target = MessageLocation(casted_script, pos, pos + 1, handle(fun));
return true;
}
@@ -1586,8 +1609,7 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
if (PredictExceptionCatcher() != CAUGHT_BY_JAVASCRIPT) return undefined;
for (JavaScriptFrameIterator it(this); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
- int stack_slots = 0; // The computed stack slot count is not used.
- if (frame->LookupExceptionHandlerInTable(&stack_slots, NULL) > 0) {
+ if (frame->LookupExceptionHandlerInTable(nullptr, nullptr) > 0) {
// Throwing inside a Promise only leads to a reject if not caught by an
// inner try-catch or try-finally.
if (frame->function() == *promise_function) {
@@ -1732,7 +1754,6 @@ void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) {
#define TRACE_ISOLATE(tag)
#endif
-
Isolate::Isolate(bool enable_serializer)
: embedder_data_(),
entry_stack_(NULL),
@@ -1748,6 +1769,7 @@ Isolate::Isolate(bool enable_serializer)
stub_cache_(NULL),
code_aging_helper_(NULL),
deoptimizer_data_(NULL),
+ deoptimizer_lazy_throw_(false),
materialized_object_store_(NULL),
capture_stack_trace_for_uncaught_exceptions_(false),
stack_trace_for_uncaught_exceptions_frame_limit_(0),
@@ -1903,9 +1925,6 @@ void Isolate::Deinit() {
Sampler* sampler = logger_->sampler();
if (sampler && sampler->IsActive()) sampler->Stop();
- delete interpreter_;
- interpreter_ = NULL;
-
delete deoptimizer_data_;
deoptimizer_data_ = NULL;
builtins_.TearDown();
@@ -1919,13 +1938,17 @@ void Isolate::Deinit() {
delete basic_block_profiler_;
basic_block_profiler_ = NULL;
+ delete heap_profiler_;
+ heap_profiler_ = NULL;
+
heap_.TearDown();
logger_->TearDown();
+ delete interpreter_;
+ interpreter_ = NULL;
+
cancelable_task_manager()->CancelAndWait();
- delete heap_profiler_;
- heap_profiler_ = NULL;
delete cpu_profiler_;
cpu_profiler_ = NULL;
@@ -2376,6 +2399,11 @@ void Isolate::DumpAndResetCompilationStats() {
turbo_statistics_ = nullptr;
delete hstatistics_;
hstatistics_ = nullptr;
+ if (FLAG_runtime_call_stats) {
+ OFStream os(stdout);
+ counters()->runtime_call_stats()->Print(os);
+ counters()->runtime_call_stats()->Reset();
+ }
}
@@ -2483,8 +2511,35 @@ bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
return cell_reports_intact;
}
+bool Isolate::IsArraySpeciesLookupChainIntact() {
+ // Note: It would be nice to have debug checks to make sure that the
+ // species protector is accurate, but this would be hard to do for most of
+ // what the protector stands for:
+ // - You'd need to traverse the heap to check that no Array instance has
+ // a constructor property or a modified __proto__
+ // - To check that Array[Symbol.species] == Array, JS code has to execute,
+ // but JS cannot be invoked in callstack overflow situations
+ // All that could be checked reliably is that
+ // Array.prototype.constructor == Array. Given that limitation, no check is
+ // done here. Instead, the mjsunit tests harmony/array-species* ensure
+ // that behavior is correct in the various invalid-protector cases.
+
+ PropertyCell* species_cell = heap()->species_protector();
+ return species_cell->value()->IsSmi() &&
+ Smi::cast(species_cell->value())->value() == kArrayProtectorValid;
+}
+
+void Isolate::InvalidateArraySpeciesProtector() {
+ DCHECK(factory()->species_protector()->value()->IsSmi());
+ DCHECK(IsArraySpeciesLookupChainIntact());
+ PropertyCell::SetValueWithInvalidation(
+ factory()->species_protector(),
+ handle(Smi::FromInt(kArrayProtectorInvalid), this));
+ DCHECK(!IsArraySpeciesLookupChainIntact());
+}
void Isolate::UpdateArrayProtectorOnSetElement(Handle<JSObject> object) {
+ DisallowHeapAllocation no_gc;
if (IsFastArrayConstructorPrototypeChainIntact() &&
object->map()->is_prototype_map()) {
Object* context = heap()->native_contexts_list();
@@ -2494,6 +2549,7 @@ void Isolate::UpdateArrayProtectorOnSetElement(Handle<JSObject> object) {
*object ||
current_context->get(Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ==
*object) {
+ CountUsage(v8::Isolate::UseCounterFeature::kArrayProtectorDirtied);
PropertyCell::SetValueWithInvalidation(
factory()->array_protector(),
handle(Smi::FromInt(kArrayProtectorInvalid), this));
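
For context on what these protector cells buy: as long as no script has tampered with Array[Symbol.species] or grafted elements onto the Array prototypes, builtins can keep their fast paths. A hedged JS-level sketch (MyArray is illustrative, and in this V8 the species behavior is only active behind --harmony-species):

  class MyArray extends Array {}
  // Dirties the species protector: map/slice/etc. must now consult
  // this.constructor[Symbol.species] instead of assuming plain Array.
  Object.defineProperty(Array, Symbol.species, { get() { return MyArray; } });
  [1, 2, 3].map(function(x) { return x; }) instanceof MyArray;  // true

  // Dirties the array protector (and bumps the use counter added above):
  Array.prototype[3] = "surprise";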
@@ -2580,6 +2636,31 @@ Handle<JSObject> Isolate::GetSymbolRegistry() {
}
+void Isolate::AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback) {
+ for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
+ if (callback == before_call_entered_callbacks_.at(i)) return;
+ }
+ before_call_entered_callbacks_.Add(callback);
+}
+
+
+void Isolate::RemoveBeforeCallEnteredCallback(
+ BeforeCallEnteredCallback callback) {
+ for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
+ if (callback == before_call_entered_callbacks_.at(i)) {
+ before_call_entered_callbacks_.Remove(i);
+ }
+ }
+}
+
+
+void Isolate::FireBeforeCallEnteredCallback() {
+ for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
+ before_call_entered_callbacks_.at(i)(reinterpret_cast<v8::Isolate*>(this));
+ }
+}
+
+
void Isolate::AddCallCompletedCallback(CallCompletedCallback callback) {
for (int i = 0; i < call_completed_callbacks_.length(); i++) {
if (callback == call_completed_callbacks_.at(i)) return;
@@ -2605,10 +2686,10 @@ void Isolate::FireCallCompletedCallback() {
if (!handle_scope_implementer()->CallDepthIsZero()) return;
if (run_microtasks) RunMicrotasks();
// Fire callbacks. Increase call depth to prevent recursive callbacks.
- v8::Isolate::SuppressMicrotaskExecutionScope suppress(
- reinterpret_cast<v8::Isolate*>(this));
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this);
+ v8::Isolate::SuppressMicrotaskExecutionScope suppress(isolate);
for (int i = 0; i < call_completed_callbacks_.length(); i++) {
- call_completed_callbacks_.at(i)();
+ call_completed_callbacks_.at(i)(isolate);
}
}
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 40c8157165..2d74dc4a63 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -683,8 +683,6 @@ class Isolate {
// set.
bool MayAccess(Handle<Context> accessing_context, Handle<JSObject> receiver);
- bool IsInternallyUsedPropertyName(Handle<Object> name);
-
void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
void ReportFailedAccessCheck(Handle<JSObject> receiver);
@@ -820,6 +818,10 @@ class Isolate {
StubCache* stub_cache() { return stub_cache_; }
CodeAgingHelper* code_aging_helper() { return code_aging_helper_; }
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
+ bool deoptimizer_lazy_throw() const { return deoptimizer_lazy_throw_; }
+ void set_deoptimizer_lazy_throw(bool value) {
+ deoptimizer_lazy_throw_ = value;
+ }
ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
MaterializedObjectStore* materialized_object_store() {
return materialized_object_store_;
@@ -891,7 +893,7 @@ class Isolate {
unibrow::Mapping<unibrow::Ecma262Canonicalize>*
interp_canonicalize_mapping() {
- return &interp_canonicalize_mapping_;
+ return &regexp_macro_assembler_canonicalize_;
}
Debug* debug() { return debug_; }
@@ -958,6 +960,7 @@ class Isolate {
static const int kArrayProtectorInvalid = 0;
bool IsFastArrayConstructorPrototypeChainIntact();
+ bool IsArraySpeciesLookupChainIntact();
// On intent to set an element in object, make sure that appropriate
// notifications occur if the set is on the elements of the array or
@@ -973,6 +976,7 @@ class Isolate {
void UpdateArrayProtectorOnNormalizeElements(Handle<JSObject> object) {
UpdateArrayProtectorOnSetElement(object);
}
+ void InvalidateArraySpeciesProtector();
// Returns true if array is the initial array prototype in any native context.
bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
@@ -1053,6 +1057,10 @@ class Isolate {
void RemoveCallCompletedCallback(CallCompletedCallback callback);
void FireCallCompletedCallback();
+ void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
+ void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
+ void FireBeforeCallEnteredCallback();
+
void SetPromiseRejectCallback(PromiseRejectCallback callback);
void ReportPromiseReject(Handle<JSObject> promise, Handle<Object> value,
v8::PromiseRejectEvent event);
@@ -1218,6 +1226,7 @@ class Isolate {
StubCache* stub_cache_;
CodeAgingHelper* code_aging_helper_;
DeoptimizerData* deoptimizer_data_;
+ bool deoptimizer_lazy_throw_;
MaterializedObjectStore* materialized_object_store_;
ThreadLocalTop thread_local_top_;
bool capture_stack_trace_for_uncaught_exceptions_;
@@ -1245,7 +1254,6 @@ class Isolate {
regexp_macro_assembler_canonicalize_;
RegExpStack* regexp_stack_;
DateCache* date_cache_;
- unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
CallInterfaceDescriptorData* call_descriptor_data_;
base::RandomNumberGenerator* random_number_generator_;
@@ -1316,6 +1324,9 @@ class Isolate {
int next_unique_sfi_id_;
#endif
+ // List of callbacks before a Call starts execution.
+ List<BeforeCallEnteredCallback> before_call_entered_callbacks_;
+
// List of callbacks when a Call completes.
List<CallCompletedCallback> call_completed_callbacks_;
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index f9cf161191..0a5e2839ef 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -12,7 +12,6 @@
// Imports
var AddIndexedProperty;
-var FLAG_harmony_tolength;
var FLAG_harmony_species;
var GetIterator;
var GetMethod;
@@ -28,7 +27,6 @@ var ObjectToString = utils.ImportNow("object_to_string");
var ObserveBeginPerformSplice;
var ObserveEndPerformSplice;
var ObserveEnqueueSpliceRecord;
-var SameValueZero;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
@@ -44,11 +42,9 @@ utils.Import(function(from) {
ObserveBeginPerformSplice = from.ObserveBeginPerformSplice;
ObserveEndPerformSplice = from.ObserveEndPerformSplice;
ObserveEnqueueSpliceRecord = from.ObserveEnqueueSpliceRecord;
- SameValueZero = from.SameValueZero;
});
utils.ImportFromExperimental(function(from) {
- FLAG_harmony_tolength = from.FLAG_harmony_tolength;
FLAG_harmony_species = from.FLAG_harmony_species;
});
@@ -213,8 +209,6 @@ function Join(array, length, separator, convert) {
elements[elements_length++] = e;
}
elements.length = elements_length;
- var result = %_FastOneByteArrayJoin(elements, '');
- if (!IS_UNDEFINED(result)) return result;
return %StringBuilderConcat(elements, elements_length, '');
}
// Non-empty separator case.
@@ -237,9 +231,6 @@ function Join(array, length, separator, convert) {
elements[i] = e;
}
}
- var result = %_FastOneByteArrayJoin(elements, separator);
- if (!IS_UNDEFINED(result)) return result;
-
return %StringBuilderJoin(elements, length, separator);
} finally {
// Make sure to remove the last element of the visited array no
@@ -431,7 +422,7 @@ function ArrayToString() {
function InnerArrayToLocaleString(array, length) {
- var len = TO_LENGTH_OR_UINT32(length);
+ var len = TO_LENGTH(length);
if (len === 0) return "";
return Join(array, len, ',', ConvertToLocaleString);
}
@@ -451,9 +442,6 @@ function InnerArrayJoin(separator, array, length) {
separator = TO_STRING(separator);
}
- var result = %_FastOneByteArrayJoin(array, separator);
- if (!IS_UNDEFINED(result)) return result;
-
// Fast case for one-element arrays.
if (length === 1) {
var e = array[0];
@@ -469,7 +457,7 @@ function ArrayJoin(separator) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
var array = TO_OBJECT(this);
- var length = TO_LENGTH_OR_UINT32(array.length);
+ var length = TO_LENGTH(array.length);
return InnerArrayJoin(separator, array, length);
}
@@ -498,7 +486,7 @@ function ArrayPop() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.pop");
var array = TO_OBJECT(this);
- var n = TO_LENGTH_OR_UINT32(array.length);
+ var n = TO_LENGTH(array.length);
if (n == 0) {
array.length = n;
return;
@@ -516,13 +504,13 @@ function ArrayPop() {
function ObservedArrayPush() {
- var n = TO_LENGTH_OR_UINT32(this.length);
- var m = %_ArgumentsLength();
+ var n = TO_LENGTH(this.length);
+ var m = arguments.length;
try {
ObserveBeginPerformSplice(this);
for (var i = 0; i < m; i++) {
- this[i+n] = %_Arguments(i);
+ this[i+n] = arguments[i];
}
var new_length = n + m;
this.length = new_length;
@@ -544,8 +532,8 @@ function ArrayPush() {
return ObservedArrayPush.apply(this, arguments);
var array = TO_OBJECT(this);
- var n = TO_LENGTH_OR_UINT32(array.length);
- var m = %_ArgumentsLength();
+ var n = TO_LENGTH(array.length);
+ var m = arguments.length;
// It appears that there is no enforced, absolute limit on the number of
// arguments, but it would surely blow the stack to use 2**30 or more.
@@ -557,7 +545,7 @@ function ArrayPush() {
}
for (var i = 0; i < m; i++) {
- array[i+n] = %_Arguments(i);
+ array[i+n] = arguments[i];
}
var new_length = n + m;
@@ -650,7 +638,7 @@ function ArrayReverse() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
var array = TO_OBJECT(this);
- var len = TO_LENGTH_OR_UINT32(array.length);
+ var len = TO_LENGTH(array.length);
var isArray = IS_ARRAY(array);
if (UseSparseVariant(array, len, isArray, len)) {
@@ -685,7 +673,7 @@ function ArrayShift() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.shift");
var array = TO_OBJECT(this);
- var len = TO_LENGTH_OR_UINT32(array.length);
+ var len = TO_LENGTH(array.length);
if (len === 0) {
array.length = 0;
@@ -712,14 +700,14 @@ function ArrayShift() {
function ObservedArrayUnshift() {
- var len = TO_LENGTH_OR_UINT32(this.length);
- var num_arguments = %_ArgumentsLength();
+ var len = TO_LENGTH(this.length);
+ var num_arguments = arguments.length;
try {
ObserveBeginPerformSplice(this);
SimpleMove(this, 0, 0, len, num_arguments);
for (var i = 0; i < num_arguments; i++) {
- this[i] = %_Arguments(i);
+ this[i] = arguments[i];
}
var new_length = len + num_arguments;
this.length = new_length;
@@ -739,8 +727,8 @@ function ArrayUnshift(arg1) { // length == 1
return ObservedArrayUnshift.apply(this, arguments);
var array = TO_OBJECT(this);
- var len = TO_LENGTH_OR_UINT32(array.length);
- var num_arguments = %_ArgumentsLength();
+ var len = TO_LENGTH(array.length);
+ var num_arguments = arguments.length;
if (len > 0 && UseSparseVariant(array, len, IS_ARRAY(array), len) &&
!%object_is_sealed(array)) {
@@ -750,7 +738,7 @@ function ArrayUnshift(arg1) { // length == 1
}
for (var i = 0; i < num_arguments; i++) {
- array[i] = %_Arguments(i);
+ array[i] = arguments[i];
}
var new_length = len + num_arguments;
@@ -763,7 +751,7 @@ function ArraySlice(start, end) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.slice");
var array = TO_OBJECT(this);
- var len = TO_LENGTH_OR_UINT32(array.length);
+ var len = TO_LENGTH(array.length);
var start_i = TO_INTEGER(start);
var end_i = len;
@@ -833,8 +821,8 @@ function ComputeSpliceDeleteCount(delete_count, num_arguments, len, start_i) {
function ObservedArraySplice(start, delete_count) {
- var num_arguments = %_ArgumentsLength();
- var len = TO_LENGTH_OR_UINT32(this.length);
+ var num_arguments = arguments.length;
+ var len = TO_LENGTH(this.length);
var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
start_i);
@@ -852,9 +840,9 @@ function ObservedArraySplice(start, delete_count) {
// place of the deleted elements.
var i = start_i;
var arguments_index = 2;
- var arguments_length = %_ArgumentsLength();
+ var arguments_length = arguments.length;
while (arguments_index < arguments_length) {
- this[i++] = %_Arguments(arguments_index++);
+ this[i++] = arguments[arguments_index++];
}
this.length = len - del_count + num_elements_to_add;
@@ -879,9 +867,9 @@ function ArraySplice(start, delete_count) {
if (%IsObserved(this))
return ObservedArraySplice.apply(this, arguments);
- var num_arguments = %_ArgumentsLength();
+ var num_arguments = arguments.length;
var array = TO_OBJECT(this);
- var len = TO_LENGTH_OR_UINT32(array.length);
+ var len = TO_LENGTH(array.length);
var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
start_i);
@@ -915,9 +903,9 @@ function ArraySplice(start, delete_count) {
// place of the deleted elements.
var i = start_i;
var arguments_index = 2;
- var arguments_length = %_ArgumentsLength();
+ var arguments_length = arguments.length;
while (arguments_index < arguments_length) {
- array[i++] = %_Arguments(arguments_index++);
+ array[i++] = arguments[arguments_index++];
}
array.length = len - del_count + num_elements_to_add;
@@ -1068,7 +1056,7 @@ function InnerArraySort(array, length, comparefn) {
var CopyFromPrototype = function CopyFromPrototype(obj, length) {
var max = 0;
for (var proto = %_GetPrototype(obj); proto; proto = %_GetPrototype(proto)) {
- var indices = %GetArrayKeys(proto, length);
+ var indices = IS_PROXY(proto) ? length : %GetArrayKeys(proto, length);
if (IS_NUMBER(indices)) {
// It's an interval.
var proto_length = indices;
@@ -1097,7 +1085,7 @@ function InnerArraySort(array, length, comparefn) {
// elements in that range.
var ShadowPrototypeElements = function(obj, from, to) {
for (var proto = %_GetPrototype(obj); proto; proto = %_GetPrototype(proto)) {
- var indices = %GetArrayKeys(proto, to);
+ var indices = IS_PROXY(proto) ? to : %GetArrayKeys(proto, to);
if (IS_NUMBER(indices)) {
// It's an interval.
var proto_length = indices;
@@ -1217,7 +1205,7 @@ function ArraySort(comparefn) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
var array = TO_OBJECT(this);
- var length = TO_LENGTH_OR_UINT32(array.length);
+ var length = TO_LENGTH(array.length);
return InnerArraySort(array, length, comparefn);
}
@@ -1248,7 +1236,7 @@ function ArrayFilter(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
- var length = TO_LENGTH_OR_UINT32(array.length);
+ var length = TO_LENGTH(array.length);
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var result = ArraySpeciesCreate(array, 0);
return InnerArrayFilter(f, receiver, array, length, result);
@@ -1274,7 +1262,7 @@ function ArrayForEach(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
- var length = TO_LENGTH_OR_UINT32(array.length);
+ var length = TO_LENGTH(array.length);
InnerArrayForEach(f, receiver, array, length);
}
@@ -1301,7 +1289,7 @@ function ArraySome(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
- var length = TO_LENGTH_OR_UINT32(array.length);
+ var length = TO_LENGTH(array.length);
return InnerArraySome(f, receiver, array, length);
}
@@ -1325,7 +1313,7 @@ function ArrayEvery(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
- var length = TO_LENGTH_OR_UINT32(array.length);
+ var length = TO_LENGTH(array.length);
return InnerArrayEvery(f, receiver, array, length);
}
@@ -1336,7 +1324,7 @@ function ArrayMap(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
- var length = TO_LENGTH_OR_UINT32(array.length);
+ var length = TO_LENGTH(array.length);
if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var result = ArraySpeciesCreate(array, length);
var is_array = IS_ARRAY(array);
@@ -1411,7 +1399,7 @@ function InnerArrayIndexOf(array, element, index, length) {
function ArrayIndexOf(element, index) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.indexOf");
- var length = TO_LENGTH_OR_UINT32(this.length);
+ var length = TO_LENGTH(this.length);
return InnerArrayIndexOf(this, element, index, length);
}
@@ -1469,9 +1457,9 @@ function InnerArrayLastIndexOf(array, element, index, length, argumentsLength) {
function ArrayLastIndexOf(element, index) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
- var length = TO_LENGTH_OR_UINT32(this.length);
+ var length = TO_LENGTH(this.length);
return InnerArrayLastIndexOf(this, element, index, length,
- %_ArgumentsLength());
+ arguments.length);
}
@@ -1508,9 +1496,9 @@ function ArrayReduce(callback, current) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
- var length = TO_LENGTH_OR_UINT32(array.length);
+ var length = TO_LENGTH(array.length);
return InnerArrayReduce(callback, current, array, length,
- %_ArgumentsLength());
+ arguments.length);
}
@@ -1548,9 +1536,9 @@ function ArrayReduceRight(callback, current) {
// Pull out the length so that side effects are visible before the
// callback function is checked.
var array = TO_OBJECT(this);
- var length = TO_LENGTH_OR_UINT32(array.length);
+ var length = TO_LENGTH(array.length);
return InnerArrayReduceRight(callback, current, array, length,
- %_ArgumentsLength());
+ arguments.length);
}
@@ -1701,7 +1689,7 @@ function ArrayFill(value, start, end) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill");
var array = TO_OBJECT(this);
- var length = TO_LENGTH_OR_UINT32(array.length);
+ var length = TO_LENGTH(array.length);
return InnerArrayFill(value, start, end, array, length);
}
@@ -1726,7 +1714,7 @@ function InnerArrayIncludes(searchElement, fromIndex, array, length) {
while (k < length) {
var elementK = array[k];
- if (SameValueZero(searchElement, elementK)) {
+ if (%SameValueZero(searchElement, elementK)) {
return true;
}
@@ -1778,23 +1766,10 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
if (!IS_UNDEFINED(iterable)) {
result = %IsConstructor(this) ? new this() : [];
-
- var iterator = GetIterator(items, iterable);
-
k = 0;
- while (true) {
- var next = iterator.next();
-
- if (!IS_RECEIVER(next)) {
- throw MakeTypeError(kIteratorResultNotAnObject, next);
- }
-
- if (next.done) {
- result.length = k;
- return result;
- }
- nextValue = next.value;
+ for (nextValue of
+ { [iteratorSymbol]() { return GetIterator(items, iterable) } }) {
if (mapping) {
mappedValue = %_Call(mapfn, receiver, nextValue, k);
} else {
@@ -1803,6 +1778,8 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
AddArrayElement(this, result, k, mappedValue);
k++;
}
+ result.length = k;
+ return result;
} else {
var len = TO_LENGTH(items.length);
result = %IsConstructor(this) ? new this(len) : new GlobalArray(len);
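
The rewritten iterable path above drives iteration with an ordinary for-of over a throwaway object whose [iteratorSymbol] method hands back the underlying iterator, so the IteratorResult-must-be-an-object check comes from the for-of protocol itself. Observable behavior, for reference:

  Array.from(new Set([1, 2, 2, 3]), function(x) { return x * 10; });
  // [10, 20, 30]   (iterable path)
  Array.from({ length: 3 }, function(_, i) { return i; });
  // [0, 1, 2]      (array-like path, TO_LENGTH on items.length)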
@@ -1824,13 +1801,13 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
// ES6, draft 05-22-14, section 22.1.2.3
-function ArrayOf() {
- var length = %_ArgumentsLength();
+function ArrayOf(...args) {
+ var length = args.length;
var constructor = this;
// TODO: Implement IsConstructor (ES6 section 7.2.5)
var array = %IsConstructor(constructor) ? new constructor(length) : [];
for (var i = 0; i < length; i++) {
- AddArrayElement(constructor, array, i, %_Arguments(i));
+ AddArrayElement(constructor, array, i, args[i]);
}
array.length = length;
return array;
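
Same semantics as before, with rest parameters standing in for %_Arguments:

  Array.of(7);        // [7], unlike Array(7), which makes a sparse length-7 array
  Array.of(1, 2, 3);  // [1, 2, 3]; subclasses work too, via new constructor(length)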
diff --git a/deps/v8/src/js/generator.js b/deps/v8/src/js/generator.js
index 7f43656ebc..3dcdcc0ffa 100644
--- a/deps/v8/src/js/generator.js
+++ b/deps/v8/src/js/generator.js
@@ -36,15 +36,31 @@ function GeneratorObjectNext(value) {
if (continuation > 0) {
// Generator is suspended.
DEBUG_PREPARE_STEP_IN_IF_STEPPING(this);
- try {
- return %_GeneratorNext(this, value);
- } catch (e) {
- %GeneratorClose(this);
- throw e;
- }
+ return %_GeneratorNext(this, value);
} else if (continuation == 0) {
// Generator is already closed.
- return { value: void 0, done: true };
+ return %_CreateIterResultObject(UNDEFINED, true);
+ } else {
+ // Generator is running.
+ throw MakeTypeError(kGeneratorRunning);
+ }
+}
+
+
+function GeneratorObjectReturn(value) {
+ if (!IS_GENERATOR(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ '[Generator].prototype.return', this);
+ }
+
+ var continuation = %GeneratorGetContinuation(this);
+ if (continuation > 0) {
+ // Generator is suspended.
+ DEBUG_PREPARE_STEP_IN_IF_STEPPING(this);
+ return %_GeneratorReturn(this, value);
+ } else if (continuation == 0) {
+ // Generator is already closed.
+ return %_CreateIterResultObject(value, true);
} else {
// Generator is running.
throw MakeTypeError(kGeneratorRunning);
@@ -61,12 +77,8 @@ function GeneratorObjectThrow(exn) {
var continuation = %GeneratorGetContinuation(this);
if (continuation > 0) {
// Generator is suspended.
- try {
- return %_GeneratorThrow(this, exn);
- } catch (e) {
- %GeneratorClose(this);
- throw e;
- }
+ DEBUG_PREPARE_STEP_IN_IF_STEPPING(this);
+ return %_GeneratorThrow(this, exn);
} else if (continuation == 0) {
// Generator is already closed.
throw exn;
@@ -78,9 +90,11 @@ function GeneratorObjectThrow(exn) {
// ----------------------------------------------------------------------------
-// Both Runtime_GeneratorNext and Runtime_GeneratorThrow are supported by
-// neither Crankshaft nor TurboFan, disable optimization of wrappers here.
+// None of the three resume operations (Runtime_GeneratorNext,
+// Runtime_GeneratorReturn, Runtime_GeneratorThrow) is supported by
+// Crankshaft or TurboFan. Disable optimization of wrappers here.
%NeverOptimizeFunction(GeneratorObjectNext);
+%NeverOptimizeFunction(GeneratorObjectReturn);
%NeverOptimizeFunction(GeneratorObjectThrow);
// Set up non-enumerable functions on the generator prototype object.
@@ -88,6 +102,7 @@ var GeneratorObjectPrototype = GeneratorFunctionPrototype.prototype;
utils.InstallFunctions(GeneratorObjectPrototype,
DONT_ENUM,
["next", GeneratorObjectNext,
+ "return", GeneratorObjectReturn,
"throw", GeneratorObjectThrow]);
%AddNamedProperty(GeneratorObjectPrototype, "constructor",
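
The new return method completes a suspended generator as if a return statement had replaced the pending yield, so finally blocks still run. A sketch of the observable behavior (log is a stand-in for any side effect):

  function* g() {
    try { yield 1; yield 2; } finally { log("cleanup"); }
  }
  var it = g();
  it.next();          // { value: 1, done: false }
  it.return(42);      // runs the finally block, then { value: 42, done: true }
  it.return("more");  // already closed: { value: "more", done: true }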
diff --git a/deps/v8/src/js/harmony-reflect.js b/deps/v8/src/js/harmony-reflect.js
deleted file mode 100644
index dcadad522f..0000000000
--- a/deps/v8/src/js/harmony-reflect.js
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2013-2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-'use strict';
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalReflect = global.Reflect;
-var MakeTypeError;
-var ReflectApply = utils.ImportNow("reflect_apply");
-var ReflectConstruct = utils.ImportNow("reflect_construct");
-
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
-});
-
-// -------------------------------------------------------------------
-
-function ReflectEnumerate(obj) {
- if (!IS_RECEIVER(obj))
- throw MakeTypeError(kCalledOnNonObject, "Reflect.enumerate")
- return (function* () { for (var x in obj) yield x })();
-}
-
-utils.InstallFunctions(GlobalReflect, DONT_ENUM, [
- "apply", ReflectApply,
- "construct", ReflectConstruct,
- "enumerate", ReflectEnumerate
-]);
-
-})
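
Reflect.enumerate was dropped from the spec draft, hence the wholesale deletion. Code that used it to collect for-in keys can fall back on the loop itself; a sketch with a hypothetical obj:

  var keys = [];
  for (var key in obj) keys.push(key);  // enumerable keys, prototype chain included
  // or Object.keys(obj) when only own enumerable string keys are wanted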
diff --git a/deps/v8/src/js/i18n.js b/deps/v8/src/js/i18n.js
index 7e00fcdac4..7b2f5a1a12 100644
--- a/deps/v8/src/js/i18n.js
+++ b/deps/v8/src/js/i18n.js
@@ -232,8 +232,8 @@ function addBoundMethod(obj, methodName, implementation, length) {
// DateTimeFormat.format needs to be a 0-arg method, but can still
// receive optional dateValue param. If one was provided, pass it
// along.
- if (%_ArgumentsLength() > 0) {
- return implementation(that, %_Arguments(0));
+ if (arguments.length > 0) {
+ return implementation(that, arguments[0]);
} else {
return implementation(that);
}
@@ -1002,8 +1002,8 @@ function initializeCollator(collator, locales, options) {
* @constructor
*/
%AddNamedProperty(Intl, 'Collator', function() {
- var locales = %_Arguments(0);
- var options = %_Arguments(1);
+ var locales = arguments[0];
+ var options = arguments[1];
if (!this || this === Intl) {
// Constructor is called as a function.
@@ -1060,7 +1060,7 @@ function initializeCollator(collator, locales, options) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
- return supportedLocalesOf('collator', locales, %_Arguments(1));
+ return supportedLocalesOf('collator', locales, arguments[1]);
},
DONT_ENUM
);
@@ -1255,8 +1255,8 @@ function initializeNumberFormat(numberFormat, locales, options) {
* @constructor
*/
%AddNamedProperty(Intl, 'NumberFormat', function() {
- var locales = %_Arguments(0);
- var options = %_Arguments(1);
+ var locales = arguments[0];
+ var options = arguments[1];
if (!this || this === Intl) {
// Constructor is called as a function.
@@ -1332,7 +1332,7 @@ function initializeNumberFormat(numberFormat, locales, options) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
- return supportedLocalesOf('numberformat', locales, %_Arguments(1));
+ return supportedLocalesOf('numberformat', locales, arguments[1]);
},
DONT_ENUM
);
@@ -1659,8 +1659,8 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
* @constructor
*/
%AddNamedProperty(Intl, 'DateTimeFormat', function() {
- var locales = %_Arguments(0);
- var options = %_Arguments(1);
+ var locales = arguments[0];
+ var options = arguments[1];
if (!this || this === Intl) {
// Constructor is called as a function.
@@ -1755,7 +1755,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
- return supportedLocalesOf('dateformat', locales, %_Arguments(1));
+ return supportedLocalesOf('dateformat', locales, arguments[1]);
},
DONT_ENUM
);
@@ -1886,8 +1886,8 @@ function initializeBreakIterator(iterator, locales, options) {
* @constructor
*/
%AddNamedProperty(Intl, 'v8BreakIterator', function() {
- var locales = %_Arguments(0);
- var options = %_Arguments(1);
+ var locales = arguments[0];
+ var options = arguments[1];
if (!this || this === Intl) {
// Constructor is called as a function.
@@ -1943,7 +1943,7 @@ function initializeBreakIterator(iterator, locales, options) {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
- return supportedLocalesOf('breakiterator', locales, %_Arguments(1));
+ return supportedLocalesOf('breakiterator', locales, arguments[1]);
},
DONT_ENUM
);
@@ -2061,8 +2061,8 @@ OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
throw MakeTypeError(kMethodInvokedOnNullOrUndefined);
}
- var locales = %_Arguments(1);
- var options = %_Arguments(2);
+ var locales = arguments[1];
+ var options = arguments[2];
var collator = cachedOrNewService('collator', locales, options);
return compare(collator, this, that);
}
@@ -2085,7 +2085,7 @@ OverrideFunction(GlobalString.prototype, 'normalize', function() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
var s = TO_STRING(this);
- var formArg = %_Arguments(0);
+ var formArg = arguments[0];
var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING(formArg);
var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
@@ -2114,8 +2114,8 @@ OverrideFunction(GlobalNumber.prototype, 'toLocaleString', function() {
throw MakeTypeError(kMethodInvokedOnWrongType, "Number");
}
- var locales = %_Arguments(0);
- var options = %_Arguments(1);
+ var locales = arguments[0];
+ var options = arguments[1];
var numberFormat = cachedOrNewService('numberformat', locales, options);
return formatNumber(numberFormat, this);
}
@@ -2151,8 +2151,8 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
- var locales = %_Arguments(0);
- var options = %_Arguments(1);
+ var locales = arguments[0];
+ var options = arguments[1];
return toLocaleDateTime(
this, locales, options, 'any', 'all', 'dateformatall');
}
@@ -2169,8 +2169,8 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
- var locales = %_Arguments(0);
- var options = %_Arguments(1);
+ var locales = arguments[0];
+ var options = arguments[1];
return toLocaleDateTime(
this, locales, options, 'date', 'date', 'dateformatdate');
}
@@ -2187,8 +2187,8 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
throw MakeTypeError(kOrdinaryFunctionCalledAsConstructor);
}
- var locales = %_Arguments(0);
- var options = %_Arguments(1);
+ var locales = arguments[0];
+ var options = arguments[1];
return toLocaleDateTime(
this, locales, options, 'time', 'time', 'dateformattime');
}
diff --git a/deps/v8/src/js/json.js b/deps/v8/src/js/json.js
index b8836eaddd..73d7802be9 100644
--- a/deps/v8/src/js/json.js
+++ b/deps/v8/src/js/json.js
@@ -187,7 +187,7 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
function JSONStringify(value, replacer, space) {
- if (%_ArgumentsLength() == 1 && !IS_PROXY(value)) {
+ if (arguments.length === 1 && !IS_PROXY(value)) {
return %BasicJSONStringify(value);
}
if (!IS_CALLABLE(replacer) && %is_arraylike(replacer)) {
@@ -234,6 +234,9 @@ function JSONStringify(value, replacer, space) {
} else {
gap = "";
}
+ if (!IS_CALLABLE(replacer) && !property_list && !gap && !IS_PROXY(value)) {
+ return %BasicJSONStringify(value);
+ }
return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
}
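
The added check routes stringify calls whose replacer and space arguments are no-ops through %BasicJSONStringify as well; which branch runs is an implementation detail, the output is identical:

  JSON.stringify({ a: 1 });            // fast path: single argument
  JSON.stringify({ a: 1 }, null, "");  // now also fast: replacer and gap are no-ops
  JSON.stringify({ a: 1 }, null, 2);   // non-empty gap: full JSONSerialize walk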
diff --git a/deps/v8/src/js/macros.py b/deps/v8/src/js/macros.py
index 3bcc8c114e..b2a785697b 100644
--- a/deps/v8/src/js/macros.py
+++ b/deps/v8/src/js/macros.py
@@ -67,9 +67,9 @@ macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
macro IS_BOOLEAN_WRAPPER(arg) = (%_ClassOf(arg) === 'Boolean');
macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView');
-macro IS_DATE(arg) = (%_IsDate(arg));
+macro IS_DATE(arg) = (%IsDate(arg));
macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
-macro IS_FUNCTION(arg) = (%_IsFunction(arg));
+macro IS_FUNCTION(arg) = (%IsFunction(arg));
macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map');
@@ -85,7 +85,7 @@ macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
-macro IS_SIMD_VALUE(arg) = (%_IsSimdValue(arg));
+macro IS_SIMD_VALUE(arg) = (%IsSimdValue(arg));
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_STRING_WRAPPER(arg) = (%_ClassOf(arg) === 'String');
macro IS_STRONG(arg) = (%IsStrong(arg));
@@ -114,8 +114,6 @@ macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToI
macro TO_INT32(arg) = ((arg) | 0);
macro TO_UINT32(arg) = ((arg) >>> 0);
macro TO_LENGTH(arg) = (%_ToLength(arg));
-macro TO_LENGTH_OR_UINT32(arg) = (FLAG_harmony_tolength ? TO_LENGTH(arg) : TO_UINT32(arg));
-macro TO_LENGTH_OR_INTEGER(arg) = (FLAG_harmony_tolength ? TO_LENGTH(arg) : TO_INTEGER(arg));
macro TO_STRING(arg) = (%_ToString(arg));
macro TO_NUMBER(arg) = (%_ToNumber(arg));
macro TO_OBJECT(arg) = (%_ToObject(arg));
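
With FLAG_harmony_tolength gone, every former TO_LENGTH_OR_UINT32 site in this patch uses real ES2015 ToLength, which clamps to [0, 2^53 - 1] instead of wrapping modulo 2^32. The difference shows up on hostile array-likes:

  var fake = { length: -1, 0: "x" };
  // ToUint32(-1) === 4294967295, so the old semantics implied a huge scan;
  // ToLength(-1) === 0, so generic methods now bail out immediately.
  [].indexOf.call(fake, "x");  // -1, without touching any elements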
diff --git a/deps/v8/src/js/math.js b/deps/v8/src/js/math.js
index 990a7e993c..a698fd4285 100644
--- a/deps/v8/src/js/math.js
+++ b/deps/v8/src/js/math.js
@@ -75,60 +75,6 @@ function MathLog(x) {
return %_MathLogRT(TO_NUMBER(x));
}
-// ECMA 262 - 15.8.2.11
-function MathMax(arg1, arg2) { // length == 2
- var length = %_ArgumentsLength();
- if (length == 2) {
- arg1 = TO_NUMBER(arg1);
- arg2 = TO_NUMBER(arg2);
- if (arg2 > arg1) return arg2;
- if (arg1 > arg2) return arg1;
- if (arg1 == arg2) {
- // Make sure -0 is considered less than +0.
- return (arg1 === 0 && %_IsMinusZero(arg1)) ? arg2 : arg1;
- }
- // All comparisons failed, one of the arguments must be NaN.
- return NaN;
- }
- var r = -INFINITY;
- for (var i = 0; i < length; i++) {
- var n = %_Arguments(i);
- n = TO_NUMBER(n);
- // Make sure +0 is considered greater than -0.
- if (NUMBER_IS_NAN(n) || n > r || (r === 0 && n === 0 && %_IsMinusZero(r))) {
- r = n;
- }
- }
- return r;
-}
-
-// ECMA 262 - 15.8.2.12
-function MathMin(arg1, arg2) { // length == 2
- var length = %_ArgumentsLength();
- if (length == 2) {
- arg1 = TO_NUMBER(arg1);
- arg2 = TO_NUMBER(arg2);
- if (arg2 > arg1) return arg1;
- if (arg1 > arg2) return arg2;
- if (arg1 == arg2) {
- // Make sure -0 is considered less than +0.
- return (arg1 === 0 && %_IsMinusZero(arg1)) ? arg1 : arg2;
- }
- // All comparisons failed, one of the arguments must be NaN.
- return NaN;
- }
- var r = INFINITY;
- for (var i = 0; i < length; i++) {
- var n = %_Arguments(i);
- n = TO_NUMBER(n);
- // Make sure -0 is considered less than +0.
- if (NUMBER_IS_NAN(n) || n < r || (r === 0 && n === 0 && %_IsMinusZero(n))) {
- r = n;
- }
- }
- return r;
-}
-
// ECMA 262 - 15.8.2.13
function MathPowJS(x, y) {
return %_MathPow(TO_NUMBER(x), TO_NUMBER(y));
@@ -218,17 +164,14 @@ function MathHypot(x, y) { // Function length is 2.
// We may want to introduce fast paths for the two-argument case and for
// inputs where normalization to avoid overflow is unnecessary. For now, we
// simply assume the general case.
- var length = %_ArgumentsLength();
- var args = new InternalArray(length);
+ var length = arguments.length;
var max = 0;
for (var i = 0; i < length; i++) {
- var n = %_Arguments(i);
- n = TO_NUMBER(n);
- if (n === INFINITY || n === -INFINITY) return INFINITY;
- n = MathAbs(n);
+ var n = MathAbs(arguments[i]);
if (n > max) max = n;
- args[i] = n;
+ arguments[i] = n;
}
+ if (max === INFINITY) return INFINITY;
// Kahan summation to avoid rounding errors.
// Normalize the numbers to the largest one to avoid overflow.
@@ -236,7 +179,7 @@ function MathHypot(x, y) { // Function length is 2.
var sum = 0;
var compensation = 0;
for (var i = 0; i < length; i++) {
- var n = args[i] / max;
+ var n = arguments[i] / max;
var summand = n * n - compensation;
var preliminary = sum + summand;
compensation = (preliminary - sum) - summand;
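
For reference, the same normalize-then-Kahan-sum scheme as a standalone sketch in plain JS (no intrinsics); the normalization is what keeps hypot(3e200, 4e200) finite even though either square alone would overflow:

  function hypot(...xs) {
    var max = xs.reduce(function(m, x) { return Math.max(m, Math.abs(x)); }, 0);
    if (max === Infinity) return Infinity;
    if (max === 0) return 0;
    var sum = 0, compensation = 0;
    for (var x of xs) {
      var n = Math.abs(x) / max;           // normalize into [0, 1]
      var summand = n * n - compensation;  // Kahan: fold lost low-order bits back in
      var preliminary = sum + summand;
      compensation = (preliminary - sum) - summand;
      sum = preliminary;
    }
    return Math.sqrt(sum) * max;           // undo the normalization
  }
  hypot(3e200, 4e200);  // 5e200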
@@ -314,8 +257,6 @@ utils.InstallFunctions(GlobalMath, DONT_ENUM, [
"sqrt", MathSqrtJS,
"atan2", MathAtan2JS,
"pow", MathPowJS,
- "max", MathMax,
- "min", MathMin,
"imul", MathImul,
"sign", MathSign,
"trunc", MathTrunc,
@@ -349,8 +290,6 @@ utils.Export(function(to) {
to.MathExp = MathExp;
to.MathFloor = MathFloorJS;
to.IntRandom = MathRandomRaw;
- to.MathMax = MathMax;
- to.MathMin = MathMin;
});
})
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
index 2779393bd1..24225a0a00 100644
--- a/deps/v8/src/js/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -179,8 +179,6 @@ function PostNatives(utils) {
"MapEntries",
"MapIterator",
"MapIteratorNext",
- "MathMax",
- "MathMin",
"MaxSimple",
"MinSimple",
"ObjectDefineProperty",
@@ -189,7 +187,6 @@ function PostNatives(utils) {
"PromiseChain",
"PromiseDeferred",
"PromiseResolved",
- "SameValueZero",
"SetIterator",
"SetIteratorNext",
"SetValues",
diff --git a/deps/v8/src/js/proxy.js b/deps/v8/src/js/proxy.js
index 842bac0252..a111c09427 100644
--- a/deps/v8/src/js/proxy.js
+++ b/deps/v8/src/js/proxy.js
@@ -12,11 +12,6 @@
// Imports
//
var GlobalProxy = global.Proxy;
-var MakeTypeError;
-
-utils.Import(function(from) {
- MakeTypeError = from.MakeTypeError;
-});
//----------------------------------------------------------------------------
@@ -25,33 +20,6 @@ function ProxyCreateRevocable(target, handler) {
return {proxy: p, revoke: () => %JSProxyRevoke(p)};
}
-// -------------------------------------------------------------------
-// Proxy Builtins
-
-// Implements part of ES6 9.5.11 Proxy.[[Enumerate]]:
-// Call the trap, which should return an iterator, exhaust the iterator,
-// and return an array containing the values.
-function ProxyEnumerate(trap, handler, target) {
- // 7. Let trapResult be ? Call(trap, handler, Ā«targetĀ»).
- var trap_result = %_Call(trap, handler, target);
- // 8. If Type(trapResult) is not Object, throw a TypeError exception.
- if (!IS_RECEIVER(trap_result)) {
- throw MakeTypeError(kProxyEnumerateNonObject);
- }
- // 9. Return trapResult.
- var result = [];
- for (var it = trap_result.next(); !it.done; it = trap_result.next()) {
- var key = it.value;
- // Not yet spec'ed as of 2015-11-25, but will be spec'ed soon:
- // If the iterator returns a non-string value, throw a TypeError.
- if (!IS_STRING(key)) {
- throw MakeTypeError(kProxyEnumerateNonString);
- }
- result.push(key);
- }
- return result;
-}
-
//-------------------------------------------------------------------
//Set up non-enumerable properties of the Proxy object.
@@ -59,11 +27,4 @@ utils.InstallFunctions(GlobalProxy, DONT_ENUM, [
"revocable", ProxyCreateRevocable
]);
-// -------------------------------------------------------------------
-// Exports
-
-%InstallToContext([
- "proxy_enumerate", ProxyEnumerate,
-]);
-
})
diff --git a/deps/v8/src/js/regexp.js b/deps/v8/src/js/regexp.js
index eeacd6eb9e..e80d0190f4 100644
--- a/deps/v8/src/js/regexp.js
+++ b/deps/v8/src/js/regexp.js
@@ -9,7 +9,7 @@
// -------------------------------------------------------------------
// Imports
-var FLAG_harmony_tolength;
+var ExpandReplacement;
var GlobalObject = global.Object;
var GlobalRegExp = global.RegExp;
var GlobalRegExpPrototype;
@@ -17,14 +17,12 @@ var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
var MakeTypeError;
var matchSymbol = utils.ImportNow("match_symbol");
+var replaceSymbol = utils.ImportNow("replace_symbol");
var searchSymbol = utils.ImportNow("search_symbol");
var splitSymbol = utils.ImportNow("split_symbol");
-utils.ImportFromExperimental(function(from) {
- FLAG_harmony_tolength = from.FLAG_harmony_tolength;
-});
-
utils.Import(function(from) {
+ ExpandReplacement = from.ExpandReplacement;
MakeTypeError = from.MakeTypeError;
});
@@ -176,7 +174,7 @@ function RegExpExecJS(string) {
// Conversion is required by the ES2015 specification (RegExpBuiltinExec
// algorithm, step 4) even if the value is discarded for non-global RegExps.
- var i = TO_LENGTH_OR_INTEGER(lastIndex);
+ var i = TO_LENGTH(lastIndex);
var updateLastIndex = REGEXP_GLOBAL(this) || REGEXP_STICKY(this);
if (updateLastIndex) {
@@ -223,7 +221,7 @@ function RegExpTest(string) {
// Conversion is required by the ES2015 specification (RegExpBuiltinExec
// algorithm, step 4) even if the value is discarded for non-global RegExps.
- var i = TO_LENGTH_OR_INTEGER(lastIndex);
+ var i = TO_LENGTH(lastIndex);
if (REGEXP_GLOBAL(this) || REGEXP_STICKY(this)) {
if (i < 0 || i > string.length) {
@@ -262,7 +260,7 @@ function RegExpTest(string) {
}
function TrimRegExp(regexp) {
- if (!%_ObjectEquals(regexp_key, regexp)) {
+ if (regexp_key !== regexp) {
regexp_key = regexp;
regexp_val =
new GlobalRegExp(
@@ -283,8 +281,11 @@ function RegExpToString() {
%IncrementUseCounter(kRegExpPrototypeToString);
return '/(?:)/';
}
- throw MakeTypeError(kIncompatibleMethodReceiver,
- 'RegExp.prototype.toString', this);
+ if (!IS_RECEIVER(this)) {
+ throw MakeTypeError(
+ kIncompatibleMethodReceiver, 'RegExp.prototype.toString', this);
+ }
+ return '/' + TO_STRING(this.source) + '/' + TO_STRING(this.flags);
}
var result = '/' + REGEXP_SOURCE(this) + '/';
if (REGEXP_GLOBAL(this)) result += 'g';
@@ -296,6 +297,15 @@ function RegExpToString() {
}
+function AtSurrogatePair(subject, index) {
+ if (index + 1 >= subject.length) return false;
+ var first = %_StringCharCodeAt(subject, index);
+ if (first < 0xD800 || first > 0xDBFF) return false;
+ var second = %_StringCharCodeAt(subject, index + 1);
+ return second >= 0xDC00 && second <= 0xDFFF;
+}
+
+
// ES6 21.2.5.11.
function RegExpSplit(string, limit) {
// TODO(yangguo): allow non-regexp receivers.
@@ -337,7 +347,11 @@ function RegExpSplit(string, limit) {
// We ignore a zero-length match at the currentIndex.
if (startIndex === endIndex && endIndex === currentIndex) {
- startIndex++;
+ if (REGEXP_UNICODE(this) && AtSurrogatePair(subject, startIndex)) {
+ startIndex += 2;
+ } else {
+ startIndex++;
+ }
continue;
}
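
With the u flag, a zero-length match never lands between the halves of a surrogate pair, which is exactly what the two-unit advance above guarantees (in this build, u-flag regexps may still sit behind --harmony_unicode_regexps):

  var s = "\u{1F600}\u{1F601}";  // two astral code points, four UTF-16 units
  s.split(/(?:)/u);  // two elements, each a whole code point
  s.split(/(?:)/);   // four elements, each a lone surrogate half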
@@ -382,6 +396,175 @@ function RegExpMatch(string) {
}
+// ES6 21.2.5.8.
+
+// TODO(lrn): This array will survive indefinitely if replace is never
+// called again. However, it will be empty, since the contents are cleared
+// before it is stored back for reuse.
+var reusableReplaceArray = new InternalArray(4);
+
+// Helper function for replacing regular expressions with the result of a
+// function application in String.prototype.replace.
+function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
+ var resultArray = reusableReplaceArray;
+ if (resultArray) {
+ reusableReplaceArray = null;
+ } else {
+ // Inside a nested replace (replace called from the replacement function
+ // of another replace) or we have failed to set the reusable array
+ // back due to an exception in a replacement function. Create a new
+ // array to use in the future, or until the original is written back.
+ resultArray = new InternalArray(16);
+ }
+ var res = %RegExpExecMultiple(regexp,
+ subject,
+ RegExpLastMatchInfo,
+ resultArray);
+ regexp.lastIndex = 0;
+ if (IS_NULL(res)) {
+ // No matches at all.
+ reusableReplaceArray = resultArray;
+ return subject;
+ }
+ var len = res.length;
+ if (NUMBER_OF_CAPTURES(RegExpLastMatchInfo) == 2) {
+ // If the number of captures is two then there are no explicit captures in
+ // the regexp, just the implicit capture that captures the whole match. In
+ // this case we can simplify quite a bit and end up with something faster.
+ // The builder will consist of some integers that indicate slices of the
+ // input string and some replacements that were returned from the replace
+ // function.
+ var match_start = 0;
+ for (var i = 0; i < len; i++) {
+ var elem = res[i];
+ if (%_IsSmi(elem)) {
+ // Integers represent slices of the original string.
+ if (elem > 0) {
+ match_start = (elem >> 11) + (elem & 0x7ff);
+ } else {
+ match_start = res[++i] - elem;
+ }
+ } else {
+ var func_result = replace(elem, match_start, subject);
+ // Overwrite the i'th element in the results with the string we got
+ // back from the callback function.
+ res[i] = TO_STRING(func_result);
+ match_start += elem.length;
+ }
+ }
+ } else {
+ for (var i = 0; i < len; i++) {
+ var elem = res[i];
+ if (!%_IsSmi(elem)) {
+ // elem must be an Array.
+ // Use the apply argument as backing for global RegExp properties.
+ var func_result = %Apply(replace, UNDEFINED, elem, 0, elem.length);
+ // Overwrite the i'th element in the results with the string we got
+ // back from the callback function.
+ res[i] = TO_STRING(func_result);
+ }
+ }
+ }
+ var result = %StringBuilderConcat(res, len, subject);
+ resultArray.length = 0;
+ reusableReplaceArray = resultArray;
+ return result;
+}
+
+
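
A reading of the packed encoding consumed above (inferred from the decode, not from runtime documentation): %RegExpExecMultiple returns strings for replaced spans and Smi-tagged integers for verbatim slices of the subject. A positive Smi packs (position << 11) | length for lengths below 2^11; a slice that does not fit arrives as the negated position followed by the length in the next element:

  // elem = (5 << 11) | 3      -> slice [5, 8);           match_start = 5 + 3
  // elem = -100000, next = 7  -> slice [100000, 100007); match_start = 7 - elem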
+// Compute the string of a given regular expression capture.
+function CaptureString(string, lastCaptureInfo, index) {
+ // Scale the index.
+ var scaled = index << 1;
+ // Compute start and end.
+ var start = lastCaptureInfo[CAPTURE(scaled)];
+ // If start isn't valid, return undefined.
+ if (start < 0) return;
+ var end = lastCaptureInfo[CAPTURE(scaled + 1)];
+ return %_SubString(string, start, end);
+}
+
+
+function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
+ var matchInfo = DoRegExpExec(regexp, subject, 0);
+ if (IS_NULL(matchInfo)) {
+ regexp.lastIndex = 0;
+ return subject;
+ }
+ var index = matchInfo[CAPTURE0];
+ var result = %_SubString(subject, 0, index);
+ var endOfMatch = matchInfo[CAPTURE1];
+ // Compute the parameter list consisting of the match, captures, index,
+ // and subject for the replace function invocation.
+ // The number of captures plus one for the match.
+ var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
+ var replacement;
+ if (m == 1) {
+ // No captures, only the match, which is always valid.
+ var s = %_SubString(subject, index, endOfMatch);
+ // Don't call directly to avoid exposing the built-in global object.
+ replacement = replace(s, index, subject);
+ } else {
+ var parameters = new InternalArray(m + 2);
+ for (var j = 0; j < m; j++) {
+ parameters[j] = CaptureString(subject, matchInfo, j);
+ }
+ parameters[j] = index;
+ parameters[j + 1] = subject;
+
+ replacement = %Apply(replace, UNDEFINED, parameters, 0, j + 2);
+ }
+
+ result += replacement; // The add method converts to string if necessary.
+ // Can't use matchInfo any more from here, since the function could
+ // overwrite it.
+ return result + %_SubString(subject, endOfMatch, subject.length);
+}
+
+
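
The function-replace path passes the match, each capture, the match offset, and the whole subject, i.e. the standard replace-callback signature:

  "2016-04-07".replace(/(\d+)-(\d+)/, function(match, y, m, offset, str) {
    // match === "2016-04", y === "2016", m === "04", offset === 0
    return m + "/" + y;
  });  // "04/2016-07"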
+function RegExpReplace(string, replace) {
+ // TODO(littledan): allow non-regexp receivers.
+ if (!IS_REGEXP(this)) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@replace", this);
+ }
+ var subject = TO_STRING(string);
+ var search = this;
+
+ if (!IS_CALLABLE(replace)) {
+ replace = TO_STRING(replace);
+
+ if (!REGEXP_GLOBAL(search)) {
+ // Non-global regexp search, string replace.
+ var match = DoRegExpExec(search, subject, 0);
+ if (match == null) {
+ search.lastIndex = 0
+ return subject;
+ }
+ if (replace.length == 0) {
+ return %_SubString(subject, 0, match[CAPTURE0]) +
+ %_SubString(subject, match[CAPTURE1], subject.length)
+ }
+ return ExpandReplacement(replace, subject, RegExpLastMatchInfo,
+ %_SubString(subject, 0, match[CAPTURE0])) +
+ %_SubString(subject, match[CAPTURE1], subject.length);
+ }
+
+ // Global regexp search, string replace.
+ search.lastIndex = 0;
+ return %StringReplaceGlobalRegExpWithString(
+ subject, search, replace, RegExpLastMatchInfo);
+ }
+
+ if (REGEXP_GLOBAL(search)) {
+ // Global regexp search, function replace.
+ return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
+ }
+ // Non-global regexp search, function replace.
+ return StringReplaceNonGlobalRegExpWithFunction(subject, search, replace);
+}
+
+
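
Because String.prototype.replace (further down, in string.js) now dispatches through search[Symbol.replace], any object can intercept replace, not just RegExps. A minimal sketch:

  var dashes = {
    [Symbol.replace](subject, replacement) {
      return subject.split("-").join(replacement);
    }
  };
  "a-b-c".replace(dashes, "+");  // "a+b+c"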
// ES6 21.2.5.9.
function RegExpSearch(string) {
// TODO(yangguo): allow non-regexp receivers.
@@ -530,6 +713,7 @@ utils.InstallFunctions(GlobalRegExp.prototype, DONT_ENUM, [
"toString", RegExpToString,
"compile", RegExpCompileJS,
matchSymbol, RegExpMatch,
+ replaceSymbol, RegExpReplace,
searchSymbol, RegExpSearch,
splitSymbol, RegExpSplit,
]);
@@ -539,9 +723,6 @@ utils.InstallGetter(GlobalRegExp.prototype, 'ignoreCase', RegExpGetIgnoreCase);
utils.InstallGetter(GlobalRegExp.prototype, 'multiline', RegExpGetMultiline);
utils.InstallGetter(GlobalRegExp.prototype, 'source', RegExpGetSource);
-// The length of compile is 1 in SpiderMonkey.
-%FunctionSetLength(GlobalRegExp.prototype.compile, 1);
-
// The properties `input` and `$_` are aliases for each other. When this
// value is set, the assigned value is coerced to a string.
// Getter and setter for the input.
diff --git a/deps/v8/src/js/runtime.js b/deps/v8/src/js/runtime.js
index 301d75a391..7a61094da6 100644
--- a/deps/v8/src/js/runtime.js
+++ b/deps/v8/src/js/runtime.js
@@ -36,47 +36,12 @@ utils.ImportFromExperimental(function(from) {
// ----------------------------------------------------------------------------
-/* -----------------------------
- - - - H e l p e r s - - -
- -----------------------------
-*/
-
-function CONCAT_ITERABLE_TO_ARRAY(iterable) {
- return %concat_iterable_to_array(this, iterable);
-};
-
-/* -------------------------------------
- - - - C o n v e r s i o n s - - -
- -------------------------------------
+/* ---------------------------------
+ - - - U t i l i t i e s - - -
+ ---------------------------------
*/
-// ES5, section 9.12
-function SameValue(x, y) {
- if (typeof x != typeof y) return false;
- if (IS_NUMBER(x)) {
- if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
- // x is +0 and y is -0 or vice versa.
- if (x === 0 && y === 0 && %_IsMinusZero(x) != %_IsMinusZero(y)) {
- return false;
- }
- }
- if (IS_SIMD_VALUE(x)) return %SimdSameValue(x, y);
- return x === y;
-}
-
-
-// ES6, section 7.2.4
-function SameValueZero(x, y) {
- if (typeof x != typeof y) return false;
- if (IS_NUMBER(x)) {
- if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
- }
- if (IS_SIMD_VALUE(x)) return %SimdSameValueZero(x, y);
- return x === y;
-}
-
-
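
Both comparators move off these JS helpers: SameValueZero is now the runtime function %SameValueZero (used by array.js above), and SameValue's semantics remain reachable as Object.is. Their observable difference is confined to NaN and signed zero:

  Object.is(NaN, NaN);   // true:  SameValue treats NaN as equal to itself
  Object.is(0, -0);      // false: SameValue distinguishes signed zeros
  [NaN].includes(NaN);   // true:  SameValueZero, same NaN handling
  [0].includes(-0);      // true:  SameValueZero ignores the sign of zero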
function ConcatIterableToArray(target, iterable) {
var index = target.length;
for (var element of iterable) {
@@ -86,12 +51,6 @@ function ConcatIterableToArray(target, iterable) {
}
-/* ---------------------------------
- - - - U t i l i t i e s - - -
- ---------------------------------
-*/
-
-
// This function should be called rather than %AddElement in contexts where the
// argument might not be less than 2**32-1. ES2015 ToLength semantics mean that
// this is a concern at basically all callsites.
@@ -174,17 +133,11 @@ utils.Export(function(to) {
to.AddIndexedProperty = AddIndexedProperty;
to.MaxSimple = MaxSimple;
to.MinSimple = MinSimple;
- to.SameValue = SameValue;
- to.SameValueZero = SameValueZero;
to.ToPositiveInteger = ToPositiveInteger;
to.SpeciesConstructor = SpeciesConstructor;
});
%InstallToContext([
- "concat_iterable_to_array_builtin", CONCAT_ITERABLE_TO_ARRAY,
-]);
-
-%InstallToContext([
"concat_iterable_to_array", ConcatIterableToArray,
]);
diff --git a/deps/v8/src/js/spread.js b/deps/v8/src/js/spread.js
index 235c91ab79..82ea839598 100644
--- a/deps/v8/src/js/spread.js
+++ b/deps/v8/src/js/spread.js
@@ -18,11 +18,11 @@ utils.Import(function(from) {
// -------------------------------------------------------------------
function SpreadArguments() {
- var count = %_ArgumentsLength();
+ var count = arguments.length;
var args = new InternalArray();
for (var i = 0; i < count; ++i) {
- var array = %_Arguments(i);
+ var array = arguments[i];
var length = array.length;
for (var j = 0; j < length; ++j) {
args.push(array[j]);
diff --git a/deps/v8/src/js/string.js b/deps/v8/src/js/string.js
index b220038b74..a4019784e8 100644
--- a/deps/v8/src/js/string.js
+++ b/deps/v8/src/js/string.js
@@ -17,12 +17,11 @@ var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
var MakeRangeError;
var MakeTypeError;
-var MathMax;
-var MathMin;
+var MaxSimple;
+var MinSimple;
var matchSymbol = utils.ImportNow("match_symbol");
-var RegExpExec;
var RegExpExecNoTests;
-var RegExpLastMatchInfo;
+var replaceSymbol = utils.ImportNow("replace_symbol");
var searchSymbol = utils.ImportNow("search_symbol");
var splitSymbol = utils.ImportNow("split_symbol");
@@ -31,11 +30,9 @@ utils.Import(function(from) {
ArrayJoin = from.ArrayJoin;
MakeRangeError = from.MakeRangeError;
MakeTypeError = from.MakeTypeError;
- MathMax = from.MathMax;
- MathMin = from.MathMin;
- RegExpExec = from.RegExpExec;
+ MaxSimple = from.MaxSimple;
+ MinSimple = from.MinSimple;
RegExpExecNoTests = from.RegExpExecNoTests;
- RegExpLastMatchInfo = from.RegExpLastMatchInfo;
});
//-------------------------------------------------------------------
@@ -84,41 +81,34 @@ function StringCharCodeAtJS(pos) {
// ECMA-262, section 15.5.4.6
function StringConcat(other /* and more */) { // length == 1
+ "use strict";
CHECK_OBJECT_COERCIBLE(this, "String.prototype.concat");
- var len = %_ArgumentsLength();
- var this_as_string = TO_STRING(this);
- if (len === 1) {
- return this_as_string + TO_STRING(other);
- }
- var parts = new InternalArray(len + 1);
- parts[0] = this_as_string;
- for (var i = 0; i < len; i++) {
- var part = %_Arguments(i);
- parts[i + 1] = TO_STRING(part);
+ var s = TO_STRING(this);
+ var len = arguments.length;
+ for (var i = 0; i < len; ++i) {
+ s = s + TO_STRING(arguments[i]);
}
- return %StringBuilderConcat(parts, len + 1, "");
+ return s;
}
// ECMA-262 section 15.5.4.7
-function StringIndexOfJS(pattern /* position */) { // length == 1
+function StringIndexOf(pattern, position) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "String.prototype.indexOf");
var subject = TO_STRING(this);
pattern = TO_STRING(pattern);
- var index = 0;
- if (%_ArgumentsLength() > 1) {
- index = %_Arguments(1); // position
- index = TO_INTEGER(index);
- if (index < 0) index = 0;
- if (index > subject.length) index = subject.length;
- }
+ var index = TO_INTEGER(position);
+ if (index < 0) index = 0;
+ if (index > subject.length) index = subject.length;
return %StringIndexOf(subject, pattern, index);
}
+%FunctionSetLength(StringIndexOf, 1);
+
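
Declaring position as a named parameter would change Function.length from 1 to 2, hence the explicit %FunctionSetLength above; behavior is otherwise the old slow path minus the %_Arguments plumbing:

  "abcabc".indexOf("b", 2);          // 4: TO_INTEGER(position) clamps into range
  "abcabc".indexOf("b");             // 1: TO_INTEGER(undefined) is 0
  String.prototype.indexOf.length;   // 1, as required by ECMA-262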
// ECMA-262 section 15.5.4.8
-function StringLastIndexOfJS(pat /* position */) { // length == 1
+function StringLastIndexOf(pat, pos) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "String.prototype.lastIndexOf");
var sub = TO_STRING(this);
@@ -126,16 +116,14 @@ function StringLastIndexOfJS(pat /* position */) { // length == 1
var pat = TO_STRING(pat);
var patLength = pat.length;
var index = subLength - patLength;
- if (%_ArgumentsLength() > 1) {
- var position = TO_NUMBER(%_Arguments(1));
- if (!NUMBER_IS_NAN(position)) {
- position = TO_INTEGER(position);
- if (position < 0) {
- position = 0;
- }
- if (position + patLength < subLength) {
- index = position;
- }
+ var position = TO_NUMBER(pos);
+ if (!NUMBER_IS_NAN(position)) {
+ position = TO_INTEGER(position);
+ if (position < 0) {
+ position = 0;
+ }
+ if (position + patLength < subLength) {
+ index = position;
}
}
if (index < 0) {
@@ -144,6 +132,8 @@ function StringLastIndexOfJS(pat /* position */) { // length == 1
return %StringLastIndexOf(sub, pat, index);
}
+%FunctionSetLength(StringLastIndexOf, 1);
+
// ECMA-262 section 15.5.4.9
//
@@ -180,11 +170,10 @@ function StringMatchJS(pattern) {
// For now we do nothing, as proper normalization requires big tables.
// If Intl is enabled, then i18n.js will override it and provide the
// proper functionality.
-function StringNormalizeJS() {
+function StringNormalize(formArg) { // length == 0
CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
var s = TO_STRING(this);
- var formArg = %_Arguments(0);
var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING(formArg);
var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
@@ -197,6 +186,8 @@ function StringNormalizeJS() {
return s;
}
+%FunctionSetLength(StringNormalize, 0);
+
// This has the same size as the RegExpLastMatchInfo array, and can be used
// for functions that expect that structure to be returned. It is used when
// the needle is a string rather than a regexp.
@@ -206,14 +197,12 @@ function StringNormalizeJS() {
var reusableMatchInfo = [2, "", "", -1, -1];
-// ECMA-262, section 15.5.4.11
+// ES6, section 21.1.3.14
function StringReplace(search, replace) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.replace");
- var subject = TO_STRING(this);
-
// Decision tree for dispatch
- // .. regexp search
+ // .. regexp search (in src/js/regexp.js, RegExpReplace)
// .... string replace
// ...... non-global search
// ........ empty string replace
@@ -229,40 +218,15 @@ function StringReplace(search, replace) {
// ...... function replace
// ...... string replace (with $-expansion)
- if (IS_REGEXP(search)) {
- if (!IS_CALLABLE(replace)) {
- replace = TO_STRING(replace);
-
- if (!REGEXP_GLOBAL(search)) {
- // Non-global regexp search, string replace.
- var match = RegExpExec(search, subject, 0);
- if (match == null) {
- search.lastIndex = 0
- return subject;
- }
- if (replace.length == 0) {
- return %_SubString(subject, 0, match[CAPTURE0]) +
- %_SubString(subject, match[CAPTURE1], subject.length)
- }
- return ExpandReplacement(replace, subject, RegExpLastMatchInfo,
- %_SubString(subject, 0, match[CAPTURE0])) +
- %_SubString(subject, match[CAPTURE1], subject.length);
- }
-
- // Global regexp search, string replace.
- search.lastIndex = 0;
- return %StringReplaceGlobalRegExpWithString(
- subject, search, replace, RegExpLastMatchInfo);
- }
-
- if (REGEXP_GLOBAL(search)) {
- // Global regexp search, function replace.
- return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
+ if (!IS_NULL_OR_UNDEFINED(search)) {
+ var replacer = search[replaceSymbol];
+ if (!IS_UNDEFINED(replacer)) {
+ return %_Call(replacer, search, this, replace);
}
- // Non-global regexp search, function replace.
- return StringReplaceNonGlobalRegExpWithFunction(subject, search, replace);
}
+ var subject = TO_STRING(this);
+
search = TO_STRING(search);
if (search.length == 1 &&
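With the regexp branches deleted, StringReplace now dispatches through the
search object's @@replace method before coercing anything to a string, so any
object carrying that well-known symbol intercepts the call. Once the symbol is
exposed as Symbol.replace (at this revision it still travels through the
private replaceSymbol import), the dispatch looks roughly like:

    var search = {
      [Symbol.replace](subject, replacement) {
        return "<" + subject + "|" + replacement + ">";
      }
    };
    "abc".replace(search, "x");  // "<abc|x>"
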
@@ -379,130 +343,6 @@ function ExpandReplacement(string, subject, matchInfo, result) {
}
-// Compute the string of a given regular expression capture.
-function CaptureString(string, lastCaptureInfo, index) {
- // Scale the index.
- var scaled = index << 1;
- // Compute start and end.
- var start = lastCaptureInfo[CAPTURE(scaled)];
- // If start isn't valid, return undefined.
- if (start < 0) return;
- var end = lastCaptureInfo[CAPTURE(scaled + 1)];
- return %_SubString(string, start, end);
-}
-
-
-// TODO(lrn): This array will survive indefinitely if replace is never
-// called again. However, it will be empty, since the contents are cleared
-// in the finally block.
-var reusableReplaceArray = new InternalArray(4);
-
-// Helper function for replacing regular expressions with the result of a
-// function application in String.prototype.replace.
-function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
- var resultArray = reusableReplaceArray;
- if (resultArray) {
- reusableReplaceArray = null;
- } else {
- // Inside a nested replace (replace called from the replacement function
- // of another replace) or we have failed to set the reusable array
- // back due to an exception in a replacement function. Create a new
- // array to use in the future, or until the original is written back.
- resultArray = new InternalArray(16);
- }
- var res = %RegExpExecMultiple(regexp,
- subject,
- RegExpLastMatchInfo,
- resultArray);
- regexp.lastIndex = 0;
- if (IS_NULL(res)) {
- // No matches at all.
- reusableReplaceArray = resultArray;
- return subject;
- }
- var len = res.length;
- if (NUMBER_OF_CAPTURES(RegExpLastMatchInfo) == 2) {
- // If the number of captures is two then there are no explicit captures in
- // the regexp, just the implicit capture that captures the whole match. In
- // this case we can simplify quite a bit and end up with something faster.
- // The builder will consist of some integers that indicate slices of the
- // input string and some replacements that were returned from the replace
- // function.
- var match_start = 0;
- for (var i = 0; i < len; i++) {
- var elem = res[i];
- if (%_IsSmi(elem)) {
- // Integers represent slices of the original string.
- if (elem > 0) {
- match_start = (elem >> 11) + (elem & 0x7ff);
- } else {
- match_start = res[++i] - elem;
- }
- } else {
- var func_result = replace(elem, match_start, subject);
- // Overwrite the i'th element in the results with the string we got
- // back from the callback function.
- res[i] = TO_STRING(func_result);
- match_start += elem.length;
- }
- }
- } else {
- for (var i = 0; i < len; i++) {
- var elem = res[i];
- if (!%_IsSmi(elem)) {
- // elem must be an Array.
- // Use the apply argument as backing for global RegExp properties.
- var func_result = %Apply(replace, UNDEFINED, elem, 0, elem.length);
- // Overwrite the i'th element in the results with the string we got
- // back from the callback function.
- res[i] = TO_STRING(func_result);
- }
- }
- }
- var result = %StringBuilderConcat(res, len, subject);
- resultArray.length = 0;
- reusableReplaceArray = resultArray;
- return result;
-}
-
-
-function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
- var matchInfo = RegExpExec(regexp, subject, 0);
- if (IS_NULL(matchInfo)) {
- regexp.lastIndex = 0;
- return subject;
- }
- var index = matchInfo[CAPTURE0];
- var result = %_SubString(subject, 0, index);
- var endOfMatch = matchInfo[CAPTURE1];
- // Compute the parameter list consisting of the match, captures, index,
- // and subject for the replace function invocation.
- // The number of captures plus one for the match.
- var m = NUMBER_OF_CAPTURES(matchInfo) >> 1;
- var replacement;
- if (m == 1) {
- // No captures, only the match, which is always valid.
- var s = %_SubString(subject, index, endOfMatch);
- // Don't call directly to avoid exposing the built-in global object.
- replacement = replace(s, index, subject);
- } else {
- var parameters = new InternalArray(m + 2);
- for (var j = 0; j < m; j++) {
- parameters[j] = CaptureString(subject, matchInfo, j);
- }
- parameters[j] = index;
- parameters[j + 1] = subject;
-
- replacement = %Apply(replace, UNDEFINED, parameters, 0, j + 2);
- }
-
- result += replacement; // The add method converts to string if necessary.
- // Can't use matchInfo any more from here, since the function could
- // overwrite it.
- return result + %_SubString(subject, endOfMatch, subject.length);
-}
-
-
// ES6 21.1.3.15.
function StringSearch(pattern) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.search");
@@ -719,28 +559,14 @@ function StringTrimRight() {
// ECMA-262, section 15.5.3.2
-function StringFromCharCode(code) {
- var n = %_ArgumentsLength();
- if (n == 1) return %_StringCharFromCode(code & 0xffff);
-
- var one_byte = %NewString(n, NEW_ONE_BYTE_STRING);
- var i;
- for (i = 0; i < n; i++) {
- code = %_Arguments(i) & 0xffff;
- if (code > 0xff) break;
- %_OneByteSeqStringSetChar(i, code, one_byte);
- }
- if (i == n) return one_byte;
- one_byte = %TruncateString(one_byte, i);
-
- var two_byte = %NewString(n - i, NEW_TWO_BYTE_STRING);
- %_TwoByteSeqStringSetChar(0, code, two_byte);
- i++;
- for (var j = 1; i < n; i++, j++) {
- code = %_Arguments(i) & 0xffff;
- %_TwoByteSeqStringSetChar(j, code, two_byte);
+function StringFromCharCode(_) { // length == 1
+ "use strict";
+ var s = "";
+ var n = arguments.length;
+ for (var i = 0; i < n; ++i) {
+ s += %_StringCharFromCode(arguments[i] & 0xffff);
}
- return one_byte + two_byte;
+ return s;
}
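The one-byte/two-byte sequential-string fast path gives way to a plain
strict-mode loop over arguments; each argument is still masked to a 16-bit
code unit. Behavior sketch:

    String.fromCharCode(72, 105);  // "Hi"
    String.fromCharCode(0x1F600);  // one code unit 0xF600, not the emoji
    String.fromCharCode.length;    // 1, via the single declared parameter
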
@@ -870,7 +696,7 @@ function StringRepeat(count) {
// ES6 draft 04-05-14, section 21.1.3.18
-function StringStartsWith(searchString /* position */) { // length == 1
+function StringStartsWith(searchString, position) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "String.prototype.startsWith");
var s = TO_STRING(this);
@@ -880,16 +706,10 @@ function StringStartsWith(searchString /* position */) { // length == 1
}
var ss = TO_STRING(searchString);
- var pos = 0;
- if (%_ArgumentsLength() > 1) {
- var arg = %_Arguments(1); // position
- if (!IS_UNDEFINED(arg)) {
- pos = TO_INTEGER(arg);
- }
- }
+ var pos = TO_INTEGER(position);
var s_len = s.length;
- var start = MathMin(MathMax(pos, 0), s_len);
+ var start = MinSimple(MaxSimple(pos, 0), s_len);
var ss_len = ss.length;
if (ss_len + start > s_len) {
return false;
@@ -898,9 +718,11 @@ function StringStartsWith(searchString /* position */) { // length == 1
return %_SubString(s, start, start + ss_len) === ss;
}
+%FunctionSetLength(StringStartsWith, 1);
+
// ES6 draft 04-05-14, section 21.1.3.7
-function StringEndsWith(searchString /* position */) { // length == 1
+function StringEndsWith(searchString, position) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "String.prototype.endsWith");
var s = TO_STRING(this);
@@ -911,15 +733,9 @@ function StringEndsWith(searchString /* position */) { // length == 1
var ss = TO_STRING(searchString);
var s_len = s.length;
- var pos = s_len;
- if (%_ArgumentsLength() > 1) {
- var arg = %_Arguments(1); // position
- if (!IS_UNDEFINED(arg)) {
- pos = TO_INTEGER(arg);
- }
- }
+  var pos = !IS_UNDEFINED(position) ? TO_INTEGER(position) : s_len;
- var end = MathMin(MathMax(pos, 0), s_len);
+ var end = MinSimple(MaxSimple(pos, 0), s_len);
var ss_len = ss.length;
var start = end - ss_len;
if (start < 0) {
@@ -929,9 +745,11 @@ function StringEndsWith(searchString /* position */) { // length == 1
return %_SubString(s, start, start + ss_len) === ss;
}
+%FunctionSetLength(StringEndsWith, 1);
+
// ES6 draft 04-05-14, section 21.1.3.6
-function StringIncludes(searchString /* position */) { // length == 1
+function StringIncludes(searchString, position) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "String.prototype.includes");
var string = TO_STRING(this);
@@ -941,11 +759,7 @@ function StringIncludes(searchString /* position */) { // length == 1
}
searchString = TO_STRING(searchString);
- var pos = 0;
- if (%_ArgumentsLength() > 1) {
- pos = %_Arguments(1); // position
- pos = TO_INTEGER(pos);
- }
+ var pos = TO_INTEGER(position);
var stringLength = string.length;
if (pos < 0) pos = 0;
@@ -959,6 +773,8 @@ function StringIncludes(searchString /* position */) { // length == 1
return %StringIndexOf(string, searchString, pos) !== -1;
}
+%FunctionSetLength(StringIncludes, 1);
+
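startsWith, endsWith and includes all follow the same recipe: a named position
parameter, TO_INTEGER clamping (via the MinSimple/MaxSimple helpers in the
first two, a manual clamp in includes), endsWith keeping its
undefined-means-end default, and %FunctionSetLength pinning the arity at 1.
For instance:

    "hello".startsWith("ell", 1);      // true
    "hello".endsWith("ell", 4);        // true: the search ends at index 4
    "hello".includes("lo", -3);        // true: negative positions clamp to 0
    String.prototype.includes.length;  // 1
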
// ES6 Draft 05-22-2014, section 21.1.3.3
function StringCodePointAt(pos) {
@@ -984,12 +800,13 @@ function StringCodePointAt(pos) {
// ES6 Draft 05-22-2014, section 21.1.2.2
function StringFromCodePoint(_) { // length = 1
+ "use strict";
var code;
- var length = %_ArgumentsLength();
+ var length = arguments.length;
var index;
var result = "";
for (index = 0; index < length; index++) {
- code = %_Arguments(index);
+ code = arguments[index];
if (!%_IsSmi(code)) {
code = TO_NUMBER(code);
}
@@ -1013,8 +830,8 @@ function StringFromCodePoint(_) { // length = 1
// ES6 Draft 03-17-2015, section 21.1.2.4
function StringRaw(callSite) {
- // TODO(caitp): Use rest parameters when implemented
- var numberOfSubstitutions = %_ArgumentsLength();
+ "use strict";
+ var numberOfSubstitutions = arguments.length;
var cooked = TO_OBJECT(callSite);
var raw = TO_OBJECT(cooked.raw);
var literalSegments = TO_LENGTH(raw.length);
@@ -1024,7 +841,7 @@ function StringRaw(callSite) {
for (var i = 1; i < literalSegments; ++i) {
if (i < numberOfSubstitutions) {
- result += TO_STRING(%_Arguments(i));
+ result += TO_STRING(arguments[i]);
}
result += TO_STRING(raw[i]);
}
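StringRaw now counts its substitutions with arguments.length under "use
strict" instead of the %_ArgumentsLength intrinsic; the interleaving of raw
segments and substitutions is unchanged. As a tag it behaves like:

    String.raw`a${1}b${2}c`;               // "a1b2c"
    String.raw({ raw: ["x", "y"] }, "-");  // "x-y"
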
@@ -1058,11 +875,11 @@ utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
"concat", StringConcat,
"endsWith", StringEndsWith,
"includes", StringIncludes,
- "indexOf", StringIndexOfJS,
- "lastIndexOf", StringLastIndexOfJS,
+ "indexOf", StringIndexOf,
+ "lastIndexOf", StringLastIndexOf,
"localeCompare", StringLocaleCompareJS,
"match", StringMatchJS,
- "normalize", StringNormalizeJS,
+ "normalize", StringNormalize,
"repeat", StringRepeat,
"replace", StringReplace,
"search", StringSearch,
@@ -1098,9 +915,10 @@ utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
// Exports
utils.Export(function(to) {
+ to.ExpandReplacement = ExpandReplacement;
to.StringCharAt = StringCharAtJS;
- to.StringIndexOf = StringIndexOfJS;
- to.StringLastIndexOf = StringLastIndexOfJS;
+ to.StringIndexOf = StringIndexOf;
+ to.StringLastIndexOf = StringLastIndexOf;
to.StringMatch = StringMatchJS;
to.StringReplace = StringReplace;
to.StringSlice = StringSlice;
diff --git a/deps/v8/src/js/symbol.js b/deps/v8/src/js/symbol.js
index 5be6e0168d..ae543691c2 100644
--- a/deps/v8/src/js/symbol.js
+++ b/deps/v8/src/js/symbol.js
@@ -11,7 +11,6 @@
// -------------------------------------------------------------------
// Imports
-var GlobalObject = global.Object;
var GlobalSymbol = global.Symbol;
var hasInstanceSymbol = utils.ImportNow("has_instance_symbol");
var isConcatSpreadableSymbol =
@@ -73,22 +72,11 @@ function SymbolKeyFor(symbol) {
return %SymbolRegistry().keyFor[symbol];
}
-
-// ES6 19.1.2.8
-function ObjectGetOwnPropertySymbols(obj) {
- obj = TO_OBJECT(obj);
-
- return %GetOwnPropertyKeys(obj, PROPERTY_FILTER_SKIP_STRINGS);
-}
-
// -------------------------------------------------------------------
-%FunctionSetPrototype(GlobalSymbol, new GlobalObject());
-
utils.InstallConstants(GlobalSymbol, [
- // TODO(rossberg): expose when implemented.
- // "hasInstance", hasInstanceSymbol,
- // "isConcatSpreadable", isConcatSpreadableSymbol,
+ "hasInstance", hasInstanceSymbol,
+ "isConcatSpreadable", isConcatSpreadableSymbol,
"iterator", iteratorSymbol,
// TODO(yangguo): expose when implemented.
// "match", matchSymbol,
@@ -108,8 +96,6 @@ utils.InstallFunctions(GlobalSymbol, DONT_ENUM, [
]);
%AddNamedProperty(
- GlobalSymbol.prototype, "constructor", GlobalSymbol, DONT_ENUM);
-%AddNamedProperty(
GlobalSymbol.prototype, toStringTagSymbol, "Symbol", DONT_ENUM | READ_ONLY);
utils.InstallFunctions(GlobalSymbol.prototype, DONT_ENUM | READ_ONLY, [
@@ -121,10 +107,6 @@ utils.InstallFunctions(GlobalSymbol.prototype, DONT_ENUM, [
"valueOf", SymbolValueOf
]);
-utils.InstallFunctions(GlobalObject, DONT_ENUM, [
- "getOwnPropertySymbols", ObjectGetOwnPropertySymbols
-]);
-
// -------------------------------------------------------------------
// Exports
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index fd668a57fd..3d500a379e 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -300,8 +300,10 @@ function NAMESubArray(begin, end) {
var newLength = endInt - beginInt;
var beginByteOffset =
%_ArrayBufferViewGetByteOffset(this) + beginInt * ELEMENT_SIZE;
- return TypedArraySpeciesCreate(this, %TypedArrayGetBuffer(this),
- beginByteOffset, newLength, true);
+ // BUG(v8:4665): For web compatibility, subarray needs to always build an
+ // instance of the default constructor.
+ // TODO(littledan): Switch to the standard or standardize the fix
+ return new GlobalNAME(%TypedArrayGetBuffer(this), beginByteOffset, newLength);
}
endmacro
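The species machinery is deliberately bypassed here: per the referenced bug,
subarray must construct through the default constructor for web compatibility,
so a typed-array subclass gets a plain instance back. A sketch of the behavior
under this patch:

    class MyBytes extends Uint8Array {}
    var view = new MyBytes(8).subarray(0, 4);
    view instanceof Uint8Array;  // true
    view instanceof MyBytes;     // false, since @@species is not consulted
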
@@ -460,6 +462,7 @@ function TypedArraySet(obj, offset) {
return;
}
}
+%FunctionSetLength(TypedArraySet, 1);
function TypedArrayGetToStringTag() {
if (!%_IsTypedArray(this)) return;
@@ -564,22 +567,20 @@ function TypedArrayReverse() {
function TypedArrayComparefn(x, y) {
- if (IsNaN(x) && IsNaN(y)) {
- return IsNaN(y) ? 0 : 1;
+ if (x === 0 && x === y) {
+ x = 1 / x;
+ y = 1 / y;
}
- if (IsNaN(x)) {
+ if (x < y) {
+ return -1;
+ } else if (x > y) {
+ return 1;
+ } else if (IsNaN(x) && IsNaN(y)) {
+ return IsNaN(y) ? 0 : 1;
+ } else if (IsNaN(x)) {
return 1;
}
- if (x === 0 && x === y) {
- if (%_IsMinusZero(x)) {
- if (!%_IsMinusZero(y)) {
- return -1;
- }
- } else if (%_IsMinusZero(y)) {
- return 1;
- }
- }
- return x - y;
+ return 0;
}
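The default comparator is restructured around explicit zero handling: dividing
1 by each value turns -0 into -Infinity so it orders before +0, and NaNs fall
through to the trailing IsNaN checks, which push them to the end. With the
default sort:

    new Float64Array([NaN, 1, -0, 0, -1]).sort();
    // [-1, -0, 0, 1, NaN]
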
@@ -614,7 +615,7 @@ function TypedArrayLastIndexOf(element, index) {
var length = %_TypedArrayGetLength(this);
return InnerArrayLastIndexOf(this, element, index, length,
- %_ArgumentsLength());
+ arguments.length);
}
%FunctionSetLength(TypedArrayLastIndexOf, 1);
@@ -678,7 +679,7 @@ function TypedArrayReduce(callback, current) {
var length = %_TypedArrayGetLength(this);
return InnerArrayReduce(callback, current, this, length,
- %_ArgumentsLength());
+ arguments.length);
}
%FunctionSetLength(TypedArrayReduce, 1);
@@ -689,7 +690,7 @@ function TypedArrayReduceRight(callback, current) {
var length = %_TypedArrayGetLength(this);
return InnerArrayReduceRight(callback, current, this, length,
- %_ArgumentsLength());
+ arguments.length);
}
%FunctionSetLength(TypedArrayReduceRight, 1);
@@ -750,10 +751,10 @@ function TypedArrayIncludes(searchElement, fromIndex) {
// ES6 draft 08-24-14, section 22.2.2.2
function TypedArrayOf() {
- var length = %_ArgumentsLength();
+ var length = arguments.length;
var array = TypedArrayCreate(this, length);
for (var i = 0; i < length; i++) {
- array[i] = %_Arguments(i);
+ array[i] = arguments[i];
}
return array;
}
@@ -846,36 +847,6 @@ TYPED_ARRAYS(SETUP_TYPED_ARRAY)
// --------------------------- DataView -----------------------------
-function DataViewConstructor(buffer, byteOffset, byteLength) { // length = 3
- if (IS_UNDEFINED(new.target)) {
- throw MakeTypeError(kConstructorNotFunction, "DataView");
- }
-
- // TODO(binji): support SharedArrayBuffers?
- if (!IS_ARRAYBUFFER(buffer)) throw MakeTypeError(kDataViewNotArrayBuffer);
- if (!IS_UNDEFINED(byteOffset)) {
- byteOffset = ToPositiveInteger(byteOffset, kInvalidDataViewOffset);
- }
- if (!IS_UNDEFINED(byteLength)) {
- byteLength = TO_INTEGER(byteLength);
- }
-
- var bufferByteLength = %_ArrayBufferGetByteLength(buffer);
-
- var offset = IS_UNDEFINED(byteOffset) ? 0 : byteOffset;
- if (offset > bufferByteLength) throw MakeRangeError(kInvalidDataViewOffset);
-
- var length = IS_UNDEFINED(byteLength)
- ? bufferByteLength - offset
- : byteLength;
- if (length < 0 || offset + length > bufferByteLength) {
- throw new MakeRangeError(kInvalidDataViewLength);
- }
- var result = %NewObject(GlobalDataView, new.target);
- %_DataViewInitialize(result, buffer, offset, length);
- return result;
-}
-
function DataViewGetBufferJS() {
if (!IS_DATAVIEW(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver, 'DataView.buffer', this);
@@ -917,26 +888,27 @@ function DataViewGetTYPENAMEJS(offset, little_endian) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'DataView.getTYPENAME', this);
}
- if (%_ArgumentsLength() < 1) throw MakeTypeError(kInvalidArgument);
+ if (arguments.length < 1) throw MakeTypeError(kInvalidArgument);
offset = ToPositiveInteger(offset, kInvalidDataViewAccessorOffset);
return %DataViewGetTYPENAME(this, offset, !!little_endian);
}
+%FunctionSetLength(DataViewGetTYPENAMEJS, 1);
function DataViewSetTYPENAMEJS(offset, value, little_endian) {
if (!IS_DATAVIEW(this)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'DataView.setTYPENAME', this);
}
- if (%_ArgumentsLength() < 2) throw MakeTypeError(kInvalidArgument);
+ if (arguments.length < 2) throw MakeTypeError(kInvalidArgument);
offset = ToPositiveInteger(offset, kInvalidDataViewAccessorOffset);
%DataViewSetTYPENAME(this, offset, TO_NUMBER(value), !!little_endian);
}
+%FunctionSetLength(DataViewSetTYPENAMEJS, 2);
endmacro
DATA_VIEW_TYPES(DATA_VIEW_GETTER_SETTER)
// Setup the DataView constructor.
-%SetCode(GlobalDataView, DataViewConstructor);
%FunctionSetPrototype(GlobalDataView, new GlobalObject);
// Set up constructor property on the DataView prototype.
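Alongside moving the DataView constructor out of JavaScript (the %SetCode call
above disappears), the macro-generated accessors now advertise their spec
arities while still requiring their mandatory arguments at runtime. Roughly:

    DataView.prototype.getUint32.length;  // 1 (offset; little_endian is optional)
    DataView.prototype.setUint32.length;  // 2 (offset, value)
    new DataView(new ArrayBuffer(4)).getUint8();  // throws TypeError: offset required
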
diff --git a/deps/v8/src/js/v8natives.js b/deps/v8/src/js/v8natives.js
index 26447dac5d..5e1a8256ee 100644
--- a/deps/v8/src/js/v8natives.js
+++ b/deps/v8/src/js/v8natives.js
@@ -10,7 +10,6 @@
// Imports
var GlobalArray = global.Array;
-var GlobalBoolean = global.Boolean;
var GlobalNumber = global.Number;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
@@ -24,7 +23,6 @@ var ObjectToString = utils.ImportNow("object_to_string");
var ObserveBeginPerformSplice;
var ObserveEndPerformSplice;
var ObserveEnqueueSpliceRecord;
-var SameValue = utils.ImportNow("SameValue");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
@@ -545,17 +543,17 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
if ((IsGenericDescriptor(desc) ||
IsDataDescriptor(desc) == IsDataDescriptor(current)) &&
(!desc.hasEnumerable() ||
- SameValue(desc.isEnumerable(), current.isEnumerable())) &&
+ %SameValue(desc.isEnumerable(), current.isEnumerable())) &&
(!desc.hasConfigurable() ||
- SameValue(desc.isConfigurable(), current.isConfigurable())) &&
+ %SameValue(desc.isConfigurable(), current.isConfigurable())) &&
(!desc.hasWritable() ||
- SameValue(desc.isWritable(), current.isWritable())) &&
+ %SameValue(desc.isWritable(), current.isWritable())) &&
(!desc.hasValue() ||
- SameValue(desc.getValue(), current.getValue())) &&
+ %SameValue(desc.getValue(), current.getValue())) &&
(!desc.hasGetter() ||
- SameValue(desc.getGet(), current.getGet())) &&
+ %SameValue(desc.getGet(), current.getGet())) &&
(!desc.hasSetter() ||
- SameValue(desc.getSet(), current.getSet()))) {
+ %SameValue(desc.getSet(), current.getSet()))) {
return true;
}
if (!current.isConfigurable()) {
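The utils-imported SameValue wrapper gives way to the %SameValue runtime call
throughout the descriptor comparison; the semantics are those of Object.is,
which is what distinguishes these checks from ===:

    Object.is(NaN, NaN);  // true  (NaN === NaN is false)
    Object.is(0, -0);     // false (0 === -0 is true)
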
@@ -594,7 +592,7 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
}
}
if (!currentIsWritable && desc.hasValue() &&
- !SameValue(desc.getValue(), current.getValue())) {
+ !%SameValue(desc.getValue(), current.getValue())) {
if (should_throw) {
throw MakeTypeError(kRedefineDisallowed, p);
} else {
@@ -605,14 +603,14 @@ function DefineObjectProperty(obj, p, desc, should_throw) {
// Step 11
if (IsAccessorDescriptor(desc) && IsAccessorDescriptor(current)) {
if (desc.hasSetter() &&
- !SameValue(desc.getSet(), current.getSet())) {
+ !%SameValue(desc.getSet(), current.getSet())) {
if (should_throw) {
throw MakeTypeError(kRedefineDisallowed, p);
} else {
return false;
}
}
- if (desc.hasGetter() && !SameValue(desc.getGet(),current.getGet())) {
+ if (desc.hasGetter() && !%SameValue(desc.getGet(),current.getGet())) {
if (should_throw) {
throw MakeTypeError(kRedefineDisallowed, p);
} else {
@@ -772,19 +770,6 @@ function ObjectSetPrototypeOf(obj, proto) {
}
-// ES6 section 19.1.2.6
-function ObjectGetOwnPropertyDescriptor(obj, p) {
- return %GetOwnProperty(obj, p);
-}
-
-
-// ES5 section 15.2.3.4.
-function ObjectGetOwnPropertyNames(obj) {
- obj = TO_OBJECT(obj);
- return %GetOwnPropertyKeys(obj, PROPERTY_FILTER_SKIP_SYMBOLS);
-}
-
-
// ES5 section 15.2.3.6.
function ObjectDefineProperty(obj, p, attributes) {
// The new pure-C++ implementation doesn't support O.o.
@@ -802,11 +787,6 @@ function ObjectDefineProperty(obj, p, attributes) {
}
-function GetOwnEnumerablePropertyNames(object) {
- return %GetOwnPropertyKeys(object, PROPERTY_FILTER_ONLY_ENUMERABLE);
-}
-
-
// ES5 section 15.2.3.7.
function ObjectDefineProperties(obj, properties) {
// The new pure-C++ implementation doesn't support O.o.
@@ -816,7 +796,7 @@ function ObjectDefineProperties(obj, properties) {
throw MakeTypeError(kCalledOnNonObject, "Object.defineProperties");
}
var props = TO_OBJECT(properties);
- var names = GetOwnEnumerablePropertyNames(props);
+ var names = %GetOwnPropertyKeys(props, PROPERTY_FILTER_ONLY_ENUMERABLE);
var descriptors = new InternalArray();
for (var i = 0; i < names.length; i++) {
descriptors.push(ToPropertyDescriptor(props[names[i]]));
@@ -889,65 +869,13 @@ utils.InstallFunctions(GlobalObject, DONT_ENUM, [
"defineProperties", ObjectDefineProperties,
"getPrototypeOf", ObjectGetPrototypeOf,
"setPrototypeOf", ObjectSetPrototypeOf,
- "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor,
- "getOwnPropertyNames", ObjectGetOwnPropertyNames,
// getOwnPropertySymbols is added in symbol.js.
- "is", SameValue, // ECMA-262, Edition 6, section 19.1.2.10
+  // "is" is added in bootstrapper.cc.
// deliverChangeRecords, getNotifier, observe and unobserve are added
// in object-observe.js.
]);
-// ----------------------------------------------------------------------------
-// Boolean
-
-function BooleanConstructor(x) {
- // TODO(bmeurer): Move this to toplevel.
- "use strict";
- if (!IS_UNDEFINED(new.target)) {
- %_SetValueOf(this, TO_BOOLEAN(x));
- } else {
- return TO_BOOLEAN(x);
- }
-}
-
-
-function BooleanToString() {
- // NOTE: Both Boolean objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- var b = this;
- if (!IS_BOOLEAN(b)) {
- if (!IS_BOOLEAN_WRAPPER(b)) {
- throw MakeTypeError(kNotGeneric, 'Boolean.prototype.toString');
- }
- b = %_ValueOf(b);
- }
- return b ? 'true' : 'false';
-}
-
-
-function BooleanValueOf() {
- // NOTE: Both Boolean objects and values can enter here as
- // 'this'. This is not as dictated by ECMA-262.
- if (!IS_BOOLEAN(this) && !IS_BOOLEAN_WRAPPER(this)) {
- throw MakeTypeError(kNotGeneric, 'Boolean.prototype.valueOf');
- }
- return %_ValueOf(this);
-}
-
-
-// ----------------------------------------------------------------------------
-
-%SetCode(GlobalBoolean, BooleanConstructor);
-%FunctionSetPrototype(GlobalBoolean, new GlobalBoolean(false));
-%AddNamedProperty(GlobalBoolean.prototype, "constructor", GlobalBoolean,
- DONT_ENUM);
-
-utils.InstallFunctions(GlobalBoolean.prototype, DONT_ENUM, [
- "toString", BooleanToString,
- "valueOf", BooleanValueOf
-]);
-
// ----------------------------------------------------------------------------
// Number
diff --git a/deps/v8/src/parsing/json-parser.h b/deps/v8/src/json-parser.h
index e23c73383e..efd3c04b98 100644
--- a/deps/v8/src/parsing/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PARSING_JSON_PARSER_H_
-#define V8_PARSING_JSON_PARSER_H_
+#ifndef V8_JSON_PARSER_H_
+#define V8_JSON_PARSER_H_
#include "src/char-predicates.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/factory.h"
+#include "src/field-type.h"
#include "src/messages.h"
#include "src/parsing/scanner.h"
#include "src/parsing/token.h"
#include "src/transitions.h"
-#include "src/types.h"
namespace v8 {
namespace internal {
@@ -128,7 +128,9 @@ class JsonParser BASE_EMBEDDED {
}
Handle<String> ParseJsonInternalizedString() {
- return ScanJsonString<true>();
+ Handle<String> result = ScanJsonString<true>();
+ if (result.is_null()) return result;
+ return factory()->InternalizeString(result);
}
template <bool is_internalized>
@@ -217,11 +219,12 @@ MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
// Parse failed. Current character is the unexpected token.
Factory* factory = this->factory();
MessageTemplate::Template message;
- Handle<String> argument;
+ Handle<Object> arg1 = Handle<Smi>(Smi::FromInt(position_), isolate());
+ Handle<Object> arg2;
switch (c0_) {
case kEndOfString:
- message = MessageTemplate::kUnexpectedEOS;
+ message = MessageTemplate::kJsonParseUnexpectedEOS;
break;
case '-':
case '0':
@@ -234,14 +237,15 @@ MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
case '7':
case '8':
case '9':
- message = MessageTemplate::kUnexpectedTokenNumber;
+ message = MessageTemplate::kJsonParseUnexpectedTokenNumber;
break;
case '"':
- message = MessageTemplate::kUnexpectedTokenString;
+ message = MessageTemplate::kJsonParseUnexpectedTokenString;
break;
default:
- message = MessageTemplate::kUnexpectedToken;
- argument = factory->LookupSingleCharacterStringFromCode(c0_);
+ message = MessageTemplate::kJsonParseUnexpectedToken;
+ arg2 = arg1;
+ arg1 = factory->LookupSingleCharacterStringFromCode(c0_);
break;
}
@@ -250,7 +254,7 @@ MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
// separated source file.
isolate()->debug()->OnCompileError(script);
MessageLocation location(script, position_, position_ + 1);
- Handle<Object> error = factory->NewSyntaxError(message, argument);
+ Handle<Object> error = factory->NewSyntaxError(message, arg1, arg2);
return isolate()->template Throw<Object>(error, &location);
}
return result;
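JSON parse failures now throw dedicated kJsonParse* templates and carry the
scanner position as an extra message argument instead of reusing the generic
parser messages. The exact wording is V8's own; illustratively:

    try {
      JSON.parse('{"a": 1,}');
    } catch (e) {
      // e is a SyntaxError whose message names the unexpected token and,
      // with this change, the position at which it was found.
    }
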
@@ -416,7 +420,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
!target->instance_descriptors()
->GetFieldType(descriptor)
->NowContains(value)) {
- Handle<HeapType> value_type(
+ Handle<FieldType> value_type(
value->OptimalType(isolate(), expected_representation));
Map::GeneralizeFieldType(target, descriptor,
expected_representation, value_type);
@@ -839,4 +843,4 @@ Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
} // namespace internal
} // namespace v8
-#endif // V8_PARSING_JSON_PARSER_H_
+#endif // V8_JSON_PARSER_H_
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index 5c0459eb1b..d97ca2ba73 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -567,8 +567,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
Handle<FixedArray> contents;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate_, contents,
- JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY, ENUMERABLE_STRINGS),
- EXCEPTION);
+ JSReceiver::GetKeys(object, OWN_ONLY, ENUMERABLE_STRINGS), EXCEPTION);
for (int i = 0; i < contents->length(); i++) {
Object* key = contents->get(i);
diff --git a/deps/v8/src/key-accumulator.cc b/deps/v8/src/key-accumulator.cc
index e7a9c3cceb..c2c4996922 100644
--- a/deps/v8/src/key-accumulator.cc
+++ b/deps/v8/src/key-accumulator.cc
@@ -29,6 +29,9 @@ Handle<FixedArray> KeyAccumulator::GetKeys(GetKeysConversion convert) {
// Make sure we have all the lengths collected.
NextPrototype();
+ if (type_ == OWN_ONLY && !ownProxyKeys_.is_null()) {
+ return ownProxyKeys_;
+ }
// Assemble the result array by first adding the element keys and then the
// property keys. We use the total number of String + Symbol keys per level in
// |level_lengths_| and the available element keys in the corresponding bucket
@@ -260,7 +263,13 @@ Maybe<bool> KeyAccumulator::AddKeysFromProxy(Handle<JSProxy> proxy,
// Proxies define a complete list of keys with no distinction of
// elements and properties, which breaks the normal assumption for the
// KeyAccumulator.
- AddKeys(keys, PROXY_MAGIC);
+ if (type_ == OWN_ONLY) {
+ ownProxyKeys_ = keys;
+ level_string_length_ = keys->length();
+ length_ = level_string_length_;
+ } else {
+ AddKeys(keys, PROXY_MAGIC);
+ }
// Invert the current length to indicate a present proxy, so we can ignore
// element keys for this level. Otherwise we would not fully respect the order
// given by the proxy.
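For OWN_ONLY collections the trap result is now stashed verbatim in
ownProxyKeys_ and handed back from GetKeys, so the ordering produced by a
proxy's ownKeys trap survives untouched. A sketch of the observable effect:

    var p = new Proxy({}, {
      ownKeys() { return ["b", "a", "10", "2"]; }
    });
    Object.getOwnPropertyNames(p);  // ["b", "a", "10", "2"], trap order preserved
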
diff --git a/deps/v8/src/key-accumulator.h b/deps/v8/src/key-accumulator.h
index 8a4d886f51..9daee10cd3 100644
--- a/deps/v8/src/key-accumulator.h
+++ b/deps/v8/src/key-accumulator.h
@@ -31,17 +31,16 @@ enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX, PROXY_MAGIC };
// are more compact and allow for reasonably fast includes check.
class KeyAccumulator final BASE_EMBEDDED {
public:
- KeyAccumulator(Isolate* isolate, PropertyFilter filter)
- : isolate_(isolate), filter_(filter) {}
+ KeyAccumulator(Isolate* isolate, KeyCollectionType type,
+ PropertyFilter filter)
+ : isolate_(isolate), type_(type), filter_(filter) {}
~KeyAccumulator();
bool AddKey(uint32_t key);
- bool AddKey(Object* key, AddKeyConversion convert = DO_NOT_CONVERT);
- bool AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
- void AddKeys(Handle<FixedArray> array,
- AddKeyConversion convert = DO_NOT_CONVERT);
- void AddKeys(Handle<JSObject> array,
- AddKeyConversion convert = DO_NOT_CONVERT);
+ bool AddKey(Object* key, AddKeyConversion convert);
+ bool AddKey(Handle<Object> key, AddKeyConversion convert);
+ void AddKeys(Handle<FixedArray> array, AddKeyConversion convert);
+ void AddKeys(Handle<JSObject> array, AddKeyConversion convert);
void AddKeysFromProxy(Handle<JSObject> array);
Maybe<bool> AddKeysFromProxy(Handle<JSProxy> proxy, Handle<FixedArray> keys);
void AddElementKeysFromInterceptor(Handle<JSObject> array);
@@ -61,6 +60,7 @@ class KeyAccumulator final BASE_EMBEDDED {
void SortCurrentElementsListRemoveDuplicates();
Isolate* isolate_;
+ KeyCollectionType type_;
PropertyFilter filter_;
// |elements_| contains the sorted element keys (indices) per level.
std::vector<std::vector<uint32_t>*> elements_;
@@ -73,6 +73,7 @@ class KeyAccumulator final BASE_EMBEDDED {
// |symbol_properties_| contains the unique Symbol property keys for all
// levels in insertion order per level.
Handle<OrderedHashSet> symbol_properties_;
+ Handle<FixedArray> ownProxyKeys_;
// |length_| keeps track of the total number of all element and property keys.
int length_ = 0;
// |levelLength_| keeps track of the number of String keys in the current
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index e8c15572ad..6902504d10 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -193,5 +193,10 @@ const char* DefaultPlatform::GetCategoryGroupName(
static const char dummy[] = "dummy";
return dummy;
}
+
+size_t DefaultPlatform::NumberOfAvailableBackgroundThreads() {
+ return static_cast<size_t>(thread_pool_size_);
+}
+
} // namespace platform
} // namespace v8
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index 8bdda95be6..2c428ee77e 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -34,6 +34,7 @@ class DefaultPlatform : public Platform {
bool PumpMessageLoop(v8::Isolate* isolate);
// v8::Platform implementation.
+ size_t NumberOfAvailableBackgroundThreads() override;
void CallOnBackgroundThread(Task* task,
ExpectedRuntime expected_runtime) override;
void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override;
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index 8b8a5dd1ed..83e5f4594e 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -207,15 +207,13 @@ size_t GetMemoryUsedByList(const List<T, P>& list) {
class Map;
-template<class> class TypeImpl;
-struct HeapTypeConfig;
-typedef TypeImpl<HeapTypeConfig> HeapType;
+class FieldType;
class Code;
template<typename T> class Handle;
typedef List<Map*> MapList;
typedef List<Code*> CodeList;
typedef List<Handle<Map> > MapHandleList;
-typedef List<Handle<HeapType> > TypeHandleList;
+typedef List<Handle<FieldType> > TypeHandleList;
typedef List<Handle<Code> > CodeHandleList;
// Perform binary search for an element in an already sorted
diff --git a/deps/v8/src/log-inl.h b/deps/v8/src/log-inl.h
index d47a24b96a..765398fdd7 100644
--- a/deps/v8/src/log-inl.h
+++ b/deps/v8/src/log-inl.h
@@ -38,19 +38,6 @@ void Logger::CallEventLogger(Isolate* isolate, const char* name, StartEnd se,
isolate->event_logger()(name, se);
}
}
- if (expose_to_api) {
- if (se == START) {
- TRACE_EVENT_BEGIN0("v8", name);
- } else {
- TRACE_EVENT_END0("v8", name);
- }
- } else {
- if (se == START) {
- TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8"), name);
- } else {
- TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8"), name);
- }
- }
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index a10d9621d3..cbdd9dd106 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -934,6 +934,7 @@ void Logger::TimerEvent(Logger::StartEnd se, const char* name) {
void Logger::EnterExternal(Isolate* isolate) {
LOG(isolate, TimerEvent(START, TimerEventExternal::name()));
+ TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
DCHECK(isolate->current_vm_state() == JS);
isolate->set_current_vm_state(EXTERNAL);
}
@@ -941,6 +942,7 @@ void Logger::EnterExternal(Isolate* isolate) {
void Logger::LeaveExternal(Isolate* isolate) {
LOG(isolate, TimerEvent(END, TimerEventExternal::name()));
+ TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
DCHECK(isolate->current_vm_state() == EXTERNAL);
isolate->set_current_vm_state(JS);
}
@@ -1497,11 +1499,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
msg.Append(",%ld", static_cast<int>(timer_.Elapsed().InMicroseconds()));
if (sample->has_external_callback) {
msg.Append(",1,");
-#if USES_FUNCTION_DESCRIPTORS
- msg.AppendAddress(*FUNCTION_ENTRYPOINT_ADDRESS(sample->external_callback));
-#else
- msg.AppendAddress(sample->external_callback);
-#endif
+ msg.AppendAddress(sample->external_callback_entry);
} else {
msg.Append(",0,");
msg.AppendAddress(sample->tos);
@@ -1712,6 +1710,9 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data);
Object* callback_obj = call_data->callback();
Address entry_point = v8::ToCData<Address>(callback_obj);
+#if USES_FUNCTION_DESCRIPTORS
+ entry_point = *FUNCTION_ENTRYPOINT_ADDRESS(entry_point);
+#endif
PROFILE(isolate_, CallbackEvent(*func_name, entry_point));
}
} else {
@@ -1749,16 +1750,22 @@ void Logger::LogAccessorCallbacks() {
HeapIterator iterator(heap);
DisallowHeapAllocation no_gc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (!obj->IsExecutableAccessorInfo()) continue;
- ExecutableAccessorInfo* ai = ExecutableAccessorInfo::cast(obj);
+ if (!obj->IsAccessorInfo()) continue;
+ AccessorInfo* ai = AccessorInfo::cast(obj);
if (!ai->name()->IsName()) continue;
Address getter_entry = v8::ToCData<Address>(ai->getter());
Name* name = Name::cast(ai->name());
if (getter_entry != 0) {
+#if USES_FUNCTION_DESCRIPTORS
+ getter_entry = *FUNCTION_ENTRYPOINT_ADDRESS(getter_entry);
+#endif
PROFILE(isolate_, GetterCallbackEvent(name, getter_entry));
}
Address setter_entry = v8::ToCData<Address>(ai->setter());
if (setter_entry != 0) {
+#if USES_FUNCTION_DESCRIPTORS
+ setter_entry = *FUNCTION_ENTRYPOINT_ADDRESS(setter_entry);
+#endif
PROFILE(isolate_, SetterCallbackEvent(name, setter_entry));
}
}
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index 064115b3aa..1a454dad26 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -410,11 +410,13 @@ class Logger {
friend class CpuProfiler;
};
-
#define TIMER_EVENTS_LIST(V) \
V(RecompileSynchronous, true) \
V(RecompileConcurrent, true) \
V(CompileFullCode, true) \
+ V(OptimizeCode, true) \
+ V(CompileCode, true) \
+ V(DeoptimizeCode, true) \
V(Execute, true) \
V(External, true) \
V(IcMiss, false)
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 48da4fabee..bad5a20df5 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -7,6 +7,7 @@
#include "src/bootstrapper.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
+#include "src/field-type.h"
#include "src/isolate-inl.h"
namespace v8 {
@@ -52,7 +53,7 @@ void LookupIterator::Next() {
has_property_ = false;
JSReceiver* holder = *holder_;
- Map* map = *holder_map_;
+ Map* map = holder->map();
// Perform lookup on current holder.
state_ = LookupInHolder(map, holder);
@@ -73,10 +74,7 @@ void LookupIterator::Next() {
state_ = LookupInHolder(map, holder);
} while (!IsFound());
- if (holder != *holder_) {
- holder_ = handle(holder, isolate_);
- holder_map_ = handle(map, isolate_);
- }
+ if (holder != *holder_) holder_ = handle(holder, isolate_);
}
@@ -85,7 +83,6 @@ void LookupIterator::RestartInternal(InterceptorState interceptor_state) {
interceptor_state_ = interceptor_state;
property_details_ = PropertyDetails::Empty();
holder_ = initial_holder_;
- holder_map_ = handle(holder_->map(), isolate_);
number_ = DescriptorArray::kNotFound;
Next();
}
@@ -122,9 +119,10 @@ Handle<Map> LookupIterator::GetReceiverMap() const {
Handle<JSObject> LookupIterator::GetStoreTarget() const {
if (receiver_->IsJSGlobalProxy()) {
- PrototypeIterator iter(isolate(), receiver_);
- if (iter.IsAtEnd()) return Handle<JSGlobalProxy>::cast(receiver_);
- return PrototypeIterator::GetCurrent<JSGlobalObject>(iter);
+ Object* prototype = JSGlobalProxy::cast(*receiver_)->map()->prototype();
+ if (!prototype->IsNull()) {
+ return handle(JSGlobalObject::cast(prototype), isolate_);
+ }
}
return Handle<JSObject>::cast(receiver_);
}
@@ -140,20 +138,56 @@ bool LookupIterator::HasAccess() const {
void LookupIterator::ReloadPropertyInformation() {
state_ = BEFORE_PROPERTY;
interceptor_state_ = InterceptorState::kUninitialized;
- state_ = LookupInHolder(*holder_map_, *holder_);
- DCHECK(IsFound() || holder_map_->is_dictionary_map());
+ state_ = LookupInHolder(holder_->map(), *holder_);
+ DCHECK(IsFound() || !holder_->HasFastProperties());
}
+bool LookupIterator::HolderIsInContextIndex(uint32_t index) const {
+ DisallowHeapAllocation no_gc;
-void LookupIterator::ReloadHolderMap() {
- DCHECK_EQ(DATA, state_);
- DCHECK(IsElement());
- DCHECK(JSObject::cast(*holder_)->HasFixedTypedArrayElements());
- if (*holder_map_ != holder_->map()) {
- holder_map_ = handle(holder_->map(), isolate_);
+ Object* context = heap()->native_contexts_list();
+ while (!context->IsUndefined()) {
+ Context* current_context = Context::cast(context);
+ if (current_context->get(index) == *holder_) {
+ return true;
+ }
+ context = current_context->get(Context::NEXT_CONTEXT_LINK);
}
+ return false;
}
+void LookupIterator::UpdateProtector() {
+ if (!FLAG_harmony_species) return;
+
+ if (IsElement()) return;
+ if (isolate_->bootstrapper()->IsActive()) return;
+ if (!isolate_->IsArraySpeciesLookupChainIntact()) return;
+
+ if (*name_ == *isolate_->factory()->constructor_string()) {
+ // Setting the constructor property could change an instance's @@species
+ if (holder_->IsJSArray()) {
+ isolate_->CountUsage(
+ v8::Isolate::UseCounterFeature::kArrayInstanceConstructorModified);
+ isolate_->InvalidateArraySpeciesProtector();
+ } else if (holder_->map()->is_prototype_map()) {
+ // Setting the constructor of Array.prototype of any realm also needs
+ // to invalidate the species protector
+ if (HolderIsInContextIndex(Context::INITIAL_ARRAY_PROTOTYPE_INDEX)) {
+ isolate_->CountUsage(v8::Isolate::UseCounterFeature::
+ kArrayPrototypeConstructorModified);
+ isolate_->InvalidateArraySpeciesProtector();
+ }
+ }
+ } else if (*name_ == *isolate_->factory()->species_symbol()) {
+ // Setting the Symbol.species property of any Array constructor invalidates
+ // the species protector
+ if (HolderIsInContextIndex(Context::ARRAY_FUNCTION_INDEX)) {
+ isolate_->CountUsage(
+ v8::Isolate::UseCounterFeature::kArraySpeciesModified);
+ isolate_->InvalidateArraySpeciesProtector();
+ }
+ }
+}
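UpdateProtector is the write-barrier side of the array-species fast path: only
the three kinds of stores enumerated above can invalidate the protector, and
each also bumps a use counter. The protector itself is internal, but in JS
terms the stores it watches look like:

    var a = [];
    a.constructor = Object;                // constructor on an Array instance
    Array.prototype.constructor = Object;  // constructor on any realm's Array.prototype
    Object.defineProperty(Array, Symbol.species,  // @@species on an Array constructor
                          { configurable: true, value: undefined });
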
void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
DCHECK(state_ == DATA || state_ == ACCESSOR);
@@ -162,25 +196,38 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
Handle<JSObject> holder = GetHolder<JSObject>();
if (IsElement()) {
- ElementsKind kind = holder_map_->elements_kind();
+ ElementsKind kind = holder->GetElementsKind();
ElementsKind to = value->OptimalElementsKind();
if (IsHoleyElementsKind(kind)) to = GetHoleyElementsKind(to);
to = GetMoreGeneralElementsKind(kind, to);
- JSObject::TransitionElementsKind(holder, to);
- holder_map_ = handle(holder->map(), isolate_);
+
+ if (kind != to) {
+ JSObject::TransitionElementsKind(holder, to);
+ }
// Copy the backing store if it is copy-on-write.
if (IsFastSmiOrObjectElementsKind(to)) {
JSObject::EnsureWritableFastElements(holder);
}
+ return;
+ }
- } else {
- if (holder_map_->is_dictionary_map()) return;
- holder_map_ =
- Map::PrepareForDataProperty(holder_map_, descriptor_number(), value);
+ if (!holder->HasFastProperties()) return;
+
+ Handle<Map> old_map(holder->map(), isolate_);
+ Handle<Map> new_map =
+ Map::PrepareForDataProperty(old_map, descriptor_number(), value);
+
+ if (old_map.is_identical_to(new_map)) {
+ // Update the property details if the representation was None.
+ if (representation().IsNone()) {
+ property_details_ =
+ new_map->instance_descriptors()->GetDetails(descriptor_number());
+ }
+ return;
}
- JSObject::MigrateToMap(holder, holder_map_);
+ JSObject::MigrateToMap(holder, new_map);
ReloadPropertyInformation();
}
@@ -196,16 +243,16 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
Handle<FixedArrayBase> elements(holder->elements());
holder->GetElementsAccessor()->Reconfigure(holder, elements, number_, value,
attributes);
- } else if (holder_map_->is_dictionary_map()) {
+ } else if (!holder->HasFastProperties()) {
PropertyDetails details(attributes, v8::internal::DATA, 0,
PropertyCellType::kMutable);
JSObject::SetNormalizedProperty(holder, name(), value, details);
} else {
- holder_map_ = Map::ReconfigureExistingProperty(
- holder_map_, descriptor_number(), i::kData, attributes);
- holder_map_ =
- Map::PrepareForDataProperty(holder_map_, descriptor_number(), value);
- JSObject::MigrateToMap(holder, holder_map_);
+ Handle<Map> old_map(holder->map(), isolate_);
+ Handle<Map> new_map = Map::ReconfigureExistingProperty(
+ old_map, descriptor_number(), i::kData, attributes);
+ new_map = Map::PrepareForDataProperty(new_map, descriptor_number(), value);
+ JSObject::MigrateToMap(holder, new_map);
}
ReloadPropertyInformation();
@@ -218,54 +265,66 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
#endif
}
-
+// Can only be called when the receiver is a JSObject. JSProxy has to be handled
+// via a trap. Adding properties to primitive values is not observable.
void LookupIterator::PrepareTransitionToDataProperty(
- Handle<Object> value, PropertyAttributes attributes,
- Object::StoreFromKeyed store_mode) {
+ Handle<JSObject> receiver, Handle<Object> value,
+ PropertyAttributes attributes, Object::StoreFromKeyed store_mode) {
+ DCHECK(receiver.is_identical_to(GetStoreTarget()));
if (state_ == TRANSITION) return;
DCHECK(state_ != LookupIterator::ACCESSOR ||
(GetAccessors()->IsAccessorInfo() &&
AccessorInfo::cast(*GetAccessors())->is_special_data_property()));
DCHECK_NE(INTEGER_INDEXED_EXOTIC, state_);
DCHECK(state_ == NOT_FOUND || !HolderIsReceiverOrHiddenPrototype());
- // Can only be called when the receiver is a JSObject. JSProxy has to be
- // handled via a trap. Adding properties to primitive values is not
- // observable.
- Handle<JSObject> receiver = GetStoreTarget();
- if (!isolate()->IsInternallyUsedPropertyName(name()) &&
- !receiver->map()->is_extensible()) {
+ Handle<Map> map(receiver->map(), isolate_);
+
+ // Dictionary maps can always have additional data properties.
+ if (map->is_dictionary_map()) {
+ state_ = TRANSITION;
+ if (map->IsJSGlobalObjectMap()) {
+ // Install a property cell.
+ auto cell = JSGlobalObject::EnsurePropertyCell(
+ Handle<JSGlobalObject>::cast(receiver), name());
+ DCHECK(cell->value()->IsTheHole());
+ transition_ = cell;
+ } else {
+ transition_ = map;
+ }
return;
}
- auto transition = Map::TransitionToDataProperty(
- handle(receiver->map(), isolate_), name_, value, attributes, store_mode);
+ Handle<Map> transition =
+ Map::TransitionToDataProperty(map, name_, value, attributes, store_mode);
state_ = TRANSITION;
transition_ = transition;
- if (receiver->IsJSGlobalObject()) {
- // Install a property cell.
- InternalizeName();
- auto cell = JSGlobalObject::EnsurePropertyCell(
- Handle<JSGlobalObject>::cast(receiver), name());
- DCHECK(cell->value()->IsTheHole());
- transition_ = cell;
- } else if (!transition->is_dictionary_map()) {
+ if (!transition->is_dictionary_map()) {
property_details_ = transition->GetLastDescriptorDetails();
has_property_ = true;
}
}
-
-void LookupIterator::ApplyTransitionToDataProperty() {
+void LookupIterator::ApplyTransitionToDataProperty(Handle<JSObject> receiver) {
DCHECK_EQ(TRANSITION, state_);
- Handle<JSObject> receiver = GetStoreTarget();
+ DCHECK(receiver.is_identical_to(GetStoreTarget()));
+
if (receiver->IsJSGlobalObject()) return;
holder_ = receiver;
- holder_map_ = transition_map();
- JSObject::MigrateToMap(receiver, holder_map_);
- ReloadPropertyInformation();
+ Handle<Map> transition = transition_map();
+ bool simple_transition = transition->GetBackPointer() == receiver->map();
+ JSObject::MigrateToMap(receiver, transition);
+
+ if (simple_transition) {
+ int number = transition->LastAdded();
+ number_ = static_cast<uint32_t>(number);
+ property_details_ = transition->GetLastDescriptorDetails();
+ state_ = DATA;
+ } else {
+ ReloadPropertyInformation();
+ }
}
@@ -283,7 +342,6 @@ void LookupIterator::Delete() {
if (holder->HasFastProperties()) {
JSObject::NormalizeProperties(Handle<JSObject>::cast(holder), mode, 0,
"DeletingProperty");
- holder_map_ = handle(holder->map(), isolate_);
ReloadPropertyInformation();
}
// TODO(verwaest): Get rid of the name_ argument.
@@ -292,6 +350,7 @@ void LookupIterator::Delete() {
JSObject::ReoptimizeIfPrototype(Handle<JSObject>::cast(holder));
}
}
+ state_ = NOT_FOUND;
}
@@ -306,14 +365,14 @@ void LookupIterator::TransitionToAccessorProperty(
if (!IsElement() && !receiver->map()->is_dictionary_map()) {
holder_ = receiver;
- holder_map_ = Map::TransitionToAccessorProperty(
- handle(receiver->map(), isolate_), name_, component, accessor,
- attributes);
- JSObject::MigrateToMap(receiver, holder_map_);
+ Handle<Map> old_map(receiver->map(), isolate_);
+ Handle<Map> new_map = Map::TransitionToAccessorProperty(
+ old_map, name_, component, accessor, attributes);
+ JSObject::MigrateToMap(receiver, new_map);
ReloadPropertyInformation();
- if (!holder_map_->is_dictionary_map()) return;
+ if (!new_map->is_dictionary_map()) return;
}
Handle<AccessorPair> pair;
@@ -383,34 +442,29 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
JSObject::ReoptimizeIfPrototype(receiver);
}
- holder_map_ = handle(receiver->map(), isolate_);
ReloadPropertyInformation();
}
bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
DCHECK(has_property_ || state_ == INTERCEPTOR || state_ == JSPROXY);
- return InternalHolderIsReceiverOrHiddenPrototype();
-}
-
-bool LookupIterator::InternalHolderIsReceiverOrHiddenPrototype() const {
// Optimization that only works if configuration_ is not mutable.
if (!check_prototype_chain()) return true;
DisallowHeapAllocation no_gc;
if (!receiver_->IsJSReceiver()) return false;
- Object* current = *receiver_;
- JSReceiver* holder = *holder_;
+ JSReceiver* current = JSReceiver::cast(*receiver_);
+ JSReceiver* object = *holder_;
+ if (current == object) return true;
+ if (!current->map()->has_hidden_prototype()) return false;
// JSProxy do not occur as hidden prototypes.
- if (current->IsJSProxy()) {
- return JSReceiver::cast(current) == holder;
- }
+ if (current->IsJSProxy()) return false;
PrototypeIterator iter(isolate(), current,
- PrototypeIterator::START_AT_RECEIVER);
- do {
- if (iter.GetCurrent<JSReceiver>() == holder) return true;
- DCHECK(!current->IsJSProxy());
+ PrototypeIterator::START_AT_PROTOTYPE,
+ PrototypeIterator::END_AT_NON_HIDDEN);
+ while (!iter.IsAtEnd()) {
+ if (iter.GetCurrent<JSReceiver>() == object) return true;
iter.Advance();
- } while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN));
+ }
return false;
}
@@ -419,30 +473,22 @@ Handle<Object> LookupIterator::FetchValue() const {
Object* result = NULL;
if (IsElement()) {
Handle<JSObject> holder = GetHolder<JSObject>();
- // TODO(verwaest): Optimize.
- if (holder->IsStringObjectWithCharacterAt(index_)) {
- Handle<JSValue> js_value = Handle<JSValue>::cast(holder);
- Handle<String> string(String::cast(js_value->value()));
- return factory()->LookupSingleCharacterStringFromCode(
- String::Flatten(string)->Get(index_));
- }
-
ElementsAccessor* accessor = holder->GetElementsAccessor();
- return accessor->Get(handle(holder->elements()), number_);
- } else if (holder_map_->IsJSGlobalObjectMap()) {
+ return accessor->Get(holder, number_);
+ } else if (holder_->IsJSGlobalObject()) {
Handle<JSObject> holder = GetHolder<JSObject>();
result = holder->global_dictionary()->ValueAt(number_);
DCHECK(result->IsPropertyCell());
result = PropertyCell::cast(result)->value();
- } else if (holder_map_->is_dictionary_map()) {
+ } else if (!holder_->HasFastProperties()) {
result = holder_->property_dictionary()->ValueAt(number_);
} else if (property_details_.type() == v8::internal::DATA) {
Handle<JSObject> holder = GetHolder<JSObject>();
- FieldIndex field_index = FieldIndex::ForDescriptor(*holder_map_, number_);
+ FieldIndex field_index = FieldIndex::ForDescriptor(holder->map(), number_);
return JSObject::FastPropertyAt(holder, property_details_.representation(),
field_index);
} else {
- result = holder_map_->instance_descriptors()->GetValue(number_);
+ result = holder_->map()->instance_descriptors()->GetValue(number_);
}
return handle(result, isolate_);
}
@@ -450,7 +496,7 @@ Handle<Object> LookupIterator::FetchValue() const {
int LookupIterator::GetAccessorIndex() const {
DCHECK(has_property_);
- DCHECK(!holder_map_->is_dictionary_map());
+ DCHECK(holder_->HasFastProperties());
DCHECK_EQ(v8::internal::ACCESSOR_CONSTANT, property_details_.type());
return descriptor_number();
}
@@ -458,7 +504,7 @@ int LookupIterator::GetAccessorIndex() const {
int LookupIterator::GetConstantIndex() const {
DCHECK(has_property_);
- DCHECK(!holder_map_->is_dictionary_map());
+ DCHECK(holder_->HasFastProperties());
DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
DCHECK(!IsElement());
return descriptor_number();
@@ -467,22 +513,22 @@ int LookupIterator::GetConstantIndex() const {
FieldIndex LookupIterator::GetFieldIndex() const {
DCHECK(has_property_);
- DCHECK(!holder_map_->is_dictionary_map());
+ DCHECK(holder_->HasFastProperties());
DCHECK_EQ(v8::internal::DATA, property_details_.type());
DCHECK(!IsElement());
+ Map* holder_map = holder_->map();
int index =
- holder_map_->instance_descriptors()->GetFieldIndex(descriptor_number());
+ holder_map->instance_descriptors()->GetFieldIndex(descriptor_number());
bool is_double = representation().IsDouble();
- return FieldIndex::ForPropertyIndex(*holder_map_, index, is_double);
+ return FieldIndex::ForPropertyIndex(holder_map, index, is_double);
}
-
-Handle<HeapType> LookupIterator::GetFieldType() const {
+Handle<FieldType> LookupIterator::GetFieldType() const {
DCHECK(has_property_);
- DCHECK(!holder_map_->is_dictionary_map());
+ DCHECK(holder_->HasFastProperties());
DCHECK_EQ(v8::internal::DATA, property_details_.type());
return handle(
- holder_map_->instance_descriptors()->GetFieldType(descriptor_number()),
+ holder_->map()->instance_descriptors()->GetFieldType(descriptor_number()),
isolate_);
}
@@ -516,53 +562,26 @@ void LookupIterator::WriteDataValue(Handle<Object> value) {
if (IsElement()) {
Handle<JSObject> object = Handle<JSObject>::cast(holder);
ElementsAccessor* accessor = object->GetElementsAccessor();
- accessor->Set(object->elements(), number_, *value);
+ accessor->Set(object, number_, *value);
+ } else if (holder->HasFastProperties()) {
+ if (property_details_.type() == v8::internal::DATA) {
+ JSObject::cast(*holder)->WriteToField(descriptor_number(),
+ property_details_, *value);
+ } else {
+ DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
+ }
} else if (holder->IsJSGlobalObject()) {
Handle<GlobalDictionary> property_dictionary =
handle(JSObject::cast(*holder)->global_dictionary());
PropertyCell::UpdateCell(property_dictionary, dictionary_entry(), value,
property_details_);
- } else if (holder_map_->is_dictionary_map()) {
+ } else {
NameDictionary* property_dictionary = holder->property_dictionary();
property_dictionary->ValueAtPut(dictionary_entry(), *value);
- } else if (property_details_.type() == v8::internal::DATA) {
- JSObject::cast(*holder)->WriteToField(descriptor_number(), *value);
- } else {
- DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
}
}
-bool LookupIterator::IsIntegerIndexedExotic(JSReceiver* holder) {
- DCHECK(exotic_index_state_ != ExoticIndexState::kNotExotic);
- if (exotic_index_state_ == ExoticIndexState::kExotic) return true;
- if (!InternalHolderIsReceiverOrHiddenPrototype()) {
- exotic_index_state_ = ExoticIndexState::kNotExotic;
- return false;
- }
- DCHECK(exotic_index_state_ == ExoticIndexState::kUninitialized);
- bool result = false;
- // Compute and cache result.
- if (IsElement()) {
- result = index_ >= JSTypedArray::cast(holder)->length_value();
- } else if (name()->IsString()) {
- Handle<String> name_string = Handle<String>::cast(name());
- if (name_string->length() != 0) {
- result = IsSpecialIndex(isolate_->unicode_cache(), *name_string);
- }
- }
- exotic_index_state_ =
- result ? ExoticIndexState::kExotic : ExoticIndexState::kNotExotic;
- return result;
-}
-
-
-void LookupIterator::InternalizeName() {
- if (name_->IsUniqueName()) return;
- name_ = factory()->InternalizeString(Handle<String>::cast(name_));
-}
-
-
bool LookupIterator::HasInterceptor(Map* map) const {
if (IsElement()) return map->has_indexed_interceptor();
return map->has_named_interceptor();
@@ -591,21 +610,30 @@ JSReceiver* LookupIterator::NextHolder(Map* map) {
DisallowHeapAllocation no_gc;
if (!map->prototype()->IsJSReceiver()) return NULL;
- JSReceiver* next = JSReceiver::cast(map->prototype());
- DCHECK(!next->map()->IsJSGlobalObjectMap() ||
- next->map()->is_hidden_prototype());
+ DCHECK(!map->IsJSGlobalProxyMap() || map->has_hidden_prototype());
if (!check_prototype_chain() &&
- !(check_hidden() && next->map()->is_hidden_prototype()) &&
+ !(check_hidden() && map->has_hidden_prototype()) &&
// Always lookup behind the JSGlobalProxy into the JSGlobalObject, even
// when not checking other hidden prototypes.
!map->IsJSGlobalProxyMap()) {
return NULL;
}
- return next;
+ return JSReceiver::cast(map->prototype());
}
+LookupIterator::State LookupIterator::NotFound(JSReceiver* const holder) const {
+ DCHECK(!IsElement());
+ if (!holder->IsJSTypedArray() || !name_->IsString()) return NOT_FOUND;
+
+ Handle<String> name_string = Handle<String>::cast(name_);
+ if (name_string->length() == 0) return NOT_FOUND;
+
+ return IsSpecialIndex(isolate_->unicode_cache(), *name_string)
+ ? INTEGER_INDEXED_EXOTIC
+ : NOT_FOUND;
+}
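NotFound replaces the cached ExoticIndexState: when a named lookup on a typed
array misses and the name is a "special index" string (canonical numeric
strings such as "-0", "1.5" or "NaN"), the iterator now reports
INTEGER_INDEXED_EXOTIC, so such properties neither read back nor stick:

    var ta = new Uint8Array(4);
    ta["1.5"] = 42;  // dropped: a canonical numeric string, but not a valid index
    ta["1.5"];       // undefined
    "1.5" in ta;     // false
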
LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
JSReceiver* const holder) {
@@ -617,51 +645,32 @@ LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
switch (state_) {
case NOT_FOUND:
if (map->IsJSProxyMap()) {
- // Do not leak private property names.
if (IsElement() || !name_->IsPrivate()) return JSPROXY;
}
- if (map->is_access_check_needed() &&
- (IsElement() || !isolate_->IsInternallyUsedPropertyName(name_))) {
- return ACCESS_CHECK;
+ if (map->is_access_check_needed()) {
+ if (IsElement() || !name_->IsPrivate()) return ACCESS_CHECK;
}
// Fall through.
case ACCESS_CHECK:
- if (exotic_index_state_ != ExoticIndexState::kNotExotic &&
- holder->IsJSTypedArray() && IsIntegerIndexedExotic(holder)) {
- return INTEGER_INDEXED_EXOTIC;
- }
if (check_interceptor() && HasInterceptor(map) &&
!SkipInterceptor(JSObject::cast(holder))) {
- // Do not leak private property names.
- if (!name_.is_null() && name_->IsPrivate()) return NOT_FOUND;
- return INTERCEPTOR;
+ if (IsElement() || !name_->IsPrivate()) return INTERCEPTOR;
}
// Fall through.
case INTERCEPTOR:
if (IsElement()) {
- // TODO(verwaest): Optimize.
- if (holder->IsStringObjectWithCharacterAt(index_)) {
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- property_details_ = PropertyDetails(attributes, v8::internal::DATA, 0,
- PropertyCellType::kNoCell);
- } else {
- JSObject* js_object = JSObject::cast(holder);
- if (js_object->elements() == isolate()->heap()->empty_fixed_array()) {
- return NOT_FOUND;
- }
-
- ElementsAccessor* accessor = js_object->GetElementsAccessor();
- FixedArrayBase* backing_store = js_object->elements();
- number_ =
- accessor->GetEntryForIndex(js_object, backing_store, index_);
- if (number_ == kMaxUInt32) return NOT_FOUND;
- property_details_ = accessor->GetDetails(backing_store, number_);
+ JSObject* js_object = JSObject::cast(holder);
+ ElementsAccessor* accessor = js_object->GetElementsAccessor();
+ FixedArrayBase* backing_store = js_object->elements();
+ number_ = accessor->GetEntryForIndex(js_object, backing_store, index_);
+ if (number_ == kMaxUInt32) {
+ return holder->IsJSTypedArray() ? INTEGER_INDEXED_EXOTIC : NOT_FOUND;
}
+ property_details_ = accessor->GetDetails(js_object, number_);
} else if (!map->is_dictionary_map()) {
DescriptorArray* descriptors = map->instance_descriptors();
- int number = descriptors->SearchWithCache(*name_, map);
- if (number == DescriptorArray::kNotFound) return NOT_FOUND;
+ int number = descriptors->SearchWithCache(isolate_, *name_, map);
+ if (number == DescriptorArray::kNotFound) return NotFound(holder);
number_ = static_cast<uint32_t>(number);
property_details_ = descriptors->GetDetails(number_);
} else if (map->IsJSGlobalObjectMap()) {
@@ -676,7 +685,7 @@ LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
} else {
NameDictionary* dict = holder->property_dictionary();
int number = dict->FindEntry(name_);
- if (number == NameDictionary::kNotFound) return NOT_FOUND;
+ if (number == NameDictionary::kNotFound) return NotFound(holder);
number_ = static_cast<uint32_t>(number);
property_details_ = dict->DetailsAt(number_);
}
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 7d689560b8..0c298d99bf 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -48,16 +48,14 @@ class LookupIterator final BASE_EMBEDDED {
Configuration configuration = DEFAULT)
: configuration_(ComputeConfiguration(configuration, name)),
state_(NOT_FOUND),
- exotic_index_state_(ExoticIndexState::kUninitialized),
interceptor_state_(InterceptorState::kUninitialized),
property_details_(PropertyDetails::Empty()),
isolate_(name->GetIsolate()),
- name_(Name::Flatten(name)),
+ name_(isolate_->factory()->InternalizeName(name)),
// kMaxUInt32 isn't a valid index.
index_(kMaxUInt32),
receiver_(receiver),
holder_(GetRoot(isolate_, receiver)),
- holder_map_(holder_->map(), isolate_),
initial_holder_(holder_),
number_(DescriptorArray::kNotFound) {
#ifdef DEBUG
@@ -72,16 +70,14 @@ class LookupIterator final BASE_EMBEDDED {
Configuration configuration = DEFAULT)
: configuration_(ComputeConfiguration(configuration, name)),
state_(NOT_FOUND),
- exotic_index_state_(ExoticIndexState::kUninitialized),
interceptor_state_(InterceptorState::kUninitialized),
property_details_(PropertyDetails::Empty()),
isolate_(name->GetIsolate()),
- name_(Name::Flatten(name)),
+ name_(isolate_->factory()->InternalizeName(name)),
// kMaxUInt32 isn't a valid index.
index_(kMaxUInt32),
receiver_(receiver),
holder_(holder),
- holder_map_(holder_->map(), isolate_),
initial_holder_(holder_),
number_(DescriptorArray::kNotFound) {
#ifdef DEBUG
@@ -95,7 +91,6 @@ class LookupIterator final BASE_EMBEDDED {
Configuration configuration = DEFAULT)
: configuration_(configuration),
state_(NOT_FOUND),
- exotic_index_state_(ExoticIndexState::kUninitialized),
interceptor_state_(InterceptorState::kUninitialized),
property_details_(PropertyDetails::Empty()),
isolate_(isolate),
@@ -103,7 +98,6 @@ class LookupIterator final BASE_EMBEDDED {
index_(index),
receiver_(receiver),
holder_(GetRoot(isolate, receiver, index)),
- holder_map_(holder_->map(), isolate_),
initial_holder_(holder_),
number_(DescriptorArray::kNotFound) {
// kMaxUInt32 isn't a valid index.
@@ -116,7 +110,6 @@ class LookupIterator final BASE_EMBEDDED {
Configuration configuration = DEFAULT)
: configuration_(configuration),
state_(NOT_FOUND),
- exotic_index_state_(ExoticIndexState::kUninitialized),
interceptor_state_(InterceptorState::kUninitialized),
property_details_(PropertyDetails::Empty()),
isolate_(isolate),
@@ -124,7 +117,6 @@ class LookupIterator final BASE_EMBEDDED {
index_(index),
receiver_(receiver),
holder_(holder),
- holder_map_(holder_->map(), isolate_),
initial_holder_(holder_),
number_(DescriptorArray::kNotFound) {
// kMaxUInt32 isn't a valid index.
@@ -135,27 +127,27 @@ class LookupIterator final BASE_EMBEDDED {
static LookupIterator PropertyOrElement(
Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
Configuration configuration = DEFAULT) {
- name = Name::Flatten(name);
uint32_t index;
- LookupIterator it =
- name->AsArrayIndex(&index)
- ? LookupIterator(isolate, receiver, index, configuration)
- : LookupIterator(receiver, name, configuration);
- it.name_ = name;
- return it;
+ if (name->AsArrayIndex(&index)) {
+ LookupIterator it =
+ LookupIterator(isolate, receiver, index, configuration);
+ it.name_ = name;
+ return it;
+ }
+ return LookupIterator(receiver, name, configuration);
}
static LookupIterator PropertyOrElement(
Isolate* isolate, Handle<Object> receiver, Handle<Name> name,
Handle<JSReceiver> holder, Configuration configuration = DEFAULT) {
- name = Name::Flatten(name);
uint32_t index;
- LookupIterator it =
- name->AsArrayIndex(&index)
- ? LookupIterator(isolate, receiver, index, holder, configuration)
- : LookupIterator(receiver, name, holder, configuration);
- it.name_ = name;
- return it;
+ if (name->AsArrayIndex(&index)) {
+ LookupIterator it =
+ LookupIterator(isolate, receiver, index, holder, configuration);
+ it.name_ = name;
+ return it;
+ }
+ return LookupIterator(receiver, name, holder, configuration);
}
static LookupIterator PropertyOrElement(
@@ -193,7 +185,7 @@ class LookupIterator final BASE_EMBEDDED {
Factory* factory() const { return isolate_->factory(); }
Handle<Object> GetReceiver() const { return receiver_; }
Handle<JSObject> GetStoreTarget() const;
- bool is_dictionary_holder() const { return holder_map_->is_dictionary_map(); }
+ bool is_dictionary_holder() const { return !holder_->HasFastProperties(); }
Handle<Map> transition_map() const {
DCHECK_EQ(TRANSITION, state_);
return Handle<Map>::cast(transition_);
@@ -214,17 +206,23 @@ class LookupIterator final BASE_EMBEDDED {
bool HasAccess() const;
/* PROPERTY */
+ bool ExtendingNonExtensible(Handle<JSObject> receiver) {
+ DCHECK(receiver.is_identical_to(GetStoreTarget()));
+ return !receiver->map()->is_extensible() &&
+ (IsElement() || !name_->IsPrivate());
+ }
void PrepareForDataProperty(Handle<Object> value);
- void PrepareTransitionToDataProperty(Handle<Object> value,
+ void PrepareTransitionToDataProperty(Handle<JSObject> receiver,
+ Handle<Object> value,
PropertyAttributes attributes,
Object::StoreFromKeyed store_mode);
bool IsCacheableTransition() {
- if (state_ != TRANSITION) return false;
+ DCHECK_EQ(TRANSITION, state_);
return transition_->IsPropertyCell() ||
(!transition_map()->is_dictionary_map() &&
transition_map()->GetBackPointer()->IsMap());
}
- void ApplyTransitionToDataProperty();
+ void ApplyTransitionToDataProperty(Handle<JSObject> receiver);
void ReconfigureDataProperty(Handle<Object> value,
PropertyAttributes attributes);
void Delete();
@@ -237,13 +235,17 @@ class LookupIterator final BASE_EMBEDDED {
DCHECK(has_property_);
return property_details_;
}
+ PropertyAttributes property_attributes() const {
+ return property_details().attributes();
+ }
bool IsConfigurable() const { return property_details().IsConfigurable(); }
bool IsReadOnly() const { return property_details().IsReadOnly(); }
+ bool IsEnumerable() const { return property_details().IsEnumerable(); }
Representation representation() const {
return property_details().representation();
}
FieldIndex GetFieldIndex() const;
- Handle<HeapType> GetFieldType() const;
+ Handle<FieldType> GetFieldType() const;
int GetAccessorIndex() const;
int GetConstantIndex() const;
Handle<PropertyCell> GetPropertyCell() const;
@@ -254,8 +256,7 @@ class LookupIterator final BASE_EMBEDDED {
}
Handle<Object> GetDataValue() const;
void WriteDataValue(Handle<Object> value);
- void InternalizeName();
- void ReloadHolderMap();
+ void UpdateProtector();
private:
enum class InterceptorState {
@@ -277,7 +278,6 @@ class LookupIterator final BASE_EMBEDDED {
void ReloadPropertyInformation();
inline bool SkipInterceptor(JSObject* holder);
bool HasInterceptor(Map* map) const;
- bool InternalHolderIsReceiverOrHiddenPrototype() const;
inline InterceptorInfo* GetInterceptor(JSObject* holder) const {
if (IsElement()) return holder->GetIndexedInterceptor();
return holder->GetNamedInterceptor();
@@ -288,13 +288,15 @@ class LookupIterator final BASE_EMBEDDED {
return (configuration_ & kInterceptor) != 0;
}
int descriptor_number() const {
+ DCHECK(!IsElement());
DCHECK(has_property_);
- DCHECK(!holder_map_->is_dictionary_map());
+ DCHECK(holder_->HasFastProperties());
return number_;
}
int dictionary_entry() const {
+ DCHECK(!IsElement());
DCHECK(has_property_);
- DCHECK(holder_map_->is_dictionary_map());
+ DCHECK(!holder_->HasFastProperties());
return number_;
}
@@ -317,15 +319,15 @@ class LookupIterator final BASE_EMBEDDED {
return GetRootForNonJSReceiver(isolate, receiver, index);
}
- enum class ExoticIndexState { kUninitialized, kNotExotic, kExotic };
- inline bool IsIntegerIndexedExotic(JSReceiver* holder);
+ State NotFound(JSReceiver* const holder) const;
+
+ bool HolderIsInContextIndex(uint32_t index) const;
// If configuration_ becomes mutable, update
// HolderIsReceiverOrHiddenPrototype.
const Configuration configuration_;
State state_;
bool has_property_;
- ExoticIndexState exotic_index_state_;
InterceptorState interceptor_state_;
PropertyDetails property_details_;
Isolate* const isolate_;
@@ -334,7 +336,6 @@ class LookupIterator final BASE_EMBEDDED {
Handle<Object> transition_;
const Handle<Object> receiver_;
Handle<JSReceiver> holder_;
- Handle<Map> holder_map_;
const Handle<JSReceiver> initial_holder_;
uint32_t number_;
};
diff --git a/deps/v8/src/machine-type.cc b/deps/v8/src/machine-type.cc
index 1fb886ca52..fcc3e97973 100644
--- a/deps/v8/src/machine-type.cc
+++ b/deps/v8/src/machine-type.cc
@@ -26,6 +26,8 @@ std::ostream& operator<<(std::ostream& os, MachineRepresentation rep) {
return os << "kRepFloat32";
case MachineRepresentation::kFloat64:
return os << "kRepFloat64";
+ case MachineRepresentation::kSimd128:
+ return os << "kRepSimd128";
case MachineRepresentation::kTagged:
return os << "kRepTagged";
}
diff --git a/deps/v8/src/machine-type.h b/deps/v8/src/machine-type.h
index 97f6ae3bbd..1085657894 100644
--- a/deps/v8/src/machine-type.h
+++ b/deps/v8/src/machine-type.h
@@ -24,6 +24,7 @@ enum class MachineRepresentation : uint8_t {
kWord64,
kFloat32,
kFloat64,
+ kSimd128,
kTagged
};
@@ -84,6 +85,9 @@ class MachineType {
return MachineType(MachineRepresentation::kFloat64,
MachineSemantic::kNumber);
}
+ static MachineType Simd128() {
+ return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
+ }
static MachineType Int8() {
return MachineType(MachineRepresentation::kWord8, MachineSemantic::kInt32);
}
@@ -143,6 +147,9 @@ class MachineType {
static MachineType RepFloat64() {
return MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone);
}
+ static MachineType RepSimd128() {
+ return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
+ }
static MachineType RepTagged() {
return MachineType(MachineRepresentation::kTagged, MachineSemantic::kNone);
}
@@ -187,6 +194,8 @@ inline int ElementSizeLog2Of(MachineRepresentation rep) {
case MachineRepresentation::kWord64:
case MachineRepresentation::kFloat64:
return 3;
+ case MachineRepresentation::kSimd128:
+ return 4;
case MachineRepresentation::kTagged:
return kPointerSizeLog2;
default:
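
The new kSimd128 representation slots in between kFloat64 and kTagged, and ElementSizeLog2Of maps it to 4, i.e. 16-byte (128-bit) elements. A self-contained replica of the size mapping, for reference:

#include <cstdint>

// Replica of the size mapping above: element size is 1 << log2 bytes.
enum class Rep : uint8_t { kWord32, kWord64, kFloat64, kSimd128 };

int ElementSizeLog2Of(Rep rep) {
  switch (rep) {
    case Rep::kWord32:  return 2;  // 4 bytes
    case Rep::kWord64:
    case Rep::kFloat64: return 3;  // 8 bytes
    case Rep::kSimd128: return 4;  // 16 bytes = 128 bits
  }
  return -1;  // unreachable
}
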
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 23deb1afeb..072ac1d5a6 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -223,9 +223,12 @@ bool CheckMethodName(Isolate* isolate, Handle<JSObject> obj, Handle<Name> name,
Handle<Object> CallSite::GetMethodName() {
- MaybeHandle<JSReceiver> maybe = Object::ToObject(isolate_, receiver_);
- Handle<JSReceiver> receiver;
- if (!maybe.ToHandle(&receiver) || !receiver->IsJSObject()) {
+ if (receiver_->IsNull() || receiver_->IsUndefined()) {
+ return isolate_->factory()->null_value();
+ }
+ Handle<JSReceiver> receiver =
+ Object::ToObject(isolate_, receiver_).ToHandleChecked();
+ if (!receiver->IsJSObject()) {
return isolate_->factory()->null_value();
}
@@ -247,7 +250,7 @@ Handle<Object> CallSite::GetMethodName() {
if (!current->IsJSObject()) break;
Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
if (current_obj->IsAccessCheckNeeded()) break;
- Handle<FixedArray> keys = JSObject::GetEnumPropertyKeys(current_obj, false);
+ Handle<FixedArray> keys = JSObject::GetEnumPropertyKeys(current_obj);
for (int i = 0; i < keys->length(); i++) {
HandleScope inner_scope(isolate_);
if (!keys->get(i)->IsName()) continue;
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 8cd60b1c5c..c71e11ba24 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -71,7 +71,6 @@ class CallSite {
int32_t pos_;
};
-
#define MESSAGE_TEMPLATES(T) \
/* Error */ \
T(None, "") \
@@ -95,6 +94,7 @@ class CallSite {
T(ArrayFunctionsOnSealed, "Cannot add/remove sealed array elements") \
T(ArrayNotSubclassable, "Subclassing Arrays is not currently supported.") \
T(CalledNonCallable, "% is not a function") \
+ T(CalledNonCallableInstanceOf, "right-hand side is not a function") \
T(CalledOnNonObject, "% called on non-object") \
T(CalledOnNullOrUndefined, "% called on null or undefined") \
T(CallSiteExpectsFunction, \
@@ -295,6 +295,7 @@ class CallSite {
T(RestrictedFunctionProperties, \
"'caller' and 'arguments' are restricted function properties and cannot " \
"be accessed in this context.") \
+ T(ReturnMethodNotCallable, "The iterator's 'return' method is not callable") \
T(StaticPrototype, "Classes may not have static property named prototype") \
T(StrictCannotAssign, "Cannot assign to read only '%' in strict mode") \
T(StrictDeleteProperty, "Cannot delete property '%' of %") \
@@ -316,10 +317,13 @@ class CallSite {
"to be non-writable is deprecated") \
T(StrongSetProto, \
"On strong object %, redefining the internal prototype is deprecated") \
+ T(SymbolIteratorInvalid, \
+ "Result of the Symbol.iterator method is not an object") \
T(SymbolKeyFor, "% is not a symbol") \
T(SymbolToNumber, "Cannot convert a Symbol value to a number") \
T(SymbolToString, "Cannot convert a Symbol value to a string") \
T(SimdToNumber, "Cannot convert a SIMD value to a number") \
+ T(ThrowMethodMissing, "The iterator does not provide a 'throw' method.") \
T(UndefinedOrNullToObject, "Cannot convert undefined or null to object") \
T(ValueAndAccessor, \
"Invalid property descriptor. Cannot both specify accessors and a value " \
@@ -332,8 +336,6 @@ class CallSite {
T(StrongSuperCallMissing, \
"In strong mode, invoking the super constructor in a subclass is " \
"required") \
- T(StrongUnboundGlobal, \
- "In strong mode, using an undeclared global variable '%' is not allowed") \
T(UnsupportedSuper, "Unsupported reference to 'super'") \
/* RangeError */ \
T(DateRange, "Provided date is not in valid range.") \
@@ -384,12 +386,10 @@ class CallSite {
T(DuplicateExport, "Duplicate export of '%'") \
T(DuplicateProto, \
"Duplicate __proto__ fields are not allowed in object literals") \
- T(ForInLoopInitializer, \
- "for-in loop variable declaration may not have an initializer.") \
+ T(ForInOfLoopInitializer, \
+ "% loop variable declaration may not have an initializer.") \
T(ForInOfLoopMultiBindings, \
"Invalid left-hand side in % loop: Must have a single binding.") \
- T(ForOfLoopInitializer, \
- "for-of loop variable declaration may not have an initializer.") \
T(IllegalAccess, "Illegal access") \
T(IllegalBreak, "Illegal break statement") \
T(IllegalContinue, "Illegal continue statement") \
@@ -397,6 +397,7 @@ class CallSite {
"Illegal '%' directive in function with non-simple parameter list") \
T(IllegalReturn, "Illegal return statement") \
T(InvalidEscapedReservedWord, "Keyword must not contain escaped characters") \
+ T(InvalidEscapedMetaProperty, "'%' must not contain escaped characters") \
T(InvalidLhsInAssignment, "Invalid left-hand side in assignment") \
T(InvalidCoverInitializedName, "Invalid shorthand property initializer") \
T(InvalidDestructuringTarget, "Invalid destructuring assignment target") \
@@ -406,6 +407,10 @@ class CallSite {
T(InvalidLhsInPrefixOp, \
"Invalid left-hand side expression in prefix operation") \
T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
+ T(JsonParseUnexpectedEOS, "Unexpected end of JSON input") \
+ T(JsonParseUnexpectedToken, "Unexpected token % in JSON at position %") \
+ T(JsonParseUnexpectedTokenNumber, "Unexpected number in JSON at position %") \
+ T(JsonParseUnexpectedTokenString, "Unexpected string in JSON at position %") \
T(LabelRedeclaration, "Label '%' has already been declared") \
T(MalformedArrowFunParamList, "Malformed arrow function parameter list") \
T(MalformedRegExp, "Invalid regular expression: /%/: %") \
@@ -417,6 +422,8 @@ class CallSite {
T(NoCatchOrFinally, "Missing catch or finally after try") \
T(NotIsvar, "builtin %%IS_VAR: not a variable") \
T(ParamAfterRest, "Rest parameter must be last formal parameter") \
+ T(InvalidRestParameter, \
+ "Rest parameter must be an identifier or destructuring pattern") \
T(PushPastSafeLength, \
"Pushing % elements on an array-like of length % " \
"is disallowed, as the total surpasses 2**53-1") \
@@ -480,8 +487,6 @@ class CallSite {
"with 'break', 'continue', 'return' or 'throw'") \
T(StrongUndefined, \
"In strong mode, binding or assigning to 'undefined' is deprecated") \
- T(StrongUseBeforeDeclaration, \
- "In strong mode, declaring variable '%' before its use is required") \
T(StrongVar, \
"In strong mode, 'var' is deprecated, use 'let' or 'const' instead") \
T(TemplateOctalLiteral, \
@@ -495,6 +500,8 @@ class CallSite {
T(TypedArrayTooShort, \
"Derived TypedArray constructor created an array which was too small") \
T(UnexpectedEOS, "Unexpected end of input") \
+ T(UnexpectedFunctionSent, \
+ "function.sent expression is not allowed outside a generator") \
T(UnexpectedReserved, "Unexpected reserved word") \
T(UnexpectedStrictReserved, "Unexpected strict mode reserved word") \
T(UnexpectedSuper, "'super' keyword unexpected here") \
@@ -510,6 +517,8 @@ class CallSite {
T(UnterminatedRegExp, "Invalid regular expression: missing /") \
T(UnterminatedTemplate, "Unterminated template literal") \
T(UnterminatedTemplateExpr, "Missing } in template expression") \
+ T(FoundNonCallableHasInstance, "Found non-callable @@hasInstance") \
+ T(NonObjectInInstanceOfCheck, "Expecting an object in instanceof check") \
/* EvalError */ \
T(CodeGenFromStrings, "%") \
/* URIError */ \
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 27ec8e5bda..5e27f4545b 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -214,8 +214,8 @@ void RelocInfo::set_target_object(Object* target,
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target));
}
}
@@ -284,10 +284,8 @@ void RelocInfo::set_target_cell(Cell* cell,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ cell);
}
}
@@ -351,25 +349,6 @@ void RelocInfo::WipeOut() {
}
-bool RelocInfo::IsPatchedReturnSequence() {
- Instr instr0 = Assembler::instr_at(pc_);
- Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
- Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
- bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
- (instr1 & kOpcodeMask) == ORI &&
- ((instr2 & kOpcodeMask) == JAL ||
- ((instr2 & kOpcodeMask) == SPECIAL &&
- (instr2 & kFunctionFieldMask) == JALR)));
- return patched_return;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index a8b6cc7c32..e50a239a4a 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -285,10 +285,7 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
void Assembler::GetCode(CodeDesc* desc) {
- if (IsPrevInstrCompactBranch()) {
- nop();
- ClearCompactBranchState();
- }
+ EmitForbiddenSlotInstruction();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
desc->buffer = buffer_;
@@ -302,10 +299,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
- if (IsPrevInstrCompactBranch()) {
- nop();
- ClearCompactBranchState();
- }
+ EmitForbiddenSlotInstruction();
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@@ -2092,33 +2086,36 @@ void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// load to two 32-bit loads.
DCHECK(!src.rm().is(at));
- if (IsFp64Mode()) {
+ if (IsFp32Mode()) { // fp32 mode.
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(LWC1, src.rm(), fd,
src.offset_ + Register::kMantissaOffset);
- GenInstrImmediate(LW, src.rm(), at,
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(LWC1, src.rm(), nextfpreg,
src.offset_ + Register::kExponentOffset);
- mthc1(at, fd);
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(src);
GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
- GenInstrImmediate(LW, at, at, Register::kExponentOffset);
- mthc1(at, fd);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset);
}
- } else { // fp32 mode.
+ } else {
+ DCHECK(IsFp64Mode() || IsFpxxMode());
+ // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(LWC1, src.rm(), fd,
src.offset_ + Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, src.rm(), nextfpreg,
+ GenInstrImmediate(LW, src.rm(), at,
src.offset_ + Register::kExponentOffset);
+ mthc1(at, fd);
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(src);
GenInstrImmediate(LWC1, at, fd, Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, at, nextfpreg, Register::kExponentOffset);
+ GenInstrImmediate(LW, at, at, Register::kExponentOffset);
+ mthc1(at, fd);
}
}
}
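
Both branches implement the same workaround: HeapNumbers are not guaranteed to be 8-byte aligned, so a 64-bit FPU load is split into two 32-bit word accesses. In FP32 mode the high word targets the odd partner register fd+1, while in FP64/FPXX mode it goes through a GPR and mthc1. A host-side sketch of the word split (the real code abstracts endianness via kMantissaOffset/kExponentOffset):

#include <cstdint>
#include <cstring>

// Sketch: the two 32-bit words that the split ldc1/sdc1 sequences move.
// Little-endian word order assumed; the real code encodes this via
// Register::kMantissaOffset and Register::kExponentOffset.
void SplitDouble(double d, uint32_t* mantissa_word, uint32_t* exponent_word) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  *mantissa_word = static_cast<uint32_t>(bits);        // low word
  *exponent_word = static_cast<uint32_t>(bits >> 32);  // high word
}
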
@@ -2139,33 +2136,36 @@ void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
// store to two 32-bit stores.
DCHECK(!src.rm().is(at));
DCHECK(!src.rm().is(t8));
- if (IsFp64Mode()) {
+ if (IsFp32Mode()) { // fp32 mode.
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(SWC1, src.rm(), fd,
src.offset_ + Register::kMantissaOffset);
- mfhc1(at, fd);
- GenInstrImmediate(SW, src.rm(), at,
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg,
src.offset_ + Register::kExponentOffset);
    } else {  // Offset > 16 bits, use multiple instructions to store.
LoadRegPlusOffsetToAt(src);
GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
- mfhc1(t8, fd);
- GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset);
}
- } else { // fp32 mode.
+ } else {
+ DCHECK(IsFp64Mode() || IsFpxxMode());
+ // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(SWC1, src.rm(), fd,
src.offset_ + Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, src.rm(), nextfpreg,
+ mfhc1(at, fd);
+ GenInstrImmediate(SW, src.rm(), at,
src.offset_ + Register::kExponentOffset);
    } else {  // Offset > 16 bits, use multiple instructions to store.
LoadRegPlusOffsetToAt(src);
GenInstrImmediate(SWC1, at, fd, Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, at, nextfpreg, Register::kExponentOffset);
+ mfhc1(t8, fd);
+ GenInstrImmediate(SW, at, t8, Register::kExponentOffset);
}
}
}
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 054695483f..b708ef7700 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -304,6 +304,8 @@ struct FPUControlRegister {
const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
const FPUControlRegister FCSR = { kFCSRRegister };
+// TODO(mips): Define SIMD registers.
+typedef DoubleRegister Simd128Register;
// -----------------------------------------------------------------------------
// Machine instruction Operands.
@@ -518,14 +520,11 @@ class Assembler : public AssemblerBase {
// a target is resolved and written.
static const int kSpecialTargetSize = 0;
- // Number of consecutive instructions used to store 32bit constant.
- // Before jump-optimizations, this constant was used in
- // RelocInfo::target_address_address() function to tell serializer address of
- // the instruction that follows LUI/ORI instruction pair. Now, with new jump
- // optimization, where jump-through-register instruction that usually
- // follows LUI/ORI pair is substituted with J/JAL, this constant equals
- // to 3 instructions (LUI+ORI+J/JAL/JR/JALR).
- static const int kInstructionsFor32BitConstant = 3;
+  // Number of consecutive instructions used to store a 32-bit constant. This
+  // constant is used in the RelocInfo::target_address_address() function to
+  // tell the serializer the address of the instruction that follows the
+  // LUI/ORI instruction pair.
+ static const int kInstructionsFor32BitConstant = 2;
// Distance between the instruction referring to the address of the call
// target and the return address.
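
With the jump-optimization caveat gone, the sequence counted by kInstructionsFor32BitConstant above is exactly LUI (upper 16 bits) followed by ORI (lower 16 bits), hence the drop from 3 to 2. A sketch of the split, assuming the usual MIPS semantics:

#include <cstdint>

// Sketch: materializing a 32-bit immediate as two instructions.
//   lui at, hi      ; at = hi << 16
//   ori at, at, lo  ; at = at | lo
struct ImmPair { uint16_t hi, lo; };

ImmPair Split32(uint32_t imm) {
  return ImmPair{static_cast<uint16_t>(imm >> 16),
                 static_cast<uint16_t>(imm & 0xFFFFu)};
}
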
@@ -1035,7 +1034,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const SourcePosition position);
+ void RecordDeoptReason(const int reason, int raw_position);
static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
@@ -1206,6 +1205,12 @@ class Assembler : public AssemblerBase {
return block_buffer_growth_;
}
+ void EmitForbiddenSlotInstruction() {
+ if (IsPrevInstrCompactBranch()) {
+ nop();
+ }
+ }
+
inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
private:
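
EmitForbiddenSlotInstruction() centralizes the padding that GetCode() and Align() previously open-coded: on MIPS32r6 a compact branch is followed by a "forbidden slot" that must not hold another branch, so a nop is inserted when the previous instruction was a compact branch. A minimal sketch of the pattern with a hypothetical emitter (not the Assembler API):

// Sketch of the forbidden-slot rule: pad with a nop whenever the previous
// instruction was a compact branch, then clear the tracking state.
struct Emitter {
  bool prev_was_compact_branch = false;

  void EmitNop() {
    // ... emit a nop word ...
    prev_was_compact_branch = false;
  }

  void EmitForbiddenSlotInstruction() {
    if (prev_was_compact_branch) EmitNop();
  }
};
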
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index f6c1dfbaaf..09f4d59e35 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -142,6 +142,107 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- ra : return address
+ // -- sp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- sp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ Condition const cc = (kind == MathMaxMinKind::kMin) ? ge : le;
+ Heap::RootListIndex const root_index =
+ (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+ : Heap::kMinusInfinityValueRootIndex;
+ DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? f2 : f0;
+
+ // Load the accumulator with the default return value (either -Infinity or
+ // +Infinity), with the tagged value in a1 and the double value in f0.
+ __ LoadRoot(a1, root_index);
+ __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ mov(a3, a0);
+
+ Label done_loop, loop;
+ __ bind(&loop);
+ {
+    // Check if all parameters are done.
+ __ Subu(a0, a0, Operand(1));
+ __ Branch(&done_loop, lt, a0, Operand(zero_reg));
+
+ // Load the next parameter tagged value into a2.
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
+ __ lw(a2, MemOperand(at));
+
+    // Load the double value of the parameter into f2, converting the
+    // parameter to a number first with the ToNumberStub if necessary.
+ Label convert, convert_smi, convert_number, done_convert;
+ __ bind(&convert);
+ __ JumpIfSmi(a2, &convert_smi);
+ __ lw(t0, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ JumpIfRoot(t0, Heap::kHeapNumberMapRootIndex, &convert_number);
+ {
+ // Parameter is not a Number, use the ToNumberStub to convert it.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ SmiTag(a3);
+ __ Push(a0, a1, a3);
+ __ mov(a0, a2);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(a2, v0);
+ __ Pop(a0, a1, a3);
+ {
+ // Restore the double accumulator value (f0).
+ Label restore_smi, done_restore;
+ __ JumpIfSmi(a1, &restore_smi);
+ __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ jmp(&done_restore);
+ __ bind(&restore_smi);
+ __ SmiToDoubleFPURegister(a1, f0, t0);
+ __ bind(&done_restore);
+ }
+ __ SmiUntag(a3);
+ __ SmiUntag(a0);
+ }
+ __ jmp(&convert);
+ __ bind(&convert_number);
+ __ ldc1(f2, FieldMemOperand(a2, HeapNumber::kValueOffset));
+ __ jmp(&done_convert);
+ __ bind(&convert_smi);
+ __ SmiToDoubleFPURegister(a2, f2, t0);
+ __ bind(&done_convert);
+
+ // Perform the actual comparison with the accumulator value on the left hand
+ // side (f0) and the next parameter value on the right hand side (f2).
+ Label compare_equal, compare_nan, compare_swap;
+ __ BranchF(&compare_equal, &compare_nan, eq, f0, f2);
+ __ BranchF(&compare_swap, nullptr, cc, f0, f2);
+ __ Branch(&loop);
+
+ // Left and right hand side are equal, check for -0 vs. +0.
+ __ bind(&compare_equal);
+ __ FmoveHigh(t0, reg);
+ __ Branch(&loop, ne, t0, Operand(0x80000000));
+
+ // Result is on the right hand side.
+ __ bind(&compare_swap);
+ __ mov_d(f0, f2);
+ __ mov(a1, a2);
+ __ jmp(&loop);
+
+ // At least one side is NaN, which means that the result will be NaN too.
+ __ bind(&compare_nan);
+ __ LoadRoot(a1, Heap::kNanValueRootIndex);
+ __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ jmp(&loop);
+ }
+
+ __ bind(&done_loop);
+ __ Lsa(sp, sp, a3, kPointerSizeLog2);
+ __ mov(v0, a1);
+ __ DropAndRet(1);
+}
+
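The loop above keeps the running extremum in f0 (double) and a1 (tagged) and compares each argument against it: NaN on either side poisons the result, and an equal compare falls into a sign-bit check so that Math.max(-0, +0) is +0 (and symmetrically for min). A scalar C++ sketch of the max semantics as read from the assembly:

#include <cmath>
#include <limits>

// Scalar sketch of the Generate_MathMaxMin loop for kMax (sketch only;
// the builtin also tracks the tagged value alongside the double).
double MathMax(const double* args, int argc) {
  double acc = -std::numeric_limits<double>::infinity();
  for (int i = 0; i < argc; ++i) {
    const double x = args[i];
    if (std::isnan(acc) || std::isnan(x)) {
      acc = std::numeric_limits<double>::quiet_NaN();  // NaN is contagious
    } else if (acc == x) {
      // -0 vs. +0: prefer the value whose sign bit is clear.
      if (std::signbit(acc) && !std::signbit(x)) acc = x;
    } else if (acc < x) {
      acc = x;  // result moves to the right-hand side
    }
  }
  return acc;
}
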
+// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -157,8 +258,7 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
{
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Subu(a0, a0, Operand(1));
- __ sll(a0, a0, kPointerSizeLog2);
- __ Addu(sp, a0, sp);
+ __ Lsa(sp, sp, a0, kPointerSizeLog2);
__ lw(a0, MemOperand(sp));
__ Drop(2);
}
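
This is the first of many sll+addu pairs in this file that collapse into the Lsa macro-instruction. Assuming Lsa(rd, rt, rs, sa) computes rt + (rs << sa), which matches every replacement in this diff, the expansion is:

#include <cstdint>

// Assumed semantics of the Lsa macro used throughout this diff:
//   Lsa(rd, rt, rs, sa)  =>  rd = rt + (rs << sa)
// On pre-R6 cores this is exactly the sll + addu pair it replaces; on
// MIPS32r6 it can map to the single LSA instruction.
uint32_t Lsa(uint32_t rt, uint32_t rs, unsigned sa) {
  return rt + (rs << sa);
}
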
@@ -194,8 +294,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
Label no_arguments, done;
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Subu(a0, a0, Operand(1));
- __ sll(a0, a0, kPointerSizeLog2);
- __ Addu(sp, a0, sp);
+ __ Lsa(sp, sp, a0, kPointerSizeLog2);
__ lw(a0, MemOperand(sp));
__ Drop(2);
__ jmp(&done);
@@ -234,8 +333,9 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1, a3); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(a0); // first argument
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(a0);
}
__ Ret(USE_DELAY_SLOT);
@@ -259,8 +359,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
{
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Subu(a0, a0, Operand(1));
- __ sll(a0, a0, kPointerSizeLog2);
- __ Addu(sp, a0, sp);
+ __ Lsa(sp, sp, a0, kPointerSizeLog2);
__ lw(a0, MemOperand(sp));
__ Drop(2);
}
@@ -322,8 +421,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
Label no_arguments, done;
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Subu(a0, a0, Operand(1));
- __ sll(a0, a0, kPointerSizeLog2);
- __ Addu(sp, a0, sp);
+ __ Lsa(sp, sp, a0, kPointerSizeLog2);
__ lw(a0, MemOperand(sp));
__ Drop(2);
__ jmp(&done);
@@ -364,33 +462,15 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1, a3); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(a0); // first argument
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(a0);
}
__ Ret(USE_DELAY_SLOT);
__ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot
}
-
-static void CallRuntimePassFunction(
- MacroAssembler* masm, Runtime::FunctionId function_id) {
- // ----------- S t a t e -------------
- // -- a1 : target function (preserved for callee)
- // -- a3 : new target (preserved for callee)
- // -----------------------------------
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the target function and the new target.
- // Push function as parameter to the runtime call.
- __ Push(a1, a3, a1);
-
- __ CallRuntime(function_id, 1);
- // Restore target function and new target.
- __ Pop(a1, a3);
-}
-
-
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
@@ -398,8 +478,27 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ Jump(at);
}
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a1 : target function (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the target function and the new target.
+ // Push function as parameter to the runtime call.
+ __ SmiTag(a0);
+ __ Push(a0, a1, a3, a1);
+
+ __ CallRuntime(function_id, 1);
+
+ // Restore target function and new target.
+ __ Pop(a0, a1, a3);
+ __ SmiUntag(a0);
+ }
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
__ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
}
@@ -415,8 +514,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
@@ -425,7 +523,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool create_implicit_receiver) {
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
@@ -447,144 +546,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(a2, a0);
if (create_implicit_receiver) {
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- // Verify that the new target is a JSFunction.
- __ GetObjectType(a3, t1, t0);
- __ Branch(&rt_call, ne, t0, Operand(JS_FUNCTION_TYPE));
-
- // Load the initial map and verify that it is in fact a map.
- // a3: new target
- __ lw(a2,
- FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &rt_call);
- __ GetObjectType(a2, t5, t4);
- __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ lw(t1, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
- __ Branch(&rt_call, ne, a1, Operand(t1));
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // a1: constructor function
- // a2: initial map
- __ lbu(t5, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&rt_call, eq, t5, Operand(JS_FUNCTION_TYPE));
-
- // Now allocate the JSObject on the heap.
- // a1: constructor function
- // a2: initial map
- // a3: new target
- __ lbu(t3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-
- __ Allocate(t3, t4, t3, t6, &rt_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // a1: constructor function
- // a2: initial map
- // a3: new target
- // t4: JSObject (not HeapObject tagged - the actual address).
- // t3: start of next object
- __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t5, t4);
- STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
- __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
- STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
- __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
- STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
- __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
- STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
- __ Addu(t5, t5, Operand(3 * kPointerSize));
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ Addu(t4, t4, Operand(kHeapObjectTag));
-
- // Fill all the in-object properties with appropriate filler.
- // t4: JSObject (tagged)
- // t5: First in-object property of JSObject (not tagged)
- __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ lw(t0, bit_field3);
- __ DecodeField<Map::ConstructionCounter>(t2, t0);
- // t2: slack tracking counter
- __ Branch(&no_inobject_slack_tracking, lt, t2,
- Operand(Map::kSlackTrackingCounterEnd));
- // Decrease generous allocation count.
- __ Subu(t0, t0, Operand(1 << Map::ConstructionCounter::kShift));
- __ sw(t0, bit_field3);
-
- // Allocate object with a slack.
- __ lbu(a0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- __ sll(a0, a0, kPointerSizeLog2);
- __ subu(a0, t3, a0);
- // a0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, t5,
- Operand(a0));
- }
- __ InitializeFieldsWithFiller(t5, a0, t7);
-
- // To allow truncation fill the remaining fields with one pointer
- // filler map.
- __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(t5, t3, t7);
-
- // t2: slack tracking counter value before decreasing.
- __ Branch(&allocated, ne, t2, Operand(Map::kSlackTrackingCounterEnd));
-
- // Push the constructor, new_target and the object to the stack,
- // and then the initial map as an argument to the runtime call.
- __ Push(a1, a3, t4, a2);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(a1, a3, t4);
-
- // Continue with JSObject being successfully allocated.
- // a1: constructor function
- // a3: new target
- // t4: JSObject
- __ jmp(&allocated);
-
- __ bind(&no_inobject_slack_tracking);
- }
-
- __ InitializeFieldsWithFiller(t5, t3, t7);
-
- // Continue with JSObject being successfully allocated.
- // a1: constructor function
- // a3: new target
- // t4: JSObject
- __ jmp(&allocated);
- }
-
- // Allocate the new receiver object using the runtime call.
- // a1: constructor function
- // a3: new target
- __ bind(&rt_call);
-
- // Push the constructor and new_target twice, second pair as arguments
- // to the runtime call.
- __ Push(a1, a3, a1, a3); // constructor function, new target
- __ CallRuntime(Runtime::kNewObject);
+ // Allocate the new receiver object.
+ __ Push(a1, a3);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(t4, v0);
__ Pop(a1, a3);
- // Receiver for constructor call allocated.
- // a1: constructor function
- // a3: new target
- // t4: JSObject
- __ bind(&allocated);
+ // ----------- S t a t e -------------
+ // -- a1: constructor function
+ // -- a3: new target
+    //  -- t4: newly allocated object
+ // -----------------------------------
// Retrieve smi-tagged arguments count from the stack.
__ lw(a0, MemOperand(sp));
@@ -617,8 +590,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ SmiTag(t4, a0);
__ jmp(&entry);
__ bind(&loop);
- __ sll(t0, t4, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
+ __ Lsa(t0, a2, t4, kPointerSizeLog2 - kSmiTagSize);
__ lw(t1, MemOperand(t0));
__ push(t1);
__ bind(&entry);
@@ -684,8 +656,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Leave construct frame.
}
- __ sll(t0, a1, kPointerSizeLog2 - 1);
- __ Addu(sp, sp, t0);
+ // ES6 9.2.2. Step 13+
+  // A Smi result means the constructor of a derived class returned neither
+  // undefined nor an Object, so throw in that case.
+ if (check_derived_construct) {
+ Label dont_throw;
+ __ JumpIfNotSmi(v0, &dont_throw);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+ }
+ __ bind(&dont_throw);
+ }
+
+ __ Lsa(sp, sp, a1, kPointerSizeLog2 - 1);
__ Addu(sp, sp, kPointerSize);
if (create_implicit_receiver) {
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
@@ -695,17 +679,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, true);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, true);
}
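
The new check_derived_construct path implements ES6 9.2.2 step 13: a derived-class constructor must return an object or undefined, and a Smi-tagged result proves it returned neither, so a single tag test suffices before throwing. A sketch of that test under V8's tagging scheme (Smis have a clear low bit):

#include <cstdint>
#include <stdexcept>

// Sketch: V8 tags Smis with a clear low bit (kSmiTag == 0) and heap
// objects with a set low bit, so JumpIfNotSmi is a single-bit test.
inline bool IsSmi(intptr_t tagged) { return (tagged & 1) == 0; }

void CheckDerivedConstructResult(intptr_t result) {
  if (IsSmi(result)) {
    // Runtime::kThrowDerivedConstructorReturnedNonObject in the builtin.
    throw std::runtime_error("derived constructor returned non-object");
  }
}
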
@@ -787,8 +777,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// a3: argc
// s0: argv, i.e. points to first arg
Label loop, entry;
- __ sll(t0, a3, kPointerSizeLog2);
- __ addu(t2, s0, t0);
+ __ Lsa(t2, s0, a3, kPointerSizeLog2);
__ b(&entry);
__ nop(); // Branch delay slot nop.
// t2 points past last arg.
@@ -851,10 +840,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// o sp: stack pointer
// o ra: return address
//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-mips.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -863,16 +850,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(ra, fp, cp, a1);
__ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- __ Push(a3);
-
- // Push zero for bytecode array offset.
- __ Push(zero_reg);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
__ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ Label load_debug_bytecode_array, bytecode_array_loaded;
+ Register debug_info = kInterpreterBytecodeArrayRegister;
+ DCHECK(!debug_info.is(a0));
+ __ lw(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
+ __ Branch(&load_debug_bytecode_array, ne, debug_info,
+ Operand(DebugInfo::uninitialized()));
__ lw(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+ __ bind(&bytecode_array_loaded);
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -884,6 +874,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(BYTECODE_ARRAY_TYPE));
}
+ // Push new.target, bytecode array and zero for bytecode array offset.
+ __ Push(a3, kInterpreterBytecodeArrayRegister, zero_reg);
+
// Allocate the local and temporary register file on the stack.
{
// Load frame size from the BytecodeArray object.
@@ -914,44 +907,38 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
- // - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Allow simulator stop operations if FLAG_stop_at is set.
// - Code aging of the BytecodeArray object.
- // Perform stack guard check.
- {
- Label ok;
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(at));
- __ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard);
- __ pop(kInterpreterBytecodeArrayRegister);
- __ bind(&ok);
- }
-
// Load bytecode offset and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ Addu(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ Addu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ li(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Dispatch to the first bytecode handler for the function.
__ Addu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a0, MemOperand(a0));
- __ sll(at, a0, kPointerSizeLog2);
- __ Addu(at, kInterpreterDispatchTableRegister, at);
+ __ Lsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
__ lw(at, MemOperand(at));
  // TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
// and header removal.
__ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(at);
+
+ // Even though the first bytecode handler was called, we will never return.
+ __ Abort(kUnexpectedReturnFromBytecodeHandler);
+
+ // Load debug copy of the bytecode array.
+ __ bind(&load_debug_bytecode_array);
+ __ lw(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ __ Branch(&bytecode_array_loaded);
}
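
The dispatch table is no longer a heap FixedArray reached through a root; it is an off-heap table addressed via an ExternalReference, and the current bytecode simply indexes it. A sketch of the dispatch step performed above:

#include <cstdint>

// Sketch of the interpreter dispatch sequence: load the bytecode at the
// current offset and jump to the handler it indexes in the (now off-heap)
// dispatch table.
using BytecodeHandler = void (*)();

void Dispatch(const uint8_t* bytecode_array, int offset,
              BytecodeHandler const* dispatch_table) {
  const uint8_t bytecode = bytecode_array[offset];
  dispatch_table[bytecode]();  // control does not return here
}
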
@@ -976,7 +963,8 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1001,7 +989,9 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
__ Branch(&loop_header, gt, a2, Operand(a3));
// Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
}
@@ -1036,47 +1026,24 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
}
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(kInterpreterAccumulatorRegister); // Save accumulator register.
-
- // Pass the deoptimization type to the runtime system.
- __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(a1);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
-
- __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
- // Tear down internal frame.
- }
-
- // Drop state (we don't use this for interpreter deopts).
- __ Drop(1);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Initialize register file register and dispatch table register.
__ Addu(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ Addu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ li(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Get the context from the frame.
- // TODO(rmcilroy): Update interpreter frame to expect current context at the
- // context slot instead of the function context.
__ lw(kContextRegister,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
- __ lw(a1,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
- __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(a1, SharedFunctionInfo::kFunctionDataOffset));
+ __ lw(
+ kInterpreterBytecodeArrayRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1099,14 +1066,36 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
__ Addu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a1, MemOperand(a1));
- __ sll(a1, a1, kPointerSizeLog2);
- __ Addu(a1, kInterpreterDispatchTableRegister, a1);
+ __ Lsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
__ lw(a1, MemOperand(a1));
__ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(a1);
}
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Pass the deoptimization type to the runtime system.
+ __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(a1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ // Tear down internal frame.
+ }
+
+  // Drop state (we don't use this for interpreter deopts) and pop the
+ // accumulator value into the accumulator register.
+ __ Drop(1);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ // Enter the bytecode dispatch.
+ Generate_EnterBytecodeDispatch(masm);
+}
+
+
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
@@ -1121,22 +1110,30 @@ void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the address of the interpreter entry trampoline as a return address.
+ // This simulates the initial call to bytecode handlers in interpreter entry
+ // trampoline. The return will never actually be taken, but our stack walker
+ // uses this address to determine whether a frame is interpreted.
+ __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+
+ Generate_EnterBytecodeDispatch(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
@@ -1356,13 +1353,11 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
// Load the next prototype and iterate.
__ bind(&next_prototype);
- __ lw(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
- // End if the prototype is null or not hidden.
- __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
- __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lw(scratch, FieldMemOperand(map, Map::kBitField3Offset));
- __ DecodeField<Map::IsHiddenPrototype>(scratch);
+ __ DecodeField<Map::HasHiddenPrototype>(scratch);
__ Branch(receiver_check_failed, eq, scratch, Operand(zero_reg));
+ __ lw(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ Branch(&prototype_loop_start);
@@ -1387,8 +1382,7 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// Do the compatible receiver check.
Label receiver_check_failed;
- __ sll(at, a0, kPointerSizeLog2);
- __ Addu(t8, sp, at);
+ __ Lsa(t8, sp, a0, kPointerSizeLog2);
__ lw(t0, MemOperand(t8));
CompatibleReceiverCheck(masm, t0, t1, &receiver_check_failed);
@@ -1522,6 +1516,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Register scratch = t0;
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ mov(a3, a2);
+  // Lsa() cannot be used here because the scratch value is used later.
__ sll(scratch, a0, kPointerSizeLog2);
__ Addu(a0, sp, Operand(scratch));
__ lw(a1, MemOperand(a0)); // receiver
@@ -1592,8 +1587,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack.
// a0: actual number of arguments
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
__ lw(a1, MemOperand(at));
// 3. Shift arguments and return address one slot down on the stack
@@ -1604,8 +1598,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
{
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(a2, sp, at);
+ __ Lsa(a2, sp, a0, kPointerSizeLog2);
__ bind(&loop);
__ lw(at, MemOperand(a2, -kPointerSize));
@@ -1705,6 +1698,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Register scratch = t0;
__ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
__ mov(a2, a1);
+ // Lsa() cannot be used here because the scratch value is used later.
__ sll(scratch, a0, kPointerSizeLog2);
__ Addu(a0, sp, Operand(scratch));
__ sw(a2, MemOperand(a0)); // receiver
@@ -1806,8 +1800,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
kPointerSize)));
__ mov(sp, fp);
__ MultiPop(fp.bit() | ra.bit());
- __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(sp, sp, t0);
+ __ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
// Adjust for the receiver.
__ Addu(sp, sp, Operand(kPointerSize));
}
@@ -1859,9 +1852,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Try to create the list from an arguments object.
__ bind(&create_arguments);
- __ lw(a2,
- FieldMemOperand(a0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ lw(a2, FieldMemOperand(a0, JSArgumentsObject::kLengthOffset));
__ lw(t0, FieldMemOperand(a0, JSObject::kElementsOffset));
__ lw(at, FieldMemOperand(t0, FixedArray::kLengthOffset));
__ Branch(&create_runtime, ne, a2, Operand(at));
@@ -1915,8 +1906,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
Label done, loop;
__ bind(&loop);
__ Branch(&done, eq, t0, Operand(a2));
- __ sll(at, t0, kPointerSizeLog2);
- __ Addu(at, a0, at);
+ __ Lsa(at, a0, t0, kPointerSizeLog2);
__ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize));
__ Push(at);
__ Addu(t0, t0, Operand(1));
@@ -1936,10 +1926,134 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// | ...
+// | g()'s arg M
+// | ...
+// | g()'s arg 1
+// | g()'s receiver arg
+// | g()'s caller pc
+// ------- g()'s frame: -------
+// | g()'s caller fp <- fp
+// | g()'s context
+// | function pointer: g
+// | -------------------------
+// | ...
+// | ...
+// | f()'s arg N
+// | ...
+// | f()'s arg 1
+// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Comment cmnt(masm, "[ PrepareForTailCall");
+
+ // Prepare for tail call only if the debugger is not active.
+ Label done;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(masm->isolate());
+ __ li(at, Operand(debug_is_active));
+ __ lb(scratch1, MemOperand(at));
+ __ Branch(&done, ne, scratch1, Operand(zero_reg));
+
+ // Drop possible interpreter handler/stub frame.
+ {
+ Label no_interpreter_frame;
+ __ lw(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ Branch(&no_interpreter_frame, ne, scratch3,
+ Operand(Smi::FromInt(StackFrame::STUB)));
+ __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&no_interpreter_frame);
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ Branch(&no_arguments_adaptor, ne, scratch3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Drop arguments adaptor frame and load arguments count.
+ __ mov(fp, scratch2);
+ __ lw(scratch1,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(scratch1);
+ __ Branch(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ lw(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(scratch1,
+ FieldMemOperand(scratch1,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(scratch1);
+
+ __ bind(&formal_parameter_count_loaded);
+
+ // Calculate the end of the destination area where we will put the arguments
+ // after we drop the current frame. We add kPointerSize to account for the
+ // receiver argument, which is not included in the formal parameter count.
+ Register dst_reg = scratch2;
+ __ Lsa(dst_reg, fp, scratch1, kPointerSizeLog2);
+ __ Addu(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = scratch1;
+ __ Lsa(src_reg, sp, args_reg, kPointerSizeLog2);
+ // Count receiver argument as well (not included in args_reg).
+ __ Addu(src_reg, src_reg, Operand(kPointerSize));
+
+ if (FLAG_debug_code) {
+ __ Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Now copy the callee's arguments to the caller's frame, going backwards to
+ // avoid corrupting them (the source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch3;
+ Label loop, entry;
+ __ Branch(&entry);
+ __ bind(&loop);
+ __ Subu(src_reg, src_reg, Operand(kPointerSize));
+ __ Subu(dst_reg, dst_reg, Operand(kPointerSize));
+ __ lw(tmp_reg, MemOperand(src_reg));
+ __ sw(tmp_reg, MemOperand(dst_reg));
+ __ bind(&entry);
+ __ Branch(&loop, ne, sp, Operand(src_reg));
+
+ // Leave current frame.
+ __ mov(sp, dst_reg);
+
+ __ bind(&done);
+}
+} // namespace
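Note: the copy loop in PrepareForTailCall above moves words downward while the source and destination ranges may overlap. A standalone sketch of the same loop in plain C++ (pointer roles mirror src_reg, dst_reg and sp; illustrative only, not the patch's code):

    #include <cstdint>

    // src and dst start one word past the last element to move; copying runs
    // backwards so an overlapping destination never clobbers unread source
    // words. stop plays the role of sp.
    void CopyArgumentsDown(intptr_t* stop, intptr_t* src, intptr_t* dst) {
      while (src != stop) {  // mirrors Branch(&loop, ne, sp, Operand(src_reg))
        --src;               // pre-decrement, as the Subu instructions do
        --dst;
        *dst = *src;
      }
    }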
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode) {
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
@@ -1979,8 +2093,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(a3);
} else {
Label convert_to_object, convert_receiver;
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
__ lw(a3, MemOperand(at));
__ JumpIfSmi(a3, &convert_to_object);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
@@ -2016,8 +2129,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
__ sw(a3, MemOperand(at));
}
__ bind(&done_convert);
@@ -2029,6 +2141,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, a0, t0, t1, t2);
+ }
+
__ lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
__ sra(a2, a2, kSmiTagSize); // Un-tag.
@@ -2048,18 +2164,22 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(a1);
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, a0, t0, t1, t2);
+ }
+
// Patch the receiver to [[BoundThis]].
{
__ lw(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
- __ sll(t0, a0, kPointerSizeLog2);
- __ addu(t0, t0, sp);
+ __ Lsa(t0, sp, a0, kPointerSizeLog2);
__ sw(at, MemOperand(t0));
}
@@ -2100,11 +2220,9 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
__ mov(t1, zero_reg);
__ bind(&loop);
__ Branch(&done_loop, gt, t1, Operand(a0));
- __ sll(t2, t0, kPointerSizeLog2);
- __ addu(t2, t2, sp);
+ __ Lsa(t2, sp, t0, kPointerSizeLog2);
__ lw(at, MemOperand(t2));
- __ sll(t2, t1, kPointerSizeLog2);
- __ addu(t2, t2, sp);
+ __ Lsa(t2, sp, t1, kPointerSizeLog2);
__ sw(at, MemOperand(t2));
__ Addu(t0, t0, Operand(1));
__ Addu(t1, t1, Operand(1));
@@ -2121,11 +2239,9 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
__ bind(&loop);
__ Subu(t0, t0, Operand(1));
__ Branch(&done_loop, lt, t0, Operand(zero_reg));
- __ sll(t1, t0, kPointerSizeLog2);
- __ addu(t1, t1, a2);
+ __ Lsa(t1, a2, t0, kPointerSizeLog2);
__ lw(at, MemOperand(t1));
- __ sll(t1, a0, kPointerSizeLog2);
- __ addu(t1, t1, sp);
+ __ Lsa(t1, sp, a0, kPointerSizeLog2);
__ sw(at, MemOperand(t1));
__ Addu(a0, a0, Operand(1));
__ Branch(&loop);
@@ -2143,7 +2259,8 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call (can be any Object).
@@ -2153,12 +2270,23 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
__ GetObjectType(a1, t1, t2);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Check if target has a [[Call]] internal method.
+ __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t1, t1, Operand(1 << Map::kIsCallable));
+ __ Branch(&non_callable, eq, t1, Operand(zero_reg));
+
__ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
+ // 0. Prepare for tail call if necessary.
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, a0, t0, t1, t2);
+ }
+
// 1. Runtime fallback for Proxy [[Call]].
__ Push(a1);
// Increase the arguments size to include the pushed function and the
@@ -2171,18 +2299,13 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
__ bind(&non_function);
- // Check if target has a [[Call]] internal method.
- __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t1, t1, Operand(1 << Map::kIsCallable));
- __ Branch(&non_callable, eq, t1, Operand(zero_reg));
// Overwrite the original receiver with the (original) target.
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(at));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2264,11 +2387,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ mov(t1, zero_reg);
__ bind(&loop);
__ Branch(&done_loop, ge, t1, Operand(a0));
- __ sll(t2, t0, kPointerSizeLog2);
- __ addu(t2, t2, sp);
+ __ Lsa(t2, sp, t0, kPointerSizeLog2);
__ lw(at, MemOperand(t2));
- __ sll(t2, t1, kPointerSizeLog2);
- __ addu(t2, t2, sp);
+ __ Lsa(t2, sp, t1, kPointerSizeLog2);
__ sw(at, MemOperand(t2));
__ Addu(t0, t0, Operand(1));
__ Addu(t1, t1, Operand(1));
@@ -2285,11 +2406,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ bind(&loop);
__ Subu(t0, t0, Operand(1));
__ Branch(&done_loop, lt, t0, Operand(zero_reg));
- __ sll(t1, t0, kPointerSizeLog2);
- __ addu(t1, t1, a2);
+ __ Lsa(t1, a2, t0, kPointerSizeLog2);
__ lw(at, MemOperand(t1));
- __ sll(t1, a0, kPointerSizeLog2);
- __ addu(t1, t1, sp);
+ __ Lsa(t1, sp, a0, kPointerSizeLog2);
__ sw(at, MemOperand(t1));
__ Addu(a0, a0, Operand(1));
__ Branch(&loop);
@@ -2368,8 +2487,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an exotic Object with a [[Construct]] internal method.
{
// Overwrite the original receiver with the (original) target.
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(at));
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
@@ -2412,8 +2530,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ArgumentAdaptorStackCheck(masm, &stack_overflow);
// Calculate copy start address into a0 and copy end address into t1.
- __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a0, fp, a0);
+ __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
// Adjust for return address and receiver.
__ Addu(a0, a0, Operand(2 * kPointerSize));
// Compute copy end address.
@@ -2468,8 +2585,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a1: function
// a2: expected number of arguments
// a3: new target (passed through to callee)
- __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a0, fp, a0);
+ __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
// Adjust for return address and receiver.
__ Addu(a0, a0, Operand(2 * kPointerSize));
// Compute copy end address. Also adjust for return address.
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index f88d3bd5b4..77dbcb122d 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -91,9 +91,8 @@ void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
#define __ ACCESS_MASM(masm)
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cc, Strength strength);
+ Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -275,7 +274,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cc, Strength strength) {
+ Condition cc) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = t5;
@@ -296,29 +295,15 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
__ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics, since
- // we need to throw a TypeError. Smis have already been ruled out.
- __ Branch(&return_equal, eq, t4, Operand(HEAP_NUMBER_TYPE));
- __ And(t4, t4, Operand(kIsNotStringMask));
- __ Branch(slow, ne, t4, Operand(zero_reg));
- }
} else {
__ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
if (cc != eq) {
- __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
- // Call runtime on identical symbols since we need to throw a TypeError.
- __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
- // Call runtime on identical SIMD values since we must throw a TypeError.
- __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics,
- // since we need to throw a TypeError. Smis and heap numbers have
- // already been ruled out.
- __ And(t4, t4, Operand(kIsNotStringMask));
- __ Branch(slow, ne, t4, Operand(zero_reg));
- }
+ __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
+ // Call runtime on identical symbols since we need to throw a TypeError.
+ __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
+ // Call runtime on identical SIMD values since we must throw a TypeError.
+ __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -514,45 +499,55 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
+ Register lhs, Register rhs,
Label* possible_strings,
- Label* not_both_strings) {
+ Label* runtime_call) {
DCHECK((lhs.is(a0) && rhs.is(a1)) ||
(lhs.is(a1) && rhs.is(a0)));
// a2 is object type of rhs.
- Label object_test;
+ Label object_test, return_unequal, undetectable;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ And(at, a2, Operand(kIsNotStringMask));
__ Branch(&object_test, ne, at, Operand(zero_reg));
__ And(at, a2, Operand(kIsNotInternalizedMask));
__ Branch(possible_strings, ne, at, Operand(zero_reg));
__ GetObjectType(rhs, a3, a3);
- __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
+ __ Branch(runtime_call, ge, a3, Operand(FIRST_NONSTRING_TYPE));
__ And(at, a3, Operand(kIsNotInternalizedMask));
__ Branch(possible_strings, ne, at, Operand(zero_reg));
- // Both are internalized strings. We already checked they weren't the same
- // pointer so they are not equal.
+ // Both are internalized. We already checked they weren't the same pointer,
+ // so they are not equal. Return non-equal by returning the non-zero object
+ // pointer in v0.
__ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(1)); // Non-zero indicates not equal.
+ __ mov(v0, a0); // In delay slot.
__ bind(&object_test);
- __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
- __ GetObjectType(rhs, a2, a3);
- __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
-
- // If both objects are undetectable, they are equal. Otherwise, they
- // are not equal, since they are different objects and an object is not
- // equal to undefined.
- __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
- __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
- __ and_(a0, a2, a3);
- __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
+ __ lw(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ lw(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
+ __ And(at, t0, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&undetectable, ne, at, Operand(zero_reg));
+ __ And(at, t1, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&return_unequal, ne, at, Operand(zero_reg));
+
+ __ GetInstanceType(a2, a2);
+ __ Branch(runtime_call, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ GetInstanceType(a3, a3);
+ __ Branch(runtime_call, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in v0.
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // In delay slot.
+
+ __ bind(&undetectable);
+ __ And(at, t1, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&return_unequal, eq, at, Operand(zero_reg));
__ Ret(USE_DELAY_SLOT);
- __ xori(v0, a0, 1 << Map::kIsUndetectable);
+ __ li(v0, Operand(EQUAL)); // In delay slot.
}
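Note: the rewritten object_test path encodes a small decision table over the undetectable bits and instance types. A standalone sketch of that table (the boolean inputs are hypothetical distillations of the Map::kIsUndetectable and FIRST_JS_RECEIVER_TYPE checks above):

    enum Result { kEqual, kUnequal, kRuntimeCall };

    Result ObjectTest(bool lhs_undetectable, bool rhs_undetectable,
                      bool lhs_receiver, bool rhs_receiver) {
      if (lhs_undetectable) return rhs_undetectable ? kEqual : kUnequal;
      if (rhs_undetectable) return kUnequal;  // only one side is undetectable
      if (lhs_receiver && rhs_receiver) return kUnequal;  // distinct objects
      return kRuntimeCall;                    // everything else: runtime
    }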
@@ -603,7 +598,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc, strength());
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@@ -742,8 +737,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
- : Runtime::kCompare);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -973,7 +967,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cvt_d_w(double_exponent, single_scratch);
// Returning or bailing out.
- Counters* counters = isolate()->counters();
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
@@ -987,7 +980,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
DCHECK(heapnumber.is(v0));
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ DropAndRet(2);
} else {
__ push(ra);
@@ -1003,7 +995,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ MovFromFloatResult(double_result);
__ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret();
}
}
@@ -1075,8 +1066,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ mov(s1, a2);
} else {
// Compute the argv pointer in a callee-saved register.
- __ sll(s1, a0, kPointerSizeLog2);
- __ Addu(s1, sp, s1);
+ __ Lsa(s1, sp, a0, kPointerSizeLog2);
__ Subu(s1, s1, kPointerSize);
}
@@ -1092,48 +1082,77 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// a0 = argc
__ mov(s0, a0);
__ mov(s2, a1);
- // a1 = argv (set in the delay slot after find_ra below).
// We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
// also need to reserve the 4 argument slots on the stack.
__ AssertStackIsAligned();
- __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+ int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+ int result_stack_size;
+ if (result_size() <= 2) {
+ // a0 = argc, a1 = argv, a2 = isolate
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(a1, s1);
+ result_stack_size = 0;
+ } else {
+ DCHECK_EQ(3, result_size());
+ // Allocate additional space for the result.
+ result_stack_size =
+ ((result_size() * kPointerSize) + frame_alignment_mask) &
+ ~frame_alignment_mask;
+ __ Subu(sp, sp, Operand(result_stack_size));
+
+ // a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
+ __ li(a3, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(a2, s1);
+ __ mov(a1, a0);
+ __ mov(a0, sp);
+ }
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
// we can store the address on the stack to be able to find it again and
// we never have to restore it, because it will not change.
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
- // This branch-and-link sequence is needed to find the current PC on mips,
- // saved to the ra register.
- // Use masm-> here instead of the double-underscore macro since extra
- // coverage code can interfere with the proper calculation of ra.
+ int kNumInstructionsToJump = 4;
Label find_ra;
- masm->bal(&find_ra); // bal exposes branch delay slot.
- masm->mov(a1, s1);
- masm->bind(&find_ra);
-
// Adjust the value in ra to point to the correct return location, 2nd
// instruction past the real call into C code (the jalr(t9)), and push it.
// This is the return address of the exit frame.
- const int kNumInstructionsToJump = 5;
- masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
- masm->sw(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
+ if (kArchVariant >= kMips32r6) {
+ __ addiupc(ra, kNumInstructionsToJump + 1);
+ } else {
+ // This branch-and-link sequence is needed to find the current PC on mips
+ // before r6, saved to the ra register.
+ __ bal(&find_ra); // bal exposes branch delay slot.
+ __ Addu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
+ }
+ __ bind(&find_ra);
+
+ // This spot was reserved in EnterExitFrame.
+ __ sw(ra, MemOperand(sp, result_stack_size));
// Stack space reservation moved to the branch delay slot below.
// Stack is still aligned.
// Call the C routine.
- masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
- masm->jalr(t9);
+ __ mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
+ __ jalr(t9);
// Set up sp in the delay slot.
- masm->addiu(sp, sp, -kCArgsSlotsSize);
+ __ addiu(sp, sp, -kCArgsSlotsSize);
// Make sure the stored 'ra' points to this position.
DCHECK_EQ(kNumInstructionsToJump,
masm->InstructionsGeneratedSince(&find_ra));
}
-
+ if (result_size() > 2) {
+ DCHECK_EQ(3, result_size());
+ // Read result values stored on stack.
+ __ lw(a0, MemOperand(v0, 2 * kPointerSize));
+ __ lw(v1, MemOperand(v0, 1 * kPointerSize));
+ __ lw(v0, MemOperand(v0, 0 * kPointerSize));
+ }
+ // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!
// Check result for exception sentinel.
Label exception_returned;
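Note: the new result_stack_size computation rounds the result buffer up to the activation frame alignment before carving it out of the stack. A standalone sketch of the rounding (constants illustrative; frame_alignment must be a power of two):

    // ((size) + mask) & ~mask rounds size up to the next multiple of the
    // alignment, matching the expression used for result_stack_size above.
    int AlignUp(int size_in_bytes, int frame_alignment) {
      int mask = frame_alignment - 1;
      return (size_in_bytes + mask) & ~mask;
    }
    // e.g. AlignUp(3 * 4, 8) == 16: three 4-byte result words reserve 16 bytes.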
@@ -1556,303 +1575,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The displacement is the offset of the last parameter (if any)
- // relative to the frame pointer.
- const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
- DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
- DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(a1, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor,
- eq,
- a3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Check index (a1) against formal parameters count limit passed in
- // through register a0. Use unsigned comparison to get negative
- // check for free.
- __ Branch(&slow, hs, a1, Operand(a0));
-
- // Read the argument from the stack and return it.
- __ subu(a3, a0, a1);
- __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a3, fp, Operand(t3));
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, MemOperand(a3, kDisplacement));
-
- // Arguments adaptor case: Check index (a1) against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
-
- // Read the argument from the adaptor frame and return it.
- __ subu(a3, a0, a1);
- __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a3, a2, Operand(t3));
- __ Ret(USE_DELAY_SLOT);
- __ lw(v0, MemOperand(a3, kDisplacement));
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ push(a1);
- __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // a1 : function
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
-
- DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime, ne, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer in the current frame.
- __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sll(t3, a2, 1);
- __ Addu(t0, t0, Operand(t3));
- __ addiu(a3, t0, StandardFrameConstants::kCallerSPOffset);
-
- __ bind(&runtime);
- __ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // a1 : function
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
- // Registers used over whole function:
- // t1 : arguments count (tagged)
- // t2 : mapped parameter count (tagged)
-
- DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame, eq, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // No adaptor, parameter count = argument count.
- __ mov(t1, a2);
- __ Branch(USE_DELAY_SLOT, &try_allocate);
- __ mov(t2, a2); // In delay slot.
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ lw(t1, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sll(t6, t1, 1);
- __ Addu(t0, t0, Operand(t6));
- __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // t1 = argument count (tagged)
- // t2 = parameter count (tagged)
- // Compute the mapped parameter count = min(t2, t1) in t2.
- __ mov(t2, a2);
- __ Branch(&try_allocate, le, t2, Operand(t1));
- __ mov(t2, t1);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- Label param_map_size;
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- __ Branch(USE_DELAY_SLOT, &param_map_size, eq, t2, Operand(zero_reg));
- __ mov(t5, zero_reg); // In delay slot: param map size = 0 when t2 == 0.
- __ sll(t5, t2, 1);
- __ addiu(t5, t5, kParameterMapHeaderSize);
- __ bind(&param_map_size);
-
- // 2. Backing store.
- __ sll(t6, t1, 1);
- __ Addu(t5, t5, Operand(t6));
- __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT);
-
- // v0 = address of new object(s) (tagged)
- // a2 = argument count (smi-tagged)
- // Get the arguments boilerplate from the current native context into t0.
- const int kNormalOffset =
- Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
- __ lw(t0, NativeContextMemOperand());
- Label skip2_ne, skip2_eq;
- __ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
- __ lw(t0, MemOperand(t0, kNormalOffset));
- __ bind(&skip2_ne);
-
- __ Branch(&skip2_eq, eq, t2, Operand(zero_reg));
- __ lw(t0, MemOperand(t0, kAliasedOffset));
- __ bind(&skip2_eq);
-
- // v0 = address of new object (tagged)
- // a2 = argument count (smi-tagged)
- // t0 = address of arguments map (tagged)
- // t2 = mapped parameter count (tagged)
- __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(t5, Heap::kEmptyFixedArrayRootIndex);
- __ sw(t5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(t5, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ AssertNotSmi(a1);
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ sw(a1, FieldMemOperand(v0, kCalleeOffset));
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(t1);
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- const int kLengthOffset = JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize;
- __ sw(t1, FieldMemOperand(v0, kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, t0 will point there, otherwise
- // it will point to the backing store.
- __ Addu(t0, v0, Operand(Heap::kSloppyArgumentsObjectSize));
- __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // v0 = address of new object (tagged)
- // a2 = argument count (tagged)
- // t0 = address of parameter map or backing store (tagged)
- // t2 = mapped parameter count (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- Label skip3;
- __ Branch(&skip3, ne, t2, Operand(Smi::FromInt(0)));
- // Move backing store address to a1, because it is
- // expected there when filling in the unmapped arguments.
- __ mov(a1, t0);
- __ bind(&skip3);
-
- __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::FromInt(0)));
-
- __ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
- __ Addu(t1, t2, Operand(Smi::FromInt(2)));
- __ sw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
- __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ sll(t6, t2, 1);
- __ Addu(t1, t0, Operand(t6));
- __ Addu(t1, t1, Operand(kParameterMapHeaderSize));
- __ sw(t1, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameter thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ mov(t1, t2);
- __ Addu(t5, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ Subu(t5, t5, Operand(t2));
- __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
- __ sll(t6, t1, 1);
- __ Addu(a1, t0, Operand(t6));
- __ Addu(a1, a1, Operand(kParameterMapHeaderSize));
-
- // a1 = address of backing store (tagged)
- // t0 = address of parameter map (tagged)
- // a0 = temporary scratch (a.o., for address calculation)
- // t1 = loop variable (tagged)
- // t3 = the hole value
- __ jmp(&parameters_test);
-
- __ bind(&parameters_loop);
- __ Subu(t1, t1, Operand(Smi::FromInt(1)));
- __ sll(a0, t1, 1);
- __ Addu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ Addu(t6, t0, a0);
- __ sw(t5, MemOperand(t6));
- __ Subu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ Addu(t6, a1, a0);
- __ sw(t3, MemOperand(t6));
- __ Addu(t5, t5, Operand(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ Branch(&parameters_loop, ne, t1, Operand(Smi::FromInt(0)));
-
- // t1 = argument count (tagged).
- __ lw(t1, FieldMemOperand(v0, kLengthOffset));
-
- __ bind(&skip_parameter_map);
- // v0 = address of new object (tagged)
- // a1 = address of backing store (tagged)
- // t1 = argument count (tagged)
- // t2 = mapped parameter count (tagged)
- // t5 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
- __ sw(t5, FieldMemOperand(a1, FixedArray::kMapOffset));
- __ sw(t1, FieldMemOperand(a1, FixedArray::kLengthOffset));
-
- Label arguments_loop, arguments_test;
- __ sll(t6, t2, 1);
- __ Subu(a3, a3, Operand(t6));
- __ jmp(&arguments_test);
-
- __ bind(&arguments_loop);
- __ Subu(a3, a3, Operand(kPointerSize));
- __ lw(t0, MemOperand(a3, 0));
- __ sll(t6, t2, 1);
- __ Addu(t5, a1, Operand(t6));
- __ sw(t0, FieldMemOperand(t5, FixedArray::kHeaderSize));
- __ Addu(t2, t2, Operand(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ Branch(&arguments_loop, lt, t2, Operand(t1));
-
- // Return.
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- // t1 = argument count (tagged)
- __ bind(&runtime);
- __ Push(a1, a3, t1);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Return address is in ra.
Label slow;
@@ -1876,121 +1598,6 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // a1 : function
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
-
- DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label try_allocate, runtime;
- __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
- __ Branch(&try_allocate, ne, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer.
- __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, t0, Operand(at));
- __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Try the new space allocation. Start out with computing the size
- // of the arguments object and the elements array in words.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ SmiUntag(t5, a2);
- __ Branch(&add_arguments_object, eq, a2, Operand(zero_reg));
-
- __ Addu(t5, t5, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ bind(&add_arguments_object);
- __ Addu(t5, t5, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
-
- // Do the allocation of both objects in one go.
- __ Allocate(t5, v0, t0, t1, &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current native context.
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, t0);
-
- __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
- __ sw(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ AssertSmi(a2);
- __ sw(a2,
- FieldMemOperand(v0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
-
- Label done;
- __ Branch(&done, eq, a2, Operand(zero_reg));
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
- __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
- __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
- __ sw(a2, FieldMemOperand(t0, FixedArray::kLengthOffset));
- __ SmiUntag(a2);
-
- // Copy the fixed array slots.
- Label loop;
- // Set up t0 to point to the first array slot.
- __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- // Pre-decrement a3 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ Addu(a3, a3, Operand(-kPointerSize));
- __ lw(t1, MemOperand(a3));
- // Post-increment t0 with kPointerSize on each iteration.
- __ sw(t1, MemOperand(t0));
- __ Addu(t0, t0, Operand(kPointerSize));
- __ Subu(a2, a2, Operand(1));
- __ Branch(&loop, ne, a2, Operand(zero_reg));
-
- // Return.
- __ bind(&done);
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
- // a1 : rest parameter index (tagged)
- // Check if the calling frame is an arguments adaptor frame.
-
- Label runtime;
- __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(t1, MemOperand(t0, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime, ne, t1,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer.
- __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sll(t1, a2, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a3, t0, Operand(t1));
- __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ Push(a2, a3, a1);
- __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2461,8 +2068,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
masm->isolate()->heap()->uninitialized_symbol());
// Load the cache state into t2.
- __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t2, a2, Operand(t2));
+ __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(t2, FieldMemOperand(t2, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
@@ -2506,8 +2112,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t2, a2, Operand(t2));
+ __ Lsa(t2, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ sw(at, FieldMemOperand(t2, FixedArray::kHeaderSize));
__ jmp(&done);
@@ -2547,8 +2152,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
GenerateRecordCallTarget(masm);
- __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t1, a2, at);
+ __ Lsa(t1, a2, a3, kPointerSizeLog2 - kSmiTagSize);
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into a2, or undefined.
__ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
@@ -2587,8 +2191,7 @@ void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
__ li(a0, Operand(arg_count()));
// Increment the call count for monomorphic function calls.
- __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(at, a2, Operand(at));
+ __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
__ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
@@ -2609,8 +2212,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
ParameterCount actual(argc);
// The checks. First, does r1 match the recorded monomorphic target?
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
+ __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
// We don't know that we have a weak cell. We might have a private symbol
@@ -2635,14 +2237,14 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(a1, &extra_checks_or_miss);
// Increment the call count for monomorphic function calls.
- __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(at, a2, Operand(at));
+ __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ lw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
__ Addu(a3, a3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ sw(a3, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
__ bind(&call_function);
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+ tail_call_mode()),
RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
USE_DELAY_SLOT);
__ li(a0, Operand(argc)); // In delay slot.
@@ -2676,13 +2278,12 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ AssertNotSmi(t0);
__ GetObjectType(t0, t1, t1);
__ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
- __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a2, Operand(t0));
+ __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
__ bind(&call);
- __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
USE_DELAY_SLOT);
__ li(a0, Operand(argc)); // In delay slot.
@@ -2708,8 +2309,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ Branch(&miss, ne, t0, Operand(t1));
// Initialize the call counter.
- __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(at, a2, Operand(at));
+ __ Lsa(at, a2, a3, kPointerSizeLog2 - kSmiTagSize);
__ li(t0, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ sw(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
@@ -2873,8 +2473,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
// At this point code register contains smi tagged one-byte char code.
STATIC_ASSERT(kSmiTag == 0);
- __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(result_, result_, t0);
+ __ Lsa(result_, result_, code_, kPointerSizeLog2 - kSmiTagSize);
__ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
__ Branch(&slow_case_, eq, result_, Operand(t0));
@@ -3131,8 +2730,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Locate first character of substring to copy.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ sll(t0, a3, 1);
- __ Addu(t1, t1, t0);
+ __ Lsa(t1, t1, a3, 1);
// Locate first character of result.
__ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -3259,6 +2857,39 @@ void ToStringStub::Generate(MacroAssembler* masm) {
}
+void ToNameStub::Generate(MacroAssembler* masm) {
+ // The ToName stub takes one argument in a0.
+ Label is_number;
+ __ JumpIfSmi(a0, &is_number);
+
+ Label not_name;
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ GetObjectType(a0, a1, a1);
+ // a0: receiver
+ // a1: receiver instance type
+ __ Branch(&not_name, gt, a1, Operand(LAST_NAME_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_name);
+
+ Label not_heap_number;
+ __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ lw(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
+ __ bind(&not_oddball);
+
+ __ push(a0); // Push argument.
+ __ TailCallRuntime(Runtime::kToName);
+}
+
+
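Note: ToNameStub above shares one is_number exit between the Smi and HeapNumber checks. A standalone summary of the dispatch it implements (hypothetical predicates, not V8's real object model):

    enum ToNamePath { kReturnAsIs, kNumberToString, kOddballToString, kRuntime };

    ToNamePath Classify(bool is_smi, bool is_name, bool is_heap_number,
                        bool is_oddball) {
      if (is_smi || is_heap_number) return kNumberToString;  // is_number path
      if (is_name) return kReturnAsIs;          // Strings and Symbols
      if (is_oddball) return kOddballToString;  // cached Oddball to_string
      return kRuntime;                          // Runtime::kToName fallback
    }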
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -3431,18 +3062,14 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
- if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
- } else {
- if (!Token::IsEqualityOp(op())) {
- __ lw(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
- __ AssertSmi(a1);
- __ lw(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
- __ AssertSmi(a0);
- }
- __ Ret(USE_DELAY_SLOT);
- __ Subu(v0, a1, a0);
+ if (!Token::IsEqualityOp(op())) {
+ __ lw(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
+ __ AssertSmi(a1);
+ __ lw(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+ __ AssertSmi(a0);
}
+ __ Ret(USE_DELAY_SLOT);
+ __ Subu(v0, a1, a0);
__ bind(&miss);
GenerateMiss(masm);
@@ -3540,7 +3167,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -3770,8 +3397,6 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
if (Token::IsEqualityOp(op())) {
__ Ret(USE_DELAY_SLOT);
__ subu(v0, a0, a1);
- } else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ li(a2, Operand(Smi::FromInt(GREATER)));
@@ -3867,15 +3492,13 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ sll(at, index, 1);
- __ Addu(index, index, at);
+ __ Lsa(index, index, index, 1);
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
STATIC_ASSERT(kSmiTagSize == 1);
Register tmp = properties;
- __ sll(scratch0, index, 1);
- __ Addu(tmp, properties, scratch0);
+ __ Lsa(tmp, properties, index, 1);
__ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
DCHECK(!tmp.is(entity_name));
@@ -3965,12 +3588,10 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
- __ sll(at, scratch2, 1);
- __ Addu(scratch2, scratch2, at);
+ __ Lsa(scratch2, scratch2, scratch2, 1);
// Check if the key is identical to the name.
- __ sll(at, scratch2, 2);
- __ Addu(scratch2, elements, at);
+ __ Lsa(scratch2, elements, scratch2, 2);
__ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
__ Branch(done, eq, name, Operand(at));
}
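Note: both dictionary lookups above scale an index by NameDictionary::kEntrySize == 3 using Lsa with the register as its own addend. A one-line sketch of why that works (the macro's operand order is assumed to be rd = rt + (rs << sa)):

    // x + (x << 1) == x + 2*x == 3*x, so Lsa(index, index, index, 1)
    // multiplies the index by the three-word entry size in one step.
    unsigned TimesEntrySize(unsigned x) { return x + (x << 1); }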
@@ -4051,14 +3672,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// index *= 3.
- __ mov(at, index);
- __ sll(index, index, 1);
- __ Addu(index, index, at);
-
+ __ Lsa(index, index, index, 1);
STATIC_ASSERT(kSmiTagSize == 1);
- __ sll(index, index, 2);
- __ Addu(index, index, dictionary);
+ __ Lsa(index, dictionary, index, 2);
__ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
// Having undefined at this place means the name is not contained.
@@ -4158,11 +3775,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.scratch0(),
&dont_need_remembered_set);
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- ne,
- &dont_need_remembered_set);
+ __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+ &dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
@@ -4382,8 +3996,7 @@ static void HandleArrayCases(MacroAssembler* masm, Register feedback,
// aka feedback scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
- __ sll(at, length, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(too_far, feedback, Operand(at));
+ __ Lsa(too_far, feedback, length, kPointerSizeLog2 - kSmiTagSize);
__ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ Addu(pointer_reg, feedback,
Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
@@ -4419,8 +4032,7 @@ static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
__ Branch(try_array, ne, cached_map, Operand(receiver_map));
Register handler = feedback;
- __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(handler, vector, Operand(at));
+ __ Lsa(handler, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
__ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -4437,8 +4049,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver_map = t1;
Register scratch1 = t4;
- __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(feedback, vector, Operand(at));
+ __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
@@ -4493,8 +4104,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver_map = t1;
Register scratch1 = t4;
- __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(feedback, vector, Operand(at));
+ __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
@@ -4530,8 +4140,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ Branch(&miss, ne, key, Operand(feedback));
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
- __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(feedback, vector, Operand(at));
+ __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, false, &miss);
@@ -4579,8 +4188,7 @@ void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver_map = t2;
Register scratch1 = t5;
- __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(feedback, vector, Operand(scratch1));
+ __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
@@ -4652,8 +4260,7 @@ static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
// aka feedback scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
- __ sll(scratch1, too_far, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(too_far, feedback, Operand(scratch1));
+ __ Lsa(too_far, feedback, too_far, kPointerSizeLog2 - kSmiTagSize);
__ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ Addu(pointer_reg, feedback,
Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
@@ -4702,8 +4309,7 @@ void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver_map = t2;
Register scratch1 = t5;
- __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(feedback, vector, Operand(scratch1));
+ __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
@@ -4742,8 +4348,7 @@ void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ Branch(&miss, ne, key, Operand(feedback));
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
- __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(feedback, vector, Operand(scratch1));
+ __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
__ lw(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
@@ -5050,8 +4655,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
+ __ Lsa(at, sp, a0, kPointerSizeLog2);
__ sw(a1, MemOperand(at));
__ li(at, Operand(3));
__ addu(a0, a0, at);
@@ -5144,6 +4748,592 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : target
+ // -- a3 : new target
+ // -- cp : context
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+ __ AssertReceiver(a3);
+
+ // Verify that the new target is a JSFunction.
+ Label new_object;
+ __ GetObjectType(a3, a2, a2);
+ __ Branch(&new_object, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // Load the initial map and verify that it's in fact a map.
+ __ lw(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(a2, &new_object);
+ __ GetObjectType(a2, a0, a0);
+ __ Branch(&new_object, ne, a0, Operand(MAP_TYPE));
+
+ // Fall back to runtime if the target differs from the new target's
+ // initial map constructor.
+ __ lw(a0, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
+ __ Branch(&new_object, ne, a0, Operand(a1));
+
+ // Allocate the JSObject on the heap.
+ Label allocate, done_allocate;
+ __ lbu(t0, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ Allocate(t0, v0, t1, a0, &allocate, SIZE_IN_WORDS);
+ __ bind(&done_allocate);
+
+ // Initialize the JSObject fields.
+ __ sw(a2, MemOperand(v0, JSObject::kMapOffset));
+ __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(a3, MemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a3, MemOperand(v0, JSObject::kElementsOffset));
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ Addu(a1, v0, Operand(JSObject::kHeaderSize));
+
+ // ----------- S t a t e -------------
+ // -- v0 : result (untagged)
+ // -- a1 : result fields (untagged)
+ // -- t1 : result end (untagged)
+ // -- a2 : initial map
+ // -- cp : context
+ // -- ra : return address
+ // -----------------------------------
+
+ // Perform in-object slack tracking if requested.
+ Label slack_tracking;
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ __ lw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
+ __ And(at, a3, Operand(Map::ConstructionCounter::kMask));
+ __ Branch(USE_DELAY_SLOT, &slack_tracking, ne, at, Operand(0));
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); // In delay slot.
+ {
+ // Initialize all in-object fields with undefined.
+ __ InitializeFieldsWithFiller(a1, t1, a0);
+
+ // Add the object tag to make the JSObject real.
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Ret(USE_DELAY_SLOT);
+ __ Addu(v0, v0, Operand(kHeapObjectTag)); // In delay slot.
+ }
+ __ bind(&slack_tracking);
+ {
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ __ Subu(a3, a3, Operand(1 << Map::ConstructionCounter::kShift));
+ __ sw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
+
+ // Initialize the in-object fields with undefined.
+ __ lbu(t0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ __ sll(t0, t0, kPointerSizeLog2);
+ __ subu(t0, t1, t0);
+ __ InitializeFieldsWithFiller(a1, t0, a0);
+
+ // Initialize the remaining (reserved) fields with one pointer filler map.
+ __ LoadRoot(a0, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(a1, t1, a0);
+
+ // Check if we can finalize the instance size.
+ Label finalize;
+ STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+ __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
+ __ Branch(USE_DELAY_SLOT, &finalize, eq, a3, Operand(zero_reg));
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Addu(v0, v0, Operand(kHeapObjectTag)); // In delay slot.
+ __ Ret();
+
+ // Finalize the instance size.
+ __ bind(&finalize);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(v0, a2);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(v0);
+ }
+ __ Ret();
+ }
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ sll(t0, t0, kPointerSizeLog2 + kSmiTagSize);
+ __ Push(a2, t0);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(a2);
+ }
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Subu(v0, v0, Operand(kHeapObjectTag));
+ __ lbu(t1, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ Lsa(t1, v0, t1, kPointerSizeLog2);
+ __ jmp(&done_allocate);
+
+ // Fall back to %NewObject.
+ __ bind(&new_object);
+ __ Push(a1, a3);
+ __ TailCallRuntime(Runtime::kNewObject);
+}
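
A rough C++ restatement of the slack-tracking arithmetic above: the construction counter lives in the map's bit_field3 word, every allocation subtracts one shift unit, and reaching zero triggers instance-size finalization. The bit positions below are illustrative stand-ins, not V8's actual layout.

    #include <cstdint>

    struct MapBits {
      static const uint32_t kShift = 24;              // assumed counter position
      static const uint32_t kMask = 0xFFu << kShift;  // assumed counter width
      uint32_t bit_field3;
    };

    // Mirrors Subu(a3, a3, 1 << kShift) plus the &finalize zero test.
    bool DecrementConstructionCounter(MapBits* map) {
      map->bit_field3 -= 1u << MapBits::kShift;
      return (map->bit_field3 & MapBits::kMask) == 0;  // finalize when drained
    }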
+
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make a2 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ Branch(USE_DELAY_SLOT, &loop_entry);
+ __ mov(a2, fp); // In delay slot.
+ __ bind(&loop);
+ __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ Branch(&loop, ne, a1, Operand(a3));
+ }
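
The loop above follows caller-fp links until the frame's marker slot holds the function itself. A plain C++ sketch, with an invented frame layout standing in for StandardFrameConstants:

    struct Frame {
      Frame* caller_fp;  // at kCallerFPOffset
      void* marker;      // at kMarkerOffset
    };

    // a2 starts at fp and walks up until marker == function (register a1).
    Frame* SkipToJavaScriptFrame(Frame* fp, void* function) {
      Frame* frame = fp;
      while (frame->marker != function) frame = frame->caller_fp;
      return frame;
    }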
+
+ // Check if we have rest parameters (only possible if we have an
+ // arguments adaptor frame below the function frame).
+ Label no_rest_parameters;
+ __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&no_rest_parameters, ne, a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Check if the arguments adaptor frame contains more arguments than
+ // specified by the function's internal formal parameter count.
+ Label rest_parameters;
+ __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a1,
+ FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Subu(a0, a0, Operand(a1));
+ __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
+
+ // Return an empty rest parameter array.
+ __ bind(&no_rest_parameters);
+ {
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- ra : return address
+ // -----------------------------------
+
+ // Allocate an empty rest parameter array.
+ Label allocate, done_allocate;
+ __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Setup the rest parameter array in v0.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, a1);
+ __ sw(a1, FieldMemOperand(v0, JSArray::kMapOffset));
+ __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
+ __ sw(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
+ __ Move(a1, Smi::FromInt(0));
+ __ Ret(USE_DELAY_SLOT);
+ __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset)); // In delay slot
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(Smi::FromInt(JSArray::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ }
+ __ jmp(&done_allocate);
+ }
+
+ __ bind(&rest_parameters);
+ {
+ // Compute the pointer to the first rest parameter (skipping the receiver).
+ __ Lsa(a2, a2, a0, kPointerSizeLog2 - 1);
+ __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- a0 : number of rest parameters (tagged)
+ // -- a2 : pointer to first rest parameters
+ // -- ra : return address
+ // -----------------------------------
+
+ // Allocate space for the rest parameter array plus the backing store.
+ Label allocate, done_allocate;
+ __ li(a1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+ __ Lsa(a1, a1, a0, kPointerSizeLog2 - 1);
+ __ Allocate(a1, v0, a3, t0, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Setup the elements array in v0.
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ sw(at, FieldMemOperand(v0, FixedArray::kMapOffset));
+ __ sw(a0, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ Addu(a3, v0, Operand(FixedArray::kHeaderSize));
+ {
+ Label loop, done_loop;
+ __ sll(at, a0, kPointerSizeLog2 - 1);
+ __ Addu(a1, a3, at);
+ __ bind(&loop);
+ __ Branch(&done_loop, eq, a1, Operand(a3));
+ __ lw(at, MemOperand(a2, 0 * kPointerSize));
+ __ sw(at, FieldMemOperand(a3, 0 * kPointerSize));
+ __ Subu(a2, a2, Operand(1 * kPointerSize));
+ __ Addu(a3, a3, Operand(1 * kPointerSize));
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Setup the rest parameter array in a3.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, at);
+ __ sw(at, FieldMemOperand(a3, JSArray::kMapOffset));
+ __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(at, FieldMemOperand(a3, JSArray::kPropertiesOffset));
+ __ sw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
+ __ sw(a0, FieldMemOperand(a3, JSArray::kLengthOffset));
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a3); // In delay slot
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a1);
+ __ Push(a0, a2, a1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(a0, a2);
+ }
+ __ jmp(&done_allocate);
+ }
+}
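
For reference, the rest-parameter count derived above is just the surplus of actual arguments over declared formals, clamped at zero; the Subu/branch pair reduces to:

    // Matches Subu(a0, a0, a1) + Branch(&rest_parameters, gt, a0, zero_reg).
    int RestParameterCount(int actual_args, int formal_params) {
      int rest = actual_args - formal_params;
      return rest > 0 ? rest : 0;  // <= 0 takes the no_rest_parameters path
    }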
+
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2,
+ FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Lsa(a3, fp, a2, kPointerSizeLog2 - 1);
+ __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // a1 : function
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
+ // Registers used over whole function:
+ // t1 : arguments count (tagged)
+ // t2 : mapped parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, a0,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // No adaptor, parameter count = argument count.
+ __ mov(t1, a2);
+ __ Branch(USE_DELAY_SLOT, &try_allocate);
+ __ mov(t2, a2); // In delay slot.
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ lw(t1, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Lsa(t0, t0, t1, 1);
+ __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // t1 = argument count (tagged)
+ // t2 = parameter count (tagged)
+ // Compute the mapped parameter count = min(t2, t1) in t2.
+ __ mov(t2, a2);
+ __ Branch(&try_allocate, le, t2, Operand(t1));
+ __ mov(t2, t1);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ Label param_map_size;
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ __ Branch(USE_DELAY_SLOT, &param_map_size, eq, t2, Operand(zero_reg));
+ __ mov(t5, zero_reg); // In delay slot: param map size = 0 when t2 == 0.
+ __ sll(t5, t2, 1);
+ __ addiu(t5, t5, kParameterMapHeaderSize);
+ __ bind(&param_map_size);
+
+ // 2. Backing store.
+ __ Lsa(t5, t5, t1, 1);
+ __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ Addu(t5, t5, Operand(JSSloppyArgumentsObject::kSize));
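
Summing the three parts in plain C++ (the header sizes are the usual 32-bit V8 values, assumed here rather than taken from this diff):

    const int kPointerSize = 4;
    const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
    const int kParameterMapHeaderSize = kFixedArrayHeaderSize + 2 * kPointerSize;
    const int kSloppyArgumentsObjectSize = 5 * kPointerSize;  // assumed

    int SloppyArgumentsAllocationSize(int mapped_count, int arg_count) {
      int size = 0;
      if (mapped_count > 0)  // 1. parameter map, skipped when nothing is mapped
        size += mapped_count * kPointerSize + kParameterMapHeaderSize;
      size += arg_count * kPointerSize + kFixedArrayHeaderSize;  // 2. store
      return size + kSloppyArgumentsObjectSize;                  // 3. object
    }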
+
+ // Do the allocation of all three objects in one go.
+ __ Allocate(t5, v0, t5, t0, &runtime, TAG_OBJECT);
+
+ // v0 = address of new object(s) (tagged)
+ // a2 = argument count (smi-tagged)
+ // Get the arguments boilerplate from the current native context into t0.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+
+ __ lw(t0, NativeContextMemOperand());
+ Label skip2_ne, skip2_eq;
+ __ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
+ __ lw(t0, MemOperand(t0, kNormalOffset));
+ __ bind(&skip2_ne);
+
+ __ Branch(&skip2_eq, eq, t2, Operand(zero_reg));
+ __ lw(t0, MemOperand(t0, kAliasedOffset));
+ __ bind(&skip2_eq);
+
+ // v0 = address of new object (tagged)
+ // a2 = argument count (smi-tagged)
+ // t0 = address of arguments map (tagged)
+ // t2 = mapped parameter count (tagged)
+ __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
+ __ LoadRoot(t5, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(t5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(t5, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // Set up the callee in-object property.
+ __ AssertNotSmi(a1);
+ __ sw(a1, FieldMemOperand(v0, JSSloppyArgumentsObject::kCalleeOffset));
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(t1);
+ __ sw(t1, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, t0 will point there, otherwise
+ // it will point to the backing store.
+ __ Addu(t0, v0, Operand(JSSloppyArgumentsObject::kSize));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // v0 = address of new object (tagged)
+ // a2 = argument count (tagged)
+ // t0 = address of parameter map or backing store (tagged)
+ // t2 = mapped parameter count (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ Label skip3;
+ __ Branch(&skip3, ne, t2, Operand(Smi::FromInt(0)));
+ // Move backing store address to a1, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mov(a1, t0);
+ __ bind(&skip3);
+
+ __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::FromInt(0)));
+
+ __ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
+ __ Addu(t1, t2, Operand(Smi::FromInt(2)));
+ __ sw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ Lsa(t1, t0, t2, 1);
+ __ Addu(t1, t1, Operand(kParameterMapHeaderSize));
+ __ sw(t1, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ mov(t1, t2);
+ __ Addu(t5, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ Subu(t5, t5, Operand(t2));
+ __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
+ __ Lsa(a1, t0, t1, 1);
+ __ Addu(a1, a1, Operand(kParameterMapHeaderSize));
+
+ // a1 = address of backing store (tagged)
+ // t0 = address of parameter map (tagged)
+ // a0 = temporary scratch (a.o., for address calculation)
+ // t1 = loop variable (tagged)
+ // t3 = the hole value
+ __ jmp(&parameters_test);
+
+ __ bind(&parameters_loop);
+ __ Subu(t1, t1, Operand(Smi::FromInt(1)));
+ __ sll(a0, t1, 1);
+ __ Addu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ Addu(t6, t0, a0);
+ __ sw(t5, MemOperand(t6));
+ __ Subu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ Addu(t6, a1, a0);
+ __ sw(t3, MemOperand(t6));
+ __ Addu(t5, t5, Operand(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ Branch(&parameters_loop, ne, t1, Operand(Smi::FromInt(0)));
+
+ // t1 = argument count (tagged).
+ __ lw(t1, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
+
+ __ bind(&skip_parameter_map);
+ // v0 = address of new object (tagged)
+ // a1 = address of backing store (tagged)
+ // t1 = argument count (tagged)
+ // t2 = mapped parameter count (tagged)
+ // t5 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
+ __ sw(t5, FieldMemOperand(a1, FixedArray::kMapOffset));
+ __ sw(t1, FieldMemOperand(a1, FixedArray::kLengthOffset));
+
+ Label arguments_loop, arguments_test;
+ __ sll(t6, t2, 1);
+ __ Subu(a3, a3, Operand(t6));
+ __ jmp(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ Subu(a3, a3, Operand(kPointerSize));
+ __ lw(t0, MemOperand(a3, 0));
+ __ Lsa(t5, a1, t2, 1);
+ __ sw(t0, FieldMemOperand(t5, FixedArray::kHeaderSize));
+ __ Addu(t2, t2, Operand(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ Branch(&arguments_loop, lt, t2, Operand(t1));
+
+ // Return.
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ // t1 = argument count (tagged)
+ __ bind(&runtime);
+ __ Push(a1, a3, t1);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
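
The right-to-left fill loop above, restated without smi tagging (the value of MIN_CONTEXT_SLOTS is a stand-in):

    #include <vector>

    const int kMinContextSlots = 4;  // assumed Context::MIN_CONTEXT_SLOTS

    // map[j] receives the context slot of parameter j; the backing store
    // gets the-hole there, so element loads fall through to the context.
    void FillParameterMap(std::vector<int>* map, std::vector<bool>* is_hole,
                          int param_count, int mapped_count) {
      int ctx_index = kMinContextSlots + param_count - mapped_count;
      for (int j = mapped_count - 1; j >= 0; --j) {
        (*map)[j] = ctx_index++;
        (*is_hole)[j] = true;
      }
    }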
+
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make a2 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ Branch(USE_DELAY_SLOT, &loop_entry);
+ __ mov(a2, fp); // In delay slot.
+ __ bind(&loop);
+ __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ Branch(&loop, ne, a1, Operand(a3));
+ }
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ lw(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a0, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ Branch(&arguments_adaptor, eq, a0,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ {
+ __ lw(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a0,
+ FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Lsa(a2, a2, a0, kPointerSizeLog2 - 1);
+ __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+ }
+ __ Branch(&arguments_done);
+ __ bind(&arguments_adaptor);
+ {
+ __ lw(a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Lsa(a2, a3, a0, kPointerSizeLog2 - 1);
+ __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+ }
+ __ bind(&arguments_done);
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- a0 : number of rest parameters (tagged)
+ // -- a2 : pointer to first rest parameters
+ // -- ra : return address
+ // -----------------------------------
+
+ // Allocate space for the strict arguments object plus the backing store.
+ Label allocate, done_allocate;
+ __ li(a1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ Lsa(a1, a1, a0, kPointerSizeLog2 - 1);
+ __ Allocate(a1, v0, a3, t0, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Setup the elements array in v0.
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ sw(at, FieldMemOperand(v0, FixedArray::kMapOffset));
+ __ sw(a0, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ Addu(a3, v0, Operand(FixedArray::kHeaderSize));
+ {
+ Label loop, done_loop;
+ __ sll(at, a0, kPointerSizeLog2 - 1);
+ __ Addu(a1, a3, at);
+ __ bind(&loop);
+ __ Branch(&done_loop, eq, a1, Operand(a3));
+ __ lw(at, MemOperand(a2, 0 * kPointerSize));
+ __ sw(at, FieldMemOperand(a3, 0 * kPointerSize));
+ __ Subu(a2, a2, Operand(1 * kPointerSize));
+ __ Addu(a3, a3, Operand(1 * kPointerSize));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Setup the strict arguments object in a3.
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, at);
+ __ sw(at, FieldMemOperand(a3, JSStrictArgumentsObject::kMapOffset));
+ __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(at, FieldMemOperand(a3, JSStrictArgumentsObject::kPropertiesOffset));
+ __ sw(v0, FieldMemOperand(a3, JSStrictArgumentsObject::kElementsOffset));
+ __ sw(a0, FieldMemOperand(a3, JSStrictArgumentsObject::kLengthOffset));
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a3); // In delay slot
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a1);
+ __ Push(a0, a2, a1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(a0, a2);
+ }
+ __ jmp(&done_allocate);
+}
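
The element copy loop reads arguments downward from the stack while filling the FixedArray upward; the same traversal in C++:

    #include <cstdint>

    // src points at the first (highest-addressed) argument, dst at the
    // first element slot; count is the untagged length.
    void CopyArgumentsReversed(const intptr_t* src, intptr_t* dst, int count) {
      for (int i = 0; i < count; i++) {
        *dst++ = *src--;  // lw from a2, sw to a3, then Subu/Addu
      }
    }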
+
+
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context_reg = cp;
Register slot_reg = a2;
@@ -5157,8 +5347,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
}
// Load the PropertyCell value at the specified slot.
- __ sll(at, slot_reg, kPointerSizeLog2);
- __ Addu(at, at, Operand(context_reg));
+ __ Lsa(at, context_reg, slot_reg, kPointerSizeLog2);
__ lw(result_reg, ContextMemOperand(at, 0));
__ lw(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
@@ -5196,8 +5385,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
}
// Load the PropertyCell at the specified slot.
- __ sll(at, slot_reg, kPointerSizeLog2);
- __ Addu(at, at, Operand(context_reg));
+ __ Lsa(at, context_reg, slot_reg, kPointerSizeLog2);
__ lw(cell_reg, ContextMemOperand(at, 0));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
@@ -5424,11 +5612,10 @@ static void CallApiFunctionAndReturn(
__ jmp(&leave_exit_frame);
}
-
static void CallApiFunctionStubHelper(MacroAssembler* masm,
const ParameterCount& argc,
bool return_first_arg,
- bool call_data_undefined) {
+ bool call_data_undefined, bool is_lazy) {
// ----------- S t a t e -------------
// -- a0 : callee
// -- t0 : call_data
@@ -5464,8 +5651,10 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// Save context, callee and call data.
__ Push(context, callee, call_data);
- // Load context from callee.
- __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ if (!is_lazy) {
+ // Load context from callee.
+ __ lw(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ }
Register scratch = call_data;
if (!call_data_undefined) {
@@ -5546,7 +5735,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
bool call_data_undefined = this->call_data_undefined();
CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
- call_data_undefined);
+ call_data_undefined, false);
}
@@ -5554,41 +5743,48 @@ void CallApiAccessorStub::Generate(MacroAssembler* masm) {
bool is_store = this->is_store();
int argc = this->argc();
bool call_data_undefined = this->call_data_undefined();
+ bool is_lazy = this->is_lazy();
CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined);
+ call_data_undefined, is_lazy);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- sp[0] : name
+ // -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
// -- ...
- // -- a2 : api_function_address
+ // -- a2 : api_function_address
// -----------------------------------
Register api_function_address = ApiGetterDescriptor::function_address();
DCHECK(api_function_address.is(a2));
- __ mov(a0, sp); // a0 = Handle<Name>
- __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
+ __ mov(a0, sp); // a0 = Handle<Name>
+ __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
const int kApiStackSpace = 1;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // Create PropertyAccessorInfo instance on the stack above the exit frame with
- // a1 (internal::Object** args_) as the data.
+ // Create v8::PropertyCallbackInfo object on the stack and initialize
+ // its args_ field.
__ sw(a1, MemOperand(sp, 1 * kPointerSize));
- __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
-
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kInvalidStackOffset,
- MemOperand(fp, 6 * kPointerSize), NULL);
+ return_value_operand, NULL);
}
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 2a144d990c..878ba3489a 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -767,8 +767,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ Addu(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ Addu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
- __ sll(at, length, 2);
- __ Addu(array_end, scratch3, at);
+ __ Lsa(array_end, scratch3, length, 2);
// Repurpose registers no longer in use.
Register hole_lower = elements;
@@ -899,8 +898,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
FixedDoubleArray::kHeaderSize - kHeapObjectTag
+ Register::kExponentOffset));
__ Addu(dst_elements, array, Operand(FixedArray::kHeaderSize));
- __ sll(dst_end, dst_end, 1);
- __ Addu(dst_end, dst_elements, dst_end);
+ __ Lsa(dst_end, dst_elements, dst_end, 1);
// Allocating heap numbers in the loop below can fail and cause a jump to
// gc_required. We can't leave a partly initialized FixedArray behind,
@@ -1082,8 +1080,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ And(at, result, Operand(kStringEncodingMask));
__ Branch(&one_byte, ne, at, Operand(zero_reg));
// Two-byte string.
- __ sll(at, index, 1);
- __ Addu(at, string, at);
+ __ Lsa(at, string, index, 1);
__ lhu(result, MemOperand(at));
__ jmp(&done);
__ bind(&one_byte);
@@ -1156,8 +1153,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
// Must not call ExpConstant() after overwriting temp3!
__ li(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ sll(at, temp2, 3);
- __ Addu(temp3, temp3, Operand(at));
+ __ Lsa(temp3, temp3, temp2, 3);
__ lw(temp2, MemOperand(temp3, Register::kMantissaOffset));
__ lw(temp3, MemOperand(temp3, Register::kExponentOffset));
// The first word loaded is the lower number register.
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index 6ca430a157..3afb88146d 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -142,7 +142,7 @@ bool Instruction::IsForbiddenAfterBranchInstr(Instr instr) {
case BC:
case BALC:
case POP10: // beqzalc, bovc, beqc
- case POP30: // bnezalc, bvnc, bnec
+ case POP30: // bnezalc, bnvc, bnec
case POP66: // beqzc, jic
case POP76: // bnezc, jialc
return true;
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 8327501b6f..49142515c7 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -64,9 +64,13 @@ enum FpuMode {
#elif defined(FPU_MODE_FP64)
static const FpuMode kFpuMode = kFP64;
#elif defined(FPU_MODE_FPXX)
- static const FpuMode kFpuMode = kFPXX;
+#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS32R6)
+static const FpuMode kFpuMode = kFPXX;
#else
- static const FpuMode kFpuMode = kFP32;
+#error "FPXX is supported only on Mips32R2 and Mips32R6"
+#endif
+#else
+static const FpuMode kFpuMode = kFP32;
#endif
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
@@ -92,13 +96,9 @@ const uint32_t kHoleNanLower32Offset = 4;
#error Unknown endianness
#endif
-#ifndef FPU_MODE_FPXX
-#define IsFp64Mode() \
- (kFpuMode == kFP64)
-#else
-#define IsFp64Mode() \
- (CpuFeatures::IsSupported(FP64FPU))
-#endif
+#define IsFp64Mode() (kFpuMode == kFP64)
+#define IsFp32Mode() (kFpuMode == kFP32)
+#define IsFpxxMode() (kFpuMode == kFPXX)
#ifndef _MIPS_ARCH_MIPS32RX
#define IsMipsArchVariant(check) \
@@ -390,7 +390,7 @@ enum Opcode : uint32_t {
POP10 = ADDI, // beqzalc, bovc, beqc
POP26 = BLEZL, // bgezc, blezc, bgec/blec
POP27 = BGTZL, // bgtzc, bltzc, bltc/bgtc
- POP30 = DADDI, // bnezalc, bvnc, bnec
+ POP30 = DADDI, // bnezalc, bnvc, bnec
};
enum SecondaryField : uint32_t {
@@ -794,6 +794,7 @@ enum CheckForInexactConversion {
kDontCheckForInexactConversion
};
+enum class MaxMinKind : int { kMin = 0, kMax = 1 };
// -----------------------------------------------------------------------------
// Hints.
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index a9e30de44d..0caaa4c9d4 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -80,27 +80,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers fp and sp are set to the correct values though.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
- }
-}
-
-
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
@@ -119,8 +98,7 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
// There is no dynamic alignment padding on MIPS in the input frame.
return false;
}
@@ -268,8 +246,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// a1 = one past the last FrameDescription**.
__ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
__ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_.
- __ sll(a1, a1, kPointerSizeLog2); // Count to offset.
- __ addu(a1, t0, a1); // a1 = one past the last FrameDescription**.
+ __ Lsa(a1, t0, a1, kPointerSizeLog2);
__ BranchShort(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 936514aab2..7e0a480e13 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -1500,6 +1500,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (rs_reg >= rt_reg) {
Format(instr, "bovc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
+ DCHECK(rt_reg > 0);
if (rs_reg == 0) {
Format(instr, "beqzalc 'rt, 'imm16s -> 'imm16p4s2");
} else {
@@ -1516,6 +1517,7 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
if (rs_reg >= rt_reg) {
Format(instr, "bnvc 'rs, 'rt, 'imm16s -> 'imm16p4s2");
} else {
+ DCHECK(rt_reg > 0);
if (rs_reg == 0) {
Format(instr, "bnezalc 'rt, 'imm16s -> 'imm16p4s2");
} else {
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 3f4fb38028..fdb43f325c 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -54,20 +54,6 @@ const Register StringCompareDescriptor::LeftRegister() { return a1; }
const Register StringCompareDescriptor::RightRegister() { return a0; }
-const Register ArgumentsAccessReadDescriptor::index() { return a1; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return a0; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return a1; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return a2; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return a3; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return a2; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return a3; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return a1; }
-
-
const Register ApiGetterDescriptor::function_address() { return a2; }
@@ -96,6 +82,32 @@ void FastNewContextDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1, a3};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -113,6 +125,10 @@ const Register ToStringDescriptor::ReceiverRegister() { return a0; }
// static
+const Register ToNameDescriptor::ReceiverRegister() { return a0; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return a0; }
@@ -165,13 +181,6 @@ void CreateWeakCellDescriptor::InitializePlatformSpecific(
}
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1};
@@ -407,6 +416,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+ kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+ kInterpreterDispatchTableRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -418,7 +435,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -430,7 +446,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 3c866ac453..e3544c5eec 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -159,9 +159,9 @@ void MacroAssembler::InNewSpace(Register object,
Condition cc,
Label* branch) {
DCHECK(cc == eq || cc == ne);
- And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
- Branch(branch, cc, scratch,
- Operand(ExternalReference::new_space_start(isolate())));
+ const int mask =
+ 1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
+ CheckPageFlag(object, scratch, mask, cc, branch);
}
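
The rewritten InNewSpace checks flag bits in the page header rather than comparing against the new-space start address. Roughly, in C++ (page size and bit indices are assumptions, and dereferencing the page base only makes sense inside a real heap):

    #include <cstdint>

    const uintptr_t kPageSize = uintptr_t{1} << 19;  // assumed 512 KB pages
    const int kInFromSpace = 3, kInToSpace = 4;      // assumed flag bits

    bool InNewSpace(uintptr_t object_address) {
      uintptr_t page_base = object_address & ~(kPageSize - 1);
      uint32_t flags = *reinterpret_cast<const uint32_t*>(page_base);
      return (flags & ((1u << kInFromSpace) | (1u << kInToSpace))) != 0;
    }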
@@ -369,6 +369,67 @@ void MacroAssembler::RecordWrite(
}
}
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+ Register code_entry,
+ Register scratch) {
+ const int offset = JSFunction::kCodeEntryOffset;
+
+ // Since a code entry (value) is always in old space, we don't need to update
+ // remembered set. If incremental marking is off, there is nothing for us to
+ // do.
+ if (!FLAG_incremental_marking) return;
+
+ DCHECK(js_function.is(a1));
+ DCHECK(code_entry.is(t0));
+ DCHECK(scratch.is(t1));
+ AssertNotSmi(js_function);
+
+ if (emit_debug_code()) {
+ Addu(scratch, js_function, Operand(offset - kHeapObjectTag));
+ lw(at, MemOperand(scratch));
+ Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
+ Operand(code_entry));
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ CheckPageFlag(code_entry, scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+ CheckPageFlag(js_function, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+ const Register dst = scratch;
+ Addu(dst, js_function, Operand(offset - kHeapObjectTag));
+
+ // Save caller-saved registers. js_function and code_entry are in the
+ // caller-saved register list.
+ DCHECK(kJSCallerSaved & js_function.bit());
+ DCHECK(kJSCallerSaved & code_entry.bit());
+ MultiPush(kJSCallerSaved | ra.bit());
+
+ int argument_count = 3;
+
+ PrepareCallCFunction(argument_count, 0, code_entry);
+
+ mov(a0, js_function);
+ mov(a1, dst);
+ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(this);
+ CallCFunction(
+ ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate()),
+ argument_count);
+ }
+
+ // Restore caller-saved registers.
+ MultiPop(kJSCallerSaved | ra.bit());
+
+ bind(&done);
+}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address,
@@ -499,16 +560,14 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
//
// hash = ~hash + (hash << 15);
nor(scratch, reg0, zero_reg);
- sll(at, reg0, 15);
- addu(reg0, scratch, at);
+ Lsa(reg0, scratch, reg0, 15);
// hash = hash ^ (hash >> 12);
srl(at, reg0, 12);
xor_(reg0, reg0, at);
// hash = hash + (hash << 2);
- sll(at, reg0, 2);
- addu(reg0, reg0, at);
+ Lsa(reg0, reg0, reg0, 2);
// hash = hash ^ (hash >> 4);
srl(at, reg0, 4);
@@ -516,8 +575,7 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
// hash = hash * 2057;
sll(scratch, reg0, 11);
- sll(at, reg0, 3);
- addu(reg0, reg0, at);
+ Lsa(reg0, reg0, reg0, 3);
addu(reg0, reg0, scratch);
// hash = hash ^ (hash >> 16);
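
Collected from the step comments, the full hash GetNumberHash computes is the classic integer mix below; this C++ version is equivalent to the Lsa-based instruction sequence:

    #include <cstdint>

    uint32_t GetNumberHash(uint32_t hash) {
      hash = ~hash + (hash << 15);  // nor + Lsa(reg0, scratch, reg0, 15)
      hash ^= hash >> 12;
      hash += hash << 2;            // Lsa(reg0, reg0, reg0, 2)
      hash ^= hash >> 4;
      hash *= 2057;                 // (hash << 11) + (hash << 3) + hash
      hash ^= hash >> 16;
      return hash;
    }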
@@ -577,12 +635,10 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Scale the index by multiplying by the element size.
DCHECK(SeededNumberDictionary::kEntrySize == 3);
- sll(at, reg2, 1); // 2x.
- addu(reg2, reg2, at); // reg2 = reg2 * 3.
+ Lsa(reg2, reg2, reg2, 1); // reg2 = reg2 * 3.
// Check if the key is identical to the name.
- sll(at, reg2, kPointerSizeLog2);
- addu(reg2, elements, at);
+ Lsa(reg2, elements, reg2, kPointerSizeLog2);
lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
if (i != kNumberDictionaryProbes - 1) {
@@ -1322,6 +1378,11 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
mtc1(t8, fd);
}
+void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
+ FPURegister scratch) {
+ Trunc_uw_s(fs, t8, scratch);
+ mtc1(t8, fd);
+}
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
@@ -1399,21 +1460,54 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
bind(&done);
}
+void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
+ FPURegister scratch) {
+ DCHECK(!fd.is(scratch));
+ DCHECK(!rs.is(at));
+
+ // Load 2^31 into scratch as its float representation.
+ li(at, 0x4F000000);
+ mtc1(at, scratch);
+ // Test if scratch > fd.
+ // If fd < 2^31 we can convert it normally.
+ Label simple_convert;
+ BranchF32(&simple_convert, NULL, lt, fd, scratch);
+
+ // First we subtract 2^31 from fd, then trunc it to rs
+ // and add 2^31 to rs.
+ sub_s(scratch, fd, scratch);
+ trunc_w_s(scratch, scratch);
+ mfc1(rs, scratch);
+ Or(rs, rs, 1 << 31);
+
+ Label done;
+ Branch(&done);
+ // Simple conversion.
+ bind(&simple_convert);
+ trunc_w_s(scratch, fd);
+ mfc1(rs, scratch);
+
+ bind(&done);
+}
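
The 2^31 bias trick in Trunc_uw_s, restated in C++ (NaN and overflow saturation are out of scope, as in the assembly):

    #include <cstdint>

    uint32_t TruncUwS(float value) {
      const float two_31 = 2147483648.0f;  // li(at, 0x4F000000)
      if (value < two_31) {                // &simple_convert path
        return static_cast<uint32_t>(static_cast<int32_t>(value));
      }
      // Shift below the signed range, truncate, then restore the top bit.
      int32_t low = static_cast<int32_t>(value - two_31);  // sub_s + trunc_w_s
      return static_cast<uint32_t>(low) | 0x80000000u;     // Or(rs, rs, 1 << 31)
    }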
void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
- if (IsFp64Mode()) {
- mthc1(rt, fs);
- } else {
+ if (IsFp32Mode()) {
mtc1(rt, fs.high());
+ } else {
+ DCHECK(IsFp64Mode() || IsFpxxMode());
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ mthc1(rt, fs);
}
}
void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
- if (IsFp64Mode()) {
- mfhc1(rt, fs);
- } else {
+ if (IsFp32Mode()) {
mfc1(rt, fs.high());
+ } else {
+ DCHECK(IsFp64Mode() || IsFpxxMode());
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ mfhc1(rt, fs);
}
}
@@ -1619,13 +1713,15 @@ void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
- if (IsFp64Mode()) {
+ if (IsFp32Mode()) {
+ mtc1(src_low, dst);
+ } else {
+ DCHECK(IsFp64Mode() || IsFpxxMode());
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
DCHECK(!src_low.is(at));
mfhc1(at, dst);
mtc1(src_low, dst);
mthc1(at, dst);
- } else {
- mtc1(src_low, dst);
}
}
@@ -3271,7 +3367,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
- DCHECK(!AreAliased(result, scratch1, scratch2, t9));
+ DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -3357,8 +3453,8 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
// is not specified. Other registers must not overlap.
- DCHECK(!AreAliased(object_size, result, scratch, t9));
- DCHECK(!AreAliased(result_end, result, scratch, t9));
+ DCHECK(!AreAliased(object_size, result, scratch, t9, at));
+ DCHECK(!AreAliased(result_end, result, scratch, t9, at));
DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
// Check relative positions of allocation top and limit addresses.
@@ -3412,8 +3508,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
- sll(result_end, object_size, kPointerSizeLog2);
- Addu(result_end, result, result_end);
+ Lsa(result_end, result, object_size, kPointerSizeLog2);
} else {
Addu(result_end, result, Operand(object_size));
}
@@ -3775,8 +3870,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
bind(&have_double_value);
- sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- Addu(scratch1, scratch1, elements_reg);
+ Lsa(scratch1, elements_reg, key_reg, kDoubleSizeLog2 - kSmiTagSize);
sw(mantissa_reg,
FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset
+ kHoleNanLower32Offset));
@@ -3802,8 +3896,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Addu(scratch1, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
elements_offset));
- sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- Addu(scratch1, scratch1, scratch2);
+ Lsa(scratch1, scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
// scratch1 is now effective address of the double element
Register untagged_value = scratch2;
@@ -4059,7 +4152,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -4579,18 +4672,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
}
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- // Fake a parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- LoadNativeContextSlot(native_context_index, a1);
- InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -4687,9 +4768,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
@@ -4945,8 +5026,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count_is_length) {
addu(sp, sp, argument_count);
} else {
- sll(t8, argument_count, kPointerSizeLog2);
- addu(sp, sp, t8);
+ Lsa(sp, sp, argument_count, kPointerSizeLog2, t8);
}
}
@@ -5160,6 +5240,17 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
+void MacroAssembler::AssertReceiver(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -5473,8 +5564,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
- sll(t8, t8, kPointerSizeLog2);
- Addu(bitmap_reg, bitmap_reg, t8);
+ Lsa(bitmap_reg, bitmap_reg, t8, kPointerSizeLog2, t8);
li(t8, Operand(1));
sllv(mask_reg, t8, mask_reg);
}
@@ -5533,7 +5623,8 @@ void MacroAssembler::LoadAccessor(Register dst, Register holder,
}
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+ Register null_value = t1;
Register empty_fixed_array_value = t2;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Label next, start;
@@ -5547,6 +5638,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Branch(
call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
+ LoadRoot(null_value, Heap::kNullValueRootIndex);
jmp(&start);
bind(&next);
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 4f6a3c868b..05a8fec644 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -15,6 +15,7 @@ namespace internal {
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_v0};
const Register kReturnRegister1 = {Register::kCode_v1};
+const Register kReturnRegister2 = {Register::kCode_a0};
const Register kJSFunctionRegister = {Register::kCode_a1};
const Register kContextRegister = {Register::kCpRegister};
const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
@@ -207,6 +208,11 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
BranchDelaySlot bdslot = PROTECT);
+ // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
+ // functor/function with a 'Label* func(size_t index)' declaration.
+ template <typename Func>
+ void GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction);
#undef COND_ARGS
// Emit code to discard a non-negative number of pointer-sized elements
@@ -357,7 +363,7 @@ class MacroAssembler: public Assembler {
void JumpIfNotInNewSpace(Register object,
Register scratch,
Label* branch) {
- InNewSpace(object, scratch, ne, branch);
+ InNewSpace(object, scratch, eq, branch);
}
// Check if object is in new space. Jumps if the object is in new space.
@@ -365,7 +371,7 @@ class MacroAssembler: public Assembler {
void JumpIfInNewSpace(Register object,
Register scratch,
Label* branch) {
- InNewSpace(object, scratch, eq, branch);
+ InNewSpace(object, scratch, ne, branch);
}
// Check if an object has a given incremental marking color.
@@ -427,6 +433,11 @@ class MacroAssembler: public Assembler {
pointers_to_here_check_for_value);
}
+ // Notify the garbage collector that we wrote a code entry into a
+ // JSFunction. Only scratch is clobbered by the operation.
+ void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+ Register scratch);
+
void RecordWriteForMap(
Register object,
Register map,
@@ -771,6 +782,10 @@ class MacroAssembler: public Assembler {
// Convert unsigned word to double.
void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
+ // Convert single to unsigned word.
+ void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
+ void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);
+
// Convert double to unsigned word.
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
@@ -1054,6 +1069,11 @@ class MacroAssembler: public Assembler {
Register map,
Register type_reg);
+ void GetInstanceType(Register object_map, Register object_instance_type) {
+ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+ }
+
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map,
@@ -1327,10 +1347,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd = PROTECT);
- // Invoke specified builtin JavaScript function.
- void InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
struct Unresolved {
int pc;
uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
@@ -1486,6 +1502,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+ void AssertReceiver(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1598,7 +1617,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Expects object in a0 and returns map with validated enum cache
// in a0. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Register null_value, Label* call_runtime);
+ void CheckEnumCache(Label* call_runtime);
// AllocationMemento support. Arrays may have an associated
// AllocationMemento object that can be checked for in order to pretransition
@@ -1684,9 +1703,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register scratch2);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise.
+ void InNewSpace(Register object, Register scratch,
+ Condition cond, // ne for new space, eq otherwise.
Label* branch);
// Helper for finding the mark bits for an address. Afterwards, the
@@ -1749,7 +1767,29 @@ class CodePatcher {
FlushICache flush_cache_; // Whether to flush the I cache after patching.
};
-
+template <typename Func>
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction) {
+ if (kArchVariant >= kMips32r6) {
+ BlockTrampolinePoolFor(case_count + 5);
+ addiupc(at, 5);
+ lsa(at, at, index, kPointerSizeLog2);
+ lw(at, MemOperand(at));
+ } else {
+ Label here;
+ BlockTrampolinePoolFor(case_count + 6);
+ bal(&here);
+ sll(at, index, kPointerSizeLog2); // Branch delay slot.
+ bind(&here);
+ addu(at, at, ra);
+ lw(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+ }
+ jr(at);
+ nop(); // Branch delay slot nop.
+ for (size_t index = 0; index < case_count; ++index) {
+ dd(GetLabelFunction(index));
+ }
+}
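
A minimal usage sketch for GenerateSwitchTable; masm, the case index already in a0, and the surrounding stub are assumed context:

    // Emits a three-way jump table; each case loads its index and exits.
    void EmitThreeWaySwitch(MacroAssembler* masm) {
      Label cases[3], done;
      masm->GenerateSwitchTable(a0, 3,
                                [&cases](size_t i) { return &cases[i]; });
      for (size_t i = 0; i < 3; i++) {
        masm->bind(&cases[i]);
        masm->li(v0, Operand(static_cast<int32_t>(i)));  // case body
        masm->Branch(&done);
      }
      masm->bind(&done);
    }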
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index aa4224a54c..0c91cb5512 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -16,6 +16,7 @@
#include "src/mips/constants-mips.h"
#include "src/mips/simulator-mips.h"
#include "src/ostreams.h"
+#include "src/runtime/runtime-utils.h"
// Only build the simulator if not compiling for real MIPS hardware.
@@ -590,7 +591,8 @@ void MipsDebugger::Debug() {
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
Heap* current_heap = sim_->isolate_->heap();
- if (((value & 1) == 0) || current_heap->Contains(obj)) {
+ if (((value & 1) == 0) ||
+ current_heap->ContainsSlow(obj->address())) {
PrintF(" (");
if ((value & 1) == 0) {
PrintF("smi %d", value / 2);
@@ -1970,6 +1972,10 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg4,
int32_t arg5);
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int32_t arg0, int32_t arg1,
+ int32_t arg2, int32_t arg3,
+ int32_t arg4);
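
The new typedef reflects the O32 convention for returning a struct of three pointers: the caller passes a hidden result pointer as the first argument, which is why arg0 is skipped when calling the target below. Schematically (field names assumed to mirror ObjectTriple in runtime-utils.h):

    #include <cstdint>

    struct Triple { void* x; void* y; void* z; };

    // What "ObjectTriple f(a, b, ...)" effectively compiles to under O32:
    void f_o32(Triple* hidden_result, int32_t a, int32_t b) {
      Triple t = {nullptr, nullptr, nullptr};
      // ... compute t from a and b ...
      *hidden_result = t;  // the simulator then sets v0 to this address
    }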
+
// These prototypes handle the four types of FP calls.
typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
@@ -2181,7 +2187,29 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+ } else if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
+ // builtin call returning ObjectTriple.
+ SimulatorRuntimeTripleCall target =
+ reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(
+ "Call to host triple returning runtime function %p "
+ "args %08x, %08x, %08x, %08x, %08x\n",
+ FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+ }
+ // arg0 is a hidden argument pointing to the return location, so don't
+ // pass it to the target function.
+ ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+ }
+ // Return is passed back in address pointed to by hidden first argument.
+ ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
+ *sim_result = result;
+ set_register(v0, arg0);
} else {
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim) {
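The BUILTIN_CALL_TRIPLE path above simulates the 32-bit struct-return convention: a function returning three pointers receives a hidden first argument that points at caller-allocated storage. A minimal C++ sketch of that convention, assuming the three-pointer shape of ObjectTriple from src/runtime/runtime-utils.h (the names below are illustrative only):

struct ObjectTripleModel { void* x; void* y; void* z; };
typedef ObjectTripleModel (*TripleTarget)(int32_t, int32_t, int32_t,
                                          int32_t, int32_t);
// arg0 is the hidden result pointer; only arg1..arg5 reach the target,
// and v0 is set back to arg0 so the caller can find the result.
void SimulateTripleCall(ObjectTripleModel* hidden_result, TripleTarget target,
                        int32_t a1, int32_t a2, int32_t a3, int32_t a4,
                        int32_t a5) {
  *hidden_result = target(a1, a2, a3, a4, a5);
}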
@@ -2320,6 +2348,91 @@ void Simulator::SignalException(Exception e) {
static_cast<int>(e));
}
+// Min/Max template functions for Double and Single arguments.
+
+template <typename T>
+static T FPAbs(T a);
+
+template <>
+double FPAbs<double>(double a) {
+ return fabs(a);
+}
+
+template <>
+float FPAbs<float>(float a) {
+ return fabsf(a);
+}
+
+template <typename T>
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
+ if (std::isnan(a) && std::isnan(b)) {
+ result = a;
+ } else if (std::isnan(a)) {
+ result = b;
+ } else if (std::isnan(b)) {
+ result = a;
+ } else if (b == a) {
+ // Handle -0.0 == 0.0 case.
+ // std::signbit() returns 0 or 1, so subtracting MaxMinKind::kMax (== 1)
+ // inverts the selection for the max case.
+ result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+ } else {
+ return false;
+ }
+ return true;
+}
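The `std::signbit(b) - static_cast<int>(kind)` selection above is terse, so here is the case analysis, assuming MaxMinKind is defined with kMin == 0 and kMax == 1 as in constants-mips.h:

// kind == kMin: signbit(b) - 0 is nonzero iff b is -0.0, so min picks -0.0.
// kind == kMax: signbit(b) - 1 is nonzero iff b is +0.0, so max picks +0.0.
static_assert(static_cast<int>(MaxMinKind::kMin) == 0 &&
                  static_cast<int>(MaxMinKind::kMax) == 1,
              "the zero-sign selection relies on these enum values");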
+
+template <typename T>
+static T FPUMin(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ return result;
+ } else {
+ return b < a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMax(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+ return result;
+ } else {
+ return b > a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMinA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (FPAbs(a) < FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) < FPAbs(a)) {
+ result = b;
+ } else {
+ result = a < b ? a : b;
+ }
+ }
+ return result;
+}
+
+template <typename T>
+static T FPUMaxA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+ if (FPAbs(a) > FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) > FPAbs(a)) {
+ result = b;
+ } else {
+ result = a > b ? a : b;
+ }
+ }
+ return result;
+}
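A few sanity examples for the magnitude variants, assuming the MIPSr6 MINA/MAXA semantics the helpers implement (smaller or larger absolute value, with equal magnitudes resolved toward the ordinary min or max); these would hold in a unit test with <cassert>:

assert(FPUMinA(-3.0, 2.0) == 2.0);   // |2.0| < |-3.0|
assert(FPUMaxA(-3.0, 2.0) == -3.0);  // |-3.0| > |2.0|
assert(FPUMinA(-2.0, 2.0) == -2.0);  // equal magnitudes: take the min
assert(FPUMaxA(-2.0, 2.0) == 2.0);   // equal magnitudes: take the max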
+
+// Handle execution based on instruction types.
void Simulator::DecodeTypeRegisterDRsType() {
double ft, fs, fd;
@@ -2415,72 +2528,19 @@ void Simulator::DecodeTypeRegisterDRsType() {
}
case MIN:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- set_fpu_register_double(fd_reg(), (fs >= ft) ? ft : fs);
- }
+ set_fpu_register_double(fd_reg(), FPUMin(ft, fs));
break;
- case MINA:
+ case MAX:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- double result;
- if (fabs(fs) > fabs(ft)) {
- result = ft;
- } else if (fabs(fs) < fabs(ft)) {
- result = fs;
- } else {
- result = (fs < ft ? fs : ft);
- }
- set_fpu_register_double(fd_reg(), result);
- }
+ set_fpu_register_double(fd_reg(), FPUMax(ft, fs));
break;
- case MAXA:
+ case MINA:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- double result;
- if (fabs(fs) < fabs(ft)) {
- result = ft;
- } else if (fabs(fs) > fabs(ft)) {
- result = fs;
- } else {
- result = (fs > ft ? fs : ft);
- }
- set_fpu_register_double(fd_reg(), result);
- }
+ set_fpu_register_double(fd_reg(), FPUMinA(ft, fs));
break;
- case MAX:
+ case MAXA:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- set_fpu_register_double(fd_reg(), (fs <= ft) ? ft : fs);
- }
- break;
+ set_fpu_register_double(fd_reg(), FPUMaxA(ft, fs));
break;
case ADD_D:
set_fpu_register_double(fd_reg(), fs + ft);
@@ -3166,71 +3226,19 @@ void Simulator::DecodeTypeRegisterSRsType() {
}
case MIN:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- set_fpu_register_float(fd_reg(), (fs >= ft) ? ft : fs);
- }
+ set_fpu_register_float(fd_reg(), FPUMin(ft, fs));
break;
case MAX:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- set_fpu_register_float(fd_reg(), (fs <= ft) ? ft : fs);
- }
+ set_fpu_register_float(fd_reg(), FPUMax(ft, fs));
break;
case MINA:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- float result;
- if (fabs(fs) > fabs(ft)) {
- result = ft;
- } else if (fabs(fs) < fabs(ft)) {
- result = fs;
- } else {
- result = (fs < ft ? fs : ft);
- }
- set_fpu_register_float(fd_reg(), result);
- }
+ set_fpu_register_float(fd_reg(), FPUMinA(ft, fs));
break;
case MAXA:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- float result;
- if (fabs(fs) < fabs(ft)) {
- result = ft;
- } else if (fabs(fs) > fabs(ft)) {
- result = fs;
- } else {
- result = (fs > ft ? fs : ft);
- }
- set_fpu_register_float(fd_reg(), result);
- }
+ set_fpu_register_float(fd_reg(), FPUMaxA(ft, fs));
break;
case CVT_L_S: {
if (IsFp64Mode()) {
@@ -3379,7 +3387,11 @@ void Simulator::DecodeTypeRegisterCOP1() {
set_register(rt_reg(), get_fpu_register_word(fs_reg()));
break;
case MFHC1:
- set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
+ if (IsFp64Mode()) {
+ set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
+ } else {
+ set_register(rt_reg(), get_fpu_register_word(fs_reg() + 1));
+ }
break;
case CTC1: {
// At the moment only FCSR is supported.
@@ -3399,7 +3411,11 @@ void Simulator::DecodeTypeRegisterCOP1() {
set_fpu_register_word(fs_reg(), registers_[rt_reg()]);
break;
case MTHC1:
- set_fpu_register_hi_word(fs_reg(), registers_[rt_reg()]);
+ if (IsFp64Mode()) {
+ set_fpu_register_hi_word(fs_reg(), registers_[rt_reg()]);
+ } else {
+ set_fpu_register_word(fs_reg() + 1, registers_[rt_reg()]);
+ }
break;
case S: {
DecodeTypeRegisterSRsType();
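The MFHC1/MTHC1 changes above encode the FPU register pairing; a standalone sketch under the assumed layout (FP64 mode: each register is a full 64 bits; FP32 mode: a double lives in an even/odd pair with the high word in the odd register):

#include <cstdint>
// regs models the simulator's FPU register file as 64-bit cells (assumption).
uint32_t HighWordOf(const uint64_t regs[], int fs, bool fp64_mode) {
  if (fp64_mode) {
    return static_cast<uint32_t>(regs[fs] >> 32);  // MFHC1 in FP64 mode
  }
  return static_cast<uint32_t>(regs[fs + 1]);  // odd member of the pair
}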
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 8efe0bba9c..e1c42fdcca 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -359,13 +359,13 @@ class Simulator {
// Compact branch guard.
void CheckForbiddenSlot(int32_t current_pc) {
- Instruction* instr_aftter_compact_branch =
+ Instruction* instr_after_compact_branch =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
- if (instr_aftter_compact_branch->IsForbiddenInBranchDelay()) {
+ if (instr_after_compact_branch->IsForbiddenAfterBranch()) {
V8_Fatal(__FILE__, __LINE__,
"Error: Unexpected instruction 0x%08x immediately after a "
"compact branch instruction.",
- *reinterpret_cast<uint32_t*>(instr_aftter_compact_branch));
+ *reinterpret_cast<uint32_t*>(instr_after_compact_branch));
}
}
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 09436ed1d4..37ee3a6807 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -213,8 +213,8 @@ void RelocInfo::set_target_object(Object* target,
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target));
}
}
@@ -282,10 +282,8 @@ void RelocInfo::set_target_cell(Cell* cell,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ cell);
}
}
@@ -349,28 +347,6 @@ void RelocInfo::WipeOut() {
}
-bool RelocInfo::IsPatchedReturnSequence() {
- Instr instr0 = Assembler::instr_at(pc_); // lui.
- Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize); // ori.
- Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize); // dsll.
- Instr instr3 = Assembler::instr_at(pc_ + 3 * Assembler::kInstrSize); // ori.
- Instr instr4 = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize); // jalr.
-
- bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
- (instr1 & kOpcodeMask) == ORI &&
- (instr2 & kFunctionFieldMask) == DSLL &&
- (instr3 & kOpcodeMask) == ORI &&
- (instr4 & kFunctionFieldMask) == JALR);
- return patched_return;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 9c313a18d6..f0d3eba6b6 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -3270,10 +3270,9 @@ void Assembler::CheckTrampolinePool() {
bc(&after_pool);
} else {
b(&after_pool);
- nop();
}
+ nop();
- EmitForbiddenSlotInstruction();
int pool_start = pc_offset();
for (int i = 0; i < unbound_labels_count_; i++) {
{ BlockGrowBufferScope block_buf_growth(this);
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index f8d315d835..bf2285a2d5 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -306,6 +306,8 @@ struct FPUControlRegister {
const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
const FPUControlRegister FCSR = { kFCSRRegister };
+// TODO(mips64) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
// -----------------------------------------------------------------------------
// Machine instruction Operands.
@@ -1092,7 +1094,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const SourcePosition position);
+ void RecordDeoptReason(const int reason, int raw_position);
static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
intptr_t pc_delta);
@@ -1272,7 +1274,6 @@ class Assembler : public AssemblerBase {
void EmitForbiddenSlotInstruction() {
if (IsPrevInstrCompactBranch()) {
nop();
- ClearCompactBranchState();
}
}
diff --git a/deps/v8/src/mips64/builtins-mips64.cc b/deps/v8/src/mips64/builtins-mips64.cc
index 3a9980beab..1d8d5d3599 100644
--- a/deps/v8/src/mips64/builtins-mips64.cc
+++ b/deps/v8/src/mips64/builtins-mips64.cc
@@ -141,6 +141,109 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- ra : return address
+ // -- sp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- sp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ Condition const cc = (kind == MathMaxMinKind::kMin) ? ge : le;
+ Heap::RootListIndex const root_index =
+ (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+ : Heap::kMinusInfinityValueRootIndex;
+ DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? f2 : f0;
+
+ // Load the accumulator with the default return value (either -Infinity or
+ // +Infinity), with the tagged value in a1 and the double value in f0.
+ __ LoadRoot(a1, root_index);
+ __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ mov(a3, a0);
+
+ Label done_loop, loop;
+ __ bind(&loop);
+ {
+ // Check if all parameters done.
+ __ Dsubu(a0, a0, Operand(1));
+ __ Branch(&done_loop, lt, a0, Operand(zero_reg));
+
+ // Load the next parameter tagged value into a2.
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
+ __ ld(a2, MemOperand(at));
+
+ // Load the double value of the parameter into f2, maybe converting the
+ // parameter to a number first using the ToNumberStub if necessary.
+ Label convert, convert_smi, convert_number, done_convert;
+ __ bind(&convert);
+ __ JumpIfSmi(a2, &convert_smi);
+ __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ JumpIfRoot(a4, Heap::kHeapNumberMapRootIndex, &convert_number);
+ {
+ // Parameter is not a Number, use the ToNumberStub to convert it.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ SmiTag(a3);
+ __ Push(a0, a1, a3);
+ __ mov(a0, a2);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(a2, v0);
+ __ Pop(a0, a1, a3);
+ {
+ // Restore the double accumulator value (f0).
+ Label restore_smi, done_restore;
+ __ JumpIfSmi(a1, &restore_smi);
+ __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ jmp(&done_restore);
+ __ bind(&restore_smi);
+ __ SmiToDoubleFPURegister(a1, f0, a4);
+ __ bind(&done_restore);
+ }
+ __ SmiUntag(a3);
+ __ SmiUntag(a0);
+ }
+ __ jmp(&convert);
+ __ bind(&convert_number);
+ __ ldc1(f2, FieldMemOperand(a2, HeapNumber::kValueOffset));
+ __ jmp(&done_convert);
+ __ bind(&convert_smi);
+ __ SmiToDoubleFPURegister(a2, f2, a4);
+ __ bind(&done_convert);
+
+ // Perform the actual comparison with the accumulator value on the left hand
+ // side (f0) and the next parameter value on the right hand side (f2).
+ Label compare_equal, compare_nan, compare_swap;
+ __ BranchF(&compare_equal, &compare_nan, eq, f0, f2);
+ __ BranchF(&compare_swap, nullptr, cc, f0, f2);
+ __ Branch(&loop);
+
+ // Left and right hand side are equal, check for -0 vs. +0.
+ __ bind(&compare_equal);
+ __ FmoveHigh(a4, reg);
+ // Make a4 unsigned.
+ __ dsll32(a4, a4, 0);
+ __ Branch(&loop, ne, a4, Operand(0x8000000000000000));
+
+ // Result is on the right hand side.
+ __ bind(&compare_swap);
+ __ mov_d(f0, f2);
+ __ mov(a1, a2);
+ __ jmp(&loop);
+
+ // At least one side is NaN, which means that the result will be NaN too.
+ __ bind(&compare_nan);
+ __ LoadRoot(a1, Heap::kNanValueRootIndex);
+ __ ldc1(f0, FieldMemOperand(a1, HeapNumber::kValueOffset));
+ __ jmp(&loop);
+ }
+
+ __ bind(&done_loop);
+ __ Dlsa(sp, sp, a3, kPointerSizeLog2);
+ __ mov(v0, a1);
+ __ DropAndRet(1);
+}
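Stripped of the ToNumber conversions and register shuffling, the loop Generate_MathMaxMin emits reduces the arguments as in this hedged C++ model of the kMax case (kMin is symmetric, seeded with +Infinity and using the opposite comparison):

#include <cmath>
double MathMaxModel(const double* args, int argc) {
  double acc = -INFINITY;  // default result, as loaded from the root list
  for (int i = 0; i < argc; i++) {
    double x = args[i];
    if (std::isnan(x) || std::isnan(acc)) {
      acc = NAN;                              // compare_nan path
    } else if (x == acc && !std::signbit(x)) {
      acc = x;                                // -0 vs. +0 tie-break
    } else if (x > acc) {
      acc = x;                                // compare_swap path
    }
  }
  return acc;
}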
+
+// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
@@ -156,8 +259,7 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
{
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Dsubu(a0, a0, Operand(1));
- __ dsll(a0, a0, kPointerSizeLog2);
- __ Daddu(sp, a0, sp);
+ __ Dlsa(sp, sp, a0, kPointerSizeLog2);
__ ld(a0, MemOperand(sp));
__ Drop(2);
}
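This dsll/Daddu-to-Dlsa substitution recurs throughout the rest of the patch, so one model suffices. Assuming the macro-instruction's operand order as used here, Dlsa folds the shift and the add into one step:

#include <cstdint>
// Dlsa(rd, rt, rs, sa) computes rd = rt + (rs << sa); on MIPS64r6 it is a
// single dlsa instruction, elsewhere it expands to the pair it replaces.
uint64_t DlsaModel(uint64_t rt, uint64_t rs, unsigned sa) {
  return rt + (rs << sa);
}
// e.g. Dlsa(sp, sp, a0, kPointerSizeLog2) drops a0 stack slots in one step.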
@@ -192,8 +294,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
Label no_arguments, done;
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Dsubu(a0, a0, Operand(1));
- __ dsll(a0, a0, kPointerSizeLog2);
- __ Daddu(sp, a0, sp);
+ __ Dlsa(sp, sp, a0, kPointerSizeLog2);
__ ld(a0, MemOperand(sp));
__ Drop(2);
__ jmp(&done);
@@ -232,8 +333,9 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1, a3); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(a0);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(a0);
}
__ Ret(USE_DELAY_SLOT);
@@ -257,8 +359,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
{
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Dsubu(a0, a0, Operand(1));
- __ dsll(a0, a0, kPointerSizeLog2);
- __ Daddu(sp, a0, sp);
+ __ Dlsa(sp, sp, a0, kPointerSizeLog2);
__ ld(a0, MemOperand(sp));
__ Drop(2);
}
@@ -319,8 +420,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
Label no_arguments, done;
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Dsubu(a0, a0, Operand(1));
- __ dsll(a0, a0, kPointerSizeLog2);
- __ Daddu(sp, a0, sp);
+ __ Dlsa(sp, sp, a0, kPointerSizeLog2);
__ ld(a0, MemOperand(sp));
__ Drop(2);
__ jmp(&done);
@@ -361,33 +461,15 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1, a3); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(a0);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(a0);
}
__ Ret(USE_DELAY_SLOT);
__ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset)); // In delay slot.
}
-
-static void CallRuntimePassFunction(
- MacroAssembler* masm, Runtime::FunctionId function_id) {
- // ----------- S t a t e -------------
- // -- a1 : target function (preserved for callee)
- // -- a3 : new target (preserved for callee)
- // -----------------------------------
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the function onto the stack.
- // Push a copy of the target function and the new target.
- __ Push(a1, a3, a1);
-
- __ CallRuntime(function_id, 1);
- // Restore target function and new target.
- __ Pop(a1, a3);
-}
-
-
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
@@ -395,8 +477,26 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ Jump(at);
}
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (preserved for callee)
+ // -- a1 : target function (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the function onto the stack.
+ // Push a copy of the target function and the new target.
+ __ SmiTag(a0);
+ __ Push(a0, a1, a3, a1);
+
+ __ CallRuntime(function_id, 1);
+ // Restore target function and new target.
+ __ Pop(a0, a1, a3);
+ __ SmiUntag(a0);
+ }
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
__ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
}
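GenerateTailCallToReturnedCode now Smi-tags a0 before pushing it: the raw argument count is not a valid tagged value on a stack the GC may walk during the runtime call. A minimal sketch of the 64-bit Smi encoding involved, assuming kSmiShift == 32 as on mips64:

#include <cstdint>
int64_t SmiTagModel(int64_t value) { return value << 32; }  // upper-word Smi
int64_t SmiUntagModel(int64_t smi) { return smi >> 32; }    // arithmetic shift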
@@ -412,8 +512,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ LoadRoot(a4, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(a4));
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
@@ -422,7 +521,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool create_implicit_receiver) {
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
@@ -444,143 +544,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(a2, a0);
if (create_implicit_receiver) {
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- // Verify that the new target is a JSFunction.
- __ GetObjectType(a3, a5, a4);
- __ Branch(&rt_call, ne, a4, Operand(JS_FUNCTION_TYPE));
-
- // Load the initial map and verify that it is in fact a map.
- // a3: new target
- __ ld(a2,
- FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &rt_call);
- __ GetObjectType(a2, t1, t0);
- __ Branch(&rt_call, ne, t0, Operand(MAP_TYPE));
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ ld(a5, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
- __ Branch(&rt_call, ne, a1, Operand(a5));
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // a1: constructor function
- // a2: initial map
- __ lbu(t1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(&rt_call, eq, t1, Operand(JS_FUNCTION_TYPE));
-
- // Now allocate the JSObject on the heap.
- // a1: constructor function
- // a2: initial map
- __ lbu(a4, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ Allocate(a4, t0, a4, t2, &rt_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // a1: constructor function
- // a2: initial map
- // a3: object size
- // t0: JSObject (not HeapObject tagged - the actual address).
- // a4: start of next object
- __ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex);
- __ mov(t1, t0);
- STATIC_ASSERT(0 * kPointerSize == JSObject::kMapOffset);
- __ sd(a2, MemOperand(t1, JSObject::kMapOffset));
- STATIC_ASSERT(1 * kPointerSize == JSObject::kPropertiesOffset);
- __ sd(t2, MemOperand(t1, JSObject::kPropertiesOffset));
- STATIC_ASSERT(2 * kPointerSize == JSObject::kElementsOffset);
- __ sd(t2, MemOperand(t1, JSObject::kElementsOffset));
- STATIC_ASSERT(3 * kPointerSize == JSObject::kHeaderSize);
- __ Daddu(t1, t1, Operand(3 * kPointerSize));
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ Daddu(t0, t0, Operand(kHeapObjectTag));
-
- // Fill all the in-object properties with appropriate filler.
- // t0: JSObject (tagged)
- // t1: First in-object property of JSObject (not tagged)
- __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ lwu(t2, bit_field3);
- __ DecodeField<Map::ConstructionCounter>(a6, t2);
- // a6: slack tracking counter
- __ Branch(&no_inobject_slack_tracking, lt, a6,
- Operand(Map::kSlackTrackingCounterEnd));
- // Decrease generous allocation count.
- __ Dsubu(t2, t2, Operand(1 << Map::ConstructionCounter::kShift));
- __ sw(t2, bit_field3);
-
- // Allocate object with a slack.
- __ lbu(a0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- __ dsll(a0, a0, kPointerSizeLog2);
- __ dsubu(a0, a4, a0);
- // a0: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, t1,
- Operand(a0));
- }
- __ InitializeFieldsWithFiller(t1, a0, t3);
-
- // To allow truncation fill the remaining fields with one pointer
- // filler map.
- __ LoadRoot(t3, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(t1, a4, t3);
-
- // a6: slack tracking counter value before decreasing.
- __ Branch(&allocated, ne, a6, Operand(Map::kSlackTrackingCounterEnd));
-
- // Push the constructor, new_target and the object to the stack,
- // and then the initial map as an argument to the runtime call.
- __ Push(a1, a3, t0, a2);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(a1, a3, t0);
-
- // Continue with JSObject being successfully allocated.
- // a1: constructor function
- // a3: new target
- // t0: JSObject
- __ jmp(&allocated);
-
- __ bind(&no_inobject_slack_tracking);
- }
-
- __ InitializeFieldsWithFiller(t1, a4, t3);
-
- // Continue with JSObject being successfully allocated.
- // a1: constructor function
- // a3: new target
- // t0: JSObject
- __ jmp(&allocated);
- }
-
- // Allocate the new receiver object using the runtime call.
- // a1: constructor function
- // a3: new target
- __ bind(&rt_call);
-
- // Push the constructor and new_target twice, second pair as arguments
- // to the runtime call.
- __ Push(a1, a3, a1, a3); // constructor function, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(a1, a3);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mov(t0, v0);
__ Pop(a1, a3);
- // Receiver for constructor call allocated.
- // a1: constructor function
- // a3: new target
- // t0: JSObject
- __ bind(&allocated);
-
+ // ----------- S t a t e -------------
+ // -- a1: constructor function
+ // -- a3: new target
+ // -- t0: newly allocated object
+ // -----------------------------------
__ ld(a0, MemOperand(sp));
}
__ SmiUntag(a0);
@@ -610,8 +584,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ mov(t0, a0);
__ jmp(&entry);
__ bind(&loop);
- __ dsll(a4, t0, kPointerSizeLog2);
- __ Daddu(a4, a2, Operand(a4));
+ __ Dlsa(a4, a2, t0, kPointerSizeLog2);
__ ld(a5, MemOperand(a4));
__ push(a5);
__ bind(&entry);
@@ -677,6 +650,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Leave construct frame.
}
+ // ES6 9.2.2. Step 13+
+ // Check that the result is not a Smi; a Smi result would mean the derived
+ // class constructor returned neither undefined nor an Object.
+ if (check_derived_construct) {
+ Label dont_throw;
+ __ JumpIfNotSmi(v0, &dont_throw);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+ }
+ __ bind(&dont_throw);
+ }
+
__ SmiScale(a4, a1, kPointerSizeLog2);
__ Daddu(sp, sp, a4);
__ Daddu(sp, sp, kPointerSize);
@@ -688,17 +674,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, true);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, true);
}
@@ -778,8 +770,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// a3: argc
// s0: argv, i.e. points to first arg
Label loop, entry;
- __ dsll(a4, a3, kPointerSizeLog2);
- __ daddu(a6, s0, a4);
+ __ Dlsa(a6, s0, a3, kPointerSizeLog2);
__ b(&entry);
__ nop(); // Branch delay slot nop.
// a6 points past last arg.
@@ -841,10 +832,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// o sp: stack pointer
// o ra: return address
//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-mips.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -853,16 +842,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(ra, fp, cp, a1);
__ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- __ Push(a3);
-
- // Push zero for bytecode array offset.
- __ Push(zero_reg);
// Get the bytecode array from the function object and load the pointer to the
// first entry into kInterpreterBytecodeRegister.
__ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ Label load_debug_bytecode_array, bytecode_array_loaded;
+ Register debug_info = kInterpreterBytecodeArrayRegister;
+ DCHECK(!debug_info.is(a0));
+ __ ld(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
+ __ Branch(&load_debug_bytecode_array, ne, debug_info,
+ Operand(DebugInfo::uninitialized()));
__ ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
+ __ bind(&bytecode_array_loaded);
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -874,6 +866,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(BYTECODE_ARRAY_TYPE));
}
+ // Push new.target, bytecode array and zero for bytecode array offset.
+ __ Push(a3, kInterpreterBytecodeArrayRegister, zero_reg);
+
// Allocate the local and temporary register file on the stack.
{
// Load frame size (word) from the BytecodeArray object.
@@ -904,44 +899,38 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
- // - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Allow simulator stop operations if FLAG_stop_at is set.
// - Code aging of the BytecodeArray object.
- // Perform stack guard check.
- {
- Label ok;
- __ LoadRoot(at, Heap::kStackLimitRootIndex);
- __ Branch(&ok, hs, sp, Operand(at));
- __ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard);
- __ pop(kInterpreterBytecodeArrayRegister);
- __ bind(&ok);
- }
-
// Load bytecode offset and dispatch table into registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
__ Daddu(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ Daddu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ li(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Dispatch to the first bytecode handler for the function.
__ Daddu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a0, MemOperand(a0));
- __ dsll(at, a0, kPointerSizeLog2);
- __ Daddu(at, kInterpreterDispatchTableRegister, at);
+ __ Dlsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
__ ld(at, MemOperand(at));
// TODO(rmcilroy): Make dispatch table point to code entries to avoid untagging
// and header removal.
__ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(at);
+
+ // Even though the first bytecode handler was called, we will never return.
+ __ Abort(kUnexpectedReturnFromBytecodeHandler);
+
+ // Load debug copy of the bytecode array.
+ __ bind(&load_debug_bytecode_array);
+ __ ld(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ __ Branch(&bytecode_array_loaded);
}
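The dispatch sequence above, in C++ terms, indexes the handler table with the current bytecode; a sketch with illustrative names (the real code then skips the Code object header before jumping):

#include <cstdint>
typedef void (*BytecodeHandler)();
BytecodeHandler LookUpHandler(const uint8_t* bytecode_array, intptr_t offset,
                              BytecodeHandler const* dispatch_table) {
  return dispatch_table[bytecode_array[offset]];  // the lbu + Dlsa + ld above
}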
@@ -966,7 +955,8 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a2 : the address of the first argument to be pushed. Subsequent
@@ -991,7 +981,9 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
__ Branch(&loop_header, gt, a2, Operand(a3));
// Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
}
@@ -1026,47 +1018,24 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
}
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(kInterpreterAccumulatorRegister); // Save accumulator register.
-
- // Pass the deoptimization type to the runtime system.
- __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
- __ push(a1);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
-
- __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
- // Tear down internal frame.
- }
-
- // Drop state (we don't use this for interpreter deopts).
- __ Drop(1);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Initialize register file register and dispatch table register.
__ Daddu(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ Daddu(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ li(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Get the context from the frame.
- // TODO(rmcilroy): Update interpreter frame to expect current context at the
- // context slot instead of the function context.
__ ld(kContextRegister,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
- __ ld(a1,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
- __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(a1, SharedFunctionInfo::kFunctionDataOffset));
+ __ ld(
+ kInterpreterBytecodeArrayRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1089,14 +1058,36 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
__ lbu(a1, MemOperand(a1));
- __ dsll(a1, a1, kPointerSizeLog2);
- __ Daddu(a1, kInterpreterDispatchTableRegister, a1);
+ __ Dlsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
__ ld(a1, MemOperand(a1));
__ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(a1);
}
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Pass the deoptimization type to the runtime system.
+ __ li(a1, Operand(Smi::FromInt(static_cast<int>(type))));
+ __ push(a1);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ // Tear down internal frame.
+ }
+
+ // Drop state (we don't use this for interpreter deopts) and pop the
+ // accumulator value into the accumulator register.
+ __ Drop(1);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ // Enter the bytecode dispatch.
+ Generate_EnterBytecodeDispatch(masm);
+}
+
+
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
@@ -1111,22 +1102,30 @@ void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the address of the interpreter entry trampoline as a return address.
+ // This simulates the initial call to bytecode handlers in interpreter entry
+ // trampoline. The return will never actually be taken, but our stack walker
+ // uses this address to determine whether a frame is interpreted.
+ __ li(ra, Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+
+ Generate_EnterBytecodeDispatch(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
@@ -1346,13 +1345,12 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
// Load the next prototype.
__ bind(&next_prototype);
- __ ld(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
- // End if the prototype is null or not hidden.
- __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
- __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lwu(scratch, FieldMemOperand(map, Map::kBitField3Offset));
- __ DecodeField<Map::IsHiddenPrototype>(scratch);
+ __ DecodeField<Map::HasHiddenPrototype>(scratch);
__ Branch(receiver_check_failed, eq, scratch, Operand(zero_reg));
+
+ __ ld(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Iterate.
__ Branch(&prototype_loop_start);
@@ -1377,8 +1375,7 @@ void Builtins::Generate_HandleFastApiCall(MacroAssembler* masm) {
// Do the compatible receiver check
Label receiver_check_failed;
- __ sll(at, a0, kPointerSizeLog2);
- __ Daddu(t8, sp, at);
+ __ Dlsa(t8, sp, a0, kPointerSizeLog2);
__ ld(t0, MemOperand(t8));
CompatibleReceiverCheck(masm, t0, t1, &receiver_check_failed);
@@ -1512,6 +1509,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Register scratch = a4;
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
__ mov(a3, a2);
+ // Dlsa() cannot be used here because the scratch value is used later.
__ dsll(scratch, a0, kPointerSizeLog2);
__ Daddu(a0, sp, Operand(scratch));
__ ld(a1, MemOperand(a0)); // receiver
@@ -1582,8 +1580,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack.
// a0: actual number of arguments
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(at, sp, at);
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
__ ld(a1, MemOperand(at));
// 3. Shift arguments and return address one slot down on the stack
@@ -1594,8 +1591,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
{
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(a2, sp, at);
+ __ Dlsa(a2, sp, a0, kPointerSizeLog2);
__ bind(&loop);
__ ld(at, MemOperand(a2, -kPointerSize));
@@ -1695,6 +1691,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
Register scratch = a4;
__ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
__ mov(a2, a1);
+ // Dlsa() cannot be used here because the scratch value is used later.
__ dsll(scratch, a0, kPointerSizeLog2);
__ Daddu(a0, sp, Operand(scratch));
__ sd(a2, MemOperand(a0)); // receiver
@@ -1850,9 +1847,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Try to create the list from an arguments object.
__ bind(&create_arguments);
- __ ld(a2,
- FieldMemOperand(a0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ ld(a2, FieldMemOperand(a0, JSArgumentsObject::kLengthOffset));
__ ld(a4, FieldMemOperand(a0, JSObject::kElementsOffset));
__ ld(at, FieldMemOperand(a4, FixedArray::kLengthOffset));
__ Branch(&create_runtime, ne, a2, Operand(at));
@@ -1906,8 +1901,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
Label done, loop;
__ bind(&loop);
__ Branch(&done, eq, a4, Operand(a2));
- __ dsll(at, a4, kPointerSizeLog2);
- __ Daddu(at, a0, at);
+ __ Dlsa(at, a0, a4, kPointerSizeLog2);
__ ld(at, FieldMemOperand(at, FixedArray::kHeaderSize));
__ Push(at);
__ Daddu(a4, a4, Operand(1));
@@ -1927,10 +1921,133 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// | ...
+// | g()'s arg M
+// | ...
+// | g()'s arg 1
+// | g()'s receiver arg
+// | g()'s caller pc
+// ------- g()'s frame: -------
+// | g()'s caller fp <- fp
+// | g()'s context
+// | function pointer: g
+// | -------------------------
+// | ...
+// | ...
+// | f()'s arg N
+// | ...
+// | f()'s arg 1
+// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Comment cmnt(masm, "[ PrepareForTailCall");
+
+ // Prepare for tail call only if the debugger is not active.
+ Label done;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(masm->isolate());
+ __ li(at, Operand(debug_is_active));
+ __ lb(scratch1, MemOperand(at));
+ __ Branch(&done, ne, scratch1, Operand(zero_reg));
+
+ // Drop possible interpreter handler/stub frame.
+ {
+ Label no_interpreter_frame;
+ __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ Branch(&no_interpreter_frame, ne, scratch3,
+ Operand(Smi::FromInt(StackFrame::STUB)));
+ __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&no_interpreter_frame);
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ Branch(&no_arguments_adaptor, ne, scratch3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Drop arguments adaptor frame and load arguments count.
+ __ mov(fp, scratch2);
+ __ ld(scratch1,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(scratch1);
+ __ Branch(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ ld(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ld(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(scratch1,
+ FieldMemOperand(scratch1,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+
+ __ bind(&formal_parameter_count_loaded);
+
+ // Calculate the end of destination area where we will put the arguments
+ // after we drop current frame. We add kPointerSize to count the receiver
+ // argument which is not included into formal parameters count.
+ Register dst_reg = scratch2;
+ __ Dlsa(dst_reg, fp, scratch1, kPointerSizeLog2);
+ __ Daddu(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = scratch1;
+ __ Dlsa(src_reg, sp, args_reg, kPointerSizeLog2);
+ // Count receiver argument as well (not included in args_reg).
+ __ Daddu(src_reg, src_reg, Operand(kPointerSize));
+
+ if (FLAG_debug_code) {
+ __ Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+ // Now copy callee arguments to the caller frame going backwards to avoid
+ // callee arguments corruption (source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch3;
+ Label loop, entry;
+ __ Branch(&entry);
+ __ bind(&loop);
+ __ Dsubu(src_reg, src_reg, Operand(kPointerSize));
+ __ Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
+ __ ld(tmp_reg, MemOperand(src_reg));
+ __ sd(tmp_reg, MemOperand(dst_reg));
+ __ bind(&entry);
+ __ Branch(&loop, ne, sp, Operand(src_reg));
+
+ // Leave current frame.
+ __ mov(sp, dst_reg);
+
+ __ bind(&done);
+}
+} // namespace
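The copy loop in PrepareForTailCall moves the freshly prepared arguments down over the caller's frame; since the destination sits at higher addresses on the downward-growing stack and the regions may overlap, it walks from the top down. A rough C model under those assumptions:

#include <cstdint>
// dst_end and src_end point one word past their areas; copying stops at sp.
void CopyArgsDownModel(intptr_t* dst_end, intptr_t* src_end, intptr_t* sp) {
  while (src_end != sp) {
    *--dst_end = *--src_end;  // pre-decrement, as in the assembly loop
  }
  // The new stack pointer is dst_end afterwards (mov sp, dst_reg).
}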
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode) {
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSFunction)
@@ -1970,8 +2087,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadGlobalProxy(a3);
} else {
Label convert_to_object, convert_receiver;
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(at, sp, at);
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
__ ld(a3, MemOperand(at));
__ JumpIfSmi(a3, &convert_to_object);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
@@ -2007,8 +2123,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(at, sp, at);
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
__ sd(a3, MemOperand(at));
}
__ bind(&done_convert);
@@ -2020,6 +2135,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, a0, t0, t1, t2);
+ }
+
__ lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(a0);
@@ -2038,18 +2157,22 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(a1);
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, a0, t0, t1, t2);
+ }
+
// Patch the receiver to [[BoundThis]].
{
__ ld(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
- __ dsll(a4, a0, kPointerSizeLog2);
- __ daddu(a4, a4, sp);
+ __ Dlsa(a4, sp, a0, kPointerSizeLog2);
__ sd(at, MemOperand(a4));
}
@@ -2090,11 +2213,9 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
__ mov(a5, zero_reg);
__ bind(&loop);
__ Branch(&done_loop, gt, a5, Operand(a0));
- __ dsll(a6, a4, kPointerSizeLog2);
- __ daddu(a6, a6, sp);
+ __ Dlsa(a6, sp, a4, kPointerSizeLog2);
__ ld(at, MemOperand(a6));
- __ dsll(a6, a5, kPointerSizeLog2);
- __ daddu(a6, a6, sp);
+ __ Dlsa(a6, sp, a5, kPointerSizeLog2);
__ sd(at, MemOperand(a6));
__ Daddu(a4, a4, Operand(1));
__ Daddu(a5, a5, Operand(1));
@@ -2111,11 +2232,9 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
__ bind(&loop);
__ Dsubu(a4, a4, Operand(1));
__ Branch(&done_loop, lt, a4, Operand(zero_reg));
- __ dsll(a5, a4, kPointerSizeLog2);
- __ daddu(a5, a5, a2);
+ __ Dlsa(a5, a2, a4, kPointerSizeLog2);
__ ld(at, MemOperand(a5));
- __ dsll(a5, a0, kPointerSizeLog2);
- __ daddu(a5, a5, sp);
+ __ Dlsa(a5, sp, a0, kPointerSizeLog2);
__ sd(at, MemOperand(a5));
__ Daddu(a0, a0, Operand(1));
__ Branch(&loop);
@@ -2133,7 +2252,8 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
// -- a1 : the target to call (can be any Object).
@@ -2143,12 +2263,23 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ JumpIfSmi(a1, &non_callable);
__ bind(&non_smi);
__ GetObjectType(a1, t1, t2);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Check if target has a [[Call]] internal method.
+ __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t1, t1, Operand(1 << Map::kIsCallable));
+ __ Branch(&non_callable, eq, t1, Operand(zero_reg));
+
__ Branch(&non_function, ne, t2, Operand(JS_PROXY_TYPE));
+ // 0. Prepare for tail call if necessary.
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, a0, t0, t1, t2);
+ }
+
// 1. Runtime fallback for Proxy [[Call]].
__ Push(a1);
// Increase the arguments size to include the pushed function and the
@@ -2161,18 +2292,13 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
__ bind(&non_function);
- // Check if target has a [[Call]] internal method.
- __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
- __ And(t1, t1, Operand(1 << Map::kIsCallable));
- __ Branch(&non_callable, eq, t1, Operand(zero_reg));
// Overwrite the original receiver with the (original) target.
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(at, sp, at);
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
__ sd(a1, MemOperand(at));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2253,11 +2379,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ mov(a5, zero_reg);
__ bind(&loop);
__ Branch(&done_loop, ge, a5, Operand(a0));
- __ dsll(a6, a4, kPointerSizeLog2);
- __ daddu(a6, a6, sp);
+ __ Dlsa(a6, sp, a4, kPointerSizeLog2);
__ ld(at, MemOperand(a6));
- __ dsll(a6, a5, kPointerSizeLog2);
- __ daddu(a6, a6, sp);
+ __ Dlsa(a6, sp, a5, kPointerSizeLog2);
__ sd(at, MemOperand(a6));
__ Daddu(a4, a4, Operand(1));
__ Daddu(a5, a5, Operand(1));
@@ -2274,11 +2398,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ bind(&loop);
__ Dsubu(a4, a4, Operand(1));
__ Branch(&done_loop, lt, a4, Operand(zero_reg));
- __ dsll(a5, a4, kPointerSizeLog2);
- __ daddu(a5, a5, a2);
+ __ Dlsa(a5, a2, a4, kPointerSizeLog2);
__ ld(at, MemOperand(a5));
- __ dsll(a5, a0, kPointerSizeLog2);
- __ daddu(a5, a5, sp);
+ __ Dlsa(a5, sp, a0, kPointerSizeLog2);
__ sd(at, MemOperand(a5));
__ Daddu(a0, a0, Operand(1));
__ Branch(&loop);
@@ -2357,8 +2479,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
// Called Construct on an exotic Object with a [[Construct]] internal method.
{
// Overwrite the original receiver with the (original) target.
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(at, sp, at);
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
__ sd(a1, MemOperand(at));
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 2531d6b3f1..bde5531077 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -90,9 +90,8 @@ void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
#define __ ACCESS_MASM(masm)
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cc, Strength strength);
+ Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -273,7 +272,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cc, Strength strength) {
+ Condition cc) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = t1;
@@ -294,13 +293,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
__ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics, since
- // we need to throw a TypeError. Smis have already been ruled out.
- __ Branch(&return_equal, eq, t0, Operand(HEAP_NUMBER_TYPE));
- __ And(t0, t0, Operand(kIsNotStringMask));
- __ Branch(slow, ne, t0, Operand(zero_reg));
- }
} else {
__ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
// Comparing JS objects with <=, >= is complicated.
@@ -310,13 +302,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
__ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
// Call runtime on identical SIMD values since we must throw a TypeError.
__ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics,
- // since we need to throw a TypeError. Smis and heap numbers have
- // already been ruled out.
- __ And(t0, t0, Operand(kIsNotStringMask));
- __ Branch(slow, ne, t0, Operand(zero_reg));
- }
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -510,45 +495,55 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
+ Register lhs, Register rhs,
Label* possible_strings,
- Label* not_both_strings) {
+ Label* runtime_call) {
DCHECK((lhs.is(a0) && rhs.is(a1)) ||
(lhs.is(a1) && rhs.is(a0)));
// a2 is object type of rhs.
- Label object_test;
+ Label object_test, return_unequal, undetectable;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ And(at, a2, Operand(kIsNotStringMask));
__ Branch(&object_test, ne, at, Operand(zero_reg));
__ And(at, a2, Operand(kIsNotInternalizedMask));
__ Branch(possible_strings, ne, at, Operand(zero_reg));
__ GetObjectType(rhs, a3, a3);
- __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
+ __ Branch(runtime_call, ge, a3, Operand(FIRST_NONSTRING_TYPE));
__ And(at, a3, Operand(kIsNotInternalizedMask));
__ Branch(possible_strings, ne, at, Operand(zero_reg));
- // Both are internalized strings. We already checked they weren't the same
- // pointer so they are not equal.
+ // Both are internalized. We already checked they weren't the same pointer so
+ // they are not equal. Return non-equal by returning the non-zero object
+ // pointer in v0.
__ Ret(USE_DELAY_SLOT);
- __ li(v0, Operand(1)); // Non-zero indicates not equal.
+ __ mov(v0, a0); // In delay slot.
__ bind(&object_test);
- __ Branch(not_both_strings, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
- __ GetObjectType(rhs, a2, a3);
- __ Branch(not_both_strings, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
-
- // If both objects are undetectable, they are equal. Otherwise, they
- // are not equal, since they are different objects and an object is not
- // equal to undefined.
- __ ld(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
- __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
- __ and_(a0, a2, a3);
- __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
+ __ ld(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ ld(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
+ __ And(at, t0, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&undetectable, ne, at, Operand(zero_reg));
+ __ And(at, t1, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&return_unequal, ne, at, Operand(zero_reg));
+
+ __ GetInstanceType(a2, a2);
+ __ Branch(runtime_call, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ GetInstanceType(a3, a3);
+ __ Branch(runtime_call, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in v0.
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0); // In delay slot.
+
+ __ bind(&undetectable);
+ __ And(at, t1, Operand(1 << Map::kIsUndetectable));
+ __ Branch(&return_unequal, eq, at, Operand(zero_reg));
__ Ret(USE_DELAY_SLOT);
- __ xori(v0, a0, 1 << Map::kIsUndetectable);
+ __ li(v0, Operand(EQUAL)); // In delay slot.
}
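The rewritten tail above folds in the undetectable-object rules for loose equality: two undetectable objects (e.g. document.all) compare equal, an undetectable object against an ordinary receiver compares unequal, and anything that is not a receiver falls through to the runtime. A restatement of the control flow as plain C++, with illustrative names only:

    enum Result { kEqual, kUnequal, kRuntime };

    // Illustrative restatement of the object_test path above; kRuntime
    // stands for the runtime_call fallback.
    Result ObjectTest(bool lhs_undetectable, bool rhs_undetectable,
                      bool lhs_is_receiver, bool rhs_is_receiver) {
      if (lhs_undetectable)                       // label: undetectable
        return rhs_undetectable ? kEqual : kUnequal;
      if (rhs_undetectable) return kUnequal;      // label: return_unequal
      if (!lhs_is_receiver || !rhs_is_receiver)   // instance-type checks
        return kRuntime;
      return kUnequal;  // distinct receivers are never loosely equal
    }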
@@ -600,7 +595,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc, strength());
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@@ -739,8 +734,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
- : Runtime::kCompare);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -971,7 +965,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cvt_d_w(double_exponent, single_scratch);
// Returning or bailing out.
- Counters* counters = isolate()->counters();
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
@@ -985,7 +978,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
DCHECK(heapnumber.is(v0));
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ DropAndRet(2);
} else {
__ push(ra);
@@ -1001,7 +993,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ MovFromFloatResult(double_result);
__ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret();
}
}
@@ -1073,8 +1064,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ mov(s1, a2);
} else {
// Compute the argv pointer in a callee-saved register.
- __ dsll(s1, a0, kPointerSizeLog2);
- __ Daddu(s1, sp, s1);
+ __ Dlsa(s1, sp, a0, kPointerSizeLog2);
__ Dsubu(s1, s1, kPointerSize);
}
@@ -1090,47 +1080,77 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// a0 = argc
__ mov(s0, a0);
__ mov(s2, a1);
- // a1 = argv (set in the delay slot after find_ra below).
// We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
// also need to reserve the 4 argument slots on the stack.
__ AssertStackIsAligned();
- __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+ int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+ int result_stack_size;
+ if (result_size() <= 2) {
+ // a0 = argc, a1 = argv, a2 = isolate
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(a1, s1);
+ result_stack_size = 0;
+ } else {
+ DCHECK_EQ(3, result_size());
+ // Allocate additional space for the result.
+ result_stack_size =
+ ((result_size() * kPointerSize) + frame_alignment_mask) &
+ ~frame_alignment_mask;
+ __ Dsubu(sp, sp, Operand(result_stack_size));
+
+ // a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
+ __ li(a3, Operand(ExternalReference::isolate_address(isolate())));
+ __ mov(a2, s1);
+ __ mov(a1, a0);
+ __ mov(a0, sp);
+ }
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
// we can store the address on the stack to be able to find it again and
// we never have to restore it, because it will not change.
{ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
- // This branch-and-link sequence is needed to find the current PC on mips,
- // saved to the ra register.
- // Use masm-> here instead of the double-underscore macro since extra
- // coverage code can interfere with the proper calculation of ra.
+ int kNumInstructionsToJump = 4;
Label find_ra;
- masm->bal(&find_ra); // bal exposes branch delay slot.
- masm->mov(a1, s1);
- masm->bind(&find_ra);
-
// Adjust the value in ra to point to the correct return location, 2nd
// instruction past the real call into C code (the jalr(t9)), and push it.
// This is the return address of the exit frame.
- const int kNumInstructionsToJump = 5;
- masm->Daddu(ra, ra, kNumInstructionsToJump * kInt32Size);
- masm->sd(ra, MemOperand(sp)); // This spot was reserved in EnterExitFrame.
+ if (kArchVariant >= kMips64r6) {
+ __ addiupc(ra, kNumInstructionsToJump + 1);
+ } else {
+ // This branch-and-link sequence is needed to find the current PC on mips
+ // before r6, saved to the ra register.
+ __ bal(&find_ra); // bal exposes branch delay slot.
+ __ Daddu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
+ }
+ __ bind(&find_ra);
+
+ // This spot was reserved in EnterExitFrame.
+ __ sd(ra, MemOperand(sp, result_stack_size));
// Stack space reservation moved to the branch delay slot below.
// Stack is still aligned.
// Call the C routine.
- masm->mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
- masm->jalr(t9);
+ __ mov(t9, s2); // Function pointer to t9 to conform to ABI for PIC.
+ __ jalr(t9);
// Set up sp in the delay slot.
- masm->daddiu(sp, sp, -kCArgsSlotsSize);
+ __ daddiu(sp, sp, -kCArgsSlotsSize);
// Make sure the stored 'ra' points to this position.
DCHECK_EQ(kNumInstructionsToJump,
masm->InstructionsGeneratedSince(&find_ra));
}
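The r6 path above uses addiupc (add a shifted immediate to the PC), so the return address is materialized directly instead of being recovered through the pre-r6 bal trick. A sketch of the two sequences, assuming the standard MIPS semantics of bal and addiupc:

    // Illustrative comparison of the two ra-materializing sequences above.
    //   r6:     addiupc ra, N + 1      // ra = pc + 4 * (N + 1)
    //   pre-r6: bal     find_ra        // ra = pc + 8 (delay slot exposed)
    //           daddu   ra, ra, N * 4  // advance past jalr + delay slot
    // Either way ra must point just past the jalr's delay slot; the DCHECK
    // on InstructionsGeneratedSince(&find_ra) above pins N to the number
    // of instructions actually emitted after the label.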
+ if (result_size() > 2) {
+ DCHECK_EQ(3, result_size());
+ // Read result values stored on stack.
+ __ ld(a0, MemOperand(v0, 2 * kPointerSize));
+ __ ld(v1, MemOperand(v0, 1 * kPointerSize));
+ __ ld(v0, MemOperand(v0, 0 * kPointerSize));
+ }
+ // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!
// Check result for exception sentinel.
Label exception_returned;
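Why the result_size() == 3 case above reserves stack space: under the MIPS n64 ABI an aggregate of up to 16 bytes is returned in the v0:v1 pair, while a 24-byte aggregate is written through a hidden pointer passed as the implicit first argument, which is why argc/argv/isolate shift from a0/a1/a2 to a1/a2/a3. A minimal C++ illustration (not V8 code):

    #include <cstdint>

    struct Pair   { uint64_t a, b;    };  // 16 bytes: returned in v0:v1
    struct Triple { uint64_t a, b, c; };  // 24 bytes: returned through a
                                          // hidden result pointer in a0
    Triple MakeTriple() { return Triple{1, 2, 3}; }

    // The callee hands the hidden pointer back in v0, which is what the
    // three "ld(..., MemOperand(v0, n * kPointerSize))" loads above read.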
@@ -1246,14 +1266,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Move(kDoubleRegZero, 0.0);
// Load argv in s0 register.
- if (kMipsAbi == kN64) {
- __ mov(s0, a4); // 5th parameter in mips64 a4 (a4) register.
- } else { // Abi O32.
- // 5th parameter on stack for O32 abi.
- int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
- __ ld(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
- }
+ __ mov(s0, a4); // 5th parameter in mips64 a4 (a4) register.
__ InitializeRootRegister();
@@ -1558,303 +1571,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The displacement is the offset of the last parameter (if any)
- // relative to the frame pointer.
- const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
- DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
- DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(a1, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor,
- eq,
- a3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Check index (a1) against formal parameters count limit passed in
- // through register a0. Use unsigned comparison to get negative
- // check for free.
- __ Branch(&slow, hs, a1, Operand(a0));
-
- // Read the argument from the stack and return it.
- __ dsubu(a3, a0, a1);
- __ SmiScale(a7, a3, kPointerSizeLog2);
- __ Daddu(a3, fp, Operand(a7));
- __ Ret(USE_DELAY_SLOT);
- __ ld(v0, MemOperand(a3, kDisplacement));
-
- // Arguments adaptor case: Check index (a1) against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ ld(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
-
- // Read the argument from the adaptor frame and return it.
- __ dsubu(a3, a0, a1);
- __ SmiScale(a7, a3, kPointerSizeLog2);
- __ Daddu(a3, a2, Operand(a7));
- __ Ret(USE_DELAY_SLOT);
- __ ld(v0, MemOperand(a3, kDisplacement));
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ push(a1);
- __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // a1 : function
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
-
- DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime, ne, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer in the current frame.
- __ ld(a2, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiScale(a7, a2, kPointerSizeLog2);
- __ Daddu(a4, a4, Operand(a7));
- __ daddiu(a3, a4, StandardFrameConstants::kCallerSPOffset);
-
- __ bind(&runtime);
- __ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // a1 : function
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
- // Registers used over whole function:
- // a5 : arguments count (tagged)
- // a6 : mapped parameter count (tagged)
-
- DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame, eq, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // No adaptor, parameter count = argument count.
- __ mov(a5, a2);
- __ Branch(USE_DELAY_SLOT, &try_allocate);
- __ mov(a6, a2); // In delay slot.
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ ld(a5, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiScale(t2, a5, kPointerSizeLog2);
- __ Daddu(a4, a4, Operand(t2));
- __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // a5 = argument count (tagged)
- // a6 = parameter count (tagged)
- // Compute the mapped parameter count = min(a6, a5) in a6.
- __ mov(a6, a2);
- __ Branch(&try_allocate, le, a6, Operand(a5));
- __ mov(a6, a5);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- Label param_map_size;
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
- __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a6 == 0.
- __ SmiScale(t1, a6, kPointerSizeLog2);
- __ daddiu(t1, t1, kParameterMapHeaderSize);
- __ bind(&param_map_size);
-
- // 2. Backing store.
- __ SmiScale(t2, a5, kPointerSizeLog2);
- __ Daddu(t1, t1, Operand(t2));
- __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(t1, v0, t1, a4, &runtime, TAG_OBJECT);
-
- // v0 = address of new object(s) (tagged)
- // a2 = argument count (smi-tagged)
- // Get the arguments boilerplate from the current native context into a4.
- const int kNormalOffset =
- Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
- __ ld(a4, NativeContextMemOperand());
- Label skip2_ne, skip2_eq;
- __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
- __ ld(a4, MemOperand(a4, kNormalOffset));
- __ bind(&skip2_ne);
-
- __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
- __ ld(a4, MemOperand(a4, kAliasedOffset));
- __ bind(&skip2_eq);
-
- // v0 = address of new object (tagged)
- // a2 = argument count (smi-tagged)
- // a4 = address of arguments map (tagged)
- // a6 = mapped parameter count (tagged)
- __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
- __ sd(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ AssertNotSmi(a1);
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ sd(a1, FieldMemOperand(v0, kCalleeOffset));
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(a5);
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- const int kLengthOffset = JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize;
- __ sd(a5, FieldMemOperand(v0, kLengthOffset));
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, a4 will point there, otherwise
- // it will point to the backing store.
- __ Daddu(a4, v0, Operand(Heap::kSloppyArgumentsObjectSize));
- __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // v0 = address of new object (tagged)
- // a2 = argument count (tagged)
- // a4 = address of parameter map or backing store (tagged)
- // a6 = mapped parameter count (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- Label skip3;
- __ Branch(&skip3, ne, a6, Operand(Smi::FromInt(0)));
- // Move backing store address to a1, because it is
- // expected there when filling in the unmapped arguments.
- __ mov(a1, a4);
- __ bind(&skip3);
-
- __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::FromInt(0)));
-
- __ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
- __ Daddu(a5, a6, Operand(Smi::FromInt(2)));
- __ sd(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
- __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ SmiScale(t2, a6, kPointerSizeLog2);
- __ Daddu(a5, a4, Operand(t2));
- __ Daddu(a5, a5, Operand(kParameterMapHeaderSize));
- __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameter thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ mov(a5, a6);
- __ Daddu(t1, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ Dsubu(t1, t1, Operand(a6));
- __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
- __ SmiScale(t2, a5, kPointerSizeLog2);
- __ Daddu(a1, a4, Operand(t2));
- __ Daddu(a1, a1, Operand(kParameterMapHeaderSize));
-
- // a1 = address of backing store (tagged)
- // a4 = address of parameter map (tagged)
- // a0 = temporary scratch (a.o., for address calculation)
- // t1 = loop variable (tagged)
- // a7 = the hole value
- __ jmp(&parameters_test);
-
- __ bind(&parameters_loop);
- __ Dsubu(a5, a5, Operand(Smi::FromInt(1)));
- __ SmiScale(a0, a5, kPointerSizeLog2);
- __ Daddu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ Daddu(t2, a4, a0);
- __ sd(t1, MemOperand(t2));
- __ Dsubu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ Daddu(t2, a1, a0);
- __ sd(a7, MemOperand(t2));
- __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ Branch(&parameters_loop, ne, a5, Operand(Smi::FromInt(0)));
-
- // Restore t1 = argument count (tagged).
- __ ld(a5, FieldMemOperand(v0, kLengthOffset));
-
- __ bind(&skip_parameter_map);
- // v0 = address of new object (tagged)
- // a1 = address of backing store (tagged)
- // a5 = argument count (tagged)
- // a6 = mapped parameter count (tagged)
- // t1 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
- __ sd(t1, FieldMemOperand(a1, FixedArray::kMapOffset));
- __ sd(a5, FieldMemOperand(a1, FixedArray::kLengthOffset));
-
- Label arguments_loop, arguments_test;
- __ SmiScale(t2, a6, kPointerSizeLog2);
- __ Dsubu(a3, a3, Operand(t2));
- __ jmp(&arguments_test);
-
- __ bind(&arguments_loop);
- __ Dsubu(a3, a3, Operand(kPointerSize));
- __ ld(a4, MemOperand(a3, 0));
- __ SmiScale(t2, a6, kPointerSizeLog2);
- __ Daddu(t1, a1, Operand(t2));
- __ sd(a4, FieldMemOperand(t1, FixedArray::kHeaderSize));
- __ Daddu(a6, a6, Operand(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ Branch(&arguments_loop, lt, a6, Operand(a5));
-
- // Return.
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- // a5 = argument count (tagged)
- __ bind(&runtime);
- __ Push(a1, a3, a5);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Return address is in ra.
Label slow;
@@ -1878,122 +1594,6 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // a1 : function
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
-
- DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label try_allocate, runtime;
- __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
- __ Branch(&try_allocate, ne, a0,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer.
- __ ld(a2, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiScale(at, a2, kPointerSizeLog2);
- __ Daddu(a4, a4, Operand(at));
- __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Try the new space allocation. Start out with computing the size
- // of the arguments object and the elements array in words.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ SmiUntag(t1, a2);
- __ Branch(&add_arguments_object, eq, a2, Operand(zero_reg));
-
- __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ bind(&add_arguments_object);
- __ Daddu(t1, t1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
-
- // Do the allocation of both objects in one go.
- __ Allocate(t1, v0, a4, a5, &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current native context.
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, a4);
-
- __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(a5, FieldMemOperand(v0, JSObject::kElementsOffset));
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ AssertSmi(a2);
- __ sd(a2,
- FieldMemOperand(v0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
-
- Label done;
- __ Branch(&done, eq, a2, Operand(zero_reg));
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ Daddu(a4, v0, Operand(Heap::kStrictArgumentsObjectSize));
- __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex);
- __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
- __ sd(a2, FieldMemOperand(a4, FixedArray::kLengthOffset));
- __ SmiUntag(a2);
-
- // Copy the fixed array slots.
- Label loop;
- // Set up a4 to point to the first array slot.
- __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ bind(&loop);
- // Pre-decrement a3 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ Daddu(a3, a3, Operand(-kPointerSize));
- __ ld(a5, MemOperand(a3));
- // Post-increment a4 with kPointerSize on each iteration.
- __ sd(a5, MemOperand(a4));
- __ Daddu(a4, a4, Operand(kPointerSize));
- __ Dsubu(a2, a2, Operand(1));
- __ Branch(&loop, ne, a2, Operand(zero_reg));
-
- // Return.
- __ bind(&done);
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ Push(a1, a3, a2);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // a2 : number of parameters (tagged)
- // a3 : parameters pointer
- // a4 : rest parameter index (tagged)
- // Check if the calling frame is an arguments adaptor frame.
-
- Label runtime;
- __ ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a5, MemOperand(a0, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime, ne, a5,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer.
- __ ld(a2, MemOperand(a0, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiScale(at, a2, kPointerSizeLog2);
-
- __ Daddu(a3, a0, Operand(at));
- __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ Push(a2, a3, a4);
- __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2182,7 +1782,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Isolates: note we add an additional parameter here (isolate pointer).
const int kRegExpExecuteArguments = 9;
- const int kParameterRegisters = (kMipsAbi == kN64) ? 8 : 4;
+ const int kParameterRegisters = 8;
__ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
// Stack pointer now points to cell where return address is to be written.
@@ -2203,58 +1803,28 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// [sp + 1] - Argument 5
// [sp + 0] - saved ra
- if (kMipsAbi == kN64) {
- // Argument 9: Pass current isolate address.
- __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
- __ sd(a0, MemOperand(sp, 1 * kPointerSize));
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ li(a7, Operand(1));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- __ li(a0, Operand(address_of_regexp_stack_memory_address));
- __ ld(a0, MemOperand(a0, 0));
- __ li(a2, Operand(address_of_regexp_stack_memory_size));
- __ ld(a2, MemOperand(a2, 0));
- __ daddu(a6, a0, a2);
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(a5, zero_reg);
-
- // Argument 5: static offsets vector buffer.
- __ li(a4, Operand(
- ExternalReference::address_of_static_offsets_vector(isolate())));
- } else { // O32.
- DCHECK(kMipsAbi == kO32);
-
- // Argument 9: Pass current isolate address.
- // CFunctionArgumentOperand handles MIPS stack argument slots.
- __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
- __ sd(a0, MemOperand(sp, 5 * kPointerSize));
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ li(a0, Operand(1));
- __ sd(a0, MemOperand(sp, 4 * kPointerSize));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- __ li(a0, Operand(address_of_regexp_stack_memory_address));
- __ ld(a0, MemOperand(a0, 0));
- __ li(a2, Operand(address_of_regexp_stack_memory_size));
- __ ld(a2, MemOperand(a2, 0));
- __ daddu(a0, a0, a2);
- __ sd(a0, MemOperand(sp, 3 * kPointerSize));
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(a0, zero_reg);
- __ sd(a0, MemOperand(sp, 2 * kPointerSize));
+ // Argument 9: Pass current isolate address.
+ __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+ __ sd(a0, MemOperand(sp, 1 * kPointerSize));
- // Argument 5: static offsets vector buffer.
- __ li(a0, Operand(
- ExternalReference::address_of_static_offsets_vector(isolate())));
- __ sd(a0, MemOperand(sp, 1 * kPointerSize));
- }
+ // Argument 8: Indicate that this is a direct call from JavaScript.
+ __ li(a7, Operand(1));
+
+ // Argument 7: Start (high end) of backtracking stack memory area.
+ __ li(a0, Operand(address_of_regexp_stack_memory_address));
+ __ ld(a0, MemOperand(a0, 0));
+ __ li(a2, Operand(address_of_regexp_stack_memory_size));
+ __ ld(a2, MemOperand(a2, 0));
+ __ daddu(a6, a0, a2);
+
+ // Argument 6: Set the number of capture registers to zero to force global
+ // regexps to behave as non-global. This does not affect non-global regexps.
+ __ mov(a5, zero_reg);
+
+ // Argument 5: static offsets vector buffer.
+ __ li(
+ a4,
+ Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
// For arguments 4 and 3 get string length, calculate start of string data
// and calculate the shift of the index (0 for one_byte and 1 for two byte).
@@ -2719,7 +2289,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
__ bind(&call_function);
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+ tail_call_mode()),
RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
USE_DELAY_SLOT);
__ li(a0, Operand(argc)); // In delay slot.
@@ -2759,7 +2330,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
__ bind(&call);
- __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
USE_DELAY_SLOT);
__ li(a0, Operand(argc)); // In delay slot.
@@ -3163,8 +2734,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Locate first character of substring to copy.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ dsll(a4, a3, 1);
- __ Daddu(a5, a5, a4);
+ __ Dlsa(a5, a5, a3, 1);
// Locate first character of result.
__ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -3291,6 +2861,39 @@ void ToStringStub::Generate(MacroAssembler* masm) {
}
+void ToNameStub::Generate(MacroAssembler* masm) {
+ // The ToName stub takes one argument in a0.
+ Label is_number;
+ __ JumpIfSmi(a0, &is_number);
+
+ Label not_name;
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ GetObjectType(a0, a1, a1);
+ // a0: receiver
+ // a1: receiver instance type
+ __ Branch(&not_name, gt, a1, Operand(LAST_NAME_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_name);
+
+ Label not_heap_number;
+ __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ ld(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
+ __ bind(&not_oddball);
+
+ __ push(a0); // Push argument.
+ __ TailCallRuntime(Runtime::kToName);
+}
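The new ToNameStub dispatches on the argument's type and only falls back to the runtime for exotic inputs. A compact model of the dispatch order, with hypothetical names (the stub itself branches over instance types, as above):

    enum class Kind { kSmi, kName, kHeapNumber, kOddball, kOther };

    // Hypothetical summary of the stub's dispatch above.
    const char* ToNameAction(Kind k) {
      switch (k) {
        case Kind::kSmi:
        case Kind::kHeapNumber: return "tail-call NumberToStringStub";
        case Kind::kName:       return "return the argument unchanged";
        case Kind::kOddball:    return "return the cached to_string value";
        default:                return "tail-call Runtime::kToName";
      }
    }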
+
+
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -3463,18 +3066,14 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
- if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
- } else {
- if (!Token::IsEqualityOp(op())) {
- __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
- __ AssertSmi(a1);
- __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
- __ AssertSmi(a0);
- }
- __ Ret(USE_DELAY_SLOT);
- __ Dsubu(v0, a1, a0);
+ if (!Token::IsEqualityOp(op())) {
+ __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
+ __ AssertSmi(a1);
+ __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+ __ AssertSmi(a0);
}
+ __ Ret(USE_DELAY_SLOT);
+ __ Dsubu(v0, a1, a0);
__ bind(&miss);
GenerateMiss(masm);
@@ -3572,7 +3171,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -3802,8 +3401,6 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
if (Token::IsEqualityOp(op())) {
__ Ret(USE_DELAY_SLOT);
__ dsubu(v0, a0, a1);
- } else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ li(a2, Operand(Smi::FromInt(GREATER)));
@@ -3899,16 +3496,14 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ dsll(at, index, 1);
- __ Daddu(index, index, at); // index *= 3.
+ __ Dlsa(index, index, index, 1); // index *= 3.
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
STATIC_ASSERT(kSmiTagSize == 1);
Register tmp = properties;
- __ dsll(scratch0, index, kPointerSizeLog2);
- __ Daddu(tmp, properties, scratch0);
+ __ Dlsa(tmp, properties, index, kPointerSizeLog2);
__ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
DCHECK(!tmp.is(entity_name));
@@ -3997,13 +3592,10 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
-
- __ dsll(at, scratch2, 1);
- __ Daddu(scratch2, scratch2, at);
+ __ Dlsa(scratch2, scratch2, scratch2, 1);
// Check if the key is identical to the name.
- __ dsll(at, scratch2, kPointerSizeLog2);
- __ Daddu(scratch2, elements, at);
+ __ Dlsa(scratch2, elements, scratch2, kPointerSizeLog2);
__ ld(at, FieldMemOperand(scratch2, kElementsStartOffset));
__ Branch(done, eq, name, Operand(at));
}
@@ -4084,14 +3676,10 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// index *= 3.
- __ mov(at, index);
- __ dsll(index, index, 1);
- __ Daddu(index, index, at);
-
+ __ Dlsa(index, index, index, 1);
STATIC_ASSERT(kSmiTagSize == 1);
- __ dsll(index, index, kPointerSizeLog2);
- __ Daddu(index, index, dictionary);
+ __ Dlsa(index, dictionary, index, kPointerSizeLog2);
__ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
// Having undefined at this place means the name is not contained.
@@ -4191,11 +3779,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.scratch0(),
&dont_need_remembered_set);
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- ne,
- &dont_need_remembered_set);
+ __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+ &dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
@@ -5076,8 +4661,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
switch (argument_count()) {
case ANY:
case MORE_THAN_ONE:
- __ dsll(at, a0, kPointerSizeLog2);
- __ Daddu(at, sp, at);
+ __ Dlsa(at, sp, a0, kPointerSizeLog2);
__ sd(a1, MemOperand(at));
__ li(at, Operand(3));
__ Daddu(a0, a0, at);
@@ -5170,6 +4754,609 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : target
+ // -- a3 : new target
+ // -- cp : context
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+ __ AssertReceiver(a3);
+
+ // Verify that the new target is a JSFunction.
+ Label new_object;
+ __ GetObjectType(a3, a2, a2);
+ __ Branch(&new_object, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+ // Load the initial map and verify that it's in fact a map.
+ __ ld(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(a2, &new_object);
+ __ GetObjectType(a2, a0, a0);
+ __ Branch(&new_object, ne, a0, Operand(MAP_TYPE));
+
+ // Fall back to runtime if the target differs from the new target's
+ // initial map constructor.
+ __ ld(a0, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
+ __ Branch(&new_object, ne, a0, Operand(a1));
+
+ // Allocate the JSObject on the heap.
+ Label allocate, done_allocate;
+ __ lbu(a4, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ Allocate(a4, v0, a5, a0, &allocate, SIZE_IN_WORDS);
+ __ bind(&done_allocate);
+
+ // Initialize the JSObject fields.
+ __ sd(a2, MemOperand(v0, JSObject::kMapOffset));
+ __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(a3, MemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(a3, MemOperand(v0, JSObject::kElementsOffset));
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ Daddu(a1, v0, Operand(JSObject::kHeaderSize));
+
+ // ----------- S t a t e -------------
+ // -- v0 : result (untagged)
+ // -- a1 : result fields (untagged)
+ // -- a5 : result end (untagged)
+ // -- a2 : initial map
+ // -- cp : context
+ // -- ra : return address
+ // -----------------------------------
+
+ // Perform in-object slack tracking if requested.
+ Label slack_tracking;
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ __ lwu(a3, FieldMemOperand(a2, Map::kBitField3Offset));
+ __ And(at, a3, Operand(Map::ConstructionCounter::kMask));
+ __ Branch(USE_DELAY_SLOT, &slack_tracking, ne, at, Operand(zero_reg));
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); // In delay slot.
+ {
+ // Initialize all in-object fields with undefined.
+ __ InitializeFieldsWithFiller(a1, a5, a0);
+
+ // Add the object tag to make the JSObject real.
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Ret(USE_DELAY_SLOT);
+ __ Daddu(v0, v0, Operand(kHeapObjectTag)); // In delay slot.
+ }
+ __ bind(&slack_tracking);
+ {
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ __ Subu(a3, a3, Operand(1 << Map::ConstructionCounter::kShift));
+ __ sw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
+
+ // Initialize the in-object fields with undefined.
+ __ lbu(a4, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+ __ dsll(a4, a4, kPointerSizeLog2);
+ __ Dsubu(a4, a5, a4);
+ __ InitializeFieldsWithFiller(a1, a4, a0);
+
+ // Initialize the remaining (reserved) fields with one pointer filler map.
+ __ LoadRoot(a0, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(a1, a5, a0);
+
+ // Check if we can finalize the instance size.
+ Label finalize;
+ STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+ __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
+ __ Branch(USE_DELAY_SLOT, &finalize, eq, a3, Operand(zero_reg));
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Daddu(v0, v0, Operand(kHeapObjectTag)); // In delay slot.
+ __ Ret();
+
+ // Finalize the instance size.
+ __ bind(&finalize);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(v0, a2);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(v0);
+ }
+ __ Ret();
+ }
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ STATIC_ASSERT(kSmiTag == 0);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ __ dsll(a4, a4, kPointerSizeLog2 + kSmiShiftSize + kSmiTagSize);
+ __ SmiTag(a4);
+ __ Push(a2, a4);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(a2);
+ }
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ Dsubu(v0, v0, Operand(kHeapObjectTag));
+ __ lbu(a5, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+ __ Dlsa(a5, v0, a5, kPointerSizeLog2);
+ __ jmp(&done_allocate);
+
+ // Fall back to %NewObject.
+ __ bind(&new_object);
+ __ Push(a1, a3);
+ __ TailCallRuntime(Runtime::kNewObject);
+}
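The subtle branch in FastNewObjectStub is slack tracking: while a map's construction counter is live, in-object slots the constructor has not yet needed are filled with the one-pointer filler map rather than undefined, so the instance size can be shrunk when tracking finalizes. A rough model of the two-phase fill, with hypothetical signatures (V8 works on raw tagged words, not arrays):

    #include <cstddef>

    void InitializeFields(void** fields, size_t total, size_t unused,
                          void* undefined, void* one_pointer_filler) {
      size_t used = total - unused;
      for (size_t i = 0; i < used; ++i) fields[i] = undefined;
      // Reserved slack gets filler-map words so the heap stays iterable
      // and the object can be truncated once tracking finishes.
      for (size_t i = used; i < total; ++i) fields[i] = one_pointer_filler;
    }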
+
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make a2 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ Branch(USE_DELAY_SLOT, &loop_entry);
+ __ mov(a2, fp); // In delay slot.
+ __ bind(&loop);
+ __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ Branch(&loop, ne, a1, Operand(a3));
+ }
+
+ // Check if we have rest parameters (only possible if we have an
+ // arguments adaptor frame below the function frame).
+ Label no_rest_parameters;
+ __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&no_rest_parameters, ne, a3,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // Check if the arguments adaptor frame contains more arguments than
+ // specified by the function's internal formal parameter count.
+ Label rest_parameters;
+ __ SmiLoadUntag(
+ a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a1,
+ FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Dsubu(a0, a0, Operand(a1));
+ __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
+
+ // Return an empty rest parameter array.
+ __ bind(&no_rest_parameters);
+ {
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- ra : return address
+ // -----------------------------------
+
+ // Allocate an empty rest parameter array.
+ Label allocate, done_allocate;
+ __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Set up the rest parameter array in v0.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, a1);
+ __ sd(a1, FieldMemOperand(v0, JSArray::kMapOffset));
+ __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
+ __ sd(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
+ __ Move(a1, Smi::FromInt(0));
+ __ Ret(USE_DELAY_SLOT);
+ __ sd(a1, FieldMemOperand(v0, JSArray::kLengthOffset)); // In delay slot
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(Smi::FromInt(JSArray::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ }
+ __ jmp(&done_allocate);
+ }
+
+ __ bind(&rest_parameters);
+ {
+ // Compute the pointer to the first rest parameter (skipping the receiver).
+ __ Dlsa(a2, a2, a0, kPointerSizeLog2);
+ __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- a0 : number of rest parameters
+ // -- a2 : pointer to the first rest parameter
+ // -- ra : return address
+ // -----------------------------------
+
+ // Allocate space for the rest parameter array plus the backing store.
+ Label allocate, done_allocate;
+ __ li(a1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+ __ Dlsa(a1, a1, a0, kPointerSizeLog2);
+ __ Allocate(a1, v0, a3, a4, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Compute arguments.length in a4.
+ __ SmiTag(a4, a0);
+
+ // Set up the elements array in v0.
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ sd(at, FieldMemOperand(v0, FixedArray::kMapOffset));
+ __ sd(a4, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ Daddu(a3, v0, Operand(FixedArray::kHeaderSize));
+ {
+ Label loop, done_loop;
+ __ Dlsa(a1, a3, a0, kPointerSizeLog2);
+ __ bind(&loop);
+ __ Branch(&done_loop, eq, a1, Operand(a3));
+ __ ld(at, MemOperand(a2, 0 * kPointerSize));
+ __ sd(at, FieldMemOperand(a3, 0 * kPointerSize));
+ __ Dsubu(a2, a2, Operand(1 * kPointerSize));
+ __ Daddu(a3, a3, Operand(1 * kPointerSize));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Set up the rest parameter array in a3.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, at);
+ __ sd(at, FieldMemOperand(a3, JSArray::kMapOffset));
+ __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(at, FieldMemOperand(a3, JSArray::kPropertiesOffset));
+ __ sd(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
+ __ sd(a4, FieldMemOperand(a3, JSArray::kLengthOffset));
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a3); // In delay slot
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ SmiTag(a1);
+ __ Push(a0, a2, a1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(a0, a2);
+ __ SmiUntag(a0);
+ }
+ __ jmp(&done_allocate);
+ }
+}
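Note the copy direction in the loop above (the same pattern reappears in FastNewStrictArgumentsStub below): the source pointer a2 walks down the stack while the destination a3 walks up the elements array, since the stack holds the parameters in the reverse of the FixedArray order. A tiny model under that layout assumption:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical model of the copy loop: element i comes from the slot
    // i words below the first rest parameter's slot.
    void CopyRestParams(const uint64_t* first_param_slot,
                        uint64_t* elements, size_t count) {
      for (size_t i = 0; i < count; ++i)
        elements[i] = *(first_param_slot - i);
    }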
+
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2,
+ FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Lsa(a3, fp, a2, kPointerSizeLog2);
+ __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ SmiTag(a2);
+
+ // a1 : function
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
+ // Registers used over whole function:
+ // a5 : arguments count (tagged)
+ // a6 : mapped parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, a0,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+ // No adaptor, parameter count = argument count.
+ __ mov(a5, a2);
+ __ Branch(USE_DELAY_SLOT, &try_allocate);
+ __ mov(a6, a2); // In delay slot.
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ ld(a5, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiScale(t2, a5, kPointerSizeLog2);
+ __ Daddu(a4, a4, Operand(t2));
+ __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // a5 = argument count (tagged)
+ // a6 = parameter count (tagged)
+ // Compute the mapped parameter count = min(a6, a5) in a6.
+ __ mov(a6, a2);
+ __ Branch(&try_allocate, le, a6, Operand(a5));
+ __ mov(a6, a5);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ Label param_map_size;
+ DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
+ __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
+ __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a6 == 0.
+ __ SmiScale(t1, a6, kPointerSizeLog2);
+ __ daddiu(t1, t1, kParameterMapHeaderSize);
+ __ bind(&param_map_size);
+
+ // 2. Backing store.
+ __ SmiScale(t2, a5, kPointerSizeLog2);
+ __ Daddu(t1, t1, Operand(t2));
+ __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ Daddu(t1, t1, Operand(JSSloppyArgumentsObject::kSize));
+
+ // Do the allocation of all three objects in one go.
+ __ Allocate(t1, v0, t1, a4, &runtime, TAG_OBJECT);
+
+ // v0 = address of new object(s) (tagged)
+ // a2 = argument count (smi-tagged)
+ // Get the arguments boilerplate from the current native context into a4.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+
+ __ ld(a4, NativeContextMemOperand());
+ Label skip2_ne, skip2_eq;
+ __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
+ __ ld(a4, MemOperand(a4, kNormalOffset));
+ __ bind(&skip2_ne);
+
+ __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
+ __ ld(a4, MemOperand(a4, kAliasedOffset));
+ __ bind(&skip2_eq);
+
+ // v0 = address of new object (tagged)
+ // a2 = argument count (smi-tagged)
+ // a4 = address of arguments map (tagged)
+ // a6 = mapped parameter count (tagged)
+ __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
+ __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // Set up the callee in-object property.
+ __ AssertNotSmi(a1);
+ __ sd(a1, FieldMemOperand(v0, JSSloppyArgumentsObject::kCalleeOffset));
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(a5);
+ __ sd(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, a4 will point there, otherwise
+ // it will point to the backing store.
+ __ Daddu(a4, v0, Operand(JSSloppyArgumentsObject::kSize));
+ __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+ // v0 = address of new object (tagged)
+ // a2 = argument count (tagged)
+ // a4 = address of parameter map or backing store (tagged)
+ // a6 = mapped parameter count (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ Label skip3;
+ __ Branch(&skip3, ne, a6, Operand(Smi::FromInt(0)));
+ // Move backing store address to a1, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mov(a1, a4);
+ __ bind(&skip3);
+
+ __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::FromInt(0)));
+
+ __ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
+ __ Daddu(a5, a6, Operand(Smi::FromInt(2)));
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
+ __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ SmiScale(t2, a6, kPointerSizeLog2);
+ __ Daddu(a5, a4, Operand(t2));
+ __ Daddu(a5, a5, Operand(kParameterMapHeaderSize));
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ mov(a5, a6);
+ __ Daddu(t1, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ Dsubu(t1, t1, Operand(a6));
+ __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
+ __ SmiScale(t2, a5, kPointerSizeLog2);
+ __ Daddu(a1, a4, Operand(t2));
+ __ Daddu(a1, a1, Operand(kParameterMapHeaderSize));
+
+ // a1 = address of backing store (tagged)
+ // a4 = address of parameter map (tagged)
+ // a0 = temporary scratch (a.o., for address calculation)
+ // t1 = loop variable (tagged)
+ // a7 = the hole value
+ __ jmp(&parameters_test);
+
+ __ bind(&parameters_loop);
+ __ Dsubu(a5, a5, Operand(Smi::FromInt(1)));
+ __ SmiScale(a0, a5, kPointerSizeLog2);
+ __ Daddu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ Daddu(t2, a4, a0);
+ __ sd(t1, MemOperand(t2));
+ __ Dsubu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ Daddu(t2, a1, a0);
+ __ sd(a7, MemOperand(t2));
+ __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ Branch(&parameters_loop, ne, a5, Operand(Smi::FromInt(0)));
+
+ // Restore t1 = argument count (tagged).
+ __ ld(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
+
+ __ bind(&skip_parameter_map);
+ // v0 = address of new object (tagged)
+ // a1 = address of backing store (tagged)
+ // a5 = argument count (tagged)
+ // a6 = mapped parameter count (tagged)
+ // t1 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+ __ sd(t1, FieldMemOperand(a1, FixedArray::kMapOffset));
+ __ sd(a5, FieldMemOperand(a1, FixedArray::kLengthOffset));
+
+ Label arguments_loop, arguments_test;
+ __ SmiScale(t2, a6, kPointerSizeLog2);
+ __ Dsubu(a3, a3, Operand(t2));
+ __ jmp(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ Dsubu(a3, a3, Operand(kPointerSize));
+ __ ld(a4, MemOperand(a3, 0));
+ __ SmiScale(t2, a6, kPointerSizeLog2);
+ __ Daddu(t1, a1, Operand(t2));
+ __ sd(a4, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ Daddu(a6, a6, Operand(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ Branch(&arguments_loop, lt, a6, Operand(a5));
+
+ // Return.
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ // a5 = argument count (tagged)
+ __ bind(&runtime);
+ __ Push(a1, a3, a5);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
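The parameter map built above is what makes sloppy arguments alias their parameters: element reads at a mapped index are redirected through a context slot, so writes to the parameter variable and to arguments[i] observe each other. A schematic of the layout, with illustrative names rather than V8's types:

    // Schematic of the elements object assembled above (not V8's types).
    struct SloppyArgumentsElementsModel {
      void* context;        // header slot 0: the function's context
      void* backing_store;  // header slot 1: FixedArray for unmapped args
      // slots 2 .. 2 + mapped_count: a context slot index per mapped
      // parameter, or the_hole where no mapping exists
    };
    // Reading arguments[i]: if map slot i is not the_hole, load
    // context->slot(map[i]); otherwise load backing_store[i].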
+
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make a2 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ Branch(USE_DELAY_SLOT, &loop_entry);
+ __ mov(a2, fp); // In delay slot.
+ __ bind(&loop);
+ __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ __ Branch(&loop, ne, a1, Operand(a3));
+ }
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a0, MemOperand(a3, StandardFrameConstants::kContextOffset));
+ __ Branch(&arguments_adaptor, eq, a0,
+ Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ {
+ __ ld(a1, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a0,
+ FieldMemOperand(a1, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Dlsa(a2, a2, a0, kPointerSizeLog2);
+ __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+ }
+ __ Branch(&arguments_done);
+ __ bind(&arguments_adaptor);
+ {
+ __ SmiLoadUntag(
+ a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Dlsa(a2, a3, a0, kPointerSizeLog2);
+ __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+ }
+ __ bind(&arguments_done);
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- a0 : number of rest parameters
+ // -- a2 : pointer to the first rest parameter
+ // -- ra : return address
+ // -----------------------------------
+
+ // Allocate space for the rest parameter array plus the backing store.
+ Label allocate, done_allocate;
+ __ li(a1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ Dlsa(a1, a1, a0, kPointerSizeLog2);
+ __ Allocate(a1, v0, a3, a4, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Compute arguments.length in a4.
+ __ SmiTag(a4, a0);
+
+ // Set up the elements array in v0.
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ sd(at, FieldMemOperand(v0, FixedArray::kMapOffset));
+ __ sd(a4, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ Daddu(a3, v0, Operand(FixedArray::kHeaderSize));
+ {
+ Label loop, done_loop;
+ __ Dlsa(a1, a3, a0, kPointerSizeLog2);
+ __ bind(&loop);
+ __ Branch(&done_loop, eq, a1, Operand(a3));
+ __ ld(at, MemOperand(a2, 0 * kPointerSize));
+ __ sd(at, FieldMemOperand(a3, 0 * kPointerSize));
+ __ Dsubu(a2, a2, Operand(1 * kPointerSize));
+ __ Daddu(a3, a3, Operand(1 * kPointerSize));
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Set up the strict arguments object in a3.
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, at);
+ __ sd(at, FieldMemOperand(a3, JSStrictArgumentsObject::kMapOffset));
+ __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(at, FieldMemOperand(a3, JSStrictArgumentsObject::kPropertiesOffset));
+ __ sd(v0, FieldMemOperand(a3, JSStrictArgumentsObject::kElementsOffset));
+ __ sd(a4, FieldMemOperand(a3, JSStrictArgumentsObject::kLengthOffset));
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a3); // In delay slot
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ SmiTag(a1);
+ __ Push(a0, a2, a1);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(a0, a2);
+ __ SmiUntag(a0);
+ }
+ __ jmp(&done_allocate);
+}
+
+
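The stub above sizes a single allocation to hold both the JSStrictArgumentsObject and its FixedArray backing store (the STATIC_ASSERT pins the object at 4 pointers). A standalone sketch of that size arithmetic, assuming the mips64 values kPointerSize == 8 and kPointerSizeLog2 == 3:

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;
      const int kStrictArgumentsObjectSize = 4 * kPointerSize;  // map, properties, elements, length
      const int kFixedArrayHeaderSize = 2 * kPointerSize;       // map, length
      int argc = 3;  // example rest-parameter count (untagged, register a0)
      // li(a1, kSize + kHeaderSize) followed by Dlsa(a1, a1, a0, 3):
      int bytes = kStrictArgumentsObjectSize + kFixedArrayHeaderSize + (argc << 3);
      std::printf("allocation size: %d bytes\n", bytes);  // 72
    }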
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context_reg = cp;
Register slot_reg = a2;
@@ -5183,8 +5370,7 @@ void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
}
// Load the PropertyCell value at the specified slot.
- __ dsll(at, slot_reg, kPointerSizeLog2);
- __ Daddu(at, at, Operand(context_reg));
+ __ Dlsa(at, context_reg, slot_reg, kPointerSizeLog2);
__ ld(result_reg, ContextMemOperand(at, 0));
__ ld(result_reg, FieldMemOperand(result_reg, PropertyCell::kValueOffset));
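Several hunks below fold a dsll/Daddu (or sll/addu) pair into the single Dlsa/Lsa macro. A minimal before/after sketch, using the operands from this hunk, shows the equivalence rd = base + (index << shift):

    // Before: shift into a scratch register, then add the base.
    __ dsll(at, slot_reg, kPointerSizeLog2);
    __ Daddu(at, at, Operand(context_reg));
    // After: one macro computing at = context_reg + (slot_reg << sa).
    __ Dlsa(at, context_reg, slot_reg, kPointerSizeLog2);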
@@ -5222,8 +5408,7 @@ void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
}
// Load the PropertyCell at the specified slot.
- __ dsll(at, slot_reg, kPointerSizeLog2);
- __ Daddu(at, at, Operand(context_reg));
+ __ Dlsa(at, context_reg, slot_reg, kPointerSizeLog2);
__ ld(cell_reg, ContextMemOperand(at, 0));
// Load PropertyDetails for the cell (actually only the cell_type and kind).
@@ -5451,11 +5636,10 @@ static void CallApiFunctionAndReturn(
__ jmp(&leave_exit_frame);
}
-
static void CallApiFunctionStubHelper(MacroAssembler* masm,
const ParameterCount& argc,
bool return_first_arg,
- bool call_data_undefined) {
+ bool call_data_undefined, bool is_lazy) {
// ----------- S t a t e -------------
// -- a0 : callee
// -- a4 : call_data
@@ -5491,8 +5675,10 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// Save context, callee and call data.
__ Push(context, callee, call_data);
- // Load context from callee.
- __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ if (!is_lazy) {
+ // Load context from callee.
+ __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ }
Register scratch = call_data;
if (!call_data_undefined) {
@@ -5577,7 +5763,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
bool call_data_undefined = this->call_data_undefined();
CallApiFunctionStubHelper(masm, ParameterCount(a3), false,
- call_data_undefined);
+ call_data_undefined, false);
}
@@ -5585,41 +5771,49 @@ void CallApiAccessorStub::Generate(MacroAssembler* masm) {
bool is_store = this->is_store();
int argc = this->argc();
bool call_data_undefined = this->call_data_undefined();
+ bool is_lazy = this->is_lazy();
CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined);
+ call_data_undefined, is_lazy);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- sp[0] : name
+ // -- sp[8 .. (8 + kArgsLength*8)] : v8::PropertyCallbackInfo::args_
// -- ...
- // -- a2 : api_function_address
+ // -- a2 : api_function_address
// -----------------------------------
Register api_function_address = ApiGetterDescriptor::function_address();
DCHECK(api_function_address.is(a2));
- __ mov(a0, sp); // a0 = Handle<Name>
- __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = PCA
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
+ __ mov(a0, sp); // a0 = Handle<Name>
+ __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
const int kApiStackSpace = 1;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
- // Create PropertyAccessorInfo instance on the stack above the exit frame with
- // a1 (internal::Object** args_) as the data.
+  // Create a v8::PropertyCallbackInfo object on the stack and initialize
+  // its args_ field.
__ sd(a1, MemOperand(sp, 1 * kPointerSize));
- __ Daddu(a1, sp, Operand(1 * kPointerSize)); // a1 = AccessorInfo&
-
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ __ Daddu(a1, sp, Operand(1 * kPointerSize));
+ // a1 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, kInvalidStackOffset,
- MemOperand(fp, 6 * kPointerSize), NULL);
+ return_value_operand, NULL);
}
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 022426e7d7..c8cde97883 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -1078,8 +1078,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ And(at, result, Operand(kStringEncodingMask));
__ Branch(&one_byte, ne, at, Operand(zero_reg));
// Two-byte string.
- __ dsll(at, index, 1);
- __ Daddu(at, string, at);
+ __ Dlsa(at, string, index, 1);
__ lhu(result, MemOperand(at));
__ jmp(&done);
__ bind(&one_byte);
@@ -1151,8 +1150,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
// Must not call ExpConstant() after overwriting temp3!
__ li(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ dsll(at, temp2, 3);
- __ Daddu(temp3, temp3, Operand(at));
+ __ Dlsa(temp3, temp3, temp2, 3);
__ lwu(temp2, MemOperand(temp3, Register::kMantissaOffset));
__ lwu(temp3, MemOperand(temp3, Register::kExponentOffset));
  // The first word is loaded into the lower-numbered register.
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index 226e3ed5ba..57e947b138 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -45,27 +45,6 @@ enum ArchVariants {
#error Unknown endianness
#endif
-// TODO(plind): consider deriving ABI from compiler flags or build system.
-
-// ABI-dependent definitions are made with #define in simulator-mips64.h,
-// so the ABI choice must be available to the pre-processor. However, in all
-// other cases, we should use the enum AbiVariants with normal if statements.
-
-#define MIPS_ABI_N64 1
-// #define MIPS_ABI_O32 1
-
-// The only supported Abi's are O32, and n64.
-enum AbiVariants {
- kO32,
- kN64 // Use upper case N for 'n64' ABI to conform to style standard.
-};
-
-#ifdef MIPS_ABI_N64
-static const AbiVariants kMipsAbi = kN64;
-#else
-static const AbiVariants kMipsAbi = kO32;
-#endif
-
// TODO(plind): consider renaming these ...
#if(defined(__mips_hard_float) && __mips_hard_float != 0)
@@ -840,6 +819,7 @@ enum CheckForInexactConversion {
kDontCheckForInexactConversion
};
+enum class MaxMinKind : int { kMin = 0, kMax = 1 };
// -----------------------------------------------------------------------------
// Hints.
@@ -1184,7 +1164,7 @@ class Instruction {
 // Various MIPS assembly constants.
 // C/C++ argument slot count.
-const int kCArgSlotCount = (kMipsAbi == kN64) ? 0 : 4;
+const int kCArgSlotCount = 0;
// TODO(plind): below should be based on kPointerSize
// TODO(plind): find all usages and remove the needless instructions for n64.
@@ -1226,6 +1206,7 @@ Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
case SPECIAL3:
switch (FunctionFieldRaw()) {
case INS:
+ case DINS:
case EXT:
case DEXT:
case DEXTM:
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index 8daba04ac7..ec610f0281 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -80,27 +80,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers fp and sp are set to the correct values though.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
- }
-}
-
-
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
@@ -119,8 +98,7 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
// There is no dynamic alignment padding on MIPS in the input frame.
return false;
}
@@ -188,15 +166,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// a2: bailout id already loaded.
// a3: code address or 0 already loaded.
- if (kMipsAbi == kN64) {
- // a4: already has fp-to-sp delta.
- __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
- } else { // O32 abi.
- // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
- __ sd(a4, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
- __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
- __ sd(a5, CFunctionArgumentOperand(6)); // Isolate.
- }
+ // a4: already has fp-to-sp delta.
+ __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
+
// Call Deoptimizer::New().
{
AllowExternalCallThatCantCauseGC scope(masm());
@@ -273,8 +245,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// a1 = one past the last FrameDescription**.
__ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
__ ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
- __ dsll(a1, a1, kPointerSizeLog2); // Count to offset.
- __ daddu(a1, a4, a1); // a1 = one past the last FrameDescription**.
+ __ Dlsa(a1, a4, a1, kPointerSizeLog2);
__ BranchShort(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index c5c1311d94..73df66ea8e 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -54,20 +54,6 @@ const Register StringCompareDescriptor::LeftRegister() { return a1; }
const Register StringCompareDescriptor::RightRegister() { return a0; }
-const Register ArgumentsAccessReadDescriptor::index() { return a1; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return a0; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return a1; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return a2; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return a3; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return a2; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return a3; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return a4; }
-
-
const Register ApiGetterDescriptor::function_address() { return a2; }
@@ -96,6 +82,32 @@ void FastNewContextDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1, a3};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {a1};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -113,6 +125,10 @@ const Register ToStringDescriptor::ReceiverRegister() { return a0; }
// static
+const Register ToNameDescriptor::ReceiverRegister() { return a0; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return a0; }
@@ -165,13 +181,6 @@ void CreateWeakCellDescriptor::InitializePlatformSpecific(
}
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a1, a3};
@@ -407,6 +416,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+ kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+ kInterpreterDispatchTableRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -418,7 +435,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -430,7 +446,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 7b73ac74e4..b49fa76e06 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -161,9 +161,9 @@ void MacroAssembler::InNewSpace(Register object,
Condition cc,
Label* branch) {
DCHECK(cc == eq || cc == ne);
- And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
- Branch(branch, cc, scratch,
- Operand(ExternalReference::new_space_start(isolate())));
+ const int mask =
+ 1 << MemoryChunk::IN_FROM_SPACE | 1 << MemoryChunk::IN_TO_SPACE;
+ CheckPageFlag(object, scratch, mask, cc, branch);
}
@@ -371,6 +371,67 @@ void MacroAssembler::RecordWrite(
}
}
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+ Register code_entry,
+ Register scratch) {
+ const int offset = JSFunction::kCodeEntryOffset;
+
+  // Since a code entry (value) is always in old space, we don't need to
+  // update the remembered set. If incremental marking is off, there is
+  // nothing for us to do.
+ if (!FLAG_incremental_marking) return;
+
+ DCHECK(js_function.is(a1));
+ DCHECK(code_entry.is(a4));
+ DCHECK(scratch.is(a5));
+ AssertNotSmi(js_function);
+
+ if (emit_debug_code()) {
+ Daddu(scratch, js_function, Operand(offset - kHeapObjectTag));
+ ld(at, MemOperand(scratch));
+ Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
+ Operand(code_entry));
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ CheckPageFlag(code_entry, scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+ CheckPageFlag(js_function, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+ const Register dst = scratch;
+ Daddu(dst, js_function, Operand(offset - kHeapObjectTag));
+
+ // Save caller-saved registers. js_function and code_entry are in the
+ // caller-saved register list.
+ DCHECK(kJSCallerSaved & js_function.bit());
+ DCHECK(kJSCallerSaved & code_entry.bit());
+ MultiPush(kJSCallerSaved | ra.bit());
+
+ int argument_count = 3;
+
+ PrepareCallCFunction(argument_count, code_entry);
+
+ Move(a0, js_function);
+ Move(a1, dst);
+ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(this);
+ CallCFunction(
+ ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate()),
+ argument_count);
+ }
+
+ // Restore caller-saved registers.
+ MultiPop(kJSCallerSaved | ra.bit());
+
+ bind(&done);
+}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address,
@@ -503,16 +564,14 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
// hash = ~hash + (hash << 15);
// The algorithm uses 32-bit integer values.
nor(scratch, reg0, zero_reg);
- sll(at, reg0, 15);
- addu(reg0, scratch, at);
+ Lsa(reg0, scratch, reg0, 15);
// hash = hash ^ (hash >> 12);
srl(at, reg0, 12);
xor_(reg0, reg0, at);
// hash = hash + (hash << 2);
- sll(at, reg0, 2);
- addu(reg0, reg0, at);
+ Lsa(reg0, reg0, reg0, 2);
// hash = hash ^ (hash >> 4);
srl(at, reg0, 4);
@@ -520,8 +579,7 @@ void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
// hash = hash * 2057;
sll(scratch, reg0, 11);
- sll(at, reg0, 3);
- addu(reg0, reg0, at);
+ Lsa(reg0, reg0, reg0, 3);
addu(reg0, reg0, scratch);
// hash = hash ^ (hash >> 16);
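The emitted sequence is a standard 32-bit finalizer hash; each Lsa computes hash + (hash << shift) in one instruction, and the multiply by 2057 is decomposed into hash + (hash << 3) plus a separate (hash << 11) term. A standalone restatement of the comments above:

    #include <cstdint>
    #include <cstdio>

    uint32_t NumberHash(uint32_t hash) {
      hash = ~hash + (hash << 15);  // nor + Lsa(reg0, scratch, reg0, 15)
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);    // Lsa(reg0, reg0, reg0, 2)
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // sll(scratch, reg0, 11) + Lsa(..., 3) + addu
      hash = hash ^ (hash >> 16);
      return hash;
    }

    int main() { std::printf("0x%08x\n", NumberHash(42u)); }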
@@ -581,12 +639,10 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
// Scale the index by multiplying by the element size.
DCHECK(SeededNumberDictionary::kEntrySize == 3);
- dsll(at, reg2, 1); // 2x.
- daddu(reg2, reg2, at); // reg2 = reg2 * 3.
+ Dlsa(reg2, reg2, reg2, 1); // reg2 = reg2 * 3.
// Check if the key is identical to the name.
- dsll(at, reg2, kPointerSizeLog2);
- daddu(reg2, elements, at);
+ Dlsa(reg2, elements, reg2, kPointerSizeLog2);
ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
if (i != kNumberDictionaryProbes - 1) {
@@ -1302,6 +1358,35 @@ void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
}
}
+static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
+ if ((imm >> (bitnum - 1)) & 0x1) {
+ imm = (imm >> bitnum) + 1;
+ } else {
+ imm = imm >> bitnum;
+ }
+ return imm;
+}
+
+bool MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
+ bool higher_bits_sign_extended = false;
+ if (is_int16(j.imm64_)) {
+ daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
+ } else if (!(j.imm64_ & kHiMask)) {
+ ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
+ } else if (!(j.imm64_ & kImm16Mask)) {
+ lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
+ if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
+ higher_bits_sign_extended = true;
+ }
+ } else {
+ lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
+ ori(rd, rd, (j.imm64_ & kImm16Mask));
+ if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
+ higher_bits_sign_extended = true;
+ }
+ }
+ return higher_bits_sign_extended;
+}
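On r6 the 64-bit immediate is materialized as lui/ori for the low 32 bits plus dahi/dati for the upper 16-bit chunks; ShiftAndFixSignExtension pre-increments a chunk whenever the chunk below it will sign-extend ones upward. A hedged standalone sketch (assuming dahi adds its sign-extended immediate at bit 32):

    #include <cstdint>
    #include <cstdio>

    // Mirrors ShiftAndFixSignExtension: round the shifted immediate up by
    // one when bit (bitnum - 1) is set, cancelling the sign extension.
    static int64_t Chunk(int64_t imm, int bitnum) {
      return ((imm >> (bitnum - 1)) & 0x1) ? (imm >> bitnum) + 1 : imm >> bitnum;
    }

    int main() {
      int64_t imm = 0x123456789ABCDEF0;  // example immediate
      // The low word 0x9ABCDEF0 has bit 31 set, so the dahi chunk becomes
      // (imm >> 32) + 1 = 0x12345679.
      std::printf("dahi: 0x%04llx\n",
                  static_cast<unsigned long long>(Chunk(imm, 32) & 0xFFFF));
    }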
void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
DCHECK(!j.is_reg());
@@ -1309,50 +1394,57 @@ void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
// Normal load of an immediate value which does not need Relocation Info.
if (is_int32(j.imm64_)) {
- if (is_int16(j.imm64_)) {
- daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
- } else if (!(j.imm64_ & kHiMask)) {
- ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
- } else if (!(j.imm64_ & kImm16Mask)) {
- lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
- } else {
- lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
- ori(rd, rd, (j.imm64_ & kImm16Mask));
- }
+ LiLower32BitHelper(rd, j);
} else {
- if (is_int48(j.imm64_)) {
- if ((j.imm64_ >> 32) & kImm16Mask) {
- lui(rd, (j.imm64_ >> 32) & kImm16Mask);
- if ((j.imm64_ >> 16) & kImm16Mask) {
- ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
- }
- } else {
- ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
+ if (kArchVariant == kMips64r6) {
+ int64_t imm = j.imm64_;
+ bool higher_bits_sign_extended = LiLower32BitHelper(rd, j);
+ imm = ShiftAndFixSignExtension(imm, 32);
+ // If LUI writes 1s to higher bits, we need both DAHI/DATI.
+ if ((imm & kImm16Mask) ||
+ (higher_bits_sign_extended && (j.imm64_ > 0))) {
+ dahi(rd, imm & kImm16Mask);
}
- dsll(rd, rd, 16);
- if (j.imm64_ & kImm16Mask) {
- ori(rd, rd, j.imm64_ & kImm16Mask);
+ imm = ShiftAndFixSignExtension(imm, 16);
+ if ((!is_int48(j.imm64_) && (imm & kImm16Mask)) ||
+ (higher_bits_sign_extended && (j.imm64_ > 0))) {
+ dati(rd, imm & kImm16Mask);
}
} else {
- lui(rd, (j.imm64_ >> 48) & kImm16Mask);
- if ((j.imm64_ >> 32) & kImm16Mask) {
- ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
- }
- if ((j.imm64_ >> 16) & kImm16Mask) {
+ if (is_int48(j.imm64_)) {
+ if ((j.imm64_ >> 32) & kImm16Mask) {
+ lui(rd, (j.imm64_ >> 32) & kImm16Mask);
+ if ((j.imm64_ >> 16) & kImm16Mask) {
+ ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+ }
+ } else {
+ ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
+ }
dsll(rd, rd, 16);
- ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
if (j.imm64_ & kImm16Mask) {
- dsll(rd, rd, 16);
ori(rd, rd, j.imm64_ & kImm16Mask);
- } else {
- dsll(rd, rd, 16);
}
} else {
- if (j.imm64_ & kImm16Mask) {
- dsll32(rd, rd, 0);
- ori(rd, rd, j.imm64_ & kImm16Mask);
+ lui(rd, (j.imm64_ >> 48) & kImm16Mask);
+ if ((j.imm64_ >> 32) & kImm16Mask) {
+ ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
+ }
+ if ((j.imm64_ >> 16) & kImm16Mask) {
+ dsll(rd, rd, 16);
+ ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+ if (j.imm64_ & kImm16Mask) {
+ dsll(rd, rd, 16);
+ ori(rd, rd, j.imm64_ & kImm16Mask);
+ } else {
+ dsll(rd, rd, 16);
+ }
} else {
- dsll32(rd, rd, 0);
+ if (j.imm64_ & kImm16Mask) {
+ dsll32(rd, rd, 0);
+ ori(rd, rd, j.imm64_ & kImm16Mask);
+ } else {
+ dsll32(rd, rd, 0);
+ }
}
}
}
@@ -1371,12 +1463,32 @@ void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
dsll(rd, rd, 16);
ori(rd, rd, j.imm64_ & kImm16Mask);
} else {
- lui(rd, (j.imm64_ >> 48) & kImm16Mask);
- ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
- dsll(rd, rd, 16);
- ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
- dsll(rd, rd, 16);
- ori(rd, rd, j.imm64_ & kImm16Mask);
+ if (kArchVariant == kMips64r6) {
+ int64_t imm = j.imm64_;
+ lui(rd, (imm >> kLuiShift) & kImm16Mask);
+ if (imm & kImm16Mask) {
+ ori(rd, rd, (imm & kImm16Mask));
+ }
+ if ((imm >> 31) & 0x1) {
+ imm = (imm >> 32) + 1;
+ } else {
+ imm = imm >> 32;
+ }
+ dahi(rd, imm & kImm16Mask);
+ if ((imm >> 15) & 0x1) {
+ imm = (imm >> 16) + 1;
+ } else {
+ imm = imm >> 16;
+ }
+ dati(rd, imm & kImm16Mask);
+ } else {
+ lui(rd, (j.imm64_ >> 48) & kImm16Mask);
+ ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
+ dsll(rd, rd, 16);
+ ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+ dsll(rd, rd, 16);
+ ori(rd, rd, j.imm64_ & kImm16Mask);
+ }
}
}
@@ -1596,6 +1708,22 @@ void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
bind(&conversion_done);
}
+void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
+ // Move the data from fs to t8.
+ mfc1(t8, fs);
+ Cvt_s_uw(fd, t8);
+}
+
+void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
+ // Convert rs to a FP value in fd.
+ DCHECK(!rs.is(t9));
+ DCHECK(!rs.is(at));
+
+ // Zero extend int32 in rs.
+ Dext(t9, rs, 0, 32);
+ dmtc1(t9, fd);
+ cvt_s_l(fd, fd);
+}
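A scalar sketch of what the new Cvt_s_uw emits: Dext zero-extends the low 32 bits into a 64-bit integer, and cvt_s_l then converts a value that is always non-negative, so no unsigned fixup is needed:

    #include <cstdint>

    float CvtSUw(uint64_t rs) {
      int64_t zero_extended = static_cast<int64_t>(rs & 0xFFFFFFFFu);  // Dext(t9, rs, 0, 32)
      return static_cast<float>(zero_extended);                        // cvt_s_l
    }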
void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
// Move the data from fs to t8.
@@ -1672,6 +1800,12 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
mtc1(t8, fd);
}
+void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
+ FPURegister scratch) {
+ Trunc_uw_s(fs, t8, scratch);
+ mtc1(t8, fd);
+}
+
void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
FPURegister scratch, Register result) {
Trunc_ul_d(fs, t8, scratch, result);
@@ -1738,6 +1872,35 @@ void MacroAssembler::Trunc_uw_d(FPURegister fd,
bind(&done);
}
+void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
+ FPURegister scratch) {
+ DCHECK(!fd.is(scratch));
+ DCHECK(!rs.is(at));
+
+ // Load 2^31 into scratch as its float representation.
+ li(at, 0x4F000000);
+ mtc1(at, scratch);
+ // Test if scratch > fd.
+ // If fd < 2^31 we can convert it normally.
+ Label simple_convert;
+ BranchF32(&simple_convert, NULL, lt, fd, scratch);
+
+  // First we subtract 2^31 from fd, then truncate it to rs
+  // and add 2^31 to rs.
+ sub_s(scratch, fd, scratch);
+ trunc_w_s(scratch, scratch);
+ mfc1(rs, scratch);
+ Or(rs, rs, 1 << 31);
+
+ Label done;
+ Branch(&done);
+ // Simple conversion.
+ bind(&simple_convert);
+ trunc_w_s(scratch, fd);
+ mfc1(rs, scratch);
+
+ bind(&done);
+}
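The comments above describe the compensation path for values that do not fit a signed 32-bit truncation. The same flow in standalone C++ (assuming truncation toward zero, which is what trunc_w_s does):

    #include <cstdint>
    #include <cstdio>

    uint32_t TruncUwS(float f) {
      const float two31 = 2147483648.0f;  // 0x4F000000, i.e. 2^31
      if (f < two31) {
        return static_cast<uint32_t>(static_cast<int32_t>(f));  // simple conversion
      }
      // Subtract 2^31, truncate, then set bit 31 to add 2^31 back.
      return static_cast<uint32_t>(static_cast<int32_t>(f - two31)) | (1u << 31);
    }

    int main() { std::printf("%u\n", TruncUwS(3000000000.0f)); }  // 3000000000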
void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
FPURegister scratch, Register result) {
@@ -3714,7 +3877,7 @@ void MacroAssembler::Allocate(int object_size,
return;
}
- DCHECK(!AreAliased(result, scratch1, scratch2, t9));
+ DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
@@ -3792,8 +3955,8 @@ void MacroAssembler::Allocate(Register object_size, Register result,
}
// |object_size| and |result_end| may overlap, other registers must not.
- DCHECK(!AreAliased(object_size, result, scratch, t9));
- DCHECK(!AreAliased(result_end, result, scratch, t9));
+ DCHECK(!AreAliased(object_size, result, scratch, t9, at));
+ DCHECK(!AreAliased(result_end, result, scratch, t9, at));
// Check relative positions of allocation top and limit addresses.
// ARM adds additional checks to make sure the ldm instruction can be
@@ -3839,8 +4002,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
- dsll(result_end, object_size, kPointerSizeLog2);
- Daddu(result_end, result, result_end);
+ Dlsa(result_end, result, object_size, kPointerSizeLog2);
} else {
Daddu(result_end, result, Operand(object_size));
}
@@ -4365,7 +4527,7 @@ void MacroAssembler::MovToFloatResult(DoubleRegister src) {
void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
DoubleRegister src2) {
if (!IsMipsSoftFloatABI) {
- const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
+ const DoubleRegister fparg2 = f13;
if (src2.is(f12)) {
DCHECK(!src1.is(fparg2));
Move(fparg2, src2);
@@ -4479,7 +4641,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -5230,18 +5392,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
}
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- // Fake a parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- LoadNativeContextSlot(native_context_index, a1);
- InvokeFunctionCode(a1, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -5338,9 +5488,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
}
// Will not return here.
if (is_trampoline_pool_blocked()) {
@@ -5596,8 +5746,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count_is_length) {
daddu(sp, sp, argument_count);
} else {
- dsll(t8, argument_count, kPointerSizeLog2);
- daddu(sp, sp, t8);
+ Dlsa(sp, sp, argument_count, kPointerSizeLog2, t8);
}
}
@@ -5880,6 +6029,17 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
+void MacroAssembler::AssertReceiver(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -5969,8 +6129,7 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
}
-
-static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
+static const int kRegisterPassedArguments = 8;
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments) {
@@ -6185,8 +6344,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
- dsll(t8, t8, Bitmap::kBytesPerCellLog2);
- Daddu(bitmap_reg, bitmap_reg, t8);
+ Dlsa(bitmap_reg, bitmap_reg, t8, Bitmap::kBytesPerCellLog2);
li(t8, Operand(1));
dsllv(mask_reg, t8, mask_reg);
}
@@ -6251,7 +6409,8 @@ void MacroAssembler::LoadAccessor(Register dst, Register holder,
}
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+ Register null_value = a5;
Register empty_fixed_array_value = a6;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Label next, start;
@@ -6265,6 +6424,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
Branch(
call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
+ LoadRoot(null_value, Heap::kNullValueRootIndex);
jmp(&start);
bind(&next);
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 31ed8a32e1..7f44ab9cc5 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -15,6 +15,7 @@ namespace internal {
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_v0};
const Register kReturnRegister1 = {Register::kCode_v1};
+const Register kReturnRegister2 = {Register::kCode_a0};
const Register kJSFunctionRegister = {Register::kCode_a1};
const Register kContextRegister = {Register::kCpRegister};
const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
@@ -235,6 +236,11 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
BranchDelaySlot bdslot = PROTECT);
+  // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
+  // functor/function with the declaration 'Label* func(size_t index)'.
+ template <typename Func>
+ void GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction);
#undef COND_ARGS
// Emit code to discard a non-negative number of pointer-sized elements
@@ -385,7 +391,7 @@ class MacroAssembler: public Assembler {
void JumpIfNotInNewSpace(Register object,
Register scratch,
Label* branch) {
- InNewSpace(object, scratch, ne, branch);
+ InNewSpace(object, scratch, eq, branch);
}
// Check if object is in new space. Jumps if the object is in new space.
@@ -393,7 +399,7 @@ class MacroAssembler: public Assembler {
void JumpIfInNewSpace(Register object,
Register scratch,
Label* branch) {
- InNewSpace(object, scratch, eq, branch);
+ InNewSpace(object, scratch, ne, branch);
}
// Check if an object has a given incremental marking color.
@@ -455,6 +461,11 @@ class MacroAssembler: public Assembler {
pointers_to_here_check_for_value);
}
+ // Notify the garbage collector that we wrote a code entry into a
+ // JSFunction. Only scratch is clobbered by the operation.
+ void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+ Register scratch);
+
void RecordWriteForMap(
Register object,
Register map,
@@ -688,6 +699,7 @@ class MacroAssembler: public Assembler {
// Load int32 in the rd register.
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ inline bool LiLower32BitHelper(Register rd, Operand j);
inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
li(rd, Operand(j), mode);
}
@@ -821,6 +833,10 @@ class MacroAssembler: public Assembler {
void Cvt_d_ul(FPURegister fd, FPURegister fs);
void Cvt_d_ul(FPURegister fd, Register rs);
+ // Convert unsigned word to float.
+ void Cvt_s_uw(FPURegister fd, FPURegister fs);
+ void Cvt_s_uw(FPURegister fd, Register rs);
+
// Convert unsigned long to float.
void Cvt_s_ul(FPURegister fd, FPURegister fs);
void Cvt_s_ul(FPURegister fd, Register rs);
@@ -837,6 +853,10 @@ class MacroAssembler: public Assembler {
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
+ // Convert single to unsigned word.
+ void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
+ void Trunc_uw_s(FPURegister fd, Register rs, FPURegister scratch);
+
// Convert double to unsigned long.
void Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch,
Register result = no_reg);
@@ -1121,6 +1141,11 @@ class MacroAssembler: public Assembler {
Register map,
Register type_reg);
+ void GetInstanceType(Register object_map, Register object_instance_type) {
+ lbu(object_instance_type,
+ FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+ }
+
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map,
@@ -1449,10 +1474,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd = PROTECT);
- // Invoke specified builtin JavaScript function.
- void InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
struct Unresolved {
int pc;
uint32_t flags; // See Bootstrapper::FixupFlags decoders/encoders.
@@ -1644,6 +1665,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+ void AssertReceiver(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1747,7 +1771,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Expects object in a0 and returns map with validated enum cache
// in a0. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Register null_value, Label* call_runtime);
+ void CheckEnumCache(Label* call_runtime);
// AllocationMemento support. Arrays may have an associated
// AllocationMemento object that can be checked for in order to pretransition
@@ -1836,9 +1860,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register scratch2);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
- void InNewSpace(Register object,
- Register scratch,
- Condition cond, // eq for new space, ne otherwise.
+ void InNewSpace(Register object, Register scratch,
+ Condition cond, // ne for new space, eq otherwise.
Label* branch);
// Helper for finding the mark bits for an address. Afterwards, the
@@ -1901,7 +1924,36 @@ class CodePatcher {
FlushICache flush_cache_; // Whether to flush the I cache after patching.
};
-
+template <typename Func>
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction) {
+  // Ensure that dd-ed labels following this instruction use 8-byte-aligned
+  // addresses.
+ if (kArchVariant >= kMips64r6) {
+ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 6);
+    // Opposite of Align(8) as we have an odd number of instructions here.
+ if ((pc_offset() & 7) == 0) {
+ nop();
+ }
+ addiupc(at, 5);
+ dlsa(at, at, index, kPointerSizeLog2);
+ ld(at, MemOperand(at));
+ } else {
+ Label here;
+ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 7);
+ Align(8);
+ bal(&here);
+ dsll(at, index, kPointerSizeLog2); // Branch delay slot.
+ bind(&here);
+ daddu(at, at, ra);
+ ld(at, MemOperand(at, 4 * v8::internal::Assembler::kInstrSize));
+ }
+ jr(at);
+ nop(); // Branch delay slot nop.
+ for (size_t index = 0; index < case_count; ++index) {
+ dd(GetLabelFunction(index));
+ }
+}
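A hedged usage sketch for the new template, inside some generator function; index is a Register holding the zero-based case number, and the case labels are bound by the caller as usual:

    Label case0, case1, done;
    Label* labels[] = {&case0, &case1};
    __ GenerateSwitchTable(index, arraysize(labels),
                           [&labels](size_t i) { return labels[i]; });
    __ bind(&case0);
    // ... code for case 0 ...
    __ Branch(&done);
    __ bind(&case1);
    // ... code for case 1 ...
    __ bind(&done);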
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 7fa96442f9..70c06c885f 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -16,6 +16,7 @@
#include "src/mips64/constants-mips64.h"
#include "src/mips64/simulator-mips64.h"
#include "src/ostreams.h"
+#include "src/runtime/runtime-utils.h"
// Only build the simulator if not compiling for real MIPS hardware.
#if defined(USE_SIMULATOR)
@@ -520,7 +521,8 @@ void MipsDebugger::Debug() {
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int64_t value = *cur;
Heap* current_heap = sim_->isolate_->heap();
- if (((value & 1) == 0) || current_heap->Contains(obj)) {
+ if (((value & 1) == 0) ||
+ current_heap->ContainsSlow(obj->address())) {
PrintF(" (");
if ((value & 1) == 0) {
PrintF("smi %d", static_cast<int>(value >> 32));
@@ -1159,7 +1161,7 @@ double Simulator::get_fpu_register_double(int fpureg) const {
// from a0-a3 or f12 and f13 (n64), or f14 (O32).
void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (!IsMipsSoftFloatABI) {
- const int fparg2 = (kMipsAbi == kN64) ? 13 : 14;
+ const int fparg2 = 13;
*x = get_fpu_register_double(12);
*y = get_fpu_register_double(fparg2);
*z = static_cast<int32_t>(get_register(a2));
@@ -1964,11 +1966,6 @@ void Simulator::Format(Instruction* instr, const char* format) {
// 64 bits of result. If they don't, the v1 result register contains a bogus
// value, which is fine because it is caller-saved.
-struct ObjectPair {
- Object* x;
- Object* y;
-};
-
typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
int64_t arg1,
int64_t arg2,
@@ -1976,6 +1973,9 @@ typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
int64_t arg4,
int64_t arg5);
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4);
// These prototypes handle the four types of FP calls.
typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
@@ -2010,15 +2010,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
int64_t arg3 = get_register(a3);
int64_t arg4, arg5;
- if (kMipsAbi == kN64) {
- arg4 = get_register(a4); // Abi n64 register a4.
- arg5 = get_register(a5); // Abi n64 register a5.
- } else { // Abi O32.
- int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
- // Args 4 and 5 are on the stack after the reserved space for args 0..3.
- arg4 = stack_pointer[4];
- arg5 = stack_pointer[5];
- }
+ arg4 = get_register(a4); // Abi n64 register a4.
+ arg5 = get_register(a5); // Abi n64 register a5.
+
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
@@ -2175,7 +2169,30 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+ } else if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
+ // builtin call returning ObjectTriple.
+ SimulatorRuntimeTripleCall target =
+ reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(
+ "Call to host triple returning runtime function %p "
+ "args %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", %016" PRIx64
+ ", %016" PRIx64 "\n",
+ FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+ }
+ // arg0 is a hidden argument pointing to the return location, so don't
+ // pass it to the target function.
+ ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+ }
+ // Return is passed back in address pointed to by hidden first argument.
+ ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
+ *sim_result = result;
+ set_register(v0, arg0);
} else {
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim) {
@@ -2316,6 +2333,89 @@ void Simulator::SignalException(Exception e) {
static_cast<int>(e));
}
+// Min/Max template functions for Double and Single arguments.
+
+template <typename T>
+static T FPAbs(T a);
+
+template <>
+double FPAbs<double>(double a) {
+ return fabs(a);
+}
+
+template <>
+float FPAbs<float>(float a) {
+ return fabsf(a);
+}
+
+template <typename T>
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T& result) {
+ if (std::isnan(a) && std::isnan(b)) {
+ result = a;
+ } else if (std::isnan(a)) {
+ result = b;
+ } else if (std::isnan(b)) {
+ result = a;
+ } else if (b == a) {
+    // Handle the -0.0 == 0.0 case.
+    // std::signbit() returns int 0 or 1, so subtracting MaxMinKind::kMax
+    // negates the result.
+ result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+ } else {
+ return false;
+ }
+ return true;
+}
+
+template <typename T>
+static T FPUMin(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ return result;
+ } else {
+ return b < a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMax(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, result)) {
+ return result;
+ } else {
+ return b > a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMinA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (FPAbs(a) < FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) < FPAbs(a)) {
+ result = b;
+ } else {
+ result = a < b ? a : b;
+ }
+ }
+ return result;
+}
+
+template <typename T>
+static T FPUMaxA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, result)) {
+ if (FPAbs(a) > FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) > FPAbs(a)) {
+ result = b;
+ } else {
+ result = a > b ? a : b;
+ }
+ }
+ return result;
+}
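A standalone check of the signed-zero tie-break encoded by the signbit arithmetic above: kMin (0) selects the operand whose sign bit is set, kMax (1) the one whose sign bit is clear:

    #include <cmath>
    #include <cstdio>

    int main() {
      double a = 0.0, b = -0.0;
      // result = std::signbit(b) - kind ? b : a, as in FPUProcessNaNsAndZeros.
      double min = (std::signbit(b) - 0) ? b : a;  // kMin picks -0.0
      double max = (std::signbit(b) - 1) ? b : a;  // kMax picks 0.0
      std::printf("min=%g max=%g\n", min, max);    // prints min=-0 max=0
    }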
// Handle execution based on instruction types.
@@ -2600,71 +2700,19 @@ void Simulator::DecodeTypeRegisterSRsType() {
}
case MINA:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- float result;
- if (fabs(fs) > fabs(ft)) {
- result = ft;
- } else if (fabs(fs) < fabs(ft)) {
- result = fs;
- } else {
- result = (fs < ft ? fs : ft);
- }
- set_fpu_register_float(fd_reg(), result);
- }
+ set_fpu_register_float(fd_reg(), FPUMinA(ft, fs));
break;
case MAXA:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- float result;
- if (fabs(fs) < fabs(ft)) {
- result = ft;
- } else if (fabs(fs) > fabs(ft)) {
- result = fs;
- } else {
- result = (fs > ft ? fs : ft);
- }
- set_fpu_register_float(fd_reg(), result);
- }
+ set_fpu_register_float(fd_reg(), FPUMaxA(ft, fs));
break;
case MIN:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- set_fpu_register_float(fd_reg(), (fs >= ft) ? ft : fs);
- }
+ set_fpu_register_float(fd_reg(), FPUMin(ft, fs));
break;
case MAX:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_float(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg(), fs);
- } else {
- set_fpu_register_float(fd_reg(), (fs <= ft) ? ft : fs);
- }
+ set_fpu_register_float(fd_reg(), FPUMax(ft, fs));
break;
case SEL:
DCHECK(kArchVariant == kMips64r6);
@@ -2809,71 +2857,19 @@ void Simulator::DecodeTypeRegisterDRsType() {
}
case MINA:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- double result;
- if (fabs(fs) > fabs(ft)) {
- result = ft;
- } else if (fabs(fs) < fabs(ft)) {
- result = fs;
- } else {
- result = (fs < ft ? fs : ft);
- }
- set_fpu_register_double(fd_reg(), result);
- }
+ set_fpu_register_double(fd_reg(), FPUMinA(ft, fs));
break;
case MAXA:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- double result;
- if (fabs(fs) < fabs(ft)) {
- result = ft;
- } else if (fabs(fs) > fabs(ft)) {
- result = fs;
- } else {
- result = (fs > ft ? fs : ft);
- }
- set_fpu_register_double(fd_reg(), result);
- }
+ set_fpu_register_double(fd_reg(), FPUMaxA(ft, fs));
break;
case MIN:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- set_fpu_register_double(fd_reg(), (fs >= ft) ? ft : fs);
- }
+ set_fpu_register_double(fd_reg(), FPUMin(ft, fs));
break;
case MAX:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_double(fs_reg());
- if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), ft);
- } else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg(), fs);
- } else {
- set_fpu_register_double(fd_reg(), (fs <= ft) ? ft : fs);
- }
+ set_fpu_register_double(fd_reg(), FPUMax(ft, fs));
break;
case ADD_D:
set_fpu_register_double(fd_reg(), fs + ft);
@@ -4777,7 +4773,7 @@ void Simulator::CallInternal(byte* entry) {
int64_t Simulator::Call(byte* entry, int argument_count, ...) {
- const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
+ const int kRegisterPassedArguments = 8;
va_list parameters;
va_start(parameters, argument_count);
// Set up arguments.
@@ -4789,14 +4785,12 @@ int64_t Simulator::Call(byte* entry, int argument_count, ...) {
set_register(a2, va_arg(parameters, int64_t));
set_register(a3, va_arg(parameters, int64_t));
- if (kMipsAbi == kN64) {
- // Up to eight arguments passed in registers in N64 ABI.
- // TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this.
- if (argument_count >= 5) set_register(a4, va_arg(parameters, int64_t));
- if (argument_count >= 6) set_register(a5, va_arg(parameters, int64_t));
- if (argument_count >= 7) set_register(a6, va_arg(parameters, int64_t));
- if (argument_count >= 8) set_register(a7, va_arg(parameters, int64_t));
- }
+ // Up to eight arguments passed in registers in N64 ABI.
+ // TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this.
+ if (argument_count >= 5) set_register(a4, va_arg(parameters, int64_t));
+ if (argument_count >= 6) set_register(a5, va_arg(parameters, int64_t));
+ if (argument_count >= 7) set_register(a6, va_arg(parameters, int64_t));
+ if (argument_count >= 8) set_register(a7, va_arg(parameters, int64_t));
// Remaining arguments passed on stack.
int64_t original_stack = get_register(sp);
@@ -4831,7 +4825,7 @@ int64_t Simulator::Call(byte* entry, int argument_count, ...) {
double Simulator::CallFP(byte* entry, double d0, double d1) {
if (!IsMipsSoftFloatABI) {
- const FPURegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
+ const FPURegister fparg2 = f13;
set_fpu_register_double(f12, d0);
set_fpu_register_double(fparg2, d1);
} else {
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index 1d156d860f..7f60a74639 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -31,7 +31,6 @@ namespace internal {
 // should act as a function matching the type mips_regexp_matcher.
// The fifth (or ninth) argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
-#ifdef MIPS_ABI_N64
typedef int (*mips_regexp_matcher)(String* input,
int64_t start_offset,
const byte* input_start,
@@ -48,26 +47,6 @@ typedef int (*mips_regexp_matcher)(String* input,
(FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
NULL, p8))
-#else // O32 Abi.
-
-typedef int (*mips_regexp_matcher)(String* input,
- int32_t start_offset,
- const byte* input_start,
- const byte* input_end,
- void* return_address,
- int* output,
- int32_t output_size,
- Address stack_base,
- int32_t direct_call,
- Isolate* isolate);
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6, \
- p7, p8))
-
-#endif // MIPS_ABI_N64
-
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
@@ -516,18 +495,11 @@ class Simulator {
reinterpret_cast<int64_t*>(p3), reinterpret_cast<int64_t*>(p4)))
-#ifdef MIPS_ABI_N64
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
static_cast<int>(Simulator::current(isolate)->Call( \
entry, 10, p0, p1, p2, p3, p4, reinterpret_cast<int64_t*>(p5), p6, p7, \
NULL, p8))
-#else // Must be O32 Abi.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
- p7, p8) \
- static_cast<int>(Simulator::current(isolate)->Call( \
- entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
-#endif // MIPS_ABI_N64
// The simulator has its own stack. Thus it has a different stack limit from
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index ba3c4be52f..cd4be13967 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -193,19 +193,24 @@ class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(HeapObject* obj, int offset) {
- return offset == kConstantPoolOffset;
+ return offset >= kConstantPoolOffset &&
+ offset <= kSourcePositionTableOffset;
}
template <typename ObjectVisitor>
static inline void IterateBody(HeapObject* obj, int object_size,
ObjectVisitor* v) {
IteratePointer(obj, kConstantPoolOffset, v);
+ IteratePointer(obj, kHandlerTableOffset, v);
+ IteratePointer(obj, kSourcePositionTableOffset, v);
}
template <typename StaticVisitor>
static inline void IterateBody(HeapObject* obj, int object_size) {
Heap* heap = obj->GetHeap();
IteratePointer<StaticVisitor>(heap, obj, kConstantPoolOffset);
+ IteratePointer<StaticVisitor>(heap, obj, kHandlerTableOffset);
+ IteratePointer<StaticVisitor>(heap, obj, kSourcePositionTableOffset);
}
static inline int SizeOf(Map* map, HeapObject* obj) {
@@ -464,7 +469,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_MAP_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
- case JS_ITERATOR_RESULT_TYPE:
case JS_REGEXP_TYPE:
case JS_GLOBAL_PROXY_TYPE:
case JS_GLOBAL_OBJECT_TYPE:
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index b6dd42553c..0d01ec2f5b 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -7,6 +7,7 @@
#include "src/bootstrapper.h"
#include "src/disasm.h"
#include "src/disassembler.h"
+#include "src/field-type.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
@@ -150,9 +151,6 @@ void HeapObject::HeapObjectVerify() {
case JS_MAP_ITERATOR_TYPE:
JSMapIterator::cast(this)->JSMapIteratorVerify();
break;
- case JS_ITERATOR_RESULT_TYPE:
- JSIteratorResult::cast(this)->JSIteratorResultVerify();
- break;
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(this)->JSWeakMapVerify();
break;
@@ -210,7 +208,7 @@ void HeapObject::VerifyHeapPointer(Object* p) {
void Symbol::SymbolVerify() {
CHECK(IsSymbol());
CHECK(HasHashCode());
- CHECK_GT(Hash(), 0u);
+ CHECK(GetHeap()->hidden_properties_symbol() == this || Hash() > 0u);
CHECK(name()->IsUndefined() || name()->IsString());
}
@@ -298,9 +296,9 @@ void JSObject::JSObjectVerify() {
if (value->IsUninitialized()) continue;
if (r.IsSmi()) DCHECK(value->IsSmi());
if (r.IsHeapObject()) DCHECK(value->IsHeapObject());
- HeapType* field_type = descriptors->GetFieldType(i);
- bool type_is_none = field_type->Is(HeapType::None());
- bool type_is_any = HeapType::Any()->Is(field_type);
+ FieldType* field_type = descriptors->GetFieldType(i);
+ bool type_is_none = field_type->IsNone();
+ bool type_is_any = field_type->IsAny();
if (r.IsNone()) {
CHECK(type_is_none);
} else if (!type_is_any && !(type_is_none && r.IsHeapObject())) {
@@ -318,7 +316,8 @@ void JSObject::JSObjectVerify() {
// pointer may point to a one pointer filler map.
if (ElementsAreSafeToExamine()) {
CHECK_EQ((map()->has_fast_smi_or_object_elements() ||
- (elements() == GetHeap()->empty_fixed_array())),
+ (elements() == GetHeap()->empty_fixed_array()) ||
+ HasFastStringWrapperElements()),
(elements()->map() == GetHeap()->fixed_array_map() ||
elements()->map() == GetHeap()->fixed_cow_array_map()));
CHECK(map()->has_fast_object_elements() == HasFastObjectElements());
@@ -553,9 +552,7 @@ void JSBoundFunction::JSBoundFunctionVerify() {
VerifyObjectField(kBoundThisOffset);
VerifyObjectField(kBoundTargetFunctionOffset);
VerifyObjectField(kBoundArgumentsOffset);
- VerifyObjectField(kCreationContextOffset);
CHECK(bound_target_function()->IsCallable());
- CHECK(creation_context()->IsNativeContext());
CHECK(IsCallable());
CHECK_EQ(IsConstructor(), bound_target_function()->IsConstructor());
}
@@ -765,14 +762,6 @@ void JSMapIterator::JSMapIteratorVerify() {
}
-void JSIteratorResult::JSIteratorResultVerify() {
- CHECK(IsJSIteratorResult());
- JSObjectVerify();
- VerifyPointer(done());
- VerifyPointer(value());
-}
-
-
void JSWeakMap::JSWeakMapVerify() {
CHECK(IsJSWeakMap());
JSObjectVerify();
@@ -911,12 +900,6 @@ void PrototypeInfo::PrototypeInfoVerify() {
}
-void AccessorInfo::AccessorInfoVerify() {
- VerifyPointer(name());
- VerifyPointer(expected_receiver_type());
-}
-
-
void SloppyBlockWithEvalContextExtension::
SloppyBlockWithEvalContextExtensionVerify() {
CHECK(IsSloppyBlockWithEvalContextExtension());
@@ -925,9 +908,10 @@ void SloppyBlockWithEvalContextExtension::
}
-void ExecutableAccessorInfo::ExecutableAccessorInfoVerify() {
- CHECK(IsExecutableAccessorInfo());
- AccessorInfoVerify();
+void AccessorInfo::AccessorInfoVerify() {
+ CHECK(IsAccessorInfo());
+ VerifyPointer(name());
+ VerifyPointer(expected_receiver_type());
VerifyPointer(getter());
VerifyPointer(setter());
VerifyPointer(data());
@@ -1038,7 +1022,7 @@ void NormalizedMapCache::NormalizedMapCacheVerify() {
void DebugInfo::DebugInfoVerify() {
CHECK(IsDebugInfo());
VerifyPointer(shared());
- VerifyPointer(code());
+ VerifyPointer(abstract_code());
VerifyPointer(break_points());
}
@@ -1076,7 +1060,8 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS: {
+ case FAST_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS: {
info->number_of_objects_with_fast_elements_++;
int holes = 0;
FixedArray* e = FixedArray::cast(elements());
@@ -1100,7 +1085,8 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
info->number_of_fast_used_elements_ += e->length();
break;
}
- case DICTIONARY_ELEMENTS: {
+ case DICTIONARY_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS: {
SeededNumberDictionary* dict = element_dictionary();
info->number_of_slow_used_elements_ += dict->NumberOfElements();
info->number_of_slow_unused_elements_ +=
@@ -1109,6 +1095,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
}
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case NO_ELEMENTS:
break;
}
}
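
The objects-debug.cc switches keep growing arms because this patch introduces three ElementsKind values (FAST_STRING_WRAPPER_ELEMENTS, SLOW_STRING_WRAPPER_ELEMENTS, NO_ELEMENTS), and V8's switches avoid a default: case so the compiler flags any kind a switch forgets. A compilable sketch of that convention, with a reduced enum:

// Omitting `default:` makes -Wswitch (or MSVC C4062) report any
// ElementsKind a switch fails to handle, which is how additions like
// NO_ELEMENTS are kept consistent across the codebase.
enum class ElementsKind {
  FAST_ELEMENTS,
  DICTIONARY_ELEMENTS,
  FAST_STRING_WRAPPER_ELEMENTS,  // new in this patch
  SLOW_STRING_WRAPPER_ELEMENTS,  // new in this patch
  NO_ELEMENTS                    // new in this patch
};

bool uses_dictionary_backing(ElementsKind kind) {
  switch (kind) {
    case ElementsKind::DICTIONARY_ELEMENTS:
    case ElementsKind::SLOW_STRING_WRAPPER_ELEMENTS:
      return true;
    case ElementsKind::FAST_ELEMENTS:
    case ElementsKind::FAST_STRING_WRAPPER_ELEMENTS:
    case ElementsKind::NO_ELEMENTS:
      return false;
  }
  return false;  // unreachable; keeps non-warning compilers happy
}
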
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 0509a80b23..1abc7ebf07 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -18,6 +18,7 @@
#include "src/conversions-inl.h"
#include "src/factory.h"
#include "src/field-index-inl.h"
+#include "src/handles-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
@@ -28,7 +29,6 @@
#include "src/prototype.h"
#include "src/transitions-inl.h"
#include "src/type-feedback-vector-inl.h"
-#include "src/types-inl.h"
#include "src/v8memory.h"
namespace v8 {
@@ -54,14 +54,11 @@ int PropertyDetails::field_width_in_words() const {
return representation().IsDouble() ? kDoubleSize / kPointerSize : 1;
}
-
-#define TYPE_CHECKER(type, instancetype) \
- bool Object::Is##type() const { \
- return Object::IsHeapObject() && \
- HeapObject::cast(this)->map()->instance_type() == instancetype; \
+#define TYPE_CHECKER(type, instancetype) \
+ bool HeapObject::Is##type() const { \
+ return map()->instance_type() == instancetype; \
}
-
#define CAST_ACCESSOR(type) \
type* type::cast(Object* object) { \
SLOW_DCHECK(object->Is##type()); \
@@ -128,143 +125,118 @@ int PropertyDetails::field_width_in_words() const {
set_##field(BooleanBit::set(field(), offset, value)); \
}
-
-bool Object::IsFixedArrayBase() const {
+bool HeapObject::IsFixedArrayBase() const {
return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
}
-
-bool Object::IsFixedArray() const {
- if (!IsHeapObject()) return false;
- InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
+bool HeapObject::IsFixedArray() const {
+ InstanceType instance_type = map()->instance_type();
return instance_type == FIXED_ARRAY_TYPE ||
instance_type == TRANSITION_ARRAY_TYPE;
}
// External objects are not extensible, so the map check is enough.
-bool Object::IsExternal() const {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->external_map();
+bool HeapObject::IsExternal() const {
+ return map() == GetHeap()->external_map();
}
-bool Object::IsAccessorInfo() const { return IsExecutableAccessorInfo(); }
-
-
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
TYPE_CHECKER(Simd128Value, SIMD128_VALUE_TYPE)
-
#define SIMD128_TYPE_CHECKER(TYPE, Type, type, lane_count, lane_type) \
- bool Object::Is##Type() const { \
- return Object::IsHeapObject() && \
- HeapObject::cast(this)->map() == \
- HeapObject::cast(this)->GetHeap()->type##_map(); \
- }
+ bool HeapObject::Is##Type() const { return map() == GetHeap()->type##_map(); }
SIMD128_TYPES(SIMD128_TYPE_CHECKER)
#undef SIMD128_TYPE_CHECKER
+#define IS_TYPE_FUNCTION_DEF(type_) \
+ bool Object::Is##type_() const { \
+ return IsHeapObject() && HeapObject::cast(this)->Is##type_(); \
+ }
+HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
+ODDBALL_LIST(IS_TYPE_FUNCTION_DEF)
+#undef IS_TYPE_FUNCTION_DEF
-bool Object::IsString() const {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE;
+bool HeapObject::IsString() const {
+ return map()->instance_type() < FIRST_NONSTRING_TYPE;
}
-
-bool Object::IsName() const {
- STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() <= LAST_NAME_TYPE;
+bool HeapObject::IsName() const {
+ return map()->instance_type() <= LAST_NAME_TYPE;
}
-
-bool Object::IsUniqueName() const {
+bool HeapObject::IsUniqueName() const {
return IsInternalizedString() || IsSymbol();
}
-
-bool Object::IsFunction() const {
- STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() >= FIRST_FUNCTION_TYPE;
+bool Name::IsUniqueName() const {
+ uint32_t type = map()->instance_type();
+ return (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
+ (kStringTag | kNotInternalizedTag);
}
-
-bool Object::IsCallable() const {
- return Object::IsHeapObject() && HeapObject::cast(this)->map()->is_callable();
+bool HeapObject::IsFunction() const {
+ STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
+ return map()->instance_type() >= FIRST_FUNCTION_TYPE;
}
+bool HeapObject::IsCallable() const { return map()->is_callable(); }
-bool Object::IsConstructor() const {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map()->is_constructor();
-}
+bool HeapObject::IsConstructor() const { return map()->is_constructor(); }
-
-bool Object::IsTemplateInfo() const {
+bool HeapObject::IsTemplateInfo() const {
return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
}
-
-bool Object::IsInternalizedString() const {
- if (!this->IsHeapObject()) return false;
- uint32_t type = HeapObject::cast(this)->map()->instance_type();
+bool HeapObject::IsInternalizedString() const {
+ uint32_t type = map()->instance_type();
STATIC_ASSERT(kNotInternalizedTag != 0);
return (type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
(kStringTag | kInternalizedTag);
}
-
-bool Object::IsConsString() const {
+bool HeapObject::IsConsString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsCons();
}
-
-bool Object::IsSlicedString() const {
+bool HeapObject::IsSlicedString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSliced();
}
-
-bool Object::IsSeqString() const {
+bool HeapObject::IsSeqString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential();
}
-
-bool Object::IsSeqOneByteString() const {
+bool HeapObject::IsSeqOneByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential() &&
String::cast(this)->IsOneByteRepresentation();
}
-
-bool Object::IsSeqTwoByteString() const {
+bool HeapObject::IsSeqTwoByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential() &&
String::cast(this)->IsTwoByteRepresentation();
}
-
-bool Object::IsExternalString() const {
+bool HeapObject::IsExternalString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal();
}
-
-bool Object::IsExternalOneByteString() const {
+bool HeapObject::IsExternalOneByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal() &&
String::cast(this)->IsOneByteRepresentation();
}
-
-bool Object::IsExternalTwoByteString() const {
+bool HeapObject::IsExternalTwoByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal() &&
String::cast(this)->IsTwoByteRepresentation();
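
The large hunk above is one mechanical refactoring: the smi check moves out of every individual predicate and into a single macro-generated Object::IsX forwarder, so each HeapObject::IsX can assume a valid map. A reduced sketch with stand-in types (not V8's real classes):

struct Map { int instance_type; };

struct HeapObject {
  Map* map;
  // After the patch, heap-object predicates can assume map is valid:
  bool IsString() const { return map->instance_type < 64; }  // bound is illustrative
};

struct Object {
  HeapObject* heap_object;  // null models a smi in this sketch
  bool IsHeapObject() const { return heap_object != nullptr; }
  // The IS_TYPE_FUNCTION_DEF macro generates exactly this forwarder:
  bool IsString() const { return IsHeapObject() && heap_object->IsString(); }
};
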
@@ -664,10 +636,8 @@ TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
-
-bool Object::IsFiller() const {
- if (!Object::IsHeapObject()) return false;
- InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
+bool HeapObject::IsFiller() const {
+ InstanceType instance_type = map()->instance_type();
return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}
@@ -679,41 +649,28 @@ bool Object::IsFiller() const {
TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
#undef TYPED_ARRAY_TYPE_CHECKER
-
-bool Object::IsFixedTypedArrayBase() const {
- if (!Object::IsHeapObject()) return false;
-
- InstanceType instance_type =
- HeapObject::cast(this)->map()->instance_type();
+bool HeapObject::IsFixedTypedArrayBase() const {
+ InstanceType instance_type = map()->instance_type();
return (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE);
}
-
-bool Object::IsJSReceiver() const {
+bool HeapObject::IsJSReceiver() const {
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- return IsHeapObject() &&
- HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
+ return map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
}
-
-bool Object::IsJSObject() const {
+bool HeapObject::IsJSObject() const {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- return IsHeapObject() && HeapObject::cast(this)->map()->IsJSObjectMap();
-}
-
-
-bool Object::IsJSProxy() const {
- if (!Object::IsHeapObject()) return false;
- return HeapObject::cast(this)->map()->IsJSProxyMap();
+ return map()->IsJSObjectMap();
}
+bool HeapObject::IsJSProxy() const { return map()->IsJSProxyMap(); }
TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
TYPE_CHECKER(JSMapIterator, JS_MAP_ITERATOR_TYPE)
-TYPE_CHECKER(JSIteratorResult, JS_ITERATOR_RESULT_TYPE)
TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
@@ -722,35 +679,25 @@ TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
-
-bool Object::IsJSWeakCollection() const {
+bool HeapObject::IsJSWeakCollection() const {
return IsJSWeakMap() || IsJSWeakSet();
}
+bool HeapObject::IsDescriptorArray() const { return IsFixedArray(); }
-bool Object::IsDescriptorArray() const {
- return IsFixedArray();
-}
-
-
-bool Object::IsArrayList() const { return IsFixedArray(); }
-
+bool HeapObject::IsArrayList() const { return IsFixedArray(); }
bool Object::IsLayoutDescriptor() const {
return IsSmi() || IsFixedTypedArrayBase();
}
+bool HeapObject::IsTypeFeedbackVector() const { return IsFixedArray(); }
-bool Object::IsTypeFeedbackVector() const { return IsFixedArray(); }
-
-
-bool Object::IsTypeFeedbackMetadata() const { return IsFixedArray(); }
+bool HeapObject::IsTypeFeedbackMetadata() const { return IsFixedArray(); }
+bool HeapObject::IsLiteralsArray() const { return IsFixedArray(); }
-bool Object::IsLiteralsArray() const { return IsFixedArray(); }
-
-
-bool Object::IsDeoptimizationInputData() const {
+bool HeapObject::IsDeoptimizationInputData() const {
// Must be a fixed array.
if (!IsFixedArray()) return false;
@@ -765,8 +712,7 @@ bool Object::IsDeoptimizationInputData() const {
return length >= 0 && length % DeoptimizationInputData::kDeoptEntrySize == 0;
}
-
-bool Object::IsDeoptimizationOutputData() const {
+bool HeapObject::IsDeoptimizationOutputData() const {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
// a deoptimization data array. Since this is used for asserts we can check
@@ -775,27 +721,23 @@ bool Object::IsDeoptimizationOutputData() const {
return true;
}
-
-bool Object::IsHandlerTable() const {
+bool HeapObject::IsHandlerTable() const {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
// a handler table array.
return true;
}
-
-bool Object::IsDependentCode() const {
+bool HeapObject::IsDependentCode() const {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
// a dependent codes array.
return true;
}
-
-bool Object::IsContext() const {
- if (!Object::IsHeapObject()) return false;
- Map* map = HeapObject::cast(this)->map();
- Heap* heap = map->GetHeap();
+bool HeapObject::IsContext() const {
+ Map* map = this->map();
+ Heap* heap = GetHeap();
return (map == heap->function_context_map() ||
map == heap->catch_context_map() ||
map == heap->with_context_map() ||
@@ -805,26 +747,16 @@ bool Object::IsContext() const {
map == heap->script_context_map());
}
-
-bool Object::IsNativeContext() const {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->native_context_map();
+bool HeapObject::IsNativeContext() const {
+ return map() == GetHeap()->native_context_map();
}
-
-bool Object::IsScriptContextTable() const {
- if (!Object::IsHeapObject()) return false;
- Map* map = HeapObject::cast(this)->map();
- Heap* heap = map->GetHeap();
- return map == heap->script_context_table_map();
+bool HeapObject::IsScriptContextTable() const {
+ return map() == GetHeap()->script_context_table_map();
}
-
-bool Object::IsScopeInfo() const {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->scope_info_map();
+bool HeapObject::IsScopeInfo() const {
+ return map() == GetHeap()->scope_info_map();
}
@@ -849,16 +781,18 @@ TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
TYPE_CHECKER(JSDate, JS_DATE_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
+bool HeapObject::IsAbstractCode() const {
+ return IsBytecodeArray() || IsCode();
+}
-bool Object::IsStringWrapper() const {
+bool HeapObject::IsStringWrapper() const {
return IsJSValue() && JSValue::cast(this)->value()->IsString();
}
TYPE_CHECKER(Foreign, FOREIGN_TYPE)
-
-bool Object::IsBoolean() const {
+bool HeapObject::IsBoolean() const {
return IsOddball() &&
((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
}
@@ -869,8 +803,7 @@ TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
-
-bool Object::IsJSArrayBufferView() const {
+bool HeapObject::IsJSArrayBufferView() const {
return IsJSDataView() || IsJSTypedArray();
}
@@ -882,22 +815,14 @@ template <> inline bool Is<JSArray>(Object* obj) {
return obj->IsJSArray();
}
-
-bool Object::IsHashTable() const {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->hash_table_map();
+bool HeapObject::IsHashTable() const {
+ return map() == GetHeap()->hash_table_map();
}
+bool HeapObject::IsWeakHashTable() const { return IsHashTable(); }
-bool Object::IsWeakHashTable() const {
- return IsHashTable();
-}
-
-
-bool Object::IsDictionary() const {
- return IsHashTable() &&
- this != HeapObject::cast(this)->GetHeap()->string_table();
+bool HeapObject::IsDictionary() const {
+ return IsHashTable() && this != GetHeap()->string_table();
}
@@ -918,13 +843,9 @@ bool Object::IsUnseededNumberDictionary() const {
return IsDictionary();
}
+bool HeapObject::IsStringTable() const { return IsHashTable(); }
-bool Object::IsStringTable() const {
- return IsHashTable();
-}
-
-
-bool Object::IsNormalizedMapCache() const {
+bool HeapObject::IsNormalizedMapCache() const {
return NormalizedMapCache::IsNormalizedMapCache(this);
}
@@ -933,51 +854,34 @@ int NormalizedMapCache::GetIndex(Handle<Map> map) {
return map->Hash() % NormalizedMapCache::kEntries;
}
-
-bool NormalizedMapCache::IsNormalizedMapCache(const Object* obj) {
+bool NormalizedMapCache::IsNormalizedMapCache(const HeapObject* obj) {
if (!obj->IsFixedArray()) return false;
if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
return false;
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- reinterpret_cast<NormalizedMapCache*>(const_cast<Object*>(obj))->
- NormalizedMapCacheVerify();
+ reinterpret_cast<NormalizedMapCache*>(const_cast<HeapObject*>(obj))
+ ->NormalizedMapCacheVerify();
}
#endif
return true;
}
+bool HeapObject::IsCompilationCacheTable() const { return IsHashTable(); }
-bool Object::IsCompilationCacheTable() const {
- return IsHashTable();
-}
-
+bool HeapObject::IsCodeCacheHashTable() const { return IsHashTable(); }
-bool Object::IsCodeCacheHashTable() const {
+bool HeapObject::IsPolymorphicCodeCacheHashTable() const {
return IsHashTable();
}
+bool HeapObject::IsMapCache() const { return IsHashTable(); }
-bool Object::IsPolymorphicCodeCacheHashTable() const {
- return IsHashTable();
-}
-
+bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }
-bool Object::IsMapCache() const {
- return IsHashTable();
-}
-
-
-bool Object::IsObjectHashTable() const {
- return IsHashTable();
-}
-
-
-bool Object::IsOrderedHashTable() const {
- return IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->ordered_hash_table_map();
+bool HeapObject::IsOrderedHashTable() const {
+ return map() == GetHeap()->ordered_hash_table_map();
}
@@ -995,40 +899,30 @@ bool Object::IsPrimitive() const {
return IsSmi() || HeapObject::cast(this)->map()->IsPrimitiveMap();
}
-
-bool Object::IsJSGlobalProxy() const {
- bool result = IsHeapObject() &&
- (HeapObject::cast(this)->map()->instance_type() ==
- JS_GLOBAL_PROXY_TYPE);
- DCHECK(!result ||
- HeapObject::cast(this)->map()->is_access_check_needed());
+bool HeapObject::IsJSGlobalProxy() const {
+ bool result = map()->instance_type() == JS_GLOBAL_PROXY_TYPE;
+ DCHECK(!result || map()->is_access_check_needed());
return result;
}
TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
-
-bool Object::IsUndetectableObject() const {
- return IsHeapObject()
- && HeapObject::cast(this)->map()->is_undetectable();
+bool HeapObject::IsUndetectableObject() const {
+ return map()->is_undetectable();
}
-
-bool Object::IsAccessCheckNeeded() const {
- if (!IsHeapObject()) return false;
+bool HeapObject::IsAccessCheckNeeded() const {
if (IsJSGlobalProxy()) {
const JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
JSGlobalObject* global = proxy->GetIsolate()->context()->global_object();
return proxy->IsDetachedFrom(global);
}
- return HeapObject::cast(this)->map()->is_access_check_needed();
+ return map()->is_access_check_needed();
}
-
-bool Object::IsStruct() const {
- if (!IsHeapObject()) return false;
- switch (HeapObject::cast(this)->map()->instance_type()) {
+bool HeapObject::IsStruct() const {
+ switch (map()->instance_type()) {
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return true;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
@@ -1036,56 +930,23 @@ bool Object::IsStruct() const {
}
}
-
-#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
- bool Object::Is##Name() const { \
- return Object::IsHeapObject() \
- && HeapObject::cast(this)->map()->instance_type() == NAME##_TYPE; \
+#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
+ bool Object::Is##Name() const { \
+ return IsHeapObject() && HeapObject::cast(this)->Is##Name(); \
+ } \
+ bool HeapObject::Is##Name() const { \
+ return map()->instance_type() == NAME##_TYPE; \
}
- STRUCT_LIST(MAKE_STRUCT_PREDICATE)
+STRUCT_LIST(MAKE_STRUCT_PREDICATE)
#undef MAKE_STRUCT_PREDICATE
+#define MAKE_ODDBALL_PREDICATE(Name) \
+ bool HeapObject::Is##Name() const { \
+ return IsOddball() && Oddball::cast(this)->kind() == Oddball::k##Name; \
+ }
+ODDBALL_LIST(MAKE_ODDBALL_PREDICATE)
-bool Object::IsUndefined() const {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUndefined;
-}
-
-
-bool Object::IsNull() const {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kNull;
-}
-
-
-bool Object::IsTheHole() const {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTheHole;
-}
-
-
-bool Object::IsException() const {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kException;
-}
-
-
-bool Object::IsUninitialized() const {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUninitialized;
-}
-
-
-bool Object::IsTrue() const {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTrue;
-}
-
-
-bool Object::IsFalse() const {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kFalse;
-}
-
-
-bool Object::IsArgumentsMarker() const {
- return IsOddball() && Oddball::cast(this)->kind() == Oddball::kArgumentMarker;
-}
-
-
+#undef MAKE_ODDBALL_PREDICATE
double Object::Number() const {
DCHECK(IsNumber());
return IsSmi()
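
In the hunk above, the eight hand-written oddball predicates (IsUndefined, IsNull, IsTheHole, ...) collapse into MAKE_ODDBALL_PREDICATE over an ODDBALL_LIST. A self-contained sketch of the same macro trick, with illustrative kind values:

struct Oddball {
  enum Kind { kUndefined, kNull, kTheHole, kTrue, kFalse };
  Kind kind;
};

#define ODDBALL_LIST(V) V(Undefined) V(Null) V(TheHole) V(True) V(False)

// One macro expansion per list entry replaces eight duplicated bodies.
#define MAKE_ODDBALL_PREDICATE(Name) \
  inline bool Is##Name(const Oddball& o) { return o.kind == Oddball::k##Name; }
ODDBALL_LIST(MAKE_ODDBALL_PREDICATE)
#undef MAKE_ODDBALL_PREDICATE
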
@@ -1130,14 +991,14 @@ ElementsKind Object::OptimalElementsKind() {
bool Object::FitsRepresentation(Representation representation) {
- if (FLAG_track_fields && representation.IsNone()) {
- return false;
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ if (FLAG_track_fields && representation.IsSmi()) {
return IsSmi();
} else if (FLAG_track_double_fields && representation.IsDouble()) {
return IsMutableHeapNumber() || IsNumber();
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
return IsHeapObject();
+ } else if (FLAG_track_fields && representation.IsNone()) {
+ return false;
}
return true;
}
@@ -1146,8 +1007,8 @@ bool Object::FitsRepresentation(Representation representation) {
// static
MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
Handle<Object> object) {
- return ToObject(
- isolate, object, handle(isolate->context()->native_context(), isolate));
+ if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
+ return ToObject(isolate, object, isolate->native_context());
}
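
The hunk above short-circuits ToObject when the input is already a JSReceiver, skipping both the handle allocation and the native-context lookup. A sketch of that shape, with std::optional standing in for MaybeHandle and a minimal Object stand-in:

#include <optional>

struct Object { bool is_receiver; };

// Slow-path stub standing in for ToObject(isolate, object, context).
std::optional<Object*> ToObjectWithContext(Object* o) {
  if (!o) return std::nullopt;
  return o;  // the real code wraps primitives in a JSValue here
}

// The shape of the patched overload: an early return serves the common
// receiver case without touching the context.
std::optional<Object*> ToObject(Object* o) {
  if (o->is_receiver) return o;   // fast path added by the patch
  return ToObjectWithContext(o);  // slow path for primitives
}
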
@@ -1163,20 +1024,16 @@ bool Object::HasSpecificClassOf(String* name) {
return this->IsJSObject() && (JSObject::cast(this)->class_name() == name);
}
-
MaybeHandle<Object> Object::GetProperty(Handle<Object> object,
- Handle<Name> name,
- LanguageMode language_mode) {
+ Handle<Name> name) {
LookupIterator it(object, name);
- return GetProperty(&it, language_mode);
+ return GetProperty(&it);
}
-
MaybeHandle<Object> Object::GetElement(Isolate* isolate, Handle<Object> object,
- uint32_t index,
- LanguageMode language_mode) {
+ uint32_t index) {
LookupIterator it(isolate, object, index);
- return GetProperty(&it, language_mode);
+ return GetProperty(&it);
}
@@ -1189,25 +1046,23 @@ MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
return value;
}
-
-MaybeHandle<Object> Object::GetPrototype(Isolate* isolate,
- Handle<Object> receiver) {
+MaybeHandle<Object> JSReceiver::GetPrototype(Isolate* isolate,
+ Handle<JSReceiver> receiver) {
// We don't expect access checks to be needed on JSProxy objects.
DCHECK(!receiver->IsAccessCheckNeeded() || receiver->IsJSObject());
PrototypeIterator iter(isolate, receiver,
- PrototypeIterator::START_AT_RECEIVER);
+ PrototypeIterator::START_AT_RECEIVER,
+ PrototypeIterator::END_AT_NON_HIDDEN);
do {
if (!iter.AdvanceFollowingProxies()) return MaybeHandle<Object>();
- } while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN));
+ } while (!iter.IsAtEnd());
return PrototypeIterator::GetCurrent(iter);
}
-
MaybeHandle<Object> Object::GetProperty(Isolate* isolate, Handle<Object> object,
- const char* name,
- LanguageMode language_mode) {
+ const char* name) {
Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
- return GetProperty(object, str, language_mode);
+ return GetProperty(object, str);
}
@@ -1241,12 +1096,10 @@ MaybeHandle<Object> Object::GetProperty(Isolate* isolate, Handle<Object> object,
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<base::AtomicWord>(value));
-#define WRITE_BARRIER(heap, object, offset, value) \
- heap->incremental_marking()->RecordWrite( \
- object, HeapObject::RawField(object, offset), value); \
- if (heap->InNewSpace(value)) { \
- heap->RecordWrite(object->address(), offset); \
- }
+#define WRITE_BARRIER(heap, object, offset, value) \
+ heap->incremental_marking()->RecordWrite( \
+ object, HeapObject::RawField(object, offset), value); \
+ heap->RecordWrite(object, offset, value);
#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
if (mode != SKIP_WRITE_BARRIER) { \
@@ -1254,9 +1107,7 @@ MaybeHandle<Object> Object::GetProperty(Isolate* isolate, Handle<Object> object,
heap->incremental_marking()->RecordWrite( \
object, HeapObject::RawField(object, offset), value); \
} \
- if (heap->InNewSpace(value)) { \
- heap->RecordWrite(object->address(), offset); \
- } \
+ heap->RecordWrite(object, offset, value); \
}
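
WRITE_BARRIER and CONDITIONAL_WRITE_BARRIER no longer test InNewSpace themselves; the filtering moves behind Heap::RecordWrite, which now takes the host object, offset, and value. A sketch of the idea, with an unordered_set standing in for the remembered set:

#include <cstdint>
#include <unordered_set>

struct HeapObject { uintptr_t address; };

struct Heap {
  std::unordered_set<uintptr_t> old_to_new;  // remembered-set stand-in
  bool InNewSpace(const HeapObject* v) const { return v->address < 0x10000; }
  // One entry point decides whether an old-to-new slot must be recorded,
  // instead of every barrier macro repeating the InNewSpace check.
  void RecordWrite(HeapObject* host, int offset, HeapObject* value) {
    if (!InNewSpace(value)) return;  // filter now lives here, not in callers
    old_to_new.insert(host->address + offset);
  }
};
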
#define READ_DOUBLE_FIELD(p, offset) \
@@ -1390,8 +1241,9 @@ void HeapObject::VerifySmiField(int offset) {
Heap* HeapObject::GetHeap() const {
- Heap* heap =
- MemoryChunk::FromAddress(reinterpret_cast<const byte*>(this))->heap();
+ Heap* heap = MemoryChunk::FromAddress(
+ reinterpret_cast<Address>(const_cast<HeapObject*>(this)))
+ ->heap();
SLOW_DCHECK(heap != NULL);
return heap;
}
@@ -1858,6 +1710,9 @@ AllocationSite* AllocationMemento::GetAllocationSite() {
return AllocationSite::cast(allocation_site());
}
+Address AllocationMemento::GetAllocationSiteUnchecked() {
+ return reinterpret_cast<Address>(allocation_site());
+}
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
JSObject::ValidateElements(object);
@@ -1953,7 +1808,8 @@ void JSObject::SetMapAndElements(Handle<JSObject> object,
Handle<FixedArrayBase> value) {
JSObject::MigrateToMap(object, new_map);
DCHECK((object->map()->has_fast_smi_or_object_elements() ||
- (*value == object->GetHeap()->empty_fixed_array())) ==
+ (*value == object->GetHeap()->empty_fixed_array()) ||
+ object->map()->has_fast_string_wrapper_elements()) ==
(value->map() == object->GetHeap()->fixed_array_map() ||
value->map() == object->GetHeap()->fixed_cow_array_map()));
DCHECK((*value == object->GetHeap()->empty_fixed_array()) ||
@@ -2040,9 +1896,7 @@ void WeakCell::initialize(HeapObject* val) {
// We just have to execute the generational barrier here because we never
// mark through a weak cell and collect evacuation candidates when we process
// all weak cells.
- if (heap->InNewSpace(val)) {
- heap->RecordWrite(address(), kValueOffset);
- }
+ heap->RecordWrite(this, kValueOffset, val);
}
@@ -2110,8 +1964,6 @@ int JSObject::GetHeaderSize(InstanceType type) {
return JSSetIterator::kSize;
case JS_MAP_ITERATOR_TYPE:
return JSMapIterator::kSize;
- case JS_ITERATOR_RESULT_TYPE:
- return JSIteratorResult::kSize;
case JS_WEAK_MAP_TYPE:
return JSWeakMap::kSize;
case JS_WEAK_SET_TYPE:
@@ -2236,15 +2088,10 @@ void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
}
}
-
-void JSObject::WriteToField(int descriptor, Object* value) {
- DisallowHeapAllocation no_gc;
-
- DescriptorArray* desc = map()->instance_descriptors();
- PropertyDetails details = desc->GetDetails(descriptor);
-
+void JSObject::WriteToField(int descriptor, PropertyDetails details,
+ Object* value) {
DCHECK(details.type() == DATA);
-
+ DisallowHeapAllocation no_gc;
FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
if (details.representation().IsDouble()) {
// Nothing more to be done.
@@ -2261,6 +2108,11 @@ void JSObject::WriteToField(int descriptor, Object* value) {
}
}
+void JSObject::WriteToField(int descriptor, Object* value) {
+ DescriptorArray* desc = map()->instance_descriptors();
+ PropertyDetails details = desc->GetDetails(descriptor);
+ WriteToField(descriptor, details, value);
+}
int JSObject::GetInObjectPropertyOffset(int index) {
return map()->GetInObjectPropertyOffset(index);
@@ -2335,19 +2187,6 @@ bool Object::ToArrayIndex(uint32_t* index) {
}
-bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
- if (!this->IsJSValue()) return false;
-
- JSValue* js_value = JSValue::cast(this);
- if (!js_value->value()->IsString()) return false;
-
- String* str = String::cast(js_value->value());
- if (index >= static_cast<uint32_t>(str->length())) return false;
-
- return true;
-}
-
-
void Object::VerifyApiCallResultType() {
#if DEBUG
if (!(IsSmi() || IsString() || IsSymbol() || IsJSReceiver() ||
@@ -2364,9 +2203,8 @@ Object* FixedArray::get(int index) const {
return READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
-
-Handle<Object> FixedArray::get(Handle<FixedArray> array, int index) {
- return handle(array->get(index), array->GetIsolate());
+Handle<Object> FixedArray::get(FixedArray* array, int index, Isolate* isolate) {
+ return handle(array->get(index), isolate);
}
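
FixedArray::get's static overload now takes a raw FixedArray* plus an explicit Isolate* instead of a Handle, so hot loops can look the isolate up once rather than calling GetIsolate() per element. A sketch of the signature change with stand-in types:

struct Isolate {};
struct Object {};
struct FixedArray {
  Object* slots[16];
  Object* get(int i) const { return slots[i]; }
};
template <typename T> struct Handle { T* ptr; };

// The real code ties the returned handle to the isolate's handle scope;
// this only models the shape of the call.
Handle<Object> Get(FixedArray* array, int index, Isolate* /*isolate*/) {
  return Handle<Object>{array->get(index)};
}

// Usage: hoist the isolate out of the loop, pay for the lookup once.
void CopyAll(FixedArray* array, Isolate* isolate, Handle<Object>* out, int n) {
  for (int i = 0; i < n; i++) out[i] = Get(array, i, isolate);
}
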
@@ -2387,7 +2225,8 @@ void FixedArray::set(int index, Smi* value) {
void FixedArray::set(int index, Object* value) {
DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
DCHECK(IsFixedArray());
- DCHECK(index >= 0 && index < this->length());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
@@ -2411,13 +2250,12 @@ uint64_t FixedDoubleArray::get_representation(int index) {
return READ_UINT64_FIELD(this, offset);
}
-
-Handle<Object> FixedDoubleArray::get(Handle<FixedDoubleArray> array,
- int index) {
+Handle<Object> FixedDoubleArray::get(FixedDoubleArray* array, int index,
+ Isolate* isolate) {
if (array->is_the_hole(index)) {
- return array->GetIsolate()->factory()->the_hole_value();
+ return isolate->factory()->the_hole_value();
} else {
- return array->GetIsolate()->factory()->NewNumber(array->get_scalar(index));
+ return isolate->factory()->NewNumber(array->get_scalar(index));
}
}
@@ -2706,15 +2544,14 @@ Object** DescriptorArray::GetEnumCacheSlot() {
kEnumCacheOffset);
}
-
-// Perform a binary search in a fixed array. Low and high are entry indices. If
-// there are three entries in this array it should be called with low=0 and
-// high=2.
+// Perform a binary search in a fixed array.
template <SearchMode search_mode, typename T>
-int BinarySearch(T* array, Name* name, int low, int high, int valid_entries,
+int BinarySearch(T* array, Name* name, int valid_entries,
int* out_insertion_index) {
DCHECK(search_mode == ALL_ENTRIES || out_insertion_index == NULL);
- uint32_t hash = name->Hash();
+ int low = 0;
+ int high = array->number_of_entries() - 1;
+ uint32_t hash = name->hash_field();
int limit = high;
DCHECK(low <= high);
@@ -2722,7 +2559,7 @@ int BinarySearch(T* array, Name* name, int low, int high, int valid_entries,
while (low != high) {
int mid = low + (high - low) / 2;
Name* mid_name = array->GetSortedKey(mid);
- uint32_t mid_hash = mid_name->Hash();
+ uint32_t mid_hash = mid_name->hash_field();
if (mid_hash >= hash) {
high = mid;
@@ -2734,14 +2571,14 @@ int BinarySearch(T* array, Name* name, int low, int high, int valid_entries,
for (; low <= limit; ++low) {
int sort_index = array->GetSortedKeyIndex(low);
Name* entry = array->GetKey(sort_index);
- uint32_t current_hash = entry->Hash();
+ uint32_t current_hash = entry->hash_field();
if (current_hash != hash) {
- if (out_insertion_index != NULL) {
+ if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
*out_insertion_index = sort_index + (current_hash > hash ? 0 : 1);
}
return T::kNotFound;
}
- if (entry->Equals(name)) {
+ if (entry == name) {
if (search_mode == ALL_ENTRIES || sort_index < valid_entries) {
return sort_index;
}
@@ -2749,7 +2586,9 @@ int BinarySearch(T* array, Name* name, int low, int high, int valid_entries,
}
}
- if (out_insertion_index != NULL) *out_insertion_index = limit + 1;
+ if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
+ *out_insertion_index = limit + 1;
+ }
return T::kNotFound;
}
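
BinarySearch now computes its own bounds, keys on hash_field(), and finishes with pointer identity (entry == name), which is sound because descriptor keys are unique names: equal unique names are the same object. A self-contained sketch of hash-ordered binary search with an insertion index:

#include <cstdint>
#include <vector>

struct Name { uint32_t hash; };

// Returns the index of `name`, or -1 with the insertion point stored in
// *out_insertion_index when the caller asked for it.
int BinarySearch(const std::vector<Name*>& keys, Name* name,
                 int* out_insertion_index) {
  if (keys.empty()) {  // the caller-side valid_entries == 0 check
    if (out_insertion_index) *out_insertion_index = 0;
    return -1;
  }
  uint32_t hash = name->hash;
  int low = 0;
  int high = static_cast<int>(keys.size()) - 1;
  int limit = high;
  // Lower bound over keys sorted by hash.
  while (low != high) {
    int mid = low + (high - low) / 2;
    if (keys[mid]->hash >= hash) high = mid;
    else low = mid + 1;
  }
  // Scan the run of equal hashes for the exact key.
  for (; low <= limit; ++low) {
    uint32_t current = keys[low]->hash;
    if (current != hash) {
      if (out_insertion_index)
        *out_insertion_index = low + (current > hash ? 0 : 1);
      return -1;
    }
    if (keys[low] == name) return low;  // identity, not deep equality
  }
  if (out_insertion_index) *out_insertion_index = limit + 1;
  return -1;
}
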
@@ -2757,29 +2596,28 @@ int BinarySearch(T* array, Name* name, int low, int high, int valid_entries,
// Perform a linear search in this fixed array. len is the number of entry
// indices that are valid.
template <SearchMode search_mode, typename T>
-int LinearSearch(T* array, Name* name, int len, int valid_entries,
+int LinearSearch(T* array, Name* name, int valid_entries,
int* out_insertion_index) {
- uint32_t hash = name->Hash();
- if (search_mode == ALL_ENTRIES) {
+ if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
+ uint32_t hash = name->hash_field();
+ int len = array->number_of_entries();
for (int number = 0; number < len; number++) {
int sorted_index = array->GetSortedKeyIndex(number);
Name* entry = array->GetKey(sorted_index);
- uint32_t current_hash = entry->Hash();
+ uint32_t current_hash = entry->hash_field();
if (current_hash > hash) {
- if (out_insertion_index != NULL) *out_insertion_index = sorted_index;
+ *out_insertion_index = sorted_index;
return T::kNotFound;
}
- if (current_hash == hash && entry->Equals(name)) return sorted_index;
+ if (entry == name) return sorted_index;
}
- if (out_insertion_index != NULL) *out_insertion_index = len;
+ *out_insertion_index = len;
return T::kNotFound;
} else {
- DCHECK(len >= valid_entries);
+ DCHECK_LE(valid_entries, array->number_of_entries());
DCHECK_NULL(out_insertion_index); // Not supported here.
for (int number = 0; number < valid_entries; number++) {
- Name* entry = array->GetKey(number);
- uint32_t current_hash = entry->Hash();
- if (current_hash == hash && entry->Equals(name)) return number;
+ if (array->GetKey(number) == name) return number;
}
return T::kNotFound;
}
@@ -2788,44 +2626,39 @@ int LinearSearch(T* array, Name* name, int len, int valid_entries,
template <SearchMode search_mode, typename T>
int Search(T* array, Name* name, int valid_entries, int* out_insertion_index) {
- if (search_mode == VALID_ENTRIES) {
- SLOW_DCHECK(array->IsSortedNoDuplicates(valid_entries));
- } else {
- SLOW_DCHECK(array->IsSortedNoDuplicates());
- }
+ SLOW_DCHECK(array->IsSortedNoDuplicates());
- int nof = array->number_of_entries();
- if (nof == 0) {
- if (out_insertion_index != NULL) *out_insertion_index = 0;
+ if (valid_entries == 0) {
+ if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) {
+ *out_insertion_index = 0;
+ }
return T::kNotFound;
}
// Fast case: do linear search for small arrays.
const int kMaxElementsForLinearSearch = 8;
- if ((search_mode == ALL_ENTRIES &&
- nof <= kMaxElementsForLinearSearch) ||
- (search_mode == VALID_ENTRIES &&
- valid_entries <= (kMaxElementsForLinearSearch * 3))) {
- return LinearSearch<search_mode>(array, name, nof, valid_entries,
+ if (valid_entries <= kMaxElementsForLinearSearch) {
+ return LinearSearch<search_mode>(array, name, valid_entries,
out_insertion_index);
}
// Slow case: perform binary search.
- return BinarySearch<search_mode>(array, name, 0, nof - 1, valid_entries,
+ return BinarySearch<search_mode>(array, name, valid_entries,
out_insertion_index);
}
int DescriptorArray::Search(Name* name, int valid_descriptors) {
+ DCHECK(name->IsUniqueName());
return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors, NULL);
}
-
-int DescriptorArray::SearchWithCache(Name* name, Map* map) {
+int DescriptorArray::SearchWithCache(Isolate* isolate, Name* name, Map* map) {
+ DCHECK(name->IsUniqueName());
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
if (number_of_own_descriptors == 0) return kNotFound;
- DescriptorLookupCache* cache = GetIsolate()->descriptor_lookup_cache();
+ DescriptorLookupCache* cache = isolate->descriptor_lookup_cache();
int number = cache->Lookup(map, name);
if (number == DescriptorLookupCache::kAbsent) {
@@ -2836,7 +2669,6 @@ int DescriptorArray::SearchWithCache(Name* name, Map* map) {
return number;
}
-
PropertyDetails Map::GetLastDescriptorDetails() {
return instance_descriptors()->GetDetails(LastAdded());
}
@@ -2874,8 +2706,7 @@ void Map::SetEnumLength(int length) {
FixedArrayBase* Map::GetInitialElements() {
- if (has_fast_smi_or_object_elements() ||
- has_fast_double_elements()) {
+ if (has_fast_elements() || has_fast_string_wrapper_elements()) {
DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
return GetHeap()->empty_fixed_array();
} else if (has_fixed_typed_array_elements()) {
@@ -2976,18 +2807,6 @@ int DescriptorArray::GetFieldIndex(int descriptor_number) {
return GetDetails(descriptor_number).field_index();
}
-
-HeapType* DescriptorArray::GetFieldType(int descriptor_number) {
- DCHECK(GetDetails(descriptor_number).location() == kField);
- Object* value = GetValue(descriptor_number);
- if (value->IsWeakCell()) {
- if (WeakCell::cast(value)->cleared()) return HeapType::None();
- value = WeakCell::cast(value)->value();
- }
- return HeapType::cast(value);
-}
-
-
Object* DescriptorArray::GetConstant(int descriptor_number) {
return GetValue(descriptor_number);
}
@@ -3180,8 +2999,7 @@ void SeededNumberDictionary::set_requires_slow_elements() {
// ------------------------------------
// Cast operations
-
-CAST_ACCESSOR(AccessorInfo)
+CAST_ACCESSOR(AbstractCode)
CAST_ACCESSOR(ArrayList)
CAST_ACCESSOR(Bool16x8)
CAST_ACCESSOR(Bool32x4)
@@ -3232,7 +3050,6 @@ CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSSetIterator)
-CAST_ACCESSOR(JSIteratorResult)
CAST_ACCESSOR(JSTypedArray)
CAST_ACCESSOR(JSValue)
CAST_ACCESSOR(JSWeakMap)
@@ -3418,6 +3235,28 @@ int LiteralsArray::literals_count() const {
return length() - kFirstLiteralIndex;
}
+int HandlerTable::GetRangeStart(int index) const {
+ return Smi::cast(get(index * kRangeEntrySize + kRangeStartIndex))->value();
+}
+
+int HandlerTable::GetRangeEnd(int index) const {
+ return Smi::cast(get(index * kRangeEntrySize + kRangeEndIndex))->value();
+}
+
+int HandlerTable::GetRangeHandler(int index) const {
+ return HandlerOffsetField::decode(
+ Smi::cast(get(index * kRangeEntrySize + kRangeHandlerIndex))->value());
+}
+
+int HandlerTable::GetRangeData(int index) const {
+ return Smi::cast(get(index * kRangeEntrySize + kRangeDataIndex))->value();
+}
+
+HandlerTable::CatchPrediction HandlerTable::GetRangePrediction(
+ int index) const {
+ return HandlerPredictionField::decode(
+ Smi::cast(get(index * kRangeEntrySize + kRangeHandlerIndex))->value());
+}
void HandlerTable::SetRangeStart(int index, int value) {
set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
@@ -3436,9 +3275,8 @@ void HandlerTable::SetRangeHandler(int index, int offset,
set(index * kRangeEntrySize + kRangeHandlerIndex, Smi::FromInt(value));
}
-
-void HandlerTable::SetRangeDepth(int index, int value) {
- set(index * kRangeEntrySize + kRangeDepthIndex, Smi::FromInt(value));
+void HandlerTable::SetRangeData(int index, int value) {
+ set(index * kRangeEntrySize + kRangeDataIndex, Smi::FromInt(value));
}
@@ -3454,6 +3292,9 @@ void HandlerTable::SetReturnHandler(int index, int offset,
set(index * kReturnEntrySize + kReturnHandlerIndex, Smi::FromInt(value));
}
+int HandlerTable::NumberOfRangeEntries() const {
+ return length() / kRangeEntrySize;
+}
#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
STRUCT_LIST(MAKE_STRUCT_CAST)
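
The new HandlerTable getters above decode a handler offset and a catch prediction packed into one Smi via bit fields. A sketch of the packing; field widths are illustrative, as V8 uses its BitField template:

#include <cassert>
#include <cstdint>

enum CatchPrediction : uint32_t { UNCAUGHT = 0, CAUGHT = 1 };

constexpr uint32_t kPredictionBits = 1;
constexpr uint32_t kPredictionMask = (1u << kPredictionBits) - 1;

// Prediction occupies the low bit, the handler offset the bits above it.
uint32_t EncodeRangeHandler(uint32_t offset, CatchPrediction pred) {
  return (offset << kPredictionBits) | pred;
}
uint32_t DecodeOffset(uint32_t packed) { return packed >> kPredictionBits; }
CatchPrediction DecodePrediction(uint32_t packed) {
  return static_cast<CatchPrediction>(packed & kPredictionMask);
}

int main() {
  uint32_t packed = EncodeRangeHandler(0x40, CAUGHT);
  assert(DecodeOffset(packed) == 0x40);
  assert(DecodePrediction(packed) == CAUGHT);
}
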
@@ -3584,12 +3425,6 @@ Handle<String> String::Flatten(Handle<String> string, PretenureFlag pretenure) {
}
-Handle<Name> Name::Flatten(Handle<Name> name, PretenureFlag pretenure) {
- if (name->IsSymbol()) return name;
- return String::Flatten(Handle<String>::cast(name));
-}
-
-
uint16_t String::Get(int index) {
DCHECK(index >= 0 && index < length());
switch (StringShape(this).full_representation_tag()) {
@@ -4045,6 +3880,14 @@ void BytecodeArray::set_parameter_count(int number_of_parameters) {
(number_of_parameters << kPointerSizeLog2));
}
+int BytecodeArray::interrupt_budget() const {
+ return READ_INT_FIELD(this, kInterruptBudgetOffset);
+}
+
+void BytecodeArray::set_interrupt_budget(int interrupt_budget) {
+ DCHECK_GE(interrupt_budget, 0);
+ WRITE_INT_FIELD(this, kInterruptBudgetOffset, interrupt_budget);
+}
int BytecodeArray::parameter_count() const {
// Parameter count is stored as the size on stack of the parameters to allow
@@ -4054,6 +3897,9 @@ int BytecodeArray::parameter_count() const {
ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
+ACCESSORS(BytecodeArray, handler_table, FixedArray, kHandlerTableOffset)
+ACCESSORS(BytecodeArray, source_position_table, FixedArray,
+ kSourcePositionTableOffset)
Address BytecodeArray::GetFirstBytecodeAddress() {
@@ -4219,11 +4065,9 @@ double FixedTypedArray<Float64ArrayTraits>::from_double(double value) {
return value;
}
-
template <class Traits>
-Handle<Object> FixedTypedArray<Traits>::get(
- Handle<FixedTypedArray<Traits> > array,
- int index) {
+Handle<Object> FixedTypedArray<Traits>::get(FixedTypedArray<Traits>* array,
+ int index) {
return Traits::ToHandle(array->GetIsolate(), array->get_scalar(index));
}
@@ -4470,8 +4314,12 @@ bool Map::has_non_instance_prototype() {
}
-void Map::set_is_constructor() {
- set_bit_field(bit_field() | (1 << kIsConstructor));
+void Map::set_is_constructor(bool value) {
+ if (value) {
+ set_bit_field(bit_field() | (1 << kIsConstructor));
+ } else {
+ set_bit_field(bit_field() & ~(1 << kIsConstructor));
+ }
}
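
set_is_constructor() gains a bool parameter so one accessor both sets and clears the flag, replacing the set-only helper. The underlying bit manipulation, as a compilable sketch with an illustrative bit position:

#include <cassert>

constexpr unsigned kIsConstructorBit = 1u << 3;  // position is illustrative

struct BitFieldHolder {
  unsigned bits = 0;
  void set_is_constructor(bool value) {
    if (value) bits |= kIsConstructorBit;   // set the flag
    else       bits &= ~kIsConstructorBit;  // clear it
  }
  bool is_constructor() const { return (bits & kIsConstructorBit) != 0; }
};

int main() {
  BitFieldHolder m;
  m.set_is_constructor(true);  assert(m.is_constructor());
  m.set_is_constructor(false); assert(!m.is_constructor());
}
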
@@ -4479,14 +4327,12 @@ bool Map::is_constructor() const {
return ((1 << kIsConstructor) & bit_field()) != 0;
}
-
-void Map::set_is_hidden_prototype() {
- set_bit_field3(IsHiddenPrototype::update(bit_field3(), true));
+void Map::set_has_hidden_prototype(bool value) {
+ set_bit_field3(HasHiddenPrototype::update(bit_field3(), value));
}
-
-bool Map::is_hidden_prototype() const {
- return IsHiddenPrototype::decode(bit_field3());
+bool Map::has_hidden_prototype() const {
+ return HasHiddenPrototype::decode(bit_field3());
}
@@ -4596,6 +4442,10 @@ bool Map::has_sloppy_arguments_elements() {
return IsSloppyArgumentsElements(elements_kind());
}
+bool Map::has_fast_string_wrapper_elements() {
+ return elements_kind() == FAST_STRING_WRAPPER_ELEMENTS;
+}
+
bool Map::has_fixed_typed_array_elements() {
return IsFixedTypedArrayElementsKind(elements_kind());
}
@@ -4898,6 +4748,13 @@ inline bool Code::is_interpreter_entry_trampoline() {
return interpreter_entry.location() != nullptr && *interpreter_entry == this;
}
+inline bool Code::is_interpreter_enter_bytecode_dispatch() {
+ Handle<Code> interpreter_handler =
+ GetIsolate()->builtins()->InterpreterEnterBytecodeDispatch();
+ return interpreter_handler.location() != nullptr &&
+ *interpreter_handler == this;
+}
+
inline void Code::set_is_crankshafted(bool value) {
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = IsCrankshaftedField::update(previous, value);
@@ -5274,6 +5131,19 @@ class Code::FindAndReplacePattern {
friend class Code;
};
+int AbstractCode::Size() {
+ if (IsCode()) {
+ return GetCode()->instruction_size();
+ } else {
+ return GetBytecodeArray()->length();
+ }
+}
+
+Code* AbstractCode::GetCode() { return Code::cast(this); }
+
+BytecodeArray* AbstractCode::GetBytecodeArray() {
+ return BytecodeArray::cast(this);
+}
Object* Map::prototype() const {
return READ_FIELD(this, kPrototypeOffset);
@@ -5458,7 +5328,6 @@ ACCESSORS(JSBoundFunction, bound_target_function, JSReceiver,
kBoundTargetFunctionOffset)
ACCESSORS(JSBoundFunction, bound_this, Object, kBoundThisOffset)
ACCESSORS(JSBoundFunction, bound_arguments, FixedArray, kBoundArgumentsOffset)
-ACCESSORS(JSBoundFunction, creation_context, Context, kCreationContextOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
ACCESSORS(JSFunction, literals, LiteralsArray, kLiteralsOffset)
@@ -5475,9 +5344,9 @@ SMI_ACCESSORS(AccessorInfo, flag, kFlagOffset)
ACCESSORS(AccessorInfo, expected_receiver_type, Object,
kExpectedReceiverTypeOffset)
-ACCESSORS(ExecutableAccessorInfo, getter, Object, kGetterOffset)
-ACCESSORS(ExecutableAccessorInfo, setter, Object, kSetterOffset)
-ACCESSORS(ExecutableAccessorInfo, data, Object, kDataOffset)
+ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
+ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
+ACCESSORS(AccessorInfo, data, Object, kDataOffset)
ACCESSORS(Box, value, Object, kValueOffset)
@@ -5515,11 +5384,11 @@ ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
ACCESSORS(CallHandlerInfo, fast_handler, Object, kFastHandlerOffset)
ACCESSORS(TemplateInfo, tag, Object, kTagOffset)
+ACCESSORS(TemplateInfo, serial_number, Object, kSerialNumberOffset)
SMI_ACCESSORS(TemplateInfo, number_of_properties, kNumberOfProperties)
ACCESSORS(TemplateInfo, property_list, Object, kPropertyListOffset)
ACCESSORS(TemplateInfo, property_accessors, Object, kPropertyAccessorsOffset)
-ACCESSORS(FunctionTemplateInfo, serial_number, Object, kSerialNumberOffset)
ACCESSORS(FunctionTemplateInfo, call_code, Object, kCallCodeOffset)
ACCESSORS(FunctionTemplateInfo, prototype_template, Object,
kPrototypeTemplateOffset)
@@ -5601,10 +5470,14 @@ void Script::set_origin_options(ScriptOriginOptions origin_options) {
ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
-ACCESSORS(DebugInfo, code, Code, kCodeIndex)
+ACCESSORS(DebugInfo, abstract_code, AbstractCode, kAbstractCodeIndex)
ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
-SMI_ACCESSORS(BreakPointInfo, code_position, kCodePositionIndex)
+BytecodeArray* DebugInfo::original_bytecode_array() {
+ return shared()->bytecode_array();
+}
+
+SMI_ACCESSORS(BreakPointInfo, code_offset, kCodeOffsetIndex)
SMI_ACCESSORS(BreakPointInfo, source_position, kSourcePositionIndex)
SMI_ACCESSORS(BreakPointInfo, statement_position, kStatementPositionIndex)
ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
@@ -5641,8 +5514,8 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache,
BOOL_ACCESSORS(FunctionTemplateInfo, flag, instantiated, kInstantiatedBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
kAcceptAnyReceiver)
-BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
- kIsExpressionBit)
+BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_named_expression,
+ kIsNamedExpressionBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
kIsTopLevelBit)
@@ -5664,7 +5537,8 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, asm_function, kIsAsmFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, deserialized, kDeserialized)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, never_compiled,
kNeverCompiled)
-
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
+ kIsDeclaration)
#if V8_HOST_ARCH_32_BIT
SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
@@ -5804,7 +5678,8 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, force_inline, kForceInline)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
name_should_print_as_anonymous,
kNameShouldPrintAsAnonymous)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous_expression,
+ kIsAnonymousExpression)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_crankshaft,
kDontCrankshaft)
@@ -5813,8 +5688,10 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_arrow, kIsArrow)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_concise_method,
kIsConciseMethod)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_accessor_function,
- kIsAccessorFunction)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_getter_function,
+ kIsGetterFunction)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_setter_function,
+ kIsSetterFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_default_constructor,
kIsDefaultConstructor)
@@ -5928,7 +5805,8 @@ DebugInfo* SharedFunctionInfo::GetDebugInfo() {
bool SharedFunctionInfo::HasDebugCode() {
- return code()->kind() == Code::FUNCTION && code()->has_debug_break_slots();
+ return HasBytecodeArray() ||
+ (code()->kind() == Code::FUNCTION && code()->has_debug_break_slots());
}
@@ -6298,6 +6176,7 @@ void Foreign::set_foreign_address(Address value) {
ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)
ACCESSORS(JSGeneratorObject, context, Context, kContextOffset)
ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
+ACCESSORS(JSGeneratorObject, input, Object, kInputOffset)
SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset)
@@ -6720,23 +6599,31 @@ bool JSObject::HasSloppyArgumentsElements() {
return IsSloppyArgumentsElements(GetElementsKind());
}
+bool JSObject::HasStringWrapperElements() {
+ return IsStringWrapperElementsKind(GetElementsKind());
+}
-bool JSObject::HasFixedTypedArrayElements() {
- HeapObject* array = elements();
- DCHECK(array != NULL);
- return array->IsFixedTypedArrayBase();
+bool JSObject::HasFastStringWrapperElements() {
+ return GetElementsKind() == FAST_STRING_WRAPPER_ELEMENTS;
}
+bool JSObject::HasSlowStringWrapperElements() {
+ return GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS;
+}
-#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype, size) \
-bool JSObject::HasFixed##Type##Elements() { \
- HeapObject* array = elements(); \
- DCHECK(array != NULL); \
- if (!array->IsHeapObject()) \
- return false; \
- return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE; \
+bool JSObject::HasFixedTypedArrayElements() {
+ DCHECK_NOT_NULL(elements());
+ return map()->has_fixed_typed_array_elements();
}
+#define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype, size) \
+ bool JSObject::HasFixed##Type##Elements() { \
+ HeapObject* array = elements(); \
+ DCHECK(array != NULL); \
+ if (!array->IsHeapObject()) return false; \
+ return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE; \
+ }
+
TYPED_ARRAYS(FIXED_TYPED_ELEMENTS_CHECK)
#undef FIXED_TYPED_ELEMENTS_CHECK
@@ -6760,7 +6647,7 @@ GlobalDictionary* JSObject::global_dictionary() {
SeededNumberDictionary* JSObject::element_dictionary() {
- DCHECK(HasDictionaryElements());
+ DCHECK(HasDictionaryElements() || HasSlowStringWrapperElements());
return SeededNumberDictionary::cast(elements());
}
@@ -6975,9 +6862,8 @@ String* String::GetForwardedInternalizedString() {
// static
-Maybe<bool> Object::GreaterThan(Handle<Object> x, Handle<Object> y,
- Strength strength) {
- Maybe<ComparisonResult> result = Compare(x, y, strength);
+Maybe<bool> Object::GreaterThan(Handle<Object> x, Handle<Object> y) {
+ Maybe<ComparisonResult> result = Compare(x, y);
if (result.IsJust()) {
switch (result.FromJust()) {
case ComparisonResult::kGreaterThan:
@@ -6993,9 +6879,8 @@ Maybe<bool> Object::GreaterThan(Handle<Object> x, Handle<Object> y,
// static
-Maybe<bool> Object::GreaterThanOrEqual(Handle<Object> x, Handle<Object> y,
- Strength strength) {
- Maybe<ComparisonResult> result = Compare(x, y, strength);
+Maybe<bool> Object::GreaterThanOrEqual(Handle<Object> x, Handle<Object> y) {
+ Maybe<ComparisonResult> result = Compare(x, y);
if (result.IsJust()) {
switch (result.FromJust()) {
case ComparisonResult::kEqual:
@@ -7011,9 +6896,8 @@ Maybe<bool> Object::GreaterThanOrEqual(Handle<Object> x, Handle<Object> y,
// static
-Maybe<bool> Object::LessThan(Handle<Object> x, Handle<Object> y,
- Strength strength) {
- Maybe<ComparisonResult> result = Compare(x, y, strength);
+Maybe<bool> Object::LessThan(Handle<Object> x, Handle<Object> y) {
+ Maybe<ComparisonResult> result = Compare(x, y);
if (result.IsJust()) {
switch (result.FromJust()) {
case ComparisonResult::kLessThan:
@@ -7029,9 +6913,8 @@ Maybe<bool> Object::LessThan(Handle<Object> x, Handle<Object> y,
// static
-Maybe<bool> Object::LessThanOrEqual(Handle<Object> x, Handle<Object> y,
- Strength strength) {
- Maybe<ComparisonResult> result = Compare(x, y, strength);
+Maybe<bool> Object::LessThanOrEqual(Handle<Object> x, Handle<Object> y) {
+ Maybe<ComparisonResult> result = Compare(x, y);
if (result.IsJust()) {
switch (result.FromJust()) {
case ComparisonResult::kEqual:
@@ -7045,23 +6928,19 @@ Maybe<bool> Object::LessThanOrEqual(Handle<Object> x, Handle<Object> y,
return Nothing<bool>();
}
-
MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> object,
- Handle<Name> name,
- LanguageMode language_mode) {
+ Handle<Name> name) {
LookupIterator it =
LookupIterator::PropertyOrElement(name->GetIsolate(), object, name);
- return GetProperty(&it, language_mode);
+ return GetProperty(&it);
}
-
-MaybeHandle<Object> Object::GetPropertyOrElement(Handle<JSReceiver> holder,
+MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> receiver,
Handle<Name> name,
- Handle<Object> receiver,
- LanguageMode language_mode) {
+ Handle<JSReceiver> holder) {
LookupIterator it = LookupIterator::PropertyOrElement(
name->GetIsolate(), receiver, name, holder);
- return GetProperty(&it, language_mode);
+ return GetProperty(&it);
}
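
With the Strength parameter gone, each relational operator above becomes a thin switch over the shared three-way Compare result. A sketch with std::optional standing in for Maybe<> and doubles standing in for JS values:

#include <optional>

enum class ComparisonResult { kLessThan, kEqual, kGreaterThan, kUndefined };

std::optional<ComparisonResult> Compare(double x, double y) {
  if (x != x || y != y) return ComparisonResult::kUndefined;  // NaN operand
  if (x < y) return ComparisonResult::kLessThan;
  if (x > y) return ComparisonResult::kGreaterThan;
  return ComparisonResult::kEqual;
}

std::optional<bool> LessThanOrEqual(double x, double y) {
  auto result = Compare(x, y);
  if (!result) return std::nullopt;  // exception propagation in the real code
  switch (*result) {
    case ComparisonResult::kLessThan:
    case ComparisonResult::kEqual:
      return true;
    case ComparisonResult::kGreaterThan:
    case ComparisonResult::kUndefined:
      return false;
  }
  return false;  // unreachable
}
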
@@ -7706,10 +7585,6 @@ Object* JSMapIterator::CurrentValue() {
}
-ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
-ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
-
-
String::SubStringRange::SubStringRange(String* string, int first, int length)
: string_(string),
first_(first),
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index db716505de..67bc62e7e2 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -33,7 +33,7 @@ void Object::Print(std::ostream& os) { // NOLINT
void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
- os << reinterpret_cast<void*>(this) << ": [" << id << "]\n";
+ os << reinterpret_cast<void*>(this) << ": [" << id << "]";
}
@@ -146,9 +146,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_MAP_ITERATOR_TYPE:
JSMapIterator::cast(this)->JSMapIteratorPrint(os);
break;
- case JS_ITERATOR_RESULT_TYPE:
- JSIteratorResult::cast(this)->JSIteratorResultPrint(os);
- break;
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(this)->JSWeakMapPrint(os);
break;
@@ -326,7 +323,8 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS: {
+ case FAST_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS: {
// Print in array notation for non-sparse arrays.
FixedArray* p = FixedArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
@@ -371,6 +369,8 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
#undef PRINT_ELEMENTS
case DICTIONARY_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ os << "\n - elements: ";
elements()->Print(os);
break;
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -384,6 +384,8 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
<< "\n arguments: " << Brief(p->get(1));
break;
}
+ case NO_ELEMENTS:
+ break;
}
}
@@ -394,7 +396,7 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
PrototypeIterator iter(obj->GetIsolate(), obj);
- os << " - map = " << reinterpret_cast<void*>(obj->map()) << " ["
+ os << "\n - map = " << reinterpret_cast<void*>(obj->map()) << " ["
<< ElementsKindToString(obj->map()->elements_kind())
<< "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent());
}
@@ -433,7 +435,7 @@ void JSModule::JSModulePrint(std::ostream& os) { // NOLINT
void Symbol::SymbolPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Symbol");
- os << " - hash: " << Hash();
+ os << "\n - hash: " << Hash();
os << "\n - name: " << Brief(name());
if (name()->IsUndefined()) {
os << " (" << PrivateSymbolToName() << ")";
@@ -445,31 +447,31 @@ void Symbol::SymbolPrint(std::ostream& os) { // NOLINT
void Map::MapPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Map");
- os << " - type: " << instance_type() << "\n";
- os << " - instance size: " << instance_size() << "\n";
+ os << "\n - type: " << instance_type();
+ os << "\n - instance size: " << instance_size();
if (IsJSObjectMap()) {
- os << " - inobject properties: " << GetInObjectProperties() << "\n";
+ os << "\n - inobject properties: " << GetInObjectProperties();
}
- os << " - elements kind: " << ElementsKindToString(elements_kind()) << "\n";
- os << " - unused property fields: " << unused_property_fields() << "\n";
- if (is_deprecated()) os << " - deprecated_map\n";
- if (is_stable()) os << " - stable_map\n";
- if (is_dictionary_map()) os << " - dictionary_map\n";
- if (is_hidden_prototype()) os << " - hidden_prototype\n";
- if (has_named_interceptor()) os << " - named_interceptor\n";
- if (has_indexed_interceptor()) os << " - indexed_interceptor\n";
- if (is_undetectable()) os << " - undetectable\n";
- if (is_callable()) os << " - callable\n";
- if (is_constructor()) os << " - constructor\n";
- if (is_access_check_needed()) os << " - access_check_needed\n";
- if (!is_extensible()) os << " - non-extensible\n";
- if (is_observed()) os << " - observed\n";
- if (is_strong()) os << " - strong_map\n";
+ os << "\n - elements kind: " << ElementsKindToString(elements_kind());
+ os << "\n - unused property fields: " << unused_property_fields();
+ if (is_deprecated()) os << "\n - deprecated_map";
+ if (is_stable()) os << "\n - stable_map";
+ if (is_dictionary_map()) os << "\n - dictionary_map";
+ if (has_hidden_prototype()) os << "\n - has_hidden_prototype";
+  if (has_named_interceptor()) os << "\n - named_interceptor";
+ if (has_indexed_interceptor()) os << "\n - indexed_interceptor";
+ if (is_undetectable()) os << "\n - undetectable";
+ if (is_callable()) os << "\n - callable";
+ if (is_constructor()) os << "\n - constructor";
+ if (is_access_check_needed()) os << "\n - access_check_needed";
+ if (!is_extensible()) os << "\n - non-extensible";
+ if (is_observed()) os << "\n - observed";
+ if (is_strong()) os << "\n - strong_map";
if (is_prototype_map()) {
- os << " - prototype_map\n";
- os << " - prototype info: " << Brief(prototype_info());
+ os << "\n - prototype_map";
+ os << "\n - prototype info: " << Brief(prototype_info());
} else {
- os << " - back pointer: " << Brief(GetBackPointer());
+ os << "\n - back pointer: " << Brief(GetBackPointer());
}
os << "\n - instance descriptors " << (owns_descriptors() ? "(own) " : "")
<< "#" << NumberOfOwnDescriptors() << ": "
@@ -508,7 +510,7 @@ void PolymorphicCodeCache::PolymorphicCodeCachePrint(
void TypeFeedbackInfo::TypeFeedbackInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "TypeFeedbackInfo");
- os << " - ic_total_count: " << ic_total_count()
+ os << "\n - ic_total_count: " << ic_total_count()
<< ", ic_with_type_info_count: " << ic_with_type_info_count()
<< ", ic_generic_count: " << ic_generic_count() << "\n";
}
@@ -523,7 +525,7 @@ void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(
void FixedArray::FixedArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "FixedArray");
- os << " - length: " << length();
+ os << "\n - length: " << length();
for (int i = 0; i < length(); i++) {
os << "\n [" << i << "]: " << Brief(get(i));
}
@@ -533,7 +535,7 @@ void FixedArray::FixedArrayPrint(std::ostream& os) { // NOLINT
void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "FixedDoubleArray");
- os << " - length: " << length();
+ os << "\n - length: " << length();
for (int i = 0; i < length(); i++) {
os << "\n [" << i << "]: ";
if (is_the_hole(i)) {
@@ -548,7 +550,7 @@ void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
void TransitionArray::TransitionArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "TransitionArray");
- os << " - capacity: " << length();
+ os << "\n - capacity: " << length();
for (int i = 0; i < length(); i++) {
os << "\n [" << i << "]: " << Brief(get(i));
if (i == kNextLinkIndex) os << " (next link)";
@@ -569,7 +571,7 @@ void TypeFeedbackMetadata::Print() {
void TypeFeedbackMetadata::TypeFeedbackMetadataPrint(
std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "TypeFeedbackMetadata");
- os << " - length: " << length();
+ os << "\n - length: " << length();
if (length() == 0) {
os << " (empty)\n";
return;
@@ -594,7 +596,7 @@ void TypeFeedbackVector::Print() {
void TypeFeedbackVector::TypeFeedbackVectorPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "TypeFeedbackVector");
- os << " - length: " << length();
+ os << "\n - length: " << length();
if (length() == 0) {
os << " (empty)\n";
return;
@@ -737,7 +739,7 @@ void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
void JSProxy::JSProxyPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "JSProxy");
- os << " - map = " << reinterpret_cast<void*>(map());
+ os << "\n - map = " << reinterpret_cast<void*>(map());
os << "\n - target = ";
target()->ShortPrint(os);
os << "\n - handler = ";
@@ -795,14 +797,6 @@ void JSMapIterator::JSMapIteratorPrint(std::ostream& os) { // NOLINT
}
-void JSIteratorResult::JSIteratorResultPrint(std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSIteratorResult");
- os << "\n - done = " << Brief(done());
- os << "\n - value = " << Brief(value());
- os << "\n";
-}
-
-
void JSWeakMap::JSWeakMapPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSWeakMap");
os << "\n - table = " << Brief(table());
@@ -874,7 +868,7 @@ void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "SharedFunctionInfo");
- os << " - name: " << Brief(name());
+ os << "\n - name: " << Brief(name());
os << "\n - expected_nof_properties: " << expected_nof_properties();
os << "\n - ast_node_count: " << ast_node_count();
os << "\n - instance class name = ";
@@ -892,10 +886,16 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
// Script files are often large, hard to read.
// os << "\n - script =";
// script()->Print(os);
+ if (is_named_expression()) {
+ os << "\n - named expression";
+ } else if (is_anonymous_expression()) {
+ os << "\n - anonymous expression";
+ } else if (is_declaration()) {
+ os << "\n - declaration";
+ }
os << "\n - function token position = " << function_token_position();
os << "\n - start position = " << start_position();
os << "\n - end position = " << end_position();
- os << "\n - is expression = " << is_expression();
os << "\n - debug info = " << Brief(debug_info());
os << "\n - length = " << length();
os << "\n - optimized_code_map = " << Brief(optimized_code_map());
@@ -926,14 +926,14 @@ void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) { // NOLINT
void Cell::CellPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Cell");
- os << " - value: " << Brief(value());
+ os << "\n - value: " << Brief(value());
os << "\n";
}
void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PropertyCell");
- os << " - value: " << Brief(value());
+ os << "\n - value: " << Brief(value());
os << "\n - details: " << property_details();
os << "\n";
}
@@ -952,6 +952,7 @@ void WeakCell::WeakCellPrint(std::ostream& os) { // NOLINT
void Code::CodePrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Code");
+ os << "\n";
#ifdef ENABLE_DISASSEMBLER
if (FLAG_use_verbose_printer) {
Disassemble(NULL, os);
@@ -966,9 +967,8 @@ void Foreign::ForeignPrint(std::ostream& os) { // NOLINT
}
-void ExecutableAccessorInfo::ExecutableAccessorInfoPrint(
- std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "ExecutableAccessorInfo");
+void AccessorInfo::AccessorInfoPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "AccessorInfo");
os << "\n - name: " << Brief(name());
os << "\n - flag: " << flag();
os << "\n - getter: " << Brief(getter());
@@ -1046,8 +1046,8 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(
HeapObject::PrintHeader(os, "FunctionTemplateInfo");
os << "\n - class name: " << Brief(class_name());
os << "\n - tag: " << Brief(tag());
- os << "\n - property_list: " << Brief(property_list());
os << "\n - serial_number: " << Brief(serial_number());
+ os << "\n - property_list: " << Brief(property_list());
os << "\n - call_code: " << Brief(call_code());
os << "\n - property_accessors: " << Brief(property_accessors());
os << "\n - prototype_template: " << Brief(prototype_template());
@@ -1067,7 +1067,8 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(
void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "ObjectTemplateInfo");
- os << " - tag: " << Brief(tag());
+ os << "\n - tag: " << Brief(tag());
+ os << "\n - serial_number: " << Brief(serial_number());
os << "\n - property_list: " << Brief(property_list());
os << "\n - property_accessors: " << Brief(property_accessors());
os << "\n - constructor: " << Brief(constructor());
@@ -1078,7 +1079,7 @@ void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
void AllocationSite::AllocationSitePrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "AllocationSite");
- os << " - weak_next: " << Brief(weak_next());
+ os << "\n - weak_next: " << Brief(weak_next());
os << "\n - dependent code: " << Brief(dependent_code());
os << "\n - nested site: " << Brief(nested_site());
os << "\n - memento found count: "
@@ -1102,7 +1103,7 @@ void AllocationSite::AllocationSitePrint(std::ostream& os) { // NOLINT
void AllocationMemento::AllocationMementoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "AllocationMemento");
- os << " - allocation site: ";
+ os << "\n - allocation site: ";
if (IsValid()) {
GetAllocationSite()->Print(os);
} else {
@@ -1134,7 +1135,7 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "DebugInfo");
os << "\n - shared: " << Brief(shared());
- os << "\n - code: " << Brief(code());
+ os << "\n - code: " << Brief(abstract_code());
os << "\n - break_points: ";
break_points()->Print(os);
}
@@ -1142,7 +1143,7 @@ void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
void BreakPointInfo::BreakPointInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "BreakPointInfo");
- os << "\n - code_position: " << code_position();
+ os << "\n - code_offset: " << code_offset();
os << "\n - source_position: " << source_position();
os << "\n - statement_position: " << statement_position();
os << "\n - break_point_objects: " << Brief(break_point_objects());
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index d9d8213e24..67a5963831 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -11,6 +11,7 @@
#include "src/accessors.h"
#include "src/allocation-site-scopes.h"
#include "src/api.h"
+#include "src/api-natives.h"
#include "src/arguments.h"
#include "src/base/bits.h"
#include "src/base/utils/random-number-generator.h"
@@ -24,12 +25,14 @@
#include "src/deoptimizer.h"
#include "src/elements.h"
#include "src/execution.h"
-#include "src/field-index.h"
#include "src/field-index-inl.h"
+#include "src/field-index.h"
+#include "src/field-type.h"
#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/identity-map.h"
#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/source-position-table.h"
#include "src/isolate-inl.h"
#include "src/key-accumulator.h"
#include "src/list.h"
@@ -70,20 +73,19 @@ std::ostream& operator<<(std::ostream& os, InstanceType instance_type) {
return os << "UNKNOWN"; // Keep the compiler happy.
}
-
-Handle<HeapType> Object::OptimalType(Isolate* isolate,
- Representation representation) {
- if (representation.IsNone()) return HeapType::None(isolate);
+Handle<FieldType> Object::OptimalType(Isolate* isolate,
+ Representation representation) {
+ if (representation.IsNone()) return FieldType::None(isolate);
if (FLAG_track_field_types) {
if (representation.IsHeapObject() && IsHeapObject()) {
// We can track only JavaScript objects with stable maps.
Handle<Map> map(HeapObject::cast(this)->map(), isolate);
if (map->is_stable() && map->IsJSReceiverMap()) {
- return HeapType::Class(map, isolate);
+ return FieldType::Class(map, isolate);
}
}
}
- return HeapType::Any(isolate);
+ return FieldType::Any(isolate);
}
@@ -98,7 +100,9 @@ MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
int constructor_function_index =
Handle<HeapObject>::cast(object)->map()->GetConstructorFunctionIndex();
if (constructor_function_index == Map::kNoConstructorFunctionIndex) {
- return MaybeHandle<JSReceiver>();
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kUndefinedOrNullToObject),
+ JSReceiver);
}
constructor = handle(
JSFunction::cast(native_context->get(constructor_function_index)),
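
This hunk moves the undefined/null failure into ToObject itself: instead of returning an empty MaybeHandle for callers to diagnose, the conversion throws kUndefinedOrNullToObject directly. A loose standalone analogue of the shift, with C++ exceptions standing in for V8's pending-exception machinery:

    #include <optional>
    #include <stdexcept>
    #include <string>

    // Old shape: the conversion hands back an empty result and every
    // caller invents its own TypeError.
    std::optional<std::string> ToObjectOld(const char* value) {
      if (value == nullptr) return std::nullopt;
      return std::string(value);
    }

    // New shape: the conversion reports the error itself, once.
    std::string ToObjectNew(const char* value) {
      if (value == nullptr) {
        throw std::invalid_argument(
            "Cannot convert undefined or null to object");
      }
      return std::string(value);
    }
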
@@ -259,14 +263,11 @@ bool NumberEquals(Handle<Object> x, Handle<Object> y) {
// static
-Maybe<ComparisonResult> Object::Compare(Handle<Object> x, Handle<Object> y,
- Strength strength) {
- if (!is_strong(strength)) {
- // ES6 section 7.2.11 Abstract Relational Comparison step 3 and 4.
- if (!Object::ToPrimitive(x, ToPrimitiveHint::kNumber).ToHandle(&x) ||
- !Object::ToPrimitive(y, ToPrimitiveHint::kNumber).ToHandle(&y)) {
- return Nothing<ComparisonResult>();
- }
+Maybe<ComparisonResult> Object::Compare(Handle<Object> x, Handle<Object> y) {
+ // ES6 section 7.2.11 Abstract Relational Comparison step 3 and 4.
+ if (!Object::ToPrimitive(x, ToPrimitiveHint::kNumber).ToHandle(&x) ||
+ !Object::ToPrimitive(y, ToPrimitiveHint::kNumber).ToHandle(&y)) {
+ return Nothing<ComparisonResult>();
}
if (x->IsString() && y->IsString()) {
// ES6 section 7.2.11 Abstract Relational Comparison step 5.
@@ -274,23 +275,8 @@ Maybe<ComparisonResult> Object::Compare(Handle<Object> x, Handle<Object> y,
String::Compare(Handle<String>::cast(x), Handle<String>::cast(y)));
}
// ES6 section 7.2.11 Abstract Relational Comparison step 6.
- if (!is_strong(strength)) {
- if (!Object::ToNumber(x).ToHandle(&x) ||
- !Object::ToNumber(y).ToHandle(&y)) {
- return Nothing<ComparisonResult>();
- }
- } else {
- if (!x->IsNumber()) {
- Isolate* const isolate = Handle<HeapObject>::cast(x)->GetIsolate();
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kStrongImplicitConversion));
- return Nothing<ComparisonResult>();
- } else if (!y->IsNumber()) {
- Isolate* const isolate = Handle<HeapObject>::cast(y)->GetIsolate();
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kStrongImplicitConversion));
- return Nothing<ComparisonResult>();
- }
+ if (!Object::ToNumber(x).ToHandle(&x) || !Object::ToNumber(y).ToHandle(&y)) {
+ return Nothing<ComparisonResult>();
}
return Just(NumberCompare(x->Number(), y->Number()));
}
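
With strong mode dropped from Object::Compare, both operands are unconditionally coerced (ToPrimitive, then ToNumber) and Nothing<ComparisonResult> is reserved for exceptions raised during coercion. The remaining numeric tail is total except for NaN; a standalone sketch (the enum mirrors V8's ComparisonResult, the rest is illustrative):

    #include <cmath>

    enum class ComparisonResult { kLessThan, kEqual, kGreaterThan, kUndefined };

    // After ToNumber coercion the only non-total case left is NaN, which
    // maps to kUndefined as in ES6 7.2.11 Abstract Relational Comparison.
    ComparisonResult NumberCompareSketch(double x, double y) {
      if (std::isnan(x) || std::isnan(y)) return ComparisonResult::kUndefined;
      if (x < y) return ComparisonResult::kLessThan;
      if (x > y) return ComparisonResult::kGreaterThan;
      return ComparisonResult::kEqual;
    }
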
@@ -410,10 +396,10 @@ bool Object::StrictEquals(Object* that) {
// static
Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
if (object->IsNumber()) return isolate->factory()->number_string();
- if (object->IsUndefined() || object->IsUndetectableObject()) {
+ if (object->IsOddball()) return handle(Oddball::cast(*object)->type_of());
+ if (object->IsUndetectableObject()) {
return isolate->factory()->undefined_string();
}
- if (object->IsBoolean()) return isolate->factory()->boolean_string();
if (object->IsString()) return isolate->factory()->string_string();
if (object->IsSymbol()) return isolate->factory()->symbol_string();
if (object->IsString()) return isolate->factory()->string_string();
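
The rewritten TypeOf collapses undefined, null, and the booleans into a single branch by caching each oddball's typeof string on the oddball itself via type_of() (the repeated IsString test above is pre-existing, harmless redundancy in the surrounding context). A sketch of the caching idea with a hypothetical struct, not V8's real Oddball layout:

    #include <string>

    // Each singleton "oddball" value caches its own typeof result, so the
    // dispatcher needs one branch instead of one per oddball.
    struct OddballSketch {
      std::string type_of;
    };

    const OddballSketch kUndefinedOddball{"undefined"};
    const OddballSketch kNullOddball{"object"};   // typeof null == "object"
    const OddballSketch kTrueOddball{"boolean"};

    std::string TypeOfSketch(const OddballSketch& o) { return o.type_of; }
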
@@ -428,13 +414,8 @@ Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
// static
MaybeHandle<Object> Object::Multiply(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs, Strength strength) {
+ Handle<Object> rhs) {
if (!lhs->IsNumber() || !rhs->IsNumber()) {
- if (is_strong(strength)) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kStrongImplicitConversion),
- Object);
- }
ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
}
@@ -444,13 +425,8 @@ MaybeHandle<Object> Object::Multiply(Isolate* isolate, Handle<Object> lhs,
// static
MaybeHandle<Object> Object::Divide(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs, Strength strength) {
+ Handle<Object> rhs) {
if (!lhs->IsNumber() || !rhs->IsNumber()) {
- if (is_strong(strength)) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kStrongImplicitConversion),
- Object);
- }
ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
}
@@ -460,13 +436,8 @@ MaybeHandle<Object> Object::Divide(Isolate* isolate, Handle<Object> lhs,
// static
MaybeHandle<Object> Object::Modulus(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs, Strength strength) {
+ Handle<Object> rhs) {
if (!lhs->IsNumber() || !rhs->IsNumber()) {
- if (is_strong(strength)) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kStrongImplicitConversion),
- Object);
- }
ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
}
@@ -476,16 +447,12 @@ MaybeHandle<Object> Object::Modulus(Isolate* isolate, Handle<Object> lhs,
// static
MaybeHandle<Object> Object::Add(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs, Strength strength) {
+ Handle<Object> rhs) {
if (lhs->IsNumber() && rhs->IsNumber()) {
return isolate->factory()->NewNumber(lhs->Number() + rhs->Number());
} else if (lhs->IsString() && rhs->IsString()) {
return isolate->factory()->NewConsString(Handle<String>::cast(lhs),
Handle<String>::cast(rhs));
- } else if (is_strong(strength)) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kStrongImplicitConversion),
- Object);
}
ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToPrimitive(lhs), Object);
ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToPrimitive(rhs), Object);
@@ -505,13 +472,8 @@ MaybeHandle<Object> Object::Add(Isolate* isolate, Handle<Object> lhs,
// static
MaybeHandle<Object> Object::Subtract(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs, Strength strength) {
+ Handle<Object> rhs) {
if (!lhs->IsNumber() || !rhs->IsNumber()) {
- if (is_strong(strength)) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kStrongImplicitConversion),
- Object);
- }
ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
}
@@ -521,13 +483,8 @@ MaybeHandle<Object> Object::Subtract(Isolate* isolate, Handle<Object> lhs,
// static
MaybeHandle<Object> Object::ShiftLeft(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs, Strength strength) {
+ Handle<Object> rhs) {
if (!lhs->IsNumber() || !rhs->IsNumber()) {
- if (is_strong(strength)) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kStrongImplicitConversion),
- Object);
- }
ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
}
@@ -538,13 +495,8 @@ MaybeHandle<Object> Object::ShiftLeft(Isolate* isolate, Handle<Object> lhs,
// static
MaybeHandle<Object> Object::ShiftRight(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs, Strength strength) {
+ Handle<Object> rhs) {
if (!lhs->IsNumber() || !rhs->IsNumber()) {
- if (is_strong(strength)) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kStrongImplicitConversion),
- Object);
- }
ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
}
@@ -556,14 +508,8 @@ MaybeHandle<Object> Object::ShiftRight(Isolate* isolate, Handle<Object> lhs,
// static
MaybeHandle<Object> Object::ShiftRightLogical(Isolate* isolate,
Handle<Object> lhs,
- Handle<Object> rhs,
- Strength strength) {
+ Handle<Object> rhs) {
if (!lhs->IsNumber() || !rhs->IsNumber()) {
- if (is_strong(strength)) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kStrongImplicitConversion),
- Object);
- }
ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
}
@@ -574,13 +520,8 @@ MaybeHandle<Object> Object::ShiftRightLogical(Isolate* isolate,
// static
MaybeHandle<Object> Object::BitwiseAnd(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs, Strength strength) {
+ Handle<Object> rhs) {
if (!lhs->IsNumber() || !rhs->IsNumber()) {
- if (is_strong(strength)) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kStrongImplicitConversion),
- Object);
- }
ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
}
@@ -591,13 +532,8 @@ MaybeHandle<Object> Object::BitwiseAnd(Isolate* isolate, Handle<Object> lhs,
// static
MaybeHandle<Object> Object::BitwiseOr(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs, Strength strength) {
+ Handle<Object> rhs) {
if (!lhs->IsNumber() || !rhs->IsNumber()) {
- if (is_strong(strength)) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kStrongImplicitConversion),
- Object);
- }
ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
}
@@ -608,13 +544,8 @@ MaybeHandle<Object> Object::BitwiseOr(Isolate* isolate, Handle<Object> lhs,
// static
MaybeHandle<Object> Object::BitwiseXor(Isolate* isolate, Handle<Object> lhs,
- Handle<Object> rhs, Strength strength) {
+ Handle<Object> rhs) {
if (!lhs->IsNumber() || !rhs->IsNumber()) {
- if (is_strong(strength)) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kStrongImplicitConversion),
- Object);
- }
ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
}
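
The same mechanical change repeats from Multiply down through BitwiseXor: every binary-op helper loses its Strength parameter and the strong-mode kStrongImplicitConversion throw, leaving a numeric fast path plus unconditional coercion (ToNumber everywhere, ToPrimitive first for Add). A condensed standalone sketch of that shared shape, with hypothetical string-tagged values:

    #include <cstdlib>
    #include <optional>

    // nullptr stands in for a value whose coercion throws (e.g. a
    // throwing valueOf); an empty optional models the pending exception.
    std::optional<double> ToNumberSketch(const char* value) {
      if (value == nullptr) return std::nullopt;
      return std::strtod(value, nullptr);
    }

    std::optional<double> MultiplySketch(const char* lhs, const char* rhs) {
      auto l = ToNumberSketch(lhs);
      if (!l) return std::nullopt;  // propagate instead of strong-mode throw
      auto r = ToNumberSketch(rhs);
      if (!r) return std::nullopt;
      return *l * *r;
    }
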
@@ -777,8 +708,7 @@ Maybe<bool> JSReceiver::HasProperty(LookupIterator* it) {
// static
-MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
- LanguageMode language_mode) {
+MaybeHandle<Object> Object::GetProperty(LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
case LookupIterator::NOT_FOUND:
@@ -786,8 +716,7 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
UNREACHABLE();
case LookupIterator::JSPROXY:
return JSProxy::GetProperty(it->isolate(), it->GetHolder<JSProxy>(),
- it->GetName(), it->GetReceiver(),
- language_mode);
+ it->GetName(), it->GetReceiver());
case LookupIterator::INTERCEPTOR: {
bool done;
Handle<Object> result;
@@ -801,14 +730,14 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
if (it->HasAccess()) break;
return JSObject::GetPropertyWithFailedAccessCheck(it);
case LookupIterator::ACCESSOR:
- return GetPropertyWithAccessor(it, language_mode);
+ return GetPropertyWithAccessor(it);
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return ReadAbsentProperty(it, language_mode);
+ return ReadAbsentProperty(it);
case LookupIterator::DATA:
return it->GetDataValue();
}
}
- return ReadAbsentProperty(it, language_mode);
+ return ReadAbsentProperty(it);
}
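
Object::GetProperty likewise drops LanguageMode: with strong mode gone, every exit that used to consult the mode (the proxy path, accessor reads, absent properties) behaves uniformly, and ReadAbsentProperty can only produce undefined. A toy stand-in for the lookup walk:

    #include <map>
    #include <optional>
    #include <string>

    // A found property returns its value (the DATA state); absence always
    // yields "undefined" now, with no mode-dependent throw on the read path.
    std::optional<int> GetPropertySketch(const std::map<std::string, int>& obj,
                                         const std::string& name) {
      auto it = obj.find(name);
      if (it != obj.end()) return it->second;  // LookupIterator::DATA
      return std::nullopt;                     // ReadAbsentProperty
    }
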
@@ -827,8 +756,7 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
Handle<JSProxy> proxy,
Handle<Name> name,
- Handle<Object> receiver,
- LanguageMode language_mode) {
+ Handle<Object> receiver) {
if (receiver->IsJSGlobalObject()) {
THROW_NEW_ERROR(
isolate,
@@ -861,7 +789,7 @@ MaybeHandle<Object> JSProxy::GetProperty(Isolate* isolate,
// 7.a Return target.[[Get]](P, Receiver).
LookupIterator it =
LookupIterator::PropertyOrElement(isolate, receiver, name, target);
- return Object::GetProperty(&it, language_mode);
+ return Object::GetProperty(&it);
}
// 8. Let trapResult be ? Call(trap, handler, Ā«target, P, ReceiverĀ»).
Handle<Object> trap_result;
@@ -934,9 +862,8 @@ Handle<Object> JSReceiver::GetDataProperty(LookupIterator* it) {
it->NotFound();
return it->isolate()->factory()->undefined_value();
case LookupIterator::ACCESSOR:
- // TODO(verwaest): For now this doesn't call into
- // ExecutableAccessorInfo, since clients don't need it. Update once
- // relevant.
+ // TODO(verwaest): For now this doesn't call into AccessorInfo, since
+ // clients don't need it. Update once relevant.
it->NotFound();
return it->isolate()->factory()->undefined_value();
case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -1020,9 +947,10 @@ Object* FunctionTemplateInfo::GetCompatibleReceiver(Isolate* isolate,
if (recv_type->IsUndefined()) return receiver;
FunctionTemplateInfo* signature = FunctionTemplateInfo::cast(recv_type);
// Check the receiver.
- for (PrototypeIterator iter(isolate, receiver,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
+ for (PrototypeIterator iter(isolate, JSObject::cast(receiver),
+ PrototypeIterator::START_AT_RECEIVER,
+ PrototypeIterator::END_AT_NON_HIDDEN);
+ !iter.IsAtEnd(); iter.Advance()) {
if (signature->IsTemplateFor(iter.GetCurrent())) return iter.GetCurrent();
}
return isolate->heap()->null_value();
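
GetCompatibleReceiver now passes the stopping rule (END_AT_NON_HIDDEN) to the PrototypeIterator constructor rather than to each IsAtEnd() query. An illustrative shape of that API change, not V8's real iterator:

    // The stopping rule becomes a constructor argument, so IsAtEnd()
    // takes no parameter and callers state the rule exactly once.
    struct ObjSketch {
      const ObjSketch* prototype = nullptr;
      bool hidden = false;  // hidden prototypes belong to their owner
    };

    class PrototypeIterSketch {
     public:
      enum class WhereToEnd { kAtNull, kAtNonHidden };
      PrototypeIterSketch(const ObjSketch* start, WhereToEnd end)
          : current_(start), start_(start), end_(end) {}
      bool IsAtEnd() const {
        if (current_ == nullptr) return true;
        return end_ == WhereToEnd::kAtNonHidden && current_ != start_ &&
               !current_->hidden;
      }
      void Advance() { current_ = current_->prototype; }
      const ObjSketch* GetCurrent() const { return current_; }

     private:
      const ObjSketch* current_;
      const ObjSketch* start_;
      WhereToEnd end_;
    };
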
@@ -1058,7 +986,8 @@ MaybeHandle<JSObject> JSObject::New(Handle<JSFunction> constructor,
Handle<FixedArray> JSObject::EnsureWritableFastElements(
Handle<JSObject> object) {
- DCHECK(object->HasFastSmiOrObjectElements());
+ DCHECK(object->HasFastSmiOrObjectElements() ||
+ object->HasFastStringWrapperElements());
Isolate* isolate = object->GetIsolate();
Handle<FixedArray> elems(FixedArray::cast(object->elements()), isolate);
if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
@@ -1096,7 +1025,7 @@ MaybeHandle<Object> JSProxy::GetPrototype(Handle<JSProxy> proxy) {
Object);
// 6. If trap is undefined, then return target.[[GetPrototypeOf]]().
if (trap->IsUndefined()) {
- return Object::GetPrototype(isolate, target);
+ return JSReceiver::GetPrototype(isolate, target);
}
// 7. Let handlerProto be ? Call(trap, handler, Ā«targetĀ»).
Handle<Object> argv[] = {target};
@@ -1118,7 +1047,7 @@ MaybeHandle<Object> JSProxy::GetPrototype(Handle<JSProxy> proxy) {
// 11. Let targetProto be ? target.[[GetPrototypeOf]]().
Handle<Object> target_proto;
ASSIGN_RETURN_ON_EXCEPTION(isolate, target_proto,
- Object::GetPrototype(isolate, target), Object);
+ JSReceiver::GetPrototype(isolate, target), Object);
// 12. If SameValue(handlerProto, targetProto) is false, throw a TypeError.
if (!handler_proto->SameValue(*target_proto)) {
THROW_NEW_ERROR(
@@ -1130,9 +1059,7 @@ MaybeHandle<Object> JSProxy::GetPrototype(Handle<JSProxy> proxy) {
return handler_proto;
}
-
-MaybeHandle<Object> Object::GetPropertyWithAccessor(
- LookupIterator* it, LanguageMode language_mode) {
+MaybeHandle<Object> Object::GetPropertyWithAccessor(LookupIterator* it) {
Isolate* isolate = it->isolate();
Handle<Object> structure = it->GetAccessors();
Handle<Object> receiver = it->GetReceiver();
@@ -1145,8 +1072,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(
if (structure->IsAccessorInfo()) {
Handle<JSObject> holder = it->GetHolder<JSObject>();
Handle<Name> name = it->GetName();
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(structure);
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(structure);
if (!info->IsCompatibleReceiver(*receiver)) {
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
@@ -1159,11 +1085,12 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(
if (call_fun == nullptr) return isolate->factory()->undefined_value();
LOG(isolate, ApiNamedPropertyAccess("load", *holder, *name));
- PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder);
+ PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
+ Object::DONT_THROW);
v8::Local<v8::Value> result = args.Call(call_fun, v8::Utils::ToLocal(name));
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
if (result.IsEmpty()) {
- return ReadAbsentProperty(isolate, receiver, name, language_mode);
+ return ReadAbsentProperty(isolate, receiver, name);
}
Handle<Object> return_value = v8::Utils::OpenHandle(*result);
return_value->VerifyApiCallResultType();
@@ -1173,13 +1100,24 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(
// Regular accessor.
Handle<Object> getter(AccessorPair::cast(*structure)->getter(), isolate);
- if (getter->IsCallable()) {
+ if (getter->IsFunctionTemplateInfo()) {
+ auto result = Builtins::InvokeApiFunction(
+ Handle<FunctionTemplateInfo>::cast(getter), receiver, 0, nullptr);
+ if (isolate->has_pending_exception()) {
+ return MaybeHandle<Object>();
+ }
+ Handle<Object> return_value;
+ if (result.ToHandle(&return_value)) {
+ return_value->VerifyApiCallResultType();
+ return handle(*return_value, isolate);
+ }
+ } else if (getter->IsCallable()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return Object::GetPropertyWithDefinedGetter(
receiver, Handle<JSReceiver>::cast(getter));
}
// Getter is not a function.
- return ReadAbsentProperty(isolate, receiver, it->GetName(), language_mode);
+ return ReadAbsentProperty(isolate, receiver, it->GetName());
}
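
New in this path: an accessor getter may be a bare FunctionTemplateInfo, invoked through Builtins::InvokeApiFunction without first instantiating a JSFunction, which keeps lazily instantiated API accessors cheap. A sketch of the resulting three-way dispatch, with illustrative types:

    #include <functional>
    #include <optional>

    // The getter slot can hold an uninstantiated API template, an
    // ordinary callable, or nothing at all.
    struct GetterSlot {
      std::function<int()> api_template;  // FunctionTemplateInfo stand-in
      std::function<int()> callable;      // JS getter stand-in
    };

    std::optional<int> CallGetterSketch(const GetterSlot& g) {
      if (g.api_template) return g.api_template();  // InvokeApiFunction path
      if (g.callable) return g.callable();          // defined-getter path
      return std::nullopt;                          // no getter: undefined
    }
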
@@ -1192,7 +1130,6 @@ bool AccessorInfo::IsCompatibleReceiverMap(Isolate* isolate,
->IsTemplateFor(*map);
}
-
Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
Handle<Object> value,
ShouldThrow should_throw) {
@@ -1205,11 +1142,10 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
DCHECK(!structure->IsForeign());
// API style callbacks.
- if (structure->IsExecutableAccessorInfo()) {
+ if (structure->IsAccessorInfo()) {
Handle<JSObject> holder = it->GetHolder<JSObject>();
Handle<Name> name = it->GetName();
- Handle<ExecutableAccessorInfo> info =
- Handle<ExecutableAccessorInfo>::cast(structure);
+ Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(structure);
if (!info->IsCompatibleReceiver(*receiver)) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kIncompatibleMethodReceiver, name, receiver));
@@ -1218,14 +1154,14 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
v8::AccessorNameSetterCallback call_fun =
v8::ToCData<v8::AccessorNameSetterCallback>(info->setter());
+ // TODO(verwaest): We should not get here anymore once all AccessorInfos are
+ // marked as special_data_property. They cannot both be writable and not
+ // have a setter.
if (call_fun == nullptr) return Just(true);
- // TODO(verwaest): Shouldn't this case be unreachable (at least in the
- // long run?) Should we have ExecutableAccessorPairs with missing setter
- // that are "writable"? If they aren't writable, shouldn't we have bailed
- // out already earlier?
LOG(isolate, ApiNamedPropertyAccess("store", *holder, *name));
- PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder);
+ PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder,
+ should_throw);
args.Call(call_fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
return Just(true);
@@ -1233,7 +1169,16 @@ Maybe<bool> Object::SetPropertyWithAccessor(LookupIterator* it,
// Regular accessor.
Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
- if (setter->IsCallable()) {
+ if (setter->IsFunctionTemplateInfo()) {
+ Handle<Object> argv[] = {value};
+ auto result =
+ Builtins::InvokeApiFunction(Handle<FunctionTemplateInfo>::cast(setter),
+ receiver, arraysize(argv), argv);
+ if (isolate->has_pending_exception()) {
+ return Nothing<bool>();
+ }
+ return Just(true);
+ } else if (setter->IsCallable()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return SetPropertyWithDefinedSetter(
receiver, Handle<JSReceiver>::cast(setter), value, should_throw);
@@ -1322,7 +1267,7 @@ MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
Handle<JSObject> checked = it->GetHolder<JSObject>();
while (AllCanRead(it)) {
if (it->state() == LookupIterator::ACCESSOR) {
- return GetPropertyWithAccessor(it, SLOPPY);
+ return GetPropertyWithAccessor(it);
}
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
bool done;
@@ -1350,7 +1295,7 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
Handle<JSObject> checked = it->GetHolder<JSObject>();
while (AllCanRead(it)) {
if (it->state() == LookupIterator::ACCESSOR) {
- return Just(it->property_details().attributes());
+ return Just(it->property_attributes());
}
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
auto result = GetPropertyAttributesWithInterceptor(it);
@@ -1436,14 +1381,16 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
}
}
-
-Maybe<bool> Object::HasInPrototypeChain(Isolate* isolate, Handle<Object> object,
- Handle<Object> proto) {
+Maybe<bool> JSReceiver::HasInPrototypeChain(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> proto) {
PrototypeIterator iter(isolate, object, PrototypeIterator::START_AT_RECEIVER);
while (true) {
if (!iter.AdvanceFollowingProxies()) return Nothing<bool>();
if (iter.IsAtEnd()) return Just(false);
- if (iter.IsAtEnd(proto)) return Just(true);
+ if (PrototypeIterator::GetCurrent(iter).is_identical_to(proto)) {
+ return Just(true);
+ }
}
}
@@ -1544,25 +1491,22 @@ bool Object::SameValue(Object* other) {
if (IsString() && other->IsString()) {
return String::cast(this)->Equals(String::cast(other));
}
- if (IsSimd128Value() && other->IsSimd128Value()) {
- if (IsFloat32x4() && other->IsFloat32x4()) {
- Float32x4* a = Float32x4::cast(this);
- Float32x4* b = Float32x4::cast(other);
- for (int i = 0; i < 4; i++) {
- float x = a->get_lane(i);
- float y = b->get_lane(i);
- // Implements the ES5 SameValue operation for floating point types.
- // http://www.ecma-international.org/ecma-262/6.0/#sec-samevalue
- if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
- if (std::signbit(x) != std::signbit(y)) return false;
- }
- return true;
- } else {
- Simd128Value* a = Simd128Value::cast(this);
- Simd128Value* b = Simd128Value::cast(other);
- return a->map()->instance_type() == b->map()->instance_type() &&
- a->BitwiseEquals(b);
+ if (IsFloat32x4() && other->IsFloat32x4()) {
+ Float32x4* a = Float32x4::cast(this);
+ Float32x4* b = Float32x4::cast(other);
+ for (int i = 0; i < 4; i++) {
+ float x = a->get_lane(i);
+ float y = b->get_lane(i);
+ // Implements the ES5 SameValue operation for floating point types.
+ // http://www.ecma-international.org/ecma-262/6.0/#sec-samevalue
+ if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
+ if (std::signbit(x) != std::signbit(y)) return false;
}
+ return true;
+ } else if (IsSimd128Value() && other->IsSimd128Value()) {
+ Simd128Value* a = Simd128Value::cast(this);
+ Simd128Value* b = Simd128Value::cast(other);
+ return a->map() == b->map() && a->BitwiseEquals(b);
}
return false;
}
@@ -1583,25 +1527,22 @@ bool Object::SameValueZero(Object* other) {
if (IsString() && other->IsString()) {
return String::cast(this)->Equals(String::cast(other));
}
- if (IsSimd128Value() && other->IsSimd128Value()) {
- if (IsFloat32x4() && other->IsFloat32x4()) {
- Float32x4* a = Float32x4::cast(this);
- Float32x4* b = Float32x4::cast(other);
- for (int i = 0; i < 4; i++) {
- float x = a->get_lane(i);
- float y = b->get_lane(i);
- // Implements the ES6 SameValueZero operation for floating point types.
- // http://www.ecma-international.org/ecma-262/6.0/#sec-samevaluezero
- if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
- // SameValueZero doesn't distinguish between 0 and -0.
- }
- return true;
- } else {
- Simd128Value* a = Simd128Value::cast(this);
- Simd128Value* b = Simd128Value::cast(other);
- return a->map()->instance_type() == b->map()->instance_type() &&
- a->BitwiseEquals(b);
+ if (IsFloat32x4() && other->IsFloat32x4()) {
+ Float32x4* a = Float32x4::cast(this);
+ Float32x4* b = Float32x4::cast(other);
+ for (int i = 0; i < 4; i++) {
+ float x = a->get_lane(i);
+ float y = b->get_lane(i);
+ // Implements the ES6 SameValueZero operation for floating point types.
+ // http://www.ecma-international.org/ecma-262/6.0/#sec-samevaluezero
+ if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
+ // SameValueZero doesn't distinguish between 0 and -0.
}
+ return true;
+ } else if (IsSimd128Value() && other->IsSimd128Value()) {
+ Simd128Value* a = Simd128Value::cast(this);
+ Simd128Value* b = Simd128Value::cast(other);
+ return a->map() == b->map() && a->BitwiseEquals(b);
}
return false;
}
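
SameValue and SameValueZero get the same restructuring: Float32x4 is handled lane-wise first, and every other SIMD type is compared by map identity plus bitwise equality (map equality replaces the earlier instance_type comparison). The lane test, lifted straight from the loops above into a standalone helper:

    #include <cmath>

    // One Float32x4 lane under ES5 SameValue: NaN equals NaN and the
    // signed zeros differ. SameValueZero is the same test without the
    // signbit comparison.
    bool SameValueLane(float x, float y) {
      if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
      return std::signbit(x) == std::signbit(y);
    }
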
@@ -1610,8 +1551,14 @@ bool Object::SameValueZero(Object* other) {
MaybeHandle<Object> Object::ArraySpeciesConstructor(
Isolate* isolate, Handle<Object> original_array) {
Handle<Context> native_context = isolate->native_context();
+ Handle<Object> default_species = isolate->array_function();
if (!FLAG_harmony_species) {
- return Handle<Object>(native_context->array_function(), isolate);
+ return default_species;
+ }
+ if (original_array->IsJSArray() &&
+ Handle<JSReceiver>::cast(original_array)->map()->new_target_is_base() &&
+ isolate->IsArraySpeciesLookupChainIntact()) {
+ return default_species;
}
Handle<Object> constructor = isolate->factory()->undefined_value();
Maybe<bool> is_array = Object::IsArray(original_array);
@@ -1645,7 +1592,7 @@ MaybeHandle<Object> Object::ArraySpeciesConstructor(
}
}
if (constructor->IsUndefined()) {
- return Handle<Object>(native_context->array_function(), isolate);
+ return default_species;
} else {
if (!constructor->IsConstructor()) {
THROW_NEW_ERROR(isolate,
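
ArraySpeciesConstructor gains a protector-guarded fast path: a plain JSArray whose map still records new_target_is_base skips the whole constructor/@@species lookup while the isolate's species protector is intact. A minimal sketch of the protector idea (names hypothetical):

    // One cheap global flag guards the fast path; it is flipped
    // permanently the first time anyone customizes @@species.
    struct IsolateSketch {
      bool array_species_intact = true;
    };

    int SlowSpeciesLookup() { return 1; }  // placeholder for the full walk

    int ArraySpeciesSketch(IsolateSketch* iso, bool plain_base_array) {
      const int kDefaultSpecies = 0;  // stands in for the Array function
      if (plain_base_array && iso->array_species_intact) {
        return kDefaultSpecies;
      }
      return SlowSpeciesLookup();
    }
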
@@ -2104,17 +2051,12 @@ void Map::PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
os << "]\n";
}
-
-void Map::PrintGeneralization(FILE* file,
- const char* reason,
- int modify_index,
- int split,
- int descriptors,
- bool constant_to_field,
- Representation old_representation,
- Representation new_representation,
- HeapType* old_field_type,
- HeapType* new_field_type) {
+void Map::PrintGeneralization(
+ FILE* file, const char* reason, int modify_index, int split,
+ int descriptors, bool constant_to_field, Representation old_representation,
+ Representation new_representation, MaybeHandle<FieldType> old_field_type,
+ MaybeHandle<Object> old_value, MaybeHandle<FieldType> new_field_type,
+ MaybeHandle<Object> new_value) {
OFStream os(file);
os << "[generalizing]";
Name* name = instance_descriptors()->GetKey(modify_index);
@@ -2128,11 +2070,19 @@ void Map::PrintGeneralization(FILE* file,
os << "c";
} else {
os << old_representation.Mnemonic() << "{";
- old_field_type->PrintTo(os, HeapType::SEMANTIC_DIM);
+ if (old_field_type.is_null()) {
+ os << Brief(*(old_value.ToHandleChecked()));
+ } else {
+ old_field_type.ToHandleChecked()->PrintTo(os);
+ }
os << "}";
}
os << "->" << new_representation.Mnemonic() << "{";
- new_field_type->PrintTo(os, HeapType::SEMANTIC_DIM);
+ if (new_field_type.is_null()) {
+ os << Brief(*(new_value.ToHandleChecked()));
+ } else {
+ new_field_type.ToHandleChecked()->PrintTo(os);
+ }
os << "} (";
if (strlen(reason) > 0) {
os << reason;
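
PrintGeneralization now takes MaybeHandle pairs so the tracer can show either a field type or, for constant descriptors, the constant value itself; exactly one of the two is expected per side of the transition. A small mirror of that contract:

    #include <optional>
    #include <ostream>
    #include <string>

    // Exactly one of {field type, constant value} is expected present.
    void PrintSideSketch(std::ostream& os,
                         const std::optional<std::string>& field_type,
                         const std::optional<std::string>& constant_value) {
      if (field_type) {
        os << *field_type;
      } else {
        os << *constant_value;  // checked access, like ToHandleChecked()
      }
    }
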
@@ -2569,33 +2519,31 @@ Handle<String> JSReceiver::GetConstructorName(Handle<JSReceiver> receiver) {
Context* JSReceiver::GetCreationContext() {
- if (IsJSBoundFunction()) {
- return JSBoundFunction::cast(this)->creation_context();
+ JSReceiver* receiver = this;
+ while (receiver->IsJSBoundFunction()) {
+ receiver = JSBoundFunction::cast(receiver)->bound_target_function();
}
- Object* constructor = map()->GetConstructor();
+ Object* constructor = receiver->map()->GetConstructor();
JSFunction* function;
if (constructor->IsJSFunction()) {
function = JSFunction::cast(constructor);
} else {
// Functions have null as a constructor,
// but any JSFunction knows its context immediately.
- CHECK(IsJSFunction());
- function = JSFunction::cast(this);
+ CHECK(receiver->IsJSFunction());
+ function = JSFunction::cast(receiver);
}
return function->context()->native_context();
}
-
-static Handle<Object> WrapType(Handle<HeapType> type) {
- if (type->IsClass()) return Map::WeakCellForMap(type->AsClass()->Map());
+static Handle<Object> WrapType(Handle<FieldType> type) {
+ if (type->IsClass()) return Map::WeakCellForMap(type->AsClass());
return type;
}
-
-MaybeHandle<Map> Map::CopyWithField(Handle<Map> map,
- Handle<Name> name,
- Handle<HeapType> type,
+MaybeHandle<Map> Map::CopyWithField(Handle<Map> map, Handle<Name> name,
+ Handle<FieldType> type,
PropertyAttributes attributes,
Representation representation,
TransitionFlag flag) {
@@ -2615,7 +2563,7 @@ MaybeHandle<Map> Map::CopyWithField(Handle<Map> map,
if (map->instance_type() == JS_CONTEXT_EXTENSION_OBJECT_TYPE) {
representation = Representation::Tagged();
- type = HeapType::Any(isolate);
+ type = FieldType::Any(isolate);
}
Handle<Object> wrapped_type(WrapType(type));
@@ -2787,59 +2735,7 @@ void JSObject::UpdatePrototypeUserRegistration(Handle<Map> old_map,
}
}
-
-void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
- int expected_additional_properties) {
- if (object->map() == *new_map) return;
- // If this object is a prototype (the callee will check), invalidate any
- // prototype chains involving it.
- InvalidatePrototypeChains(object->map());
- Handle<Map> old_map(object->map());
-
- // If the map was registered with its prototype before, ensure that it
- // registers with its new prototype now. This preserves the invariant that
- // when a map on a prototype chain is registered with its prototype, then
- // all prototypes further up the chain are also registered with their
- // respective prototypes.
- UpdatePrototypeUserRegistration(old_map, new_map, new_map->GetIsolate());
-
- if (object->HasFastProperties()) {
- if (!new_map->is_dictionary_map()) {
- MigrateFastToFast(object, new_map);
- if (old_map->is_prototype_map()) {
- DCHECK(!old_map->is_stable());
- DCHECK(new_map->is_stable());
- // Clear out the old descriptor array to avoid problems to sharing
- // the descriptor array without using an explicit.
- old_map->InitializeDescriptors(
- old_map->GetHeap()->empty_descriptor_array(),
- LayoutDescriptor::FastPointerLayout());
- // Ensure that no transition was inserted for prototype migrations.
- DCHECK_EQ(0, TransitionArray::NumberOfTransitions(
- old_map->raw_transitions()));
- DCHECK(new_map->GetBackPointer()->IsUndefined());
- }
- } else {
- MigrateFastToSlow(object, new_map, expected_additional_properties);
- }
- } else {
- // For slow-to-fast migrations JSObject::MigrateSlowToFast()
- // must be used instead.
- CHECK(new_map->is_dictionary_map());
-
- // Slow-to-slow migration is trivial.
- object->set_map(*new_map);
- }
-
- // Careful: Don't allocate here!
- // For some callers of this method, |object| might be in an inconsistent
- // state now: the new map might have a new elements_kind, but the object's
- // elements pointer hasn't been updated yet. Callers will fix this, but in
- // the meantime, (indirectly) calling JSObjectVerify() must be avoided.
- // When adding code here, add a DisallowHeapAllocation too.
-}
-
-
+namespace {
// To migrate a fast instance to a fast map:
// - First check whether the instance needs to be rewritten. If not, simply
// change the map.
@@ -2855,48 +2751,46 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
// to temporarily store the inobject properties.
// * If there are properties left in the backing store, install the backing
// store.
-void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
+void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
Isolate* isolate = object->GetIsolate();
Handle<Map> old_map(object->map());
- int old_number_of_fields;
- int number_of_fields = new_map->NumberOfFields();
- int inobject = new_map->GetInObjectProperties();
- int unused = new_map->unused_property_fields();
-
- // Nothing to do if no functions were converted to fields and no smis were
- // converted to doubles.
- if (!old_map->InstancesNeedRewriting(*new_map, number_of_fields, inobject,
- unused, &old_number_of_fields)) {
- object->synchronized_set_map(*new_map);
- return;
- }
-
- int total_size = number_of_fields + unused;
- int external = total_size - inobject;
+ // In case of a regular transition.
+ if (new_map->GetBackPointer() == *old_map) {
+ // If the map does not add named properties, simply set the map.
+ if (old_map->NumberOfOwnDescriptors() ==
+ new_map->NumberOfOwnDescriptors()) {
+ object->synchronized_set_map(*new_map);
+ return;
+ }
- if (number_of_fields != old_number_of_fields &&
- new_map->GetBackPointer() == *old_map) {
PropertyDetails details = new_map->GetLastDescriptorDetails();
+  // Either new_map adds a kDescriptor property, or a kField property for
+ // which there is still space, and which does not require a mutable double
+ // box (an out-of-object double).
+ if (details.location() == kDescriptor ||
+ (old_map->unused_property_fields() > 0 &&
+ ((FLAG_unbox_double_fields && object->properties()->length() == 0) ||
+ !details.representation().IsDouble()))) {
+ object->synchronized_set_map(*new_map);
+ return;
+ }
+ // If there is still space in the object, we need to allocate a mutable
+ // double box.
if (old_map->unused_property_fields() > 0) {
- if (details.representation().IsDouble()) {
- FieldIndex index =
- FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
- if (new_map->IsUnboxedDoubleField(index)) {
- object->RawFastDoublePropertyAtPut(index, 0);
- } else {
- Handle<Object> value = isolate->factory()->NewHeapNumber(0, MUTABLE);
- object->RawFastPropertyAtPut(index, *value);
- }
- }
+ FieldIndex index =
+ FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
+ DCHECK(details.representation().IsDouble());
+ DCHECK(!new_map->IsUnboxedDoubleField(index));
+ Handle<Object> value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+ object->RawFastPropertyAtPut(index, *value);
object->synchronized_set_map(*new_map);
return;
}
- DCHECK(number_of_fields == old_number_of_fields + 1);
// This migration is a transition from a map that has run out of property
- // space. Therefore it could be done by extending the backing store.
- int grow_by = external - object->properties()->length();
+ // space. Extend the backing store.
+ int grow_by = new_map->unused_property_fields() + 1;
Handle<FixedArray> old_storage = handle(object->properties(), isolate);
Handle<FixedArray> new_storage =
isolate->factory()->CopyFixedArrayAndGrow(old_storage, grow_by);
@@ -2908,8 +2802,8 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
} else {
value = isolate->factory()->uninitialized_value();
}
- DCHECK(details.type() == DATA);
- int target_index = details.field_index() - inobject;
+ DCHECK_EQ(DATA, details.type());
+ int target_index = details.field_index() - new_map->GetInObjectProperties();
DCHECK(target_index >= 0); // Must be a backing store index.
new_storage->set(target_index, *value);
@@ -2921,6 +2815,23 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
object->synchronized_set_map(*new_map);
return;
}
+
+ int old_number_of_fields;
+ int number_of_fields = new_map->NumberOfFields();
+ int inobject = new_map->GetInObjectProperties();
+ int unused = new_map->unused_property_fields();
+
+ // Nothing to do if no functions were converted to fields and no smis were
+ // converted to doubles.
+ if (!old_map->InstancesNeedRewriting(*new_map, number_of_fields, inobject,
+ unused, &old_number_of_fields)) {
+ object->synchronized_set_map(*new_map);
+ return;
+ }
+
+ int total_size = number_of_fields + unused;
+ int external = total_size - inobject;
+
Handle<FixedArray> array = isolate->factory()->NewFixedArray(total_size);
Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
@@ -2994,6 +2905,8 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// From here on we cannot fail and we shouldn't GC anymore.
DisallowHeapAllocation no_allocation;
+ Heap* heap = isolate->heap();
+
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
int limit = Min(inobject, number_of_fields);
@@ -3006,12 +2919,16 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
DCHECK(value->IsMutableHeapNumber());
object->RawFastDoublePropertyAtPut(index,
HeapNumber::cast(value)->value());
+ if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
+ // Transition from tagged to untagged slot.
+ heap->ClearRecordedSlot(*object,
+ HeapObject::RawField(*object, index.offset()));
+ }
} else {
object->RawFastPropertyAtPut(index, value);
}
}
- Heap* heap = isolate->heap();
// If there are properties in the new backing store, trim it to the correct
// size and install the backing store into the object.
@@ -3038,6 +2955,173 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
object->synchronized_set_map(*new_map);
}
+void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
+ int expected_additional_properties) {
+ // The global object is always normalized.
+ DCHECK(!object->IsJSGlobalObject());
+ // JSGlobalProxy must never be normalized
+ DCHECK(!object->IsJSGlobalProxy());
+
+ Isolate* isolate = object->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Map> map(object->map());
+
+ // Allocate new content.
+ int real_size = map->NumberOfOwnDescriptors();
+ int property_count = real_size;
+ if (expected_additional_properties > 0) {
+ property_count += expected_additional_properties;
+ } else {
+ property_count += 2; // Make space for two more properties.
+ }
+ Handle<NameDictionary> dictionary =
+ NameDictionary::New(isolate, property_count);
+
+ Handle<DescriptorArray> descs(map->instance_descriptors());
+ for (int i = 0; i < real_size; i++) {
+ PropertyDetails details = descs->GetDetails(i);
+ Handle<Name> key(descs->GetKey(i));
+ switch (details.type()) {
+ case DATA_CONSTANT: {
+ Handle<Object> value(descs->GetConstant(i), isolate);
+ PropertyDetails d(details.attributes(), DATA, i + 1,
+ PropertyCellType::kNoCell);
+ dictionary = NameDictionary::Add(dictionary, key, value, d);
+ break;
+ }
+ case DATA: {
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ Handle<Object> value;
+ if (object->IsUnboxedDoubleField(index)) {
+ double old_value = object->RawFastDoublePropertyAt(index);
+ value = isolate->factory()->NewHeapNumber(old_value);
+ } else {
+ value = handle(object->RawFastPropertyAt(index), isolate);
+ if (details.representation().IsDouble()) {
+ DCHECK(value->IsMutableHeapNumber());
+ Handle<HeapNumber> old = Handle<HeapNumber>::cast(value);
+ value = isolate->factory()->NewHeapNumber(old->value());
+ }
+ }
+ PropertyDetails d(details.attributes(), DATA, i + 1,
+ PropertyCellType::kNoCell);
+ dictionary = NameDictionary::Add(dictionary, key, value, d);
+ break;
+ }
+ case ACCESSOR: {
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ Handle<Object> value(object->RawFastPropertyAt(index), isolate);
+ PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
+ PropertyCellType::kNoCell);
+ dictionary = NameDictionary::Add(dictionary, key, value, d);
+ break;
+ }
+ case ACCESSOR_CONSTANT: {
+ Handle<Object> value(descs->GetCallbacksObject(i), isolate);
+ PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
+ PropertyCellType::kNoCell);
+ dictionary = NameDictionary::Add(dictionary, key, value, d);
+ break;
+ }
+ }
+ }
+
+ // Copy the next enumeration index from instance descriptor.
+ dictionary->SetNextEnumerationIndex(real_size + 1);
+
+ // From here on we cannot fail and we shouldn't GC anymore.
+ DisallowHeapAllocation no_allocation;
+
+ // Resize the object in the heap if necessary.
+ int new_instance_size = new_map->instance_size();
+ int instance_size_delta = map->instance_size() - new_instance_size;
+ DCHECK(instance_size_delta >= 0);
+
+ if (instance_size_delta > 0) {
+ Heap* heap = isolate->heap();
+ heap->CreateFillerObjectAt(object->address() + new_instance_size,
+ instance_size_delta);
+ heap->AdjustLiveBytes(*object, -instance_size_delta,
+ Heap::CONCURRENT_TO_SWEEPER);
+ }
+
+ // We are storing the new map using release store after creating a filler for
+ // the left-over space to avoid races with the sweeper thread.
+ object->synchronized_set_map(*new_map);
+
+ object->set_properties(*dictionary);
+
+ // Ensure that in-object space of slow-mode object does not contain random
+ // garbage.
+ int inobject_properties = new_map->GetInObjectProperties();
+ for (int i = 0; i < inobject_properties; i++) {
+ FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
+ object->RawFastPropertyAtPut(index, Smi::FromInt(0));
+ }
+
+ isolate->counters()->props_to_dictionary()->Increment();
+
+#ifdef DEBUG
+ if (FLAG_trace_normalization) {
+ OFStream os(stdout);
+ os << "Object properties have been normalized:\n";
+ object->Print(os);
+ }
+#endif
+}
+
+} // namespace
+
+void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
+ int expected_additional_properties) {
+ if (object->map() == *new_map) return;
+ Handle<Map> old_map(object->map());
+ if (old_map->is_prototype_map()) {
+ // If this object is a prototype (the callee will check), invalidate any
+ // prototype chains involving it.
+ InvalidatePrototypeChains(object->map());
+
+ // If the map was registered with its prototype before, ensure that it
+ // registers with its new prototype now. This preserves the invariant that
+ // when a map on a prototype chain is registered with its prototype, then
+ // all prototypes further up the chain are also registered with their
+ // respective prototypes.
+ UpdatePrototypeUserRegistration(old_map, new_map, new_map->GetIsolate());
+ }
+
+ if (old_map->is_dictionary_map()) {
+ // For slow-to-fast migrations JSObject::MigrateSlowToFast()
+ // must be used instead.
+ CHECK(new_map->is_dictionary_map());
+
+ // Slow-to-slow migration is trivial.
+ object->set_map(*new_map);
+ } else if (!new_map->is_dictionary_map()) {
+ MigrateFastToFast(object, new_map);
+ if (old_map->is_prototype_map()) {
+ DCHECK(!old_map->is_stable());
+ DCHECK(new_map->is_stable());
+      // Clear out the old descriptor array to avoid problems with sharing
+      // the descriptor array without an explicit ownership transfer.
+ old_map->InitializeDescriptors(
+ old_map->GetHeap()->empty_descriptor_array(),
+ LayoutDescriptor::FastPointerLayout());
+ // Ensure that no transition was inserted for prototype migrations.
+ DCHECK_EQ(
+ 0, TransitionArray::NumberOfTransitions(old_map->raw_transitions()));
+ DCHECK(new_map->GetBackPointer()->IsUndefined());
+ }
+ } else {
+ MigrateFastToSlow(object, new_map, expected_additional_properties);
+ }
+
+ // Careful: Don't allocate here!
+ // For some callers of this method, |object| might be in an inconsistent
+ // state now: the new map might have a new elements_kind, but the object's
+ // elements pointer hasn't been updated yet. Callers will fix this, but in
+ // the meantime, (indirectly) calling JSObjectVerify() must be avoided.
+ // When adding code here, add a DisallowHeapAllocation too.
+}
int Map::NumberOfFields() {
DescriptorArray* descriptors = instance_descriptors();
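
Net effect of this block: MigrateFastToFast and MigrateFastToSlow become file-local helpers in an anonymous namespace, MigrateFastToFast gains early exits for simple back-pointer transitions (descriptor-only additions, in-place field additions, backing-store growth), and MigrateToMap is reduced to prototype bookkeeping plus a dispatch on the two maps' dictionary-ness. The dispatch, condensed into a standalone sketch:

    #include <cassert>

    enum class MapMode { kFast, kDictionary };

    // MapMode stands in for Map::is_dictionary_map() on the old and
    // new map; the bodies are the helpers named in the patch.
    void MigrateToMapSketch(MapMode old_mode, MapMode new_mode) {
      if (old_mode == MapMode::kDictionary) {
        assert(new_mode == MapMode::kDictionary);  // slow-to-fast is elsewhere
        // slow-to-slow: just install the new map
      } else if (new_mode == MapMode::kFast) {
        // MigrateFastToFast(object, new_map);
      } else {
        // MigrateFastToSlow(object, new_map, expected_additional_properties);
      }
    }
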
@@ -3061,7 +3145,7 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(
for (int i = 0; i < number_of_own_descriptors; i++) {
descriptors->SetRepresentation(i, Representation::Tagged());
if (descriptors->GetDetails(i).type() == DATA) {
- descriptors->SetValue(i, HeapType::Any());
+ descriptors->SetValue(i, FieldType::Any());
}
}
@@ -3093,16 +3177,18 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(
}
if (FLAG_trace_generalization) {
- HeapType* field_type =
- (details.type() == DATA)
- ? map->instance_descriptors()->GetFieldType(modify_index)
- : NULL;
+ MaybeHandle<FieldType> field_type = FieldType::None(isolate);
+ if (details.type() == DATA) {
+ field_type = handle(
+ map->instance_descriptors()->GetFieldType(modify_index), isolate);
+ }
map->PrintGeneralization(
stdout, reason, modify_index, new_map->NumberOfOwnDescriptors(),
new_map->NumberOfOwnDescriptors(),
details.type() == DATA_CONSTANT && store_mode == FORCE_FIELD,
details.representation(), Representation::Tagged(), field_type,
- HeapType::Any());
+ MaybeHandle<Object>(), FieldType::Any(isolate),
+ MaybeHandle<Object>());
}
}
return new_map;
@@ -3195,7 +3281,7 @@ Map* Map::FindLastMatchMap(int verbatim,
if (!details.representation().Equals(next_details.representation())) break;
if (next_details.location() == kField) {
- HeapType* next_field_type = next_descriptors->GetFieldType(i);
+ FieldType* next_field_type = next_descriptors->GetFieldType(i);
if (!descriptors->GetFieldType(i)->NowIs(next_field_type)) {
break;
}
@@ -3251,42 +3337,41 @@ void Map::UpdateFieldType(int descriptor, Handle<Name> name,
instance_descriptors()->Replace(descriptor, &d);
}
-
-bool FieldTypeIsCleared(Representation rep, HeapType* type) {
- return type->Is(HeapType::None()) && rep.IsHeapObject();
+bool FieldTypeIsCleared(Representation rep, FieldType* type) {
+ return type->IsNone() && rep.IsHeapObject();
}
// static
-Handle<HeapType> Map::GeneralizeFieldType(Representation rep1,
- Handle<HeapType> type1,
- Representation rep2,
- Handle<HeapType> type2,
- Isolate* isolate) {
+Handle<FieldType> Map::GeneralizeFieldType(Representation rep1,
+ Handle<FieldType> type1,
+ Representation rep2,
+ Handle<FieldType> type2,
+ Isolate* isolate) {
// Cleared field types need special treatment. They represent lost knowledge,
// so we must be conservative: their generalization with any other type
// is "Any".
if (FieldTypeIsCleared(rep1, *type1) || FieldTypeIsCleared(rep2, *type2)) {
- return HeapType::Any(isolate);
+ return FieldType::Any(isolate);
}
if (type1->NowIs(type2)) return type2;
if (type2->NowIs(type1)) return type1;
- return HeapType::Any(isolate);
+ return FieldType::Any(isolate);
}
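+// Worked examples of the join computed above (illustrative; Class(m) stands
+// for a FieldType tracking instances of a map m, with a HeapObject
+// representation on both sides unless noted):
+//   GeneralizeFieldType(Class(m), Class(m)) -> Class(m)  (NowIs both ways)
+//   GeneralizeFieldType(Class(m), Class(n)) -> Any       (unrelated classes)
+//   GeneralizeFieldType(None,     Class(m)) -> Any       (cleared field type)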
// static
void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
Representation new_representation,
- Handle<HeapType> new_field_type) {
+ Handle<FieldType> new_field_type) {
Isolate* isolate = map->GetIsolate();
// Check if we actually need to generalize the field type at all.
Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
Representation old_representation =
old_descriptors->GetDetails(modify_index).representation();
- Handle<HeapType> old_field_type(old_descriptors->GetFieldType(modify_index),
- isolate);
+ Handle<FieldType> old_field_type(old_descriptors->GetFieldType(modify_index),
+ isolate);
if (old_representation.Equals(new_representation) &&
!FieldTypeIsCleared(new_representation, *new_field_type) &&
@@ -3320,20 +3405,16 @@ void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
if (FLAG_trace_generalization) {
map->PrintGeneralization(
- stdout, "field type generalization",
- modify_index, map->NumberOfOwnDescriptors(),
- map->NumberOfOwnDescriptors(), false,
- details.representation(), details.representation(),
- *old_field_type, *new_field_type);
+ stdout, "field type generalization", modify_index,
+ map->NumberOfOwnDescriptors(), map->NumberOfOwnDescriptors(), false,
+ details.representation(), details.representation(), old_field_type,
+ MaybeHandle<Object>(), new_field_type, MaybeHandle<Object>());
}
}
-
-static inline Handle<HeapType> GetFieldType(Isolate* isolate,
- Handle<DescriptorArray> descriptors,
- int descriptor,
- PropertyLocation location,
- Representation representation) {
+static inline Handle<FieldType> GetFieldType(
+ Isolate* isolate, Handle<DescriptorArray> descriptors, int descriptor,
+ PropertyLocation location, Representation representation) {
#ifdef DEBUG
PropertyDetails details = descriptors->GetDetails(descriptor);
DCHECK_EQ(kData, details.kind());
@@ -3378,7 +3459,7 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
PropertyKind new_kind,
PropertyAttributes new_attributes,
Representation new_representation,
- Handle<HeapType> new_field_type,
+ Handle<FieldType> new_field_type,
StoreMode store_mode) {
DCHECK_NE(kAccessor, new_kind); // TODO(ishell): not supported yet.
DCHECK(store_mode != FORCE_FIELD || modify_index >= 0);
@@ -3407,8 +3488,9 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
stdout, "uninitialized field", modify_index,
old_map->NumberOfOwnDescriptors(),
old_map->NumberOfOwnDescriptors(), false, old_representation,
- new_representation, old_descriptors->GetFieldType(modify_index),
- *new_field_type);
+ new_representation,
+ handle(old_descriptors->GetFieldType(modify_index), isolate),
+ MaybeHandle<Object>(), new_field_type, MaybeHandle<Object>());
}
Handle<Map> field_owner(old_map->FindFieldOwner(modify_index), isolate);
@@ -3524,11 +3606,11 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
PropertyLocation tmp_location = tmp_details.location();
if (tmp_location == kField) {
if (next_kind == kData) {
- Handle<HeapType> next_field_type;
+ Handle<FieldType> next_field_type;
if (modify_index == i) {
next_field_type = new_field_type;
if (!property_kind_reconfiguration) {
- Handle<HeapType> old_field_type =
+ Handle<FieldType> old_field_type =
GetFieldType(isolate, old_descriptors, i,
old_details.location(), tmp_representation);
Representation old_representation = old_details.representation();
@@ -3537,7 +3619,7 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
next_field_type, isolate);
}
} else {
- Handle<HeapType> old_field_type =
+ Handle<FieldType> old_field_type =
GetFieldType(isolate, old_descriptors, i, old_details.location(),
tmp_representation);
next_field_type = old_field_type;
@@ -3692,17 +3774,17 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
if (next_location == kField) {
if (next_kind == kData) {
- Handle<HeapType> target_field_type =
+ Handle<FieldType> target_field_type =
GetFieldType(isolate, target_descriptors, i,
target_details.location(), next_representation);
- Handle<HeapType> next_field_type;
+ Handle<FieldType> next_field_type;
if (modify_index == i) {
next_field_type = GeneralizeFieldType(
target_details.representation(), target_field_type,
new_representation, new_field_type, isolate);
if (!property_kind_reconfiguration) {
- Handle<HeapType> old_field_type =
+ Handle<FieldType> old_field_type =
GetFieldType(isolate, old_descriptors, i,
old_details.location(), next_representation);
next_field_type = GeneralizeFieldType(
@@ -3710,7 +3792,7 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
next_representation, next_field_type, isolate);
}
} else {
- Handle<HeapType> old_field_type =
+ Handle<FieldType> old_field_type =
GetFieldType(isolate, old_descriptors, i, old_details.location(),
next_representation);
next_field_type = GeneralizeFieldType(
@@ -3769,11 +3851,11 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
if (next_location == kField) {
if (next_kind == kData) {
- Handle<HeapType> next_field_type;
+ Handle<FieldType> next_field_type;
if (modify_index == i) {
next_field_type = new_field_type;
if (!property_kind_reconfiguration) {
- Handle<HeapType> old_field_type =
+ Handle<FieldType> old_field_type =
GetFieldType(isolate, old_descriptors, i,
old_details.location(), next_representation);
next_field_type = GeneralizeFieldType(
@@ -3781,7 +3863,7 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
next_representation, next_field_type, isolate);
}
} else {
- Handle<HeapType> old_field_type =
+ Handle<FieldType> old_field_type =
GetFieldType(isolate, old_descriptors, i, old_details.location(),
next_representation);
next_field_type = old_field_type;
@@ -3849,23 +3931,28 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
if (FLAG_trace_generalization && modify_index >= 0) {
PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
PropertyDetails new_details = new_descriptors->GetDetails(modify_index);
- Handle<HeapType> old_field_type =
- (old_details.type() == DATA)
- ? handle(old_descriptors->GetFieldType(modify_index), isolate)
- : HeapType::Constant(
- handle(old_descriptors->GetValue(modify_index), isolate),
- isolate);
- Handle<HeapType> new_field_type =
- (new_details.type() == DATA)
- ? handle(new_descriptors->GetFieldType(modify_index), isolate)
- : HeapType::Constant(
- handle(new_descriptors->GetValue(modify_index), isolate),
- isolate);
+ MaybeHandle<FieldType> old_field_type;
+ MaybeHandle<FieldType> new_field_type;
+ MaybeHandle<Object> old_value;
+ MaybeHandle<Object> new_value;
+ if (old_details.type() == DATA) {
+ old_field_type =
+ handle(old_descriptors->GetFieldType(modify_index), isolate);
+ } else {
+ old_value = handle(old_descriptors->GetValue(modify_index), isolate);
+ }
+ if (new_details.type() == DATA) {
+ new_field_type =
+ handle(new_descriptors->GetFieldType(modify_index), isolate);
+ } else {
+ new_value = handle(new_descriptors->GetValue(modify_index), isolate);
+ }
+
old_map->PrintGeneralization(
stdout, "", modify_index, split_nof, old_nof,
old_details.location() == kDescriptor && store_mode == FORCE_FIELD,
old_details.representation(), new_details.representation(),
- *old_field_type, *new_field_type);
+ old_field_type, old_value, new_field_type, new_value);
}
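+ // The MaybeHandle-based signature lets the printer report either a field
+ // type (for DATA properties) or a constant value (for DATA_CONSTANT ones),
+ // instead of wrapping constants in a synthetic HeapType::Constant.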
Handle<LayoutDescriptor> new_layout_descriptor =
@@ -3891,7 +3978,7 @@ Handle<Map> Map::GeneralizeAllFieldRepresentations(
if (details.type() == DATA) {
map = ReconfigureProperty(map, i, kData, details.attributes(),
Representation::Tagged(),
- HeapType::Any(map->GetIsolate()), FORCE_FIELD);
+ FieldType::Any(map->GetIsolate()), FORCE_FIELD);
}
}
return map;
@@ -3940,7 +4027,7 @@ MaybeHandle<Map> Map::TryUpdate(Handle<Map> old_map) {
}
switch (new_details.type()) {
case DATA: {
- HeapType* new_type = new_descriptors->GetFieldType(i);
+ FieldType* new_type = new_descriptors->GetFieldType(i);
// Cleared field types need special treatment. They represent lost
// knowledge, so we must first generalize the new_type to "Any".
if (FieldTypeIsCleared(new_details.representation(), new_type)) {
@@ -3948,7 +4035,7 @@ MaybeHandle<Map> Map::TryUpdate(Handle<Map> old_map) {
}
PropertyType old_property_type = old_details.type();
if (old_property_type == DATA) {
- HeapType* old_type = old_descriptors->GetFieldType(i);
+ FieldType* old_type = old_descriptors->GetFieldType(i);
if (FieldTypeIsCleared(old_details.representation(), old_type) ||
!old_type->NowIs(new_type)) {
return MaybeHandle<Map>();
@@ -3964,8 +4051,8 @@ MaybeHandle<Map> Map::TryUpdate(Handle<Map> old_map) {
}
case ACCESSOR: {
#ifdef DEBUG
- HeapType* new_type = new_descriptors->GetFieldType(i);
- DCHECK(HeapType::Any()->Is(new_type));
+ FieldType* new_type = new_descriptors->GetFieldType(i);
+ DCHECK(new_type->IsAny());
#endif
break;
}
@@ -3990,12 +4077,13 @@ MaybeHandle<Map> Map::TryUpdate(Handle<Map> old_map) {
Handle<Map> Map::Update(Handle<Map> map) {
if (!map->is_deprecated()) return map;
return ReconfigureProperty(map, -1, kData, NONE, Representation::None(),
- HeapType::None(map->GetIsolate()),
+ FieldType::None(map->GetIsolate()),
ALLOW_IN_DESCRIPTOR);
}
Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
+ ShouldThrow should_throw,
Handle<Object> value) {
Isolate* isolate = it->isolate();
// Make sure that the top context does not change when doing callbacks or
@@ -4009,7 +4097,7 @@ Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
Handle<JSObject> holder = it->GetHolder<JSObject>();
v8::Local<v8::Value> result;
PropertyCallbackArguments args(isolate, interceptor->data(),
- *it->GetReceiver(), *holder);
+ *it->GetReceiver(), *holder, should_throw);
if (it->IsElement()) {
uint32_t index = it->index();
@@ -4062,6 +4150,7 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
LanguageMode language_mode,
StoreFromKeyed store_mode,
bool* found) {
+ it->UpdateProtector();
ShouldThrow should_throw =
is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
@@ -4090,7 +4179,8 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
case LookupIterator::INTERCEPTOR:
if (it->HolderIsReceiverOrHiddenPrototype()) {
- Maybe<bool> result = JSObject::SetPropertyWithInterceptor(it, value);
+ Maybe<bool> result =
+ JSObject::SetPropertyWithInterceptor(it, should_throw, value);
if (result.IsNothing() || result.FromJust()) return result;
} else {
Maybe<PropertyAttributes> maybe_attributes =
@@ -4155,16 +4245,12 @@ Maybe<bool> Object::SetPropertyInternal(LookupIterator* it,
Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
LanguageMode language_mode,
StoreFromKeyed store_mode) {
- ShouldThrow should_throw =
- is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
- if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate()) {
- RETURN_FAILURE(it->isolate(), should_throw,
- NewTypeError(MessageTemplate::kProxyPrivate));
- }
bool found = false;
Maybe<bool> result =
SetPropertyInternal(it, value, language_mode, store_mode, &found);
if (found) return result;
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
return AddDataProperty(it, value, NONE, should_throw, store_mode);
}
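+// Note on the ShouldThrow mapping above: with DONT_THROW (sloppy mode) a
+// failed store simply returns Just(false), while THROW_ON_ERROR (strict
+// mode) raises a TypeError through RETURN_FAILURE.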
@@ -4172,13 +4258,7 @@ Maybe<bool> Object::SetProperty(LookupIterator* it, Handle<Object> value,
Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
LanguageMode language_mode,
StoreFromKeyed store_mode) {
- ShouldThrow should_throw =
- is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
Isolate* isolate = it->isolate();
- if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate()) {
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kProxyPrivate));
- }
bool found = false;
Maybe<bool> result =
@@ -4188,6 +4268,9 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
// The property either doesn't exist on the holder or exists there as a data
// property.
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+
if (!it->GetReceiver()->IsJSReceiver()) {
return WriteToReadOnlyProperty(it, value, should_throw);
}
@@ -4207,14 +4290,21 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
}
break;
- case LookupIterator::INTEGER_INDEXED_EXOTIC:
case LookupIterator::ACCESSOR:
+ if (own_lookup.GetAccessors()->IsAccessorInfo()) {
+ if (own_lookup.IsReadOnly()) {
+ return WriteToReadOnlyProperty(&own_lookup, value, should_throw);
+ }
+ return JSObject::SetPropertyWithAccessor(&own_lookup, value,
+ should_throw);
+ }
+ // Fall through.
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
return RedefineIncompatibleProperty(isolate, it->GetName(), value,
should_throw);
case LookupIterator::DATA: {
- PropertyDetails details = own_lookup.property_details();
- if (details.IsReadOnly()) {
+ if (own_lookup.IsReadOnly()) {
return WriteToReadOnlyProperty(&own_lookup, value, should_throw);
}
return SetDataProperty(&own_lookup, value);
@@ -4252,28 +4342,13 @@ Maybe<bool> Object::SetSuperProperty(LookupIterator* it, Handle<Object> value,
store_mode);
}
-
-MaybeHandle<Object> Object::ReadAbsentProperty(LookupIterator* it,
- LanguageMode language_mode) {
- if (is_strong(language_mode)) {
- THROW_NEW_ERROR(it->isolate(),
- NewTypeError(MessageTemplate::kStrongPropertyAccess,
- it->GetName(), it->GetReceiver()),
- Object);
- }
+MaybeHandle<Object> Object::ReadAbsentProperty(LookupIterator* it) {
return it->isolate()->factory()->undefined_value();
}
MaybeHandle<Object> Object::ReadAbsentProperty(Isolate* isolate,
Handle<Object> receiver,
- Handle<Object> name,
- LanguageMode language_mode) {
- if (is_strong(language_mode)) {
- THROW_NEW_ERROR(
- isolate,
- NewTypeError(MessageTemplate::kStrongPropertyAccess, name, receiver),
- Object);
- }
+ Handle<Object> name) {
return isolate->factory()->undefined_value();
}
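+// With the strong-mode kStrongPropertyAccess paths removed, both overloads
+// now unconditionally map absent reads to undefined.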
@@ -4330,8 +4405,7 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
// Fetch before transforming the object since the encoding may become
// incompatible with what's cached in |it|.
bool is_observed = receiver->map()->is_observed() &&
- (it->IsElement() ||
- !it->isolate()->IsInternallyUsedPropertyName(it->name()));
+ (it->IsElement() || !it->name()->IsPrivate());
MaybeHandle<Object> maybe_old;
if (is_observed) maybe_old = it->GetDataValue();
@@ -4341,13 +4415,6 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
if (!value->IsNumber() && !value->IsUndefined()) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
it->isolate(), to_assign, Object::ToNumber(value), Nothing<bool>());
- // ToNumber above might modify the receiver, causing the cached
- // holder_map to mismatch the actual holder->map() after this point.
- // Reload the map to be in consistent state. Other cached state cannot
- // have been invalidated since typed array elements cannot be reconfigured
- // in any way.
- it->ReloadHolderMap();
-
// We have to recheck the length. However, it can only change if the
// underlying buffer was neutered, so just check that.
if (Handle<JSArrayBufferView>::cast(receiver)->WasNeutered()) {
@@ -4427,8 +4494,11 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
PropertyAttributes attributes,
ShouldThrow should_throw,
StoreFromKeyed store_mode) {
- DCHECK(!it->GetReceiver()->IsJSProxy());
if (!it->GetReceiver()->IsJSObject()) {
+ if (it->GetReceiver()->IsJSProxy() && it->GetName()->IsPrivate()) {
+ RETURN_FAILURE(it->isolate(), should_throw,
+ NewTypeError(MessageTemplate::kProxyPrivate));
+ }
return CannotCreateProperty(it->isolate(), it->GetReceiver(), it->GetName(),
value, should_throw);
}
@@ -4443,8 +4513,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
Isolate* isolate = it->isolate();
- if (!receiver->map()->is_extensible() &&
- (it->IsElement() || !isolate->IsInternallyUsedPropertyName(it->name()))) {
+ if (it->ExtendingNonExtensible(receiver)) {
RETURN_FAILURE(
isolate, should_throw,
NewTypeError(MessageTemplate::kObjectNotExtensible, it->GetName()));
@@ -4477,14 +4546,13 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
} else {
// Migrate to the most up-to-date map that will be able to store |value|
// under it->name() with |attributes|.
- it->PrepareTransitionToDataProperty(value, attributes, store_mode);
+ it->PrepareTransitionToDataProperty(receiver, value, attributes,
+ store_mode);
DCHECK_EQ(LookupIterator::TRANSITION, it->state());
- it->ApplyTransitionToDataProperty();
+ it->ApplyTransitionToDataProperty(receiver);
// TODO(verwaest): Encapsulate dictionary handling better.
if (receiver->map()->is_dictionary_map()) {
- // TODO(verwaest): Probably should ensure this is done beforehand.
- it->InternalizeName();
// TODO(dcarney): just populate TransitionPropertyCell here?
JSObject::AddSlowProperty(receiver, it->name(), value, attributes);
} else {
@@ -4493,8 +4561,7 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
}
// Send the change record if there are observers.
- if (receiver->map()->is_observed() &&
- !isolate->IsInternallyUsedPropertyName(it->name())) {
+ if (receiver->map()->is_observed() && !it->name()->IsPrivate()) {
RETURN_ON_EXCEPTION_VALUE(isolate, JSObject::EnqueueChangeRecord(
receiver, "add", it->name(),
it->factory()->the_hole_value()),
@@ -5195,8 +5262,7 @@ void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
DCHECK(maybe.IsJust());
DCHECK(!it.IsFound());
- DCHECK(object->map()->is_extensible() ||
- it.isolate()->IsInternallyUsedPropertyName(name));
+ DCHECK(object->map()->is_extensible() || name->IsPrivate());
#endif
CHECK(AddDataProperty(&it, value, attributes, THROW_ON_ERROR,
CERTAINLY_NOT_STORE_FROM_KEYED)
@@ -5204,20 +5270,13 @@ void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
}
-// static
-void ExecutableAccessorInfo::ClearSetter(Handle<ExecutableAccessorInfo> info) {
- Handle<Object> object = v8::FromCData(info->GetIsolate(), nullptr);
- info->set_setter(*object);
-}
-
-
// Reconfigures a property to a data property with attributes, even if it is not
// reconfigurable.
// Requires a LookupIterator that does not look at the prototype chain beyond
// hidden prototypes.
MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
- ExecutableAccessorInfoHandling handling) {
+ AccessorInfoHandling handling) {
MAYBE_RETURN_NULL(DefineOwnPropertyIgnoreAttributes(
it, value, attributes, THROW_ON_ERROR, handling));
return value;
@@ -5226,11 +5285,11 @@ MaybeHandle<Object> JSObject::DefineOwnPropertyIgnoreAttributes(
Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
- ShouldThrow should_throw, ExecutableAccessorInfoHandling handling) {
+ ShouldThrow should_throw, AccessorInfoHandling handling) {
+ it->UpdateProtector();
Handle<JSObject> object = Handle<JSObject>::cast(it->GetReceiver());
bool is_observed = object->map()->is_observed() &&
- (it->IsElement() ||
- !it->isolate()->IsInternallyUsedPropertyName(it->name()));
+ (it->IsElement() || !it->name()->IsPrivate());
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -5257,7 +5316,8 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
// they throw. Here we should do the same.
case LookupIterator::INTERCEPTOR:
if (handling == DONT_FORCE_FIELD) {
- Maybe<bool> result = JSObject::SetPropertyWithInterceptor(it, value);
+ Maybe<bool> result =
+ JSObject::SetPropertyWithInterceptor(it, should_throw, value);
if (result.IsNothing() || result.FromJust()) return result;
}
break;
@@ -5265,32 +5325,26 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
case LookupIterator::ACCESSOR: {
Handle<Object> accessors = it->GetAccessors();
- // Special handling for ExecutableAccessorInfo, which behaves like a
- // data property.
- if (accessors->IsExecutableAccessorInfo() &&
- handling == DONT_FORCE_FIELD) {
- PropertyDetails details = it->property_details();
+ // Special handling for AccessorInfo, which behaves like a data
+ // property.
+ if (accessors->IsAccessorInfo() && handling == DONT_FORCE_FIELD) {
+ PropertyAttributes current_attributes = it->property_attributes();
// Ensure the context isn't changed after calling into accessors.
AssertNoContextChange ncc(it->isolate());
+ // Update the attributes before calling the setter. The setter may
+ // later change the shape of the property.
+ if (current_attributes != attributes) {
+ it->TransitionToAccessorPair(accessors, attributes);
+ }
+
Maybe<bool> result =
JSObject::SetPropertyWithAccessor(it, value, should_throw);
- if (result.IsNothing() || !result.FromJust()) return result;
-
- if (details.attributes() == attributes) return Just(true);
-
- // Reconfigure the accessor if attributes mismatch.
- Handle<ExecutableAccessorInfo> new_data = Accessors::CloneAccessor(
- it->isolate(), Handle<ExecutableAccessorInfo>::cast(accessors));
- new_data->set_property_attributes(attributes);
- // By clearing the setter we don't have to introduce a lookup to
- // the setter, simply make it unavailable to reflect the
- // attributes.
- if (attributes & READ_ONLY) {
- ExecutableAccessorInfo::ClearSetter(new_data);
+
+ if (current_attributes == attributes || result.IsNothing()) {
+ return result;
}
- it->TransitionToAccessorPair(new_data, attributes);
} else {
it->ReconfigureDataProperty(value, attributes);
}
@@ -5310,10 +5364,9 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
should_throw);
case LookupIterator::DATA: {
- PropertyDetails details = it->property_details();
Handle<Object> old_value = it->factory()->the_hole_value();
// Regular property update if the attributes match.
- if (details.attributes() == attributes) {
+ if (it->property_attributes() == attributes) {
return SetDataProperty(it, value);
}
@@ -5347,32 +5400,29 @@ Maybe<bool> JSObject::DefineOwnPropertyIgnoreAttributes(
CERTAINLY_NOT_STORE_FROM_KEYED);
}
-
MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
- PropertyAttributes attributes, ExecutableAccessorInfoHandling handling) {
+ PropertyAttributes attributes) {
DCHECK(!value->IsTheHole());
LookupIterator it(object, name, LookupIterator::OWN);
- return DefineOwnPropertyIgnoreAttributes(&it, value, attributes, handling);
+ return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
}
-
MaybeHandle<Object> JSObject::SetOwnElementIgnoreAttributes(
Handle<JSObject> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes, ExecutableAccessorInfoHandling handling) {
+ PropertyAttributes attributes) {
Isolate* isolate = object->GetIsolate();
LookupIterator it(isolate, object, index, LookupIterator::OWN);
- return DefineOwnPropertyIgnoreAttributes(&it, value, attributes, handling);
+ return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
}
-
MaybeHandle<Object> JSObject::DefinePropertyOrElementIgnoreAttributes(
Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
- PropertyAttributes attributes, ExecutableAccessorInfoHandling handling) {
+ PropertyAttributes attributes) {
Isolate* isolate = object->GetIsolate();
LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name,
LookupIterator::OWN);
- return DefineOwnPropertyIgnoreAttributes(&it, value, attributes, handling);
+ return DefineOwnPropertyIgnoreAttributes(&it, value, attributes);
}
@@ -5391,7 +5441,8 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
return Just(ABSENT);
}
PropertyCallbackArguments args(isolate, interceptor->data(),
- *it->GetReceiver(), *holder);
+ *it->GetReceiver(), *holder,
+ Object::DONT_THROW);
if (!interceptor->query()->IsUndefined()) {
v8::Local<v8::Integer> result;
if (it->IsElement()) {
@@ -5468,7 +5519,7 @@ Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
return Just(ABSENT);
case LookupIterator::ACCESSOR:
case LookupIterator::DATA:
- return Just(it->property_details().attributes());
+ return Just(it->property_attributes());
}
}
return Just(ABSENT);
@@ -5531,123 +5582,6 @@ void JSObject::NormalizeProperties(Handle<JSObject> object,
}
-void JSObject::MigrateFastToSlow(Handle<JSObject> object,
- Handle<Map> new_map,
- int expected_additional_properties) {
- // The global object is always normalized.
- DCHECK(!object->IsJSGlobalObject());
- // JSGlobalProxy must never be normalized
- DCHECK(!object->IsJSGlobalProxy());
-
- Isolate* isolate = object->GetIsolate();
- HandleScope scope(isolate);
- Handle<Map> map(object->map());
-
- // Allocate new content.
- int real_size = map->NumberOfOwnDescriptors();
- int property_count = real_size;
- if (expected_additional_properties > 0) {
- property_count += expected_additional_properties;
- } else {
- property_count += 2; // Make space for two more properties.
- }
- Handle<NameDictionary> dictionary =
- NameDictionary::New(isolate, property_count);
-
- Handle<DescriptorArray> descs(map->instance_descriptors());
- for (int i = 0; i < real_size; i++) {
- PropertyDetails details = descs->GetDetails(i);
- Handle<Name> key(descs->GetKey(i));
- switch (details.type()) {
- case DATA_CONSTANT: {
- Handle<Object> value(descs->GetConstant(i), isolate);
- PropertyDetails d(details.attributes(), DATA, i + 1,
- PropertyCellType::kNoCell);
- dictionary = NameDictionary::Add(dictionary, key, value, d);
- break;
- }
- case DATA: {
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- Handle<Object> value;
- if (object->IsUnboxedDoubleField(index)) {
- double old_value = object->RawFastDoublePropertyAt(index);
- value = isolate->factory()->NewHeapNumber(old_value);
- } else {
- value = handle(object->RawFastPropertyAt(index), isolate);
- if (details.representation().IsDouble()) {
- DCHECK(value->IsMutableHeapNumber());
- Handle<HeapNumber> old = Handle<HeapNumber>::cast(value);
- value = isolate->factory()->NewHeapNumber(old->value());
- }
- }
- PropertyDetails d(details.attributes(), DATA, i + 1,
- PropertyCellType::kNoCell);
- dictionary = NameDictionary::Add(dictionary, key, value, d);
- break;
- }
- case ACCESSOR: {
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- Handle<Object> value(object->RawFastPropertyAt(index), isolate);
- PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
- PropertyCellType::kNoCell);
- dictionary = NameDictionary::Add(dictionary, key, value, d);
- break;
- }
- case ACCESSOR_CONSTANT: {
- Handle<Object> value(descs->GetCallbacksObject(i), isolate);
- PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
- PropertyCellType::kNoCell);
- dictionary = NameDictionary::Add(dictionary, key, value, d);
- break;
- }
- }
- }
-
- // Copy the next enumeration index from instance descriptor.
- dictionary->SetNextEnumerationIndex(real_size + 1);
-
- // From here on we cannot fail and we shouldn't GC anymore.
- DisallowHeapAllocation no_allocation;
-
- // Resize the object in the heap if necessary.
- int new_instance_size = new_map->instance_size();
- int instance_size_delta = map->instance_size() - new_instance_size;
- DCHECK(instance_size_delta >= 0);
-
- if (instance_size_delta > 0) {
- Heap* heap = isolate->heap();
- heap->CreateFillerObjectAt(object->address() + new_instance_size,
- instance_size_delta);
- heap->AdjustLiveBytes(*object, -instance_size_delta,
- Heap::CONCURRENT_TO_SWEEPER);
- }
-
- // We are storing the new map using release store after creating a filler for
- // the left-over space to avoid races with the sweeper thread.
- object->synchronized_set_map(*new_map);
-
- object->set_properties(*dictionary);
-
- // Ensure that in-object space of slow-mode object does not contain random
- // garbage.
- int inobject_properties = new_map->GetInObjectProperties();
- for (int i = 0; i < inobject_properties; i++) {
- FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
- object->RawFastPropertyAtPut(index, Smi::FromInt(0));
- }
-
- isolate->counters()->props_to_dictionary()->Increment();
-
-#ifdef DEBUG
- if (FLAG_trace_normalization) {
- OFStream os(stdout);
- os << "Object properties have been normalized:\n";
- object->Print(os);
- }
-#endif
-}
-
-
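+// The new MigrateToMap above still calls MigrateFastToSlow, so the
+// definition removed here is presumably relocated earlier in the file
+// rather than dropped.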
void JSObject::MigrateSlowToFast(Handle<JSObject> object,
int unused_property_fields,
const char* reason) {
@@ -5717,7 +5651,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
// Allocate the instance descriptor.
Handle<DescriptorArray> descriptors = DescriptorArray::Allocate(
- isolate, instance_descriptor_length);
+ isolate, instance_descriptor_length, 0, TENURED);
int number_of_allocated_fields =
number_of_fields + unused_property_fields - inobject_props;
@@ -5885,14 +5819,18 @@ Handle<SeededNumberDictionary> JSObject::NormalizeElements(
DCHECK(object->HasFastSmiOrObjectElements() ||
object->HasFastDoubleElements() ||
- object->HasFastArgumentsElements());
+ object->HasFastArgumentsElements() ||
+ object->HasFastStringWrapperElements());
Handle<SeededNumberDictionary> dictionary =
GetNormalizedElementDictionary(object, elements);
// Switch to using the dictionary as the backing storage for elements.
- ElementsKind target_kind =
- is_arguments ? SLOW_SLOPPY_ARGUMENTS_ELEMENTS : DICTIONARY_ELEMENTS;
+ ElementsKind target_kind = is_arguments
+ ? SLOW_SLOPPY_ARGUMENTS_ELEMENTS
+ : object->HasFastStringWrapperElements()
+ ? SLOW_STRING_WRAPPER_ELEMENTS
+ : DICTIONARY_ELEMENTS;
Handle<Map> new_map = JSObject::GetElementsTransitionMap(object, target_kind);
// Set the new map first to satisfy the elements type assert in set_elements().
JSObject::MigrateToMap(object, new_map);
@@ -5913,7 +5851,9 @@ Handle<SeededNumberDictionary> JSObject::NormalizeElements(
}
#endif
- DCHECK(object->HasDictionaryElements() || object->HasSlowArgumentsElements());
+ DCHECK(object->HasDictionaryElements() ||
+ object->HasSlowArgumentsElements() ||
+ object->HasSlowStringWrapperElements());
return dictionary;
}
@@ -6076,10 +6016,12 @@ void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
bool JSObject::HasHiddenProperties(Handle<JSObject> object) {
- Handle<Name> hidden = object->GetIsolate()->factory()->hidden_string();
- LookupIterator it(object, hidden, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Isolate* isolate = object->GetIsolate();
+ Handle<Symbol> hidden = isolate->factory()->hidden_properties_symbol();
+ LookupIterator it(object, hidden);
Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
- // Cannot get an exception since the hidden_string isn't accessible to JS.
+ // Cannot get an exception since the hidden_properties_symbol isn't exposed to
+ // JS.
DCHECK(maybe.IsJust());
return maybe.FromJust() != ABSENT;
}
@@ -6095,7 +6037,8 @@ Object* JSObject::GetHiddenPropertiesHashTable() {
DescriptorArray* descriptors = this->map()->instance_descriptors();
if (descriptors->number_of_descriptors() > 0) {
int sorted_index = descriptors->GetSortedKeyIndex(0);
- if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
+ if (descriptors->GetKey(sorted_index) ==
+ GetHeap()->hidden_properties_symbol() &&
sorted_index < map()->NumberOfOwnDescriptors()) {
DCHECK(descriptors->GetType(sorted_index) == DATA);
DCHECK(descriptors->GetDetails(sorted_index).representation().
@@ -6110,9 +6053,8 @@ Object* JSObject::GetHiddenPropertiesHashTable() {
return GetHeap()->undefined_value();
}
} else {
- Isolate* isolate = GetIsolate();
- LookupIterator it(handle(this), isolate->factory()->hidden_string(),
- LookupIterator::OWN_SKIP_INTERCEPTOR);
+ Handle<Symbol> hidden = GetIsolate()->factory()->hidden_properties_symbol();
+ LookupIterator it(handle(this), hidden);
// Access checks are always skipped for the hidden properties symbol anyway.
return *GetDataProperty(&it);
}
@@ -6141,13 +6083,14 @@ Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
Handle<Object> value) {
DCHECK(!object->IsJSGlobalProxy());
Isolate* isolate = object->GetIsolate();
- Handle<Name> name = isolate->factory()->hidden_string();
+ Handle<Symbol> name = isolate->factory()->hidden_properties_symbol();
SetOwnPropertyIgnoreAttributes(object, name, value, DONT_ENUM).Assert();
return object;
}
-Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it) {
+Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it,
+ ShouldThrow should_throw) {
Isolate* isolate = it->isolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
@@ -6160,7 +6103,7 @@ Maybe<bool> JSObject::DeletePropertyWithInterceptor(LookupIterator* it) {
Handle<JSObject> holder = it->GetHolder<JSObject>();
PropertyCallbackArguments args(isolate, interceptor->data(),
- *it->GetReceiver(), *holder);
+ *it->GetReceiver(), *holder, should_throw);
v8::Local<v8::Boolean> result;
if (it->IsElement()) {
uint32_t index = it->index();
@@ -6223,6 +6166,8 @@ void JSReceiver::DeleteNormalizedProperty(Handle<JSReceiver> object,
Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
LanguageMode language_mode) {
+ it->UpdateProtector();
+
Isolate* isolate = it->isolate();
if (it->state() == LookupIterator::JSPROXY) {
@@ -6233,16 +6178,15 @@ Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
if (it->GetReceiver()->IsJSProxy()) {
if (it->state() != LookupIterator::NOT_FOUND) {
DCHECK_EQ(LookupIterator::DATA, it->state());
- DCHECK(it->GetName()->IsPrivate());
+ DCHECK(it->name()->IsPrivate());
it->Delete();
}
return Just(true);
}
Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
- bool is_observed =
- receiver->map()->is_observed() &&
- (it->IsElement() || !isolate->IsInternallyUsedPropertyName(it->name()));
+ bool is_observed = receiver->map()->is_observed() &&
+ (it->IsElement() || !it->name()->IsPrivate());
Handle<Object> old_value = it->factory()->the_hole_value();
@@ -6258,7 +6202,10 @@ Maybe<bool> JSReceiver::DeleteProperty(LookupIterator* it,
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
return Just(false);
case LookupIterator::INTERCEPTOR: {
- Maybe<bool> result = JSObject::DeletePropertyWithInterceptor(it);
+ ShouldThrow should_throw =
+ is_sloppy(language_mode) ? DONT_THROW : THROW_ON_ERROR;
+ Maybe<bool> result =
+ JSObject::DeletePropertyWithInterceptor(it, should_throw);
// An exception was thrown in the interceptor. Propagate.
if (isolate->has_pending_exception()) return Nothing<bool>();
// Delete with interceptor succeeded. Return result.
@@ -6413,8 +6360,8 @@ MaybeHandle<Object> JSReceiver::DefineProperties(Isolate* isolate,
// 5. ReturnIfAbrupt(keys).
Handle<FixedArray> keys;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, keys,
- JSReceiver::GetKeys(props, JSReceiver::OWN_ONLY, ALL_PROPERTIES), Object);
+ isolate, keys, JSReceiver::GetKeys(props, OWN_ONLY, ALL_PROPERTIES),
+ Object);
// 6. Let descriptors be an empty List.
int capacity = keys->length();
std::vector<PropertyDescriptor> descriptors(capacity);
@@ -6599,8 +6546,8 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
? desc->value()
: Handle<Object>::cast(isolate->factory()->undefined_value()));
MaybeHandle<Object> result =
- JSObject::DefineOwnPropertyIgnoreAttributes(
- it, value, desc->ToAttributes(), JSObject::DONT_FORCE_FIELD);
+ JSObject::DefineOwnPropertyIgnoreAttributes(it, value,
+ desc->ToAttributes());
if (result.is_null()) return Nothing<bool>();
}
} else {
@@ -6792,8 +6739,8 @@ Maybe<bool> JSReceiver::ValidateAndApplyPropertyDescriptor(
? current->value()
: Handle<Object>::cast(
isolate->factory()->undefined_value()));
- MaybeHandle<Object> result = JSObject::DefineOwnPropertyIgnoreAttributes(
- it, value, attrs, JSObject::DONT_FORCE_FIELD);
+ MaybeHandle<Object> result =
+ JSObject::DefineOwnPropertyIgnoreAttributes(it, value, attrs);
if (result.is_null()) return Nothing<bool>();
} else {
DCHECK(desc_is_accessor_descriptor ||
@@ -6857,10 +6804,9 @@ Maybe<bool> JSObject::CreateDataProperty(LookupIterator* it,
return Just(false);
}
- RETURN_ON_EXCEPTION_VALUE(
- it->isolate(),
- DefineOwnPropertyIgnoreAttributes(it, value, NONE, DONT_FORCE_FIELD),
- Nothing<bool>());
+ RETURN_ON_EXCEPTION_VALUE(it->isolate(),
+ DefineOwnPropertyIgnoreAttributes(it, value, NONE),
+ Nothing<bool>());
return Just(true);
}
@@ -7081,7 +7027,7 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
ShouldThrow should_throw) {
STACK_CHECK(Nothing<bool>());
if (key->IsSymbol() && Handle<Symbol>::cast(key)->IsPrivate()) {
- return AddPrivateProperty(isolate, proxy, Handle<Symbol>::cast(key), desc,
+ return SetPrivateProperty(isolate, proxy, Handle<Symbol>::cast(key), desc,
should_throw);
}
Handle<String> trap_name = isolate->factory()->defineProperty_string();
@@ -7187,7 +7133,7 @@ Maybe<bool> JSProxy::DefineOwnProperty(Isolate* isolate, Handle<JSProxy> proxy,
// static
-Maybe<bool> JSProxy::AddPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
+Maybe<bool> JSProxy::SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
Handle<Symbol> private_name,
PropertyDescriptor* desc,
ShouldThrow should_throw) {
@@ -7207,7 +7153,7 @@ Maybe<bool> JSProxy::AddPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
if (it.IsFound()) {
DCHECK_EQ(LookupIterator::DATA, it.state());
- DCHECK_EQ(DONT_ENUM, it.property_details().attributes());
+ DCHECK_EQ(DONT_ENUM, it.property_attributes());
it.WriteDataValue(value);
return Just(true);
}
@@ -7277,9 +7223,9 @@ Maybe<bool> JSReceiver::GetOwnPropertyDescriptor(LookupIterator* it,
Handle<AccessorPair> accessors =
Handle<AccessorPair>::cast(it->GetAccessors());
// 6a. Set D.[[Get]] to the value of X's [[Get]] attribute.
- desc->set_get(handle(accessors->GetComponent(ACCESSOR_GETTER), isolate));
+ desc->set_get(AccessorPair::GetComponent(accessors, ACCESSOR_GETTER));
// 6b. Set D.[[Set]] to the value of X's [[Set]] attribute.
- desc->set_set(handle(accessors->GetComponent(ACCESSOR_SETTER), isolate));
+ desc->set_set(AccessorPair::GetComponent(accessors, ACCESSOR_SETTER));
}
// 7. Set D.[[Enumerable]] to the value of X's [[Enumerable]] attribute.
@@ -7412,9 +7358,7 @@ Maybe<bool> JSProxy::GetOwnPropertyDescriptor(Isolate* isolate,
bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
Object* object) {
- DCHECK(IsFastObjectElementsKind(kind) ||
- kind == DICTIONARY_ELEMENTS);
- if (IsFastObjectElementsKind(kind)) {
+ if (IsFastObjectElementsKind(kind) || kind == FAST_STRING_WRAPPER_ELEMENTS) {
int length = IsJSArray()
? Smi::cast(JSArray::cast(this)->length())->value()
: elements->length();
@@ -7423,6 +7367,7 @@ bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
if (!element->IsTheHole() && element == object) return true;
}
} else {
+ DCHECK(kind == DICTIONARY_ELEMENTS || kind == SLOW_STRING_WRAPPER_ELEMENTS);
Object* key =
SeededNumberDictionary::cast(elements)->SlowReverseLookup(object);
if (!key->IsUndefined()) return true;
@@ -7473,7 +7418,9 @@ bool JSObject::ReferencesObject(Object* obj) {
break;
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
- case DICTIONARY_ELEMENTS: {
+ case DICTIONARY_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS: {
FixedArray* elements = FixedArray::cast(this->elements());
if (ReferencesObjectFromElements(elements, kind, obj)) return true;
break;
@@ -7494,6 +7441,8 @@ bool JSObject::ReferencesObject(Object* obj) {
if (ReferencesObjectFromElements(arguments, kind, obj)) return true;
break;
}
+ case NO_ELEMENTS:
+ break;
}
// For functions check the context.
@@ -7875,7 +7824,8 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
Handle<SeededNumberDictionary> new_element_dictionary;
if (!object->HasFixedTypedArrayElements() &&
- !object->HasDictionaryElements()) {
+ !object->HasDictionaryElements() &&
+ !object->HasSlowStringWrapperElements()) {
int length =
object->IsJSArray()
? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
@@ -7902,7 +7852,8 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
if (transition != NULL) {
Handle<Map> transition_map(transition, isolate);
DCHECK(transition_map->has_dictionary_elements() ||
- transition_map->has_fixed_typed_array_elements());
+ transition_map->has_fixed_typed_array_elements() ||
+ transition_map->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS);
DCHECK(!transition_map->is_extensible());
JSObject::MigrateToMap(object, transition_map);
} else if (TransitionArray::CanHaveMoreTransitions(old_map)) {
@@ -7922,7 +7873,11 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
Map::Copy(handle(object->map()), "SlowCopyForPreventExtensions");
new_map->set_is_extensible(false);
if (!new_element_dictionary.is_null()) {
- new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ ElementsKind new_kind =
+ IsStringWrapperElementsKind(old_map->elements_kind())
+ ? SLOW_STRING_WRAPPER_ELEMENTS
+ : DICTIONARY_ELEMENTS;
+ new_map->set_elements_kind(new_kind);
}
JSObject::MigrateToMap(object, new_map);
@@ -7947,7 +7902,8 @@ Maybe<bool> JSObject::PreventExtensionsWithTransition(
return Just(true);
}
- DCHECK(object->map()->has_dictionary_elements());
+ DCHECK(object->map()->has_dictionary_elements() ||
+ object->map()->elements_kind() == SLOW_STRING_WRAPPER_ELEMENTS);
if (!new_element_dictionary.is_null()) {
object->set_elements(*new_element_dictionary);
}
@@ -7999,8 +7955,9 @@ Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
return Object::WrapForRead(isolate, raw_value, representation);
}
+enum class BoilerplateKind { kNormalBoilerplate, kApiBoilerplate };
-template<class ContextObject>
+template <class ContextObject, BoilerplateKind boilerplate_kind>
class JSObjectWalkVisitor {
public:
JSObjectWalkVisitor(ContextObject* site_context, bool copying,
@@ -8032,10 +7989,9 @@ class JSObjectWalkVisitor {
const JSObject::DeepCopyHints hints_;
};
-
-template <class ContextObject>
-MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
- Handle<JSObject> object) {
+template <class ContextObject, BoilerplateKind boilerplate_kind>
+MaybeHandle<JSObject> JSObjectWalkVisitor<
+ ContextObject, boilerplate_kind>::StructureWalk(Handle<JSObject> object) {
Isolate* isolate = this->isolate();
bool copying = this->copying();
bool shallow = hints_ == JSObject::kObjectIsShallow;
@@ -8055,6 +8011,26 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
Handle<JSObject> copy;
if (copying) {
+ if (boilerplate_kind == BoilerplateKind::kApiBoilerplate) {
+ if (object->IsJSFunction()) {
+#ifdef DEBUG
+ // Ensure that it is an Api function and template_instantiations_cache
+ // contains an entry for function's FunctionTemplateInfo.
+ JSFunction* function = JSFunction::cast(*object);
+ CHECK(function->shared()->IsApiFunction());
+ FunctionTemplateInfo* data = function->shared()->get_api_func_data();
+ auto serial_number = handle(Smi::cast(data->serial_number()), isolate);
+ CHECK(serial_number->value());
+ auto cache = isolate->template_instantiations_cache();
+ Object* element = cache->Lookup(serial_number);
+ CHECK_EQ(function, element);
+#endif
+ return object;
+ }
+ } else {
+ // JSFunction objects are not allowed to be in normal boilerplates at all.
+ DCHECK(!object->IsJSFunction());
+ }
Handle<AllocationSite> site_to_pass;
if (site_context()->ShouldCreateMemento(object)) {
site_to_pass = site_context()->current();
@@ -8115,7 +8091,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
// an array.
PropertyFilter filter = static_cast<PropertyFilter>(
ONLY_WRITABLE | ONLY_ENUMERABLE | ONLY_CONFIGURABLE);
- KeyAccumulator accumulator(isolate, filter);
+ KeyAccumulator accumulator(isolate, OWN_ONLY, filter);
accumulator.NextPrototype();
copy->CollectOwnPropertyNames(&accumulator, filter);
Handle<FixedArray> names = accumulator.GetKeys();
@@ -8139,12 +8115,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
}
// Deep copy own elements.
- // Pixel elements cannot be created using an object literal.
- DCHECK(!copy->HasFixedTypedArrayElements());
switch (kind) {
- case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS: {
Handle<FixedArray> elements(FixedArray::cast(copy->elements()));
if (elements->map() == isolate->heap()->fixed_cow_array_map()) {
@@ -8156,9 +8128,6 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
} else {
for (int i = 0; i < elements->length(); i++) {
Handle<Object> value(elements->get(i), isolate);
- DCHECK(value->IsSmi() ||
- value->IsTheHole() ||
- (IsFastObjectElementsKind(copy->GetElementsKind())));
if (value->IsJSObject()) {
Handle<JSObject> result;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -8199,16 +8168,25 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNIMPLEMENTED();
break;
-
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ UNREACHABLE();
+ break;
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case TYPE##_ELEMENTS: \
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
+ // Typed elements cannot be created using an object literal.
+ UNREACHABLE();
+ break;
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case NO_ELEMENTS:
// No contained objects, nothing to do.
break;
}
@@ -8221,8 +8199,9 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
MaybeHandle<JSObject> JSObject::DeepWalk(
Handle<JSObject> object,
AllocationSiteCreationContext* site_context) {
- JSObjectWalkVisitor<AllocationSiteCreationContext> v(site_context, false,
- kNoHints);
+ JSObjectWalkVisitor<AllocationSiteCreationContext,
+ BoilerplateKind::kNormalBoilerplate> v(site_context,
+ false, kNoHints);
MaybeHandle<JSObject> result = v.StructureWalk(object);
Handle<JSObject> for_assert;
DCHECK(!result.ToHandle(&for_assert) || for_assert.is_identical_to(object));
@@ -8234,13 +8213,35 @@ MaybeHandle<JSObject> JSObject::DeepCopy(
Handle<JSObject> object,
AllocationSiteUsageContext* site_context,
DeepCopyHints hints) {
- JSObjectWalkVisitor<AllocationSiteUsageContext> v(site_context, true, hints);
+ JSObjectWalkVisitor<AllocationSiteUsageContext,
+ BoilerplateKind::kNormalBoilerplate> v(site_context, true,
+ hints);
MaybeHandle<JSObject> copy = v.StructureWalk(object);
Handle<JSObject> for_assert;
DCHECK(!copy.ToHandle(&for_assert) || !for_assert.is_identical_to(object));
return copy;
}
+class DummyContextObject : public AllocationSiteContext {
+ public:
+ explicit DummyContextObject(Isolate* isolate)
+ : AllocationSiteContext(isolate) {}
+
+ bool ShouldCreateMemento(Handle<JSObject> object) { return false; }
+ Handle<AllocationSite> EnterNewScope() { return Handle<AllocationSite>(); }
+ void ExitScope(Handle<AllocationSite> site, Handle<JSObject> object) {}
+};
+
+MaybeHandle<JSObject> JSObject::DeepCopyApiBoilerplate(
+ Handle<JSObject> object) {
+ DummyContextObject dummy_context_object(object->GetIsolate());
+ JSObjectWalkVisitor<DummyContextObject, BoilerplateKind::kApiBoilerplate> v(
+ &dummy_context_object, true, kNoHints);
+ MaybeHandle<JSObject> copy = v.StructureWalk(object);
+ Handle<JSObject> for_assert;
+ DCHECK(!copy.ToHandle(&for_assert) || !for_assert.is_identical_to(object));
+ return copy;
+}
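+// Illustrative use, assuming |boilerplate| is a JSObject created from an API
+// object template:
+//   MaybeHandle<JSObject> copy = JSObject::DeepCopyApiBoilerplate(boilerplate);
+// DummyContextObject never creates allocation-site mementos, so the walk
+// copies structure without recording allocation feedback.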
// static
MaybeHandle<Object> JSReceiver::ToPrimitive(Handle<JSReceiver> receiver,
@@ -8314,12 +8315,6 @@ MaybeHandle<Object> JSReceiver::OrdinaryToPrimitive(
// TODO(cbruni/jkummerow): Consider moving this into elements.cc.
bool HasEnumerableElements(JSObject* object) {
- if (object->IsJSValue()) {
- Object* value = JSValue::cast(object)->value();
- if (value->IsString()) {
- if (String::cast(value)->length() > 0) return true;
- }
- }
switch (object->GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
@@ -8371,6 +8366,14 @@ bool HasEnumerableElements(JSObject* object) {
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
// We're approximating non-empty arguments objects here.
return true;
+ case FAST_STRING_WRAPPER_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
+ if (String::cast(JSValue::cast(object)->value())->length() > 0) {
+ return true;
+ }
+ return object->elements()->length() > 0;
+ case NO_ELEMENTS:
+ return false;
}
UNREACHABLE();
return true;
@@ -8444,22 +8447,27 @@ static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
static Handle<FixedArray> ReduceFixedArrayTo(
Handle<FixedArray> array, int length) {
- DCHECK(array->length() >= length);
+ DCHECK_LE(length, array->length());
if (array->length() == length) return array;
-
- Handle<FixedArray> new_array =
- array->GetIsolate()->factory()->NewFixedArray(length);
- for (int i = 0; i < length; ++i) new_array->set(i, array->get(i));
- return new_array;
+ return array->GetIsolate()->factory()->CopyFixedArrayUpTo(array, length);
}
+bool Map::OnlyHasSimpleProperties() {
+ // Wrapped string elements aren't explicitly stored in the elements backing
+ // store, but are loaded indirectly from the underlying string.
+ return !IsStringWrapperElementsKind(elements_kind()) &&
+ !is_access_check_needed() && !has_named_interceptor() &&
+ !has_indexed_interceptor() && !has_hidden_prototype() &&
+ !is_dictionary_map();
+}
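+// Example: a String wrapper map (its indexed properties live on the
+// underlying string) returns false here, so GetFastEnumPropertyKeys below
+// will not cache its enum length.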
namespace {
Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
- Handle<JSObject> object,
- bool cache_enum_length) {
+ Handle<JSObject> object) {
Handle<Map> map(object->map());
+ bool cache_enum_length = map->OnlyHasSimpleProperties();
+
Handle<DescriptorArray> descs =
Handle<DescriptorArray>(map->instance_descriptors(), isolate);
int own_property_count = map->EnumLength();
@@ -8532,12 +8540,10 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
} // namespace
-
-Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object,
- bool cache_enum_length) {
+Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object) {
Isolate* isolate = object->GetIsolate();
if (object->HasFastProperties()) {
- return GetFastEnumPropertyKeys(isolate, object, cache_enum_length);
+ return GetFastEnumPropertyKeys(isolate, object);
} else if (object->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(object->global_dictionary());
int length = dictionary->NumberOfEnumElements();
@@ -8583,7 +8589,7 @@ static Maybe<bool> GetKeysFromInterceptor(Isolate* isolate,
return Just(true);
}
PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
- *object);
+ *object, Object::DONT_THROW);
v8::Local<v8::Object> result;
if (!interceptor->enumerator()->IsUndefined()) {
Callback enum_fun = v8::ToCData<Callback>(interceptor->enumerator());
@@ -8603,8 +8609,8 @@ static Maybe<bool> GetKeysFromInterceptor(Isolate* isolate,
accumulator->AddElementKeysFromInterceptor(
Handle<JSObject>::cast(v8::Utils::OpenHandle(*result)));
} else {
- accumulator->AddKeys(
- Handle<JSObject>::cast(v8::Utils::OpenHandle(*result)));
+ accumulator->AddKeys(Handle<JSObject>::cast(v8::Utils::OpenHandle(*result)),
+ DO_NOT_CONVERT);
}
return Just(true);
}
@@ -8616,7 +8622,7 @@ static Maybe<bool> GetKeysFromJSObject(Isolate* isolate,
Handle<JSReceiver> receiver,
Handle<JSObject> object,
PropertyFilter* filter,
- JSReceiver::KeyCollectionType type,
+ KeyCollectionType type,
KeyAccumulator* accumulator) {
accumulator->NextPrototype();
// Check access rights if required.
@@ -8624,11 +8630,11 @@ static Maybe<bool> GetKeysFromJSObject(Isolate* isolate,
!isolate->MayAccess(handle(isolate->context()), object)) {
// The cross-origin spec says that [[Enumerate]] shall return an empty
// iterator when it doesn't have access...
- if (type == JSReceiver::INCLUDE_PROTOS) {
+ if (type == INCLUDE_PROTOS) {
return Just(false);
}
// ...whereas [[OwnPropertyKeys]] shall return whitelisted properties.
- DCHECK(type == JSReceiver::OWN_ONLY);
+ DCHECK_EQ(OWN_ONLY, type);
*filter = static_cast<PropertyFilter>(*filter | ONLY_ALL_CAN_READ);
}
@@ -8641,33 +8647,8 @@ static Maybe<bool> GetKeysFromJSObject(Isolate* isolate,
MAYBE_RETURN(success, Nothing<bool>());
if (*filter == ENUMERABLE_STRINGS) {
- // We can cache the computed property keys if access checks are
- // not needed and no interceptors are involved.
- //
- // We do not use the cache if the object has elements and
- // therefore it does not make sense to cache the property names
- // for arguments objects. Arguments objects will always have
- // elements.
- // Wrapped strings have elements, but don't have an elements
- // array or dictionary. So the fast inline test for whether to
- // use the cache says yes, so we should not create a cache.
- Handle<JSFunction> arguments_function(
- JSFunction::cast(isolate->sloppy_arguments_map()->GetConstructor()));
- bool has_hidden_prototype = false;
- Object* prototype = object->map()->prototype();
- if (prototype->IsJSObject()) {
- has_hidden_prototype =
- JSObject::cast(prototype)->map()->is_hidden_prototype();
- }
- bool cache_enum_length =
- ((object->map()->GetConstructor() != *arguments_function) &&
- !object->IsJSValue() && !object->IsAccessCheckNeeded() &&
- !object->HasNamedInterceptor() && !object->HasIndexedInterceptor() &&
- !has_hidden_prototype);
- // Compute the property keys and cache them if possible.
- Handle<FixedArray> enum_keys =
- JSObject::GetEnumPropertyKeys(object, cache_enum_length);
- accumulator->AddKeys(enum_keys);
+ Handle<FixedArray> enum_keys = JSObject::GetEnumPropertyKeys(object);
+ accumulator->AddKeys(enum_keys, DO_NOT_CONVERT);
} else {
object->CollectOwnPropertyNames(accumulator, *filter);
}
@@ -8686,28 +8667,22 @@ static Maybe<bool> GetKeysFromJSObject(Isolate* isolate,
static Maybe<bool> GetKeys_Internal(Isolate* isolate,
Handle<JSReceiver> receiver,
Handle<JSReceiver> object,
- JSReceiver::KeyCollectionType type,
+ KeyCollectionType type,
PropertyFilter filter,
KeyAccumulator* accumulator) {
- PrototypeIterator::WhereToEnd end = type == JSReceiver::OWN_ONLY
+ PrototypeIterator::WhereToEnd end = type == OWN_ONLY
? PrototypeIterator::END_AT_NON_HIDDEN
: PrototypeIterator::END_AT_NULL;
for (PrototypeIterator iter(isolate, object,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(end); iter.Advance()) {
+ PrototypeIterator::START_AT_RECEIVER, end);
+ !iter.IsAtEnd(); iter.Advance()) {
Handle<JSReceiver> current =
PrototypeIterator::GetCurrent<JSReceiver>(iter);
Maybe<bool> result = Just(false); // Dummy initialization.
if (current->IsJSProxy()) {
- if (type == JSReceiver::OWN_ONLY) {
- result = JSProxy::OwnPropertyKeys(isolate, receiver,
- Handle<JSProxy>::cast(current),
- filter, accumulator);
- } else {
- DCHECK(type == JSReceiver::INCLUDE_PROTOS);
- result = JSProxy::Enumerate(
- isolate, receiver, Handle<JSProxy>::cast(current), accumulator);
- }
+ result = JSProxy::OwnPropertyKeys(isolate, receiver,
+ Handle<JSProxy>::cast(current), filter,
+ accumulator);
} else {
DCHECK(current->IsJSObject());
result = GetKeysFromJSObject(isolate, receiver,
@@ -8721,54 +8696,6 @@ static Maybe<bool> GetKeys_Internal(Isolate* isolate,
}
-// ES6 9.5.11
-// Returns false in case of exception.
-// static
-Maybe<bool> JSProxy::Enumerate(Isolate* isolate, Handle<JSReceiver> receiver,
- Handle<JSProxy> proxy,
- KeyAccumulator* accumulator) {
- STACK_CHECK(Nothing<bool>());
- // 1. Let handler be the value of the [[ProxyHandler]] internal slot of O.
- Handle<Object> handler(proxy->handler(), isolate);
- // 2. If handler is null, throw a TypeError exception.
- // 3. Assert: Type(handler) is Object.
- if (proxy->IsRevoked()) {
- isolate->Throw(*isolate->factory()->NewTypeError(
- MessageTemplate::kProxyRevoked,
- isolate->factory()->enumerate_string()));
- return Nothing<bool>();
- }
- // 4. Let target be the value of the [[ProxyTarget]] internal slot of O.
- Handle<JSReceiver> target(proxy->target(), isolate);
- // 5. Let trap be ? GetMethod(handler, "enumerate").
- Handle<Object> trap;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, trap, Object::GetMethod(Handle<JSReceiver>::cast(handler),
- isolate->factory()->enumerate_string()),
- Nothing<bool>());
- // 6. If trap is undefined, then
- if (trap->IsUndefined()) {
- // 6a. Return target.[[Enumerate]]().
- return GetKeys_Internal(isolate, receiver, target, INCLUDE_PROTOS,
- ENUMERABLE_STRINGS, accumulator);
- }
- // The "proxy_enumerate" helper calls the trap (steps 7 - 9), which returns
- // a generator; it then iterates over that generator until it's exhausted
- // and returns an array containing the generated values.
- Handle<Object> trap_result_array;
- Handle<Object> args[] = {trap, handler, target};
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, trap_result_array,
- Execution::Call(isolate, isolate->proxy_enumerate(),
- isolate->factory()->undefined_value(), arraysize(args),
- args),
- Nothing<bool>());
- accumulator->NextPrototype();
- accumulator->AddKeysFromProxy(Handle<JSObject>::cast(trap_result_array));
- return Just(true);
-}
-
-
// ES6 9.5.12
// Returns |true| on success, |nothing| in case of exception.
// static
@@ -8866,10 +8793,15 @@ Maybe<bool> JSProxy::OwnPropertyKeys(Isolate* isolate,
const int kPresent = 1;
const int kGone = 0;
IdentityMap<int> unchecked_result_keys(isolate->heap(), &set_zone);
- int unchecked_result_keys_size = trap_result->length();
+ int unchecked_result_keys_size = 0;
for (int i = 0; i < trap_result->length(); ++i) {
DCHECK(trap_result->get(i)->IsUniqueName());
- unchecked_result_keys.Set(trap_result->get(i), kPresent);
+ Object* key = trap_result->get(i);
+ int* entry = unchecked_result_keys.Get(key);
+ if (*entry != kPresent) {
+ *entry = kPresent;
+ unchecked_result_keys_size++;
+ }
}
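
The counting change above makes the bookkeeping robust against an ownKeys trap that returns the same name more than once: each unique key is marked kPresent exactly once, so unchecked_result_keys_size counts distinct keys rather than trap_result->length(). A minimal standalone sketch of the same counting scheme, with std::unordered_set standing in for V8's IdentityMap (names here are illustrative, not V8 API):

    #include <cassert>
    #include <string>
    #include <unordered_set>
    #include <vector>

    // Count distinct keys in a trap result, mirroring the kPresent marking.
    int CountUniqueKeys(const std::vector<std::string>& trap_result) {
      std::unordered_set<std::string> seen;
      int unique = 0;
      for (const std::string& key : trap_result) {
        if (seen.insert(key).second) unique++;  // first occurrence only
      }
      return unique;
    }

    int main() {
      // A misbehaving ownKeys trap may return the same name twice.
      assert(CountUniqueKeys({"a", "b", "a"}) == 2);
    }
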
// 17. Repeat, for each key that is an element of targetNonconfigurableKeys:
for (int i = 0; i < nonconfigurable_keys_length; ++i) {
@@ -8924,7 +8856,7 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
GetKeysConversion keys_conversion) {
USE(ContainsOnlyValidKeys);
Isolate* isolate = object->GetIsolate();
- KeyAccumulator accumulator(isolate, filter);
+ KeyAccumulator accumulator(isolate, type, filter);
MAYBE_RETURN(
GetKeys_Internal(isolate, object, object, type, filter, &accumulator),
MaybeHandle<FixedArray>());
@@ -8933,6 +8865,64 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
return keys;
}
+MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
+ Handle<JSReceiver> object,
+ PropertyFilter filter,
+ bool get_entries) {
+ PropertyFilter key_filter =
+ static_cast<PropertyFilter>(filter & ~ONLY_ENUMERABLE);
+ KeyAccumulator accumulator(isolate, OWN_ONLY, key_filter);
+ MAYBE_RETURN(GetKeys_Internal(isolate, object, object, OWN_ONLY, key_filter,
+ &accumulator),
+ MaybeHandle<FixedArray>());
+ Handle<FixedArray> keys = accumulator.GetKeys(CONVERT_TO_STRING);
+ DCHECK(ContainsOnlyValidKeys(keys));
+
+ Handle<FixedArray> values_or_entries =
+ isolate->factory()->NewFixedArray(keys->length());
+ int length = 0;
+
+ for (int i = 0; i < keys->length(); ++i) {
+ Handle<Name> key = Handle<Name>::cast(handle(keys->get(i), isolate));
+
+ if (filter & ONLY_ENUMERABLE) {
+ PropertyDescriptor descriptor;
+ Maybe<bool> did_get_descriptor = JSReceiver::GetOwnPropertyDescriptor(
+ isolate, object, key, &descriptor);
+ MAYBE_RETURN(did_get_descriptor, MaybeHandle<FixedArray>());
+ if (!did_get_descriptor.FromJust() || !descriptor.enumerable()) continue;
+ }
+
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, JSReceiver::GetPropertyOrElement(object, key),
+ MaybeHandle<FixedArray>());
+
+ if (get_entries) {
+ Handle<FixedArray> entry_storage =
+ isolate->factory()->NewUninitializedFixedArray(2);
+ entry_storage->set(0, *key);
+ entry_storage->set(1, *value);
+ value = isolate->factory()->NewJSArrayWithElements(entry_storage,
+ FAST_ELEMENTS, 2);
+ }
+
+ values_or_entries->set(length, *value);
+ length++;
+ }
+ if (length < values_or_entries->length()) values_or_entries->Shrink(length);
+ return values_or_entries;
+}
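
GetOwnValuesOrEntries above drives Object.values and Object.entries: it gathers own keys with the enumerability bit stripped from the filter, re-checks enumerability per key through GetOwnPropertyDescriptor, reads each value, optionally packs a [key, value] pair, and trims the result to the entries actually produced. A rough sketch of that loop's shape over plain standard-library types (illustrative only, not V8 API):

    #include <cassert>
    #include <functional>
    #include <string>
    #include <utility>
    #include <vector>

    using Entry = std::pair<std::string, int>;

    // Gather entries, re-checking enumerability per key, then trim the result.
    std::vector<Entry> GetOwnEntries(
        const std::vector<Entry>& properties,
        const std::function<bool(const std::string&)>& is_enumerable) {
      std::vector<Entry> entries;
      entries.reserve(properties.size());       // like NewFixedArray(length)
      for (const Entry& p : properties) {
        if (!is_enumerable(p.first)) continue;  // per-key descriptor check
        entries.push_back(p);
      }
      return entries;                           // implicit Shrink(length)
    }

    int main() {
      std::vector<Entry> props = {{"a", 1}, {"_hidden", 2}, {"b", 3}};
      auto enumerable = [](const std::string& k) { return k[0] != '_'; };
      std::vector<Entry> e = GetOwnEntries(props, enumerable);
      assert(e.size() == 2 && e[1].first == "b");
    }
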
+
+MaybeHandle<FixedArray> JSReceiver::GetOwnValues(Handle<JSReceiver> object,
+ PropertyFilter filter) {
+ return GetOwnValuesOrEntries(object->GetIsolate(), object, filter, false);
+}
+
+MaybeHandle<FixedArray> JSReceiver::GetOwnEntries(Handle<JSReceiver> object,
+ PropertyFilter filter) {
+ return GetOwnValuesOrEntries(object->GetIsolate(), object, filter, true);
+}
bool Map::DictionaryElementsInPrototypeChainOnly() {
if (IsDictionaryElementsKind(elements_kind())) {
@@ -9000,7 +8990,7 @@ MaybeHandle<Object> JSObject::DefineAccessor(LookupIterator* it,
Handle<Object> old_value = isolate->factory()->the_hole_value();
bool is_observed = object->map()->is_observed() &&
- !isolate->IsInternallyUsedPropertyName(it->GetName());
+ (it->IsElement() || !it->name()->IsPrivate());
bool preexists = false;
if (is_observed) {
CHECK(GetPropertyAttributes(it).IsJust());
@@ -9011,8 +9001,10 @@ MaybeHandle<Object> JSObject::DefineAccessor(LookupIterator* it,
}
}
- DCHECK(getter->IsCallable() || getter->IsUndefined() || getter->IsNull());
- DCHECK(setter->IsCallable() || setter->IsUndefined() || setter->IsNull());
+ DCHECK(getter->IsCallable() || getter->IsUndefined() || getter->IsNull() ||
+ getter->IsFunctionTemplateInfo());
+ DCHECK(setter->IsCallable() || setter->IsUndefined() || setter->IsNull() ||
+         setter->IsFunctionTemplateInfo());
// At least one of the accessors needs to be a new value.
DCHECK(!getter->IsNull() || !setter->IsNull());
if (!getter->IsNull()) {
@@ -9111,9 +9103,8 @@ MaybeHandle<Object> JSObject::GetAccessor(Handle<JSObject> object,
case LookupIterator::ACCESSOR: {
Handle<Object> maybe_pair = it.GetAccessors();
if (maybe_pair->IsAccessorPair()) {
- return handle(
- AccessorPair::cast(*maybe_pair)->GetComponent(component),
- isolate);
+ return AccessorPair::GetComponent(
+ Handle<AccessorPair>::cast(maybe_pair), component);
}
}
}
@@ -9182,8 +9173,6 @@ Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size) {
if (!map->is_dictionary_map()) {
new_bit_field3 = IsUnstable::update(new_bit_field3, false);
}
- new_bit_field3 =
- ConstructionCounter::update(new_bit_field3, kNoSlackTracking);
result->set_bit_field3(new_bit_field3);
return result;
}
@@ -9240,7 +9229,7 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
new_map = Map::CopyNormalized(fast_map, mode);
if (use_cache) {
cache->Set(fast_map, new_map);
- isolate->counters()->normalized_maps()->Increment();
+ isolate->counters()->maps_normalized()->Increment();
}
#if TRACE_MAPS
if (FLAG_trace_maps) {
@@ -9270,6 +9259,7 @@ Handle<Map> Map::CopyNormalized(Handle<Map> map,
result->set_dictionary_map(true);
result->set_migration_target(false);
+ result->set_construction_counter(kNoSlackTracking);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) result->DictionaryMapVerify();
@@ -9453,7 +9443,7 @@ Handle<Map> Map::CopyReplaceDescriptors(
for (int i = 0; i < length; i++) {
descriptors->SetRepresentation(i, Representation::Tagged());
if (descriptors->GetDetails(i).type() == DATA) {
- descriptors->SetValue(i, HeapType::Any());
+ descriptors->SetValue(i, FieldType::Any());
}
}
result->InitializeDescriptors(*descriptors,
@@ -9760,23 +9750,37 @@ Handle<Map> Map::CopyForPreventExtensions(Handle<Map> map,
transition_marker, reason, SPECIAL_TRANSITION);
new_map->set_is_extensible(false);
if (!IsFixedTypedArrayElementsKind(map->elements_kind())) {
- new_map->set_elements_kind(DICTIONARY_ELEMENTS);
+ ElementsKind new_kind = IsStringWrapperElementsKind(map->elements_kind())
+ ? SLOW_STRING_WRAPPER_ELEMENTS
+ : DICTIONARY_ELEMENTS;
+ new_map->set_elements_kind(new_kind);
}
return new_map;
}
+FieldType* DescriptorArray::GetFieldType(int descriptor_number) {
+ DCHECK(GetDetails(descriptor_number).location() == kField);
+ Object* value = GetValue(descriptor_number);
+ if (value->IsWeakCell()) {
+ if (WeakCell::cast(value)->cleared()) return FieldType::None();
+ value = WeakCell::cast(value)->value();
+ }
+ return FieldType::cast(value);
+}
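
GetFieldType above tolerates a cleared WeakCell by degrading to FieldType::None(), the most conservative answer once the referenced type has been collected. As a loose analogy only, std::weak_ptr behaves the same way (nothing here is V8 API):

    #include <iostream>
    #include <memory>
    #include <string>

    // Analogy: a cleared weak reference degrades to the bottom type "None".
    std::string GetFieldType(const std::weak_ptr<std::string>& cell) {
      if (std::shared_ptr<std::string> type = cell.lock()) return *type;
      return "None";  // cell cleared, report the most conservative type
    }

    int main() {
      auto type = std::make_shared<std::string>("Smi");
      std::weak_ptr<std::string> cell = type;
      std::cout << GetFieldType(cell) << "\n";  // Smi
      type.reset();                             // "GC" clears the cell
      std::cout << GetFieldType(cell) << "\n";  // None
    }
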
-bool DescriptorArray::CanHoldValue(int descriptor, Object* value) {
- PropertyDetails details = GetDetails(descriptor);
+namespace {
+
+bool CanHoldValue(DescriptorArray* descriptors, int descriptor, Object* value) {
+ PropertyDetails details = descriptors->GetDetails(descriptor);
switch (details.type()) {
case DATA:
return value->FitsRepresentation(details.representation()) &&
- GetFieldType(descriptor)->NowContains(value);
+ descriptors->GetFieldType(descriptor)->NowContains(value);
case DATA_CONSTANT:
- DCHECK(GetConstant(descriptor) != value ||
+ DCHECK(descriptors->GetConstant(descriptor) != value ||
value->FitsRepresentation(details.representation()));
- return GetConstant(descriptor) == value;
+ return descriptors->GetConstant(descriptor) == value;
case ACCESSOR:
case ACCESSOR_CONSTANT:
@@ -9787,28 +9791,29 @@ bool DescriptorArray::CanHoldValue(int descriptor, Object* value) {
return false;
}
-
-// static
-Handle<Map> Map::PrepareForDataProperty(Handle<Map> map, int descriptor,
- Handle<Object> value) {
- // Dictionaries can store any property value.
- if (map->is_dictionary_map()) return map;
-
- // Migrate to the newest map before storing the property.
- map = Update(map);
-
- Handle<DescriptorArray> descriptors(map->instance_descriptors());
-
- if (descriptors->CanHoldValue(descriptor, *value)) return map;
+Handle<Map> UpdateDescriptorForValue(Handle<Map> map, int descriptor,
+ Handle<Object> value) {
+ if (CanHoldValue(map->instance_descriptors(), descriptor, *value)) return map;
Isolate* isolate = map->GetIsolate();
PropertyAttributes attributes =
- descriptors->GetDetails(descriptor).attributes();
+ map->instance_descriptors()->GetDetails(descriptor).attributes();
Representation representation = value->OptimalRepresentation();
- Handle<HeapType> type = value->OptimalType(isolate, representation);
+ Handle<FieldType> type = value->OptimalType(isolate, representation);
+
+ return Map::ReconfigureProperty(map, descriptor, kData, attributes,
+ representation, type, FORCE_FIELD);
+}
+
+} // namespace
- return ReconfigureProperty(map, descriptor, kData, attributes, representation,
- type, FORCE_FIELD);
+// static
+Handle<Map> Map::PrepareForDataProperty(Handle<Map> map, int descriptor,
+ Handle<Object> value) {
+  // Dictionaries can store any property value, so this path must never be
+  // reached for dictionary maps.
+ DCHECK(!map->is_dictionary_map());
+ // Update to the newest map before storing the property.
+ return UpdateDescriptorForValue(Update(map), descriptor, value);
}
@@ -9816,8 +9821,8 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes,
StoreFromKeyed store_mode) {
- // Dictionary maps can always have additional data properties.
- if (map->is_dictionary_map()) return map;
+ DCHECK(name->IsUniqueName());
+ DCHECK(!map->is_dictionary_map());
// Migrate to the newest map before storing the property.
map = Update(map);
@@ -9832,7 +9837,7 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
->GetDetails(descriptor)
.attributes());
- return Map::PrepareForDataProperty(transition, descriptor, value);
+ return UpdateDescriptorForValue(transition, descriptor, value);
}
TransitionFlag flag = INSERT_TRANSITION;
@@ -9842,7 +9847,7 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
} else if (!map->TooManyFastProperties(store_mode)) {
Isolate* isolate = name->GetIsolate();
Representation representation = value->OptimalRepresentation();
- Handle<HeapType> type = value->OptimalType(isolate, representation);
+ Handle<FieldType> type = value->OptimalType(isolate, representation);
maybe_map =
Map::CopyWithField(map, name, type, attributes, representation, flag);
}
@@ -9887,7 +9892,7 @@ Handle<Map> Map::ReconfigureExistingProperty(Handle<Map> map, int descriptor,
Isolate* isolate = map->GetIsolate();
Handle<Map> new_map = ReconfigureProperty(
map, descriptor, kind, attributes, Representation::None(),
- HeapType::None(isolate), FORCE_FIELD);
+ FieldType::None(isolate), FORCE_FIELD);
return new_map;
}
@@ -9897,6 +9902,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
AccessorComponent component,
Handle<Object> accessor,
PropertyAttributes attributes) {
+ DCHECK(name->IsUniqueName());
Isolate* isolate = name->GetIsolate();
// Dictionary maps can always have additional data properties.
@@ -9935,7 +9941,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
Handle<AccessorPair> pair;
DescriptorArray* old_descriptors = map->instance_descriptors();
- int descriptor = old_descriptors->SearchWithCache(*name, *map);
+ int descriptor = old_descriptors->SearchWithCache(isolate, *name, *map);
if (descriptor != DescriptorArray::kNotFound) {
if (descriptor != map->LastAdded()) {
return Map::Normalize(map, mode, "AccessorsOverwritingNonLast");
@@ -9981,9 +9987,6 @@ Handle<Map> Map::CopyAddDescriptor(Handle<Map> map,
TransitionFlag flag) {
Handle<DescriptorArray> descriptors(map->instance_descriptors());
- // Ensure the key is unique.
- descriptor->KeyToUniqueName();
-
  // Share descriptors only if map owns descriptors and is not an initial map.
if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
!map->GetBackPointer()->IsUndefined() &&
@@ -10012,11 +10015,9 @@ Handle<Map> Map::CopyInsertDescriptor(Handle<Map> map,
TransitionFlag flag) {
Handle<DescriptorArray> old_descriptors(map->instance_descriptors());
- // Ensure the key is unique.
- descriptor->KeyToUniqueName();
-
// We replace the key if it is already present.
- int index = old_descriptors->SearchWithCache(*descriptor->GetKey(), *map);
+ int index = old_descriptors->SearchWithCache(map->GetIsolate(),
+ *descriptor->GetKey(), *map);
if (index != DescriptorArray::kNotFound) {
return CopyReplaceDescriptor(map, old_descriptors, descriptor, index, flag);
}
@@ -10099,9 +10100,6 @@ Handle<Map> Map::CopyReplaceDescriptor(Handle<Map> map,
Descriptor* descriptor,
int insertion_index,
TransitionFlag flag) {
- // Ensure the key is unique.
- descriptor->KeyToUniqueName();
-
Handle<Name> key = descriptor->GetKey();
DCHECK(*key == descriptors->GetKey(insertion_index));
@@ -10796,17 +10794,18 @@ Handle<ArrayList> ArrayList::EnsureSpace(Handle<ArrayList> array, int length) {
return array;
}
-
Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
int number_of_descriptors,
- int slack) {
+ int slack,
+ PretenureFlag pretenure) {
DCHECK(0 <= number_of_descriptors);
Factory* factory = isolate->factory();
// Do not use DescriptorArray::cast on incomplete object.
int size = number_of_descriptors + slack;
if (size == 0) return factory->empty_descriptor_array();
// Allocate the array of keys.
- Handle<FixedArray> result = factory->NewFixedArray(LengthFor(size), TENURED);
+ Handle<FixedArray> result =
+ factory->NewFixedArray(LengthFor(size), pretenure);
result->set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
result->set(kEnumCacheIndex, Smi::FromInt(0));
@@ -10921,13 +10920,21 @@ Handle<AccessorPair> AccessorPair::Copy(Handle<AccessorPair> pair) {
return copy;
}
-
-Object* AccessorPair::GetComponent(AccessorComponent component) {
- Object* accessor = get(component);
- return accessor->IsTheHole() ? GetHeap()->undefined_value() : accessor;
+Handle<Object> AccessorPair::GetComponent(Handle<AccessorPair> accessor_pair,
+ AccessorComponent component) {
+ Object* accessor = accessor_pair->get(component);
+ if (accessor->IsFunctionTemplateInfo()) {
+ return ApiNatives::InstantiateFunction(
+ handle(FunctionTemplateInfo::cast(accessor)))
+ .ToHandleChecked();
+ }
+ Isolate* isolate = accessor_pair->GetIsolate();
+ if (accessor->IsTheHole()) {
+ return isolate->factory()->undefined_value();
+ }
+ return handle(accessor, isolate);
}
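
The handle-based GetComponent above adds lazy accessor instantiation: a stored FunctionTemplateInfo is materialized into a real function on first access, and the hole still reads as undefined. A toy sketch of that dispatch using std::variant (hypothetical Template/Function types, not V8's):

    #include <cassert>
    #include <string>
    #include <variant>

    struct Function { std::string name; };
    struct Template { std::string name; };  // stands in for FunctionTemplateInfo
    using Accessor = std::variant<std::monostate, Template, Function>;

    // Materialize a template into a function on first access.
    Function GetComponent(const Accessor& accessor) {
      if (const Template* t = std::get_if<Template>(&accessor)) {
        return Function{t->name};  // lazy instantiation from the template
      }
      if (const Function* f = std::get_if<Function>(&accessor)) return *f;
      return Function{"undefined"};  // the hole reads as undefined
    }

    int main() {
      assert(GetComponent(Template{"getter"}).name == "getter");
      assert(GetComponent(std::monostate{}).name == "undefined");
    }
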
-
Handle<DeoptimizationInputData> DeoptimizationInputData::New(
Isolate* isolate, int deopt_entry_count, PretenureFlag pretenure) {
return Handle<DeoptimizationInputData>::cast(
@@ -10963,23 +10970,31 @@ Handle<LiteralsArray> LiteralsArray::New(Isolate* isolate,
return casted_literals;
}
-
-int HandlerTable::LookupRange(int pc_offset, int* stack_depth_out,
+int HandlerTable::LookupRange(int pc_offset, int* data_out,
CatchPrediction* prediction_out) {
- int innermost_handler = -1, innermost_start = -1;
+ int innermost_handler = -1;
+#ifdef DEBUG
+ // Assuming that ranges are well nested, we don't need to track the innermost
+ // offsets. This is just to verify that the table is actually well nested.
+ int innermost_start = std::numeric_limits<int>::min();
+ int innermost_end = std::numeric_limits<int>::max();
+#endif
for (int i = 0; i < length(); i += kRangeEntrySize) {
int start_offset = Smi::cast(get(i + kRangeStartIndex))->value();
int end_offset = Smi::cast(get(i + kRangeEndIndex))->value();
int handler_field = Smi::cast(get(i + kRangeHandlerIndex))->value();
int handler_offset = HandlerOffsetField::decode(handler_field);
CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
- int stack_depth = Smi::cast(get(i + kRangeDepthIndex))->value();
+ int handler_data = Smi::cast(get(i + kRangeDataIndex))->value();
if (pc_offset > start_offset && pc_offset <= end_offset) {
- DCHECK_NE(start_offset, innermost_start);
- if (start_offset < innermost_start) continue;
+ DCHECK_GE(start_offset, innermost_start);
+ DCHECK_LT(end_offset, innermost_end);
innermost_handler = handler_offset;
+#ifdef DEBUG
innermost_start = start_offset;
- *stack_depth_out = stack_depth;
+ innermost_end = end_offset;
+#endif
+ if (data_out) *data_out = handler_data;
if (prediction_out) *prediction_out = prediction;
}
}
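
Since handler ranges are well nested and matching ranges are encountered outermost first, taking the last range containing pc_offset yields the innermost handler; the DEBUG-only innermost_start/innermost_end merely assert that nesting invariant. A standalone sketch of why the last match wins (illustrative types):

    #include <cassert>
    #include <vector>

    struct Range { int start, end, handler; };  // handler covers (start, end]

    // With well-nested ranges listed outer to inner, every later match is
    // strictly inside the previous one, so the last match is the innermost.
    int LookupInnermostHandler(const std::vector<Range>& table, int pc_offset) {
      int innermost = -1;
      for (const Range& r : table) {
        if (pc_offset > r.start && pc_offset <= r.end) innermost = r.handler;
      }
      return innermost;
    }

    int main() {
      std::vector<Range> table = {{0, 100, 1}, {10, 50, 2}, {20, 30, 3}};
      assert(LookupInnermostHandler(table, 25) == 3);  // deepest try block
      assert(LookupInnermostHandler(table, 60) == 1);  // only the outer range
    }
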
@@ -12257,7 +12272,8 @@ bool CheckEquivalent(Map* first, Map* second) {
first->bit_field() == second->bit_field() &&
first->is_extensible() == second->is_extensible() &&
first->is_strong() == second->is_strong() &&
- first->is_hidden_prototype() == second->is_hidden_prototype();
+ first->new_target_is_base() == second->new_target_is_base() &&
+ first->has_hidden_prototype() == second->has_hidden_prototype();
}
} // namespace
@@ -12564,23 +12580,27 @@ static void ShrinkInstanceSize(Map* map, void* data) {
map->SetInObjectProperties(map->GetInObjectProperties() - slack);
map->set_unused_property_fields(map->unused_property_fields() - slack);
map->set_instance_size(map->instance_size() - slack * kPointerSize);
+ map->set_construction_counter(Map::kNoSlackTracking);
// Visitor id might depend on the instance size, recalculate it.
map->set_visitor_id(Heap::GetStaticVisitorIdForMap(map));
}
+static void StopSlackTracking(Map* map, void* data) {
+ map->set_construction_counter(Map::kNoSlackTracking);
+}
void Map::CompleteInobjectSlackTracking() {
// Has to be an initial map.
DCHECK(GetBackPointer()->IsUndefined());
- set_construction_counter(kNoSlackTracking);
-
int slack = unused_property_fields();
TransitionArray::TraverseTransitionTree(this, &GetMinInobjectSlack, &slack);
if (slack != 0) {
// Resize the initial map and all maps in its transition tree.
TransitionArray::TraverseTransitionTree(this, &ShrinkInstanceSize, &slack);
+ } else {
+ TransitionArray::TraverseTransitionTree(this, &StopSlackTracking, nullptr);
}
}
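
With the else branch above, a transition tree that has no slack still gets every map stamped kNoSlackTracking via the new StopSlackTracking visitor, not just the root. The traversal itself is a plain visitor-over-tree walk; a minimal sketch with a hypothetical Node type (not TransitionArray):

    #include <cassert>
    #include <vector>

    struct Node {
      int counter = 1;              // stands in for the construction counter
      std::vector<Node*> children;  // transition tree edges
    };

    // Depth-first visit, mirroring TransitionArray::TraverseTransitionTree.
    void Traverse(Node* node, void (*visit)(Node*, void*), void* data) {
      visit(node, data);
      for (Node* child : node->children) Traverse(child, visit, data);
    }

    void StopSlackTracking(Node* node, void*) { node->counter = 0; }

    int main() {
      Node leaf, root;
      root.children.push_back(&leaf);
      Traverse(&root, &StopSlackTracking, nullptr);
      assert(root.counter == 0 && leaf.counter == 0);
    }
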
@@ -12820,10 +12840,22 @@ Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
// static
void Map::SetPrototype(Handle<Map> map, Handle<Object> prototype,
PrototypeOptimizationMode proto_mode) {
+ bool is_hidden = false;
if (prototype->IsJSObject()) {
Handle<JSObject> prototype_jsobj = Handle<JSObject>::cast(prototype);
JSObject::OptimizeAsPrototype(prototype_jsobj, proto_mode);
+
+ Object* maybe_constructor = prototype_jsobj->map()->GetConstructor();
+ if (maybe_constructor->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(maybe_constructor);
+ Object* data = constructor->shared()->function_data();
+ is_hidden = (data->IsFunctionTemplateInfo() &&
+ FunctionTemplateInfo::cast(data)->hidden_prototype()) ||
+ prototype->IsJSGlobalObject();
+ }
}
+ map->set_has_hidden_prototype(is_hidden);
+
WriteBarrierMode wb_mode =
prototype->IsNull() ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
map->set_prototype(*prototype, wb_mode);
@@ -12923,7 +12955,8 @@ void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
void JSFunction::SetPrototype(Handle<JSFunction> function,
Handle<Object> value) {
- DCHECK(function->IsConstructor());
+ DCHECK(function->IsConstructor() ||
+ IsGeneratorFunction(function->shared()->kind()));
Handle<Object> construct_prototype = value;
// If the value is not a JSReceiver, store the value in the map's
@@ -13011,7 +13044,6 @@ bool CanSubclassHaveInobjectProperties(InstanceType instance_type) {
case JS_MAP_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
- case JS_ITERATOR_RESULT_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_TYPE:
@@ -13155,6 +13187,7 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
JSFunction::SetInitialMap(function, map, prototype);
map->SetConstructor(*constructor);
+ map->set_construction_counter(Map::kNoSlackTracking);
map->StartInobjectSlackTracking();
return map;
}
@@ -13268,6 +13301,22 @@ Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
return JSFunction::GetName(function);
}
+void JSFunction::SetName(Handle<JSFunction> function, Handle<Name> name,
+ Handle<String> prefix) {
+ Isolate* isolate = function->GetIsolate();
+ Handle<String> function_name = Name::ToFunctionName(name).ToHandleChecked();
+ if (prefix->length() > 0) {
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendString(prefix);
+ builder.AppendCharacter(' ');
+ builder.AppendString(function_name);
+ function_name = builder.Finish().ToHandleChecked();
+ }
+ JSObject::DefinePropertyOrElementIgnoreAttributes(
+ function, isolate->factory()->name_string(), function_name,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY))
+ .ToHandleChecked();
+}
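
JSFunction::SetName above installs a read-only, non-enumerable "name" property and, when a prefix such as "get" or "set" is supplied, joins it to the converted name with a single space. The string assembly reduces to (illustrative sketch, not V8 API):

    #include <cassert>
    #include <string>

    // Prefix + ' ' + name when a prefix is given, as in the builder above.
    std::string MakeFunctionName(const std::string& name,
                                 const std::string& prefix) {
      return prefix.empty() ? name : prefix + ' ' + name;
    }

    int main() {
      assert(MakeFunctionName("foo", "get") == "get foo");
      assert(MakeFunctionName("foo", "") == "foo");
    }
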
namespace {
@@ -13339,7 +13388,7 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
}
if (shared_info->name_should_print_as_anonymous()) {
builder.AppendCString("anonymous");
- } else {
+ } else if (!shared_info->is_anonymous_expression()) {
builder.AppendString(handle(String::cast(shared_info->name()), isolate));
}
}
@@ -13774,8 +13823,9 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_function_token_position(lit->function_token_position());
shared_info->set_start_position(lit->start_position());
shared_info->set_end_position(lit->end_position());
- shared_info->set_is_expression(lit->is_expression());
- shared_info->set_is_anonymous(lit->is_anonymous());
+ shared_info->set_is_declaration(lit->is_declaration());
+ shared_info->set_is_named_expression(lit->is_named_expression());
+ shared_info->set_is_anonymous_expression(lit->is_anonymous_expression());
shared_info->set_inferred_name(*lit->inferred_name());
shared_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
shared_info->set_allows_lazy_compilation_without_context(
@@ -14064,12 +14114,12 @@ void Code::CopyFrom(const CodeDesc& desc) {
Assembler::FlushICache(GetIsolate(), instruction_start(), instruction_size());
}
-
-// Locate the source position which is closest to the address in the code. This
-// is using the source position information embedded in the relocation info.
+// Locate the source position which is closest to the code offset, using the
+// source position information embedded in the relocation info.
// The position returned is relative to the beginning of the script where the
// source for this function is found.
-int Code::SourcePosition(Address pc) {
+int Code::SourcePosition(int code_offset) {
+ Address pc = instruction_start() + code_offset;
int distance = kMaxInt;
int position = RelocInfo::kNoPosition; // Initially no position found.
// Run through all the relocation info to find the best matching source
@@ -14101,10 +14151,10 @@ int Code::SourcePosition(Address pc) {
// Same as Code::SourcePosition above except it only looks for statement
// positions.
-int Code::SourceStatementPosition(Address pc) {
+int Code::SourceStatementPosition(int code_offset) {
// First find the position as close as possible using all position
// information.
- int position = SourcePosition(pc);
+ int position = SourcePosition(code_offset);
// Now find the closest statement position before the position.
int statement_position = 0;
RelocIterator it(this, RelocInfo::kPositionMask);
@@ -14305,6 +14355,15 @@ void Code::ClearInlineCaches(Code::Kind* kind) {
}
}
+int AbstractCode::SourcePosition(int offset) {
+ return IsBytecodeArray() ? GetBytecodeArray()->SourcePosition(offset)
+ : GetCode()->SourcePosition(offset);
+}
+
+int AbstractCode::SourceStatementPosition(int offset) {
+ return IsBytecodeArray() ? GetBytecodeArray()->SourceStatementPosition(offset)
+ : GetCode()->SourceStatementPosition(offset);
+}
void SharedFunctionInfo::ClearTypeFeedbackInfo() {
feedback_vector()->ClearSlots(this);
@@ -14526,7 +14585,9 @@ bool Code::CanDeoptAt(Address pc) {
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address address = code_start_address + deopt_data->Pc(i)->value();
- if (address == pc) return true;
+ if (address == pc && deopt_data->AstId(i) != BailoutId::None()) {
+ return true;
+ }
}
return false;
}
@@ -14640,11 +14701,6 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
break;
}
- case Translation::JS_FRAME_FUNCTION: {
- os << "{function}";
- break;
- }
-
case Translation::COMPILED_STUB_FRAME: {
Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
os << "{kind=" << stub_kind << "}";
@@ -14786,10 +14842,10 @@ void HandlerTable::HandlerTableRangePrint(std::ostream& os) {
int handler_field = Smi::cast(get(i + kRangeHandlerIndex))->value();
int handler_offset = HandlerOffsetField::decode(handler_field);
CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
- int depth = Smi::cast(get(i + kRangeDepthIndex))->value();
+ int data = Smi::cast(get(i + kRangeDataIndex))->value();
os << " (" << std::setw(4) << pc_start << "," << std::setw(4) << pc_end
<< ") -> " << std::setw(4) << handler_offset
- << " (prediction=" << prediction << ", depth=" << depth << ")\n";
+ << " (prediction=" << prediction << ", data=" << data << ")\n";
}
}
@@ -14997,6 +15053,34 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
}
#endif // ENABLE_DISASSEMBLER
+int BytecodeArray::SourcePosition(int offset) {
+ int last_position = 0;
+ for (interpreter::SourcePositionTableIterator iterator(this);
+ !iterator.done() && iterator.bytecode_offset() <= offset;
+ iterator.Advance()) {
+ last_position = iterator.source_position();
+ }
+ return last_position;
+}
+
+int BytecodeArray::SourceStatementPosition(int offset) {
+ // First find the position as close as possible using all position
+ // information.
+ int position = SourcePosition(offset);
+ // Now find the closest statement position before the position.
+ int statement_position = 0;
+ interpreter::SourcePositionTableIterator iterator(this);
+ while (!iterator.done()) {
+ if (iterator.is_statement()) {
+ int p = iterator.source_position();
+ if (statement_position < p && p <= position) {
+ statement_position = p;
+ }
+ }
+ iterator.Advance();
+ }
+ return statement_position;
+}
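
Both bytecode lookups above scan the source position table in bytecode-offset order: SourcePosition keeps the last entry at or before the offset, and SourceStatementPosition then snaps that result back to the closest preceding statement entry. A compact sketch over a plain entry list (illustrative types, not the interpreter's iterator):

    #include <cassert>
    #include <vector>

    struct Entry { int bytecode_offset, source_position; bool is_statement; };

    int SourcePosition(const std::vector<Entry>& table, int offset) {
      int last = 0;
      for (const Entry& e : table) {
        if (e.bytecode_offset > offset) break;  // table is offset-sorted
        last = e.source_position;
      }
      return last;
    }

    int SourceStatementPosition(const std::vector<Entry>& table, int offset) {
      int position = SourcePosition(table, offset);
      int statement = 0;  // closest statement at or before `position`
      for (const Entry& e : table) {
        if (e.is_statement && statement < e.source_position &&
            e.source_position <= position) {
          statement = e.source_position;
        }
      }
      return statement;
    }

    int main() {
      std::vector<Entry> t = {{0, 10, true}, {4, 13, false}, {8, 20, true}};
      assert(SourcePosition(t, 5) == 13);
      assert(SourceStatementPosition(t, 5) == 10);  // expression -> statement
    }
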
void BytecodeArray::Disassemble(std::ostream& os) {
os << "Parameter count " << parameter_count() << "\n";
@@ -15005,12 +15089,23 @@ void BytecodeArray::Disassemble(std::ostream& os) {
const uint8_t* first_bytecode_address = GetFirstBytecodeAddress();
int bytecode_size = 0;
+
+ interpreter::SourcePositionTableIterator source_positions(this);
+
for (int i = 0; i < this->length(); i += bytecode_size) {
const uint8_t* bytecode_start = &first_bytecode_address[i];
interpreter::Bytecode bytecode =
interpreter::Bytecodes::FromByte(bytecode_start[0]);
bytecode_size = interpreter::Bytecodes::Size(bytecode);
+ if (!source_positions.done() && i == source_positions.bytecode_offset()) {
+ os << std::setw(5) << source_positions.source_position();
+ os << (source_positions.is_statement() ? " S> " : " E> ");
+ source_positions.Advance();
+ } else {
+ os << " ";
+ }
+
SNPrintF(buf, "%p", bytecode_start);
os << buf.start() << " : ";
interpreter::Bytecodes::Decode(os, bytecode_start, parameter_count());
@@ -15033,13 +15128,29 @@ void BytecodeArray::Disassemble(std::ostream& os) {
SNPrintF(buf, " (%p)", bytecode_start + offset);
os << buf.start();
}
- os << "\n";
+
+ os << std::endl;
}
- os << "Constant pool (size = " << constant_pool()->length() << ")\n";
- constant_pool()->Print();
+ if (constant_pool()->length() > 0) {
+ os << "Constant pool (size = " << constant_pool()->length() << ")\n";
+ constant_pool()->Print();
+ }
+
+#ifdef ENABLE_DISASSEMBLER
+ if (handler_table()->length() > 0) {
+ os << "Handler Table (size = " << handler_table()->Size() << ")\n";
+ HandlerTable::cast(handler_table())->HandlerTableRangePrint(os);
+ }
+#endif
}
+void BytecodeArray::CopyBytecodesTo(BytecodeArray* to) {
+ BytecodeArray* from = this;
+ DCHECK_EQ(from->length(), to->length());
+ CopyBytes(to->GetFirstBytecodeAddress(), from->GetFirstBytecodeAddress(),
+ from->length());
+}
// static
void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
@@ -15527,33 +15638,35 @@ Maybe<bool> JSProxy::SetPrototype(Handle<JSProxy> proxy, Handle<Object> value,
Execution::Call(isolate, trap, handler, arraysize(argv), argv),
Nothing<bool>());
bool bool_trap_result = trap_result->BooleanValue();
- // 9. Let extensibleTarget be ? IsExtensible(target).
+ // 9. If booleanTrapResult is false, return false.
+ if (!bool_trap_result) {
+ RETURN_FAILURE(
+ isolate, should_throw,
+ NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
+ }
+ // 10. Let extensibleTarget be ? IsExtensible(target).
Maybe<bool> is_extensible = JSReceiver::IsExtensible(target);
if (is_extensible.IsNothing()) return Nothing<bool>();
- // 10. If extensibleTarget is true, return booleanTrapResult.
+ // 11. If extensibleTarget is true, return true.
if (is_extensible.FromJust()) {
if (bool_trap_result) return Just(true);
RETURN_FAILURE(
isolate, should_throw,
NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
}
- // 11. Let targetProto be ? target.[[GetPrototypeOf]]().
+ // 12. Let targetProto be ? target.[[GetPrototypeOf]]().
Handle<Object> target_proto;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, target_proto,
- Object::GetPrototype(isolate, target),
+ JSReceiver::GetPrototype(isolate, target),
Nothing<bool>());
- // 12. If booleanTrapResult is true and SameValue(V, targetProto) is false,
- // throw a TypeError exception.
+ // 13. If SameValue(V, targetProto) is false, throw a TypeError exception.
if (bool_trap_result && !value->SameValue(*target_proto)) {
isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kProxySetPrototypeOfNonExtensible));
return Nothing<bool>();
}
- // 13. Return booleanTrapResult.
- if (bool_trap_result) return Just(true);
- RETURN_FAILURE(
- isolate, should_throw,
- NewTypeError(MessageTemplate::kProxyTrapReturnedFalsish, trap_name));
+ // 14. Return true.
+ return Just(true);
}
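
The renumbered steps above move the falsish-trap check ahead of the extensibility test, so a trap veto now fails immediately instead of being re-checked twice further down. The resulting decision order, sketched with plain booleans (illustrative, not V8 types):

    #include <cassert>

    // Decision order after the fix: a falsish trap result fails first
    // (step 9); an extensible target then succeeds (step 11); otherwise the
    // new prototype must match the target's (steps 12-14).
    bool SetPrototypeAllowed(bool trap_result, bool target_extensible,
                             bool same_as_target_proto) {
      if (!trap_result) return false;      // step 9
      if (target_extensible) return true;  // step 11
      return same_as_target_proto;         // steps 12-14
    }

    int main() {
      assert(!SetPrototypeAllowed(false, true, true));   // trap veto wins
      assert(SetPrototypeAllowed(true, true, false));    // extensible target
      assert(!SetPrototypeAllowed(true, false, false));  // frozen mismatch
    }
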
@@ -15562,11 +15675,21 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
ShouldThrow should_throw) {
Isolate* isolate = object->GetIsolate();
+  // Setting the prototype of an Array instance invalidates the species
+  // protector because it could change the constructor property of the
+  // instance, which could change the @@species constructor.
+ if (object->IsJSArray() && isolate->IsArraySpeciesLookupChainIntact()) {
+ isolate->CountUsage(
+ v8::Isolate::UseCounterFeature::kArrayInstanceProtoModified);
+ isolate->InvalidateArraySpeciesProtector();
+ }
+
const bool observed = from_javascript && object->map()->is_observed();
Handle<Object> old_value;
if (observed) {
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, old_value,
- Object::GetPrototype(isolate, object),
+ JSReceiver::GetPrototype(isolate, object),
Nothing<bool>());
}
@@ -15577,7 +15700,7 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
if (result.FromJust() && observed) {
Handle<Object> new_value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, new_value,
- Object::GetPrototype(isolate, object),
+ JSReceiver::GetPrototype(isolate, object),
Nothing<bool>());
if (!new_value->SameValue(*old_value)) {
RETURN_ON_EXCEPTION_VALUE(
@@ -15633,8 +15756,10 @@ Maybe<bool> JSObject::SetPrototypeUnobserved(Handle<JSObject> object,
if (from_javascript) {
// Find the first object in the chain whose prototype object is not
// hidden.
- PrototypeIterator iter(isolate, real_receiver);
- while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
+ PrototypeIterator iter(isolate, real_receiver,
+ PrototypeIterator::START_AT_PROTOTYPE,
+ PrototypeIterator::END_AT_NON_HIDDEN);
+ while (!iter.IsAtEnd()) {
// Casting to JSObject is fine because hidden prototypes are never
// JSProxies.
real_receiver = PrototypeIterator::GetCurrent<JSObject>(iter);
@@ -15663,13 +15788,15 @@ Maybe<bool> JSObject::SetPrototypeUnobserved(Handle<JSObject> object,
// Before we can set the prototype we need to be sure prototype cycles are
// prevented. It is sufficient to validate that the receiver is not in the
// new prototype chain.
- for (PrototypeIterator iter(isolate, *value,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(); iter.Advance()) {
- if (iter.GetCurrent<JSReceiver>() == *object) {
- // Cycle detected.
- RETURN_FAILURE(isolate, should_throw,
- NewTypeError(MessageTemplate::kCyclicProto));
+ if (value->IsJSReceiver()) {
+ for (PrototypeIterator iter(isolate, JSReceiver::cast(*value),
+ PrototypeIterator::START_AT_RECEIVER);
+ !iter.IsAtEnd(); iter.Advance()) {
+ if (iter.GetCurrent<JSReceiver>() == *object) {
+ // Cycle detected.
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kCyclicProto));
+ }
}
}
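
The cycle check above now walks the chain only when the new prototype is a JSReceiver, since a primitive (or null) prototype can never close a cycle back to the receiver. The check itself is a linked-list scan; a minimal sketch with a hypothetical Proto struct (not V8 types):

    #include <cassert>

    struct Proto { const Proto* prototype; };  // next link in the chain

    // True if `object` already appears in candidate's prototype chain,
    // starting at the candidate itself (START_AT_RECEIVER).
    bool WouldCreateCycle(const Proto* candidate, const Proto* object) {
      for (const Proto* p = candidate; p != nullptr; p = p->prototype) {
        if (p == object) return true;
      }
      return false;
    }

    int main() {
      Proto a{nullptr}, b{&a};
      assert(!WouldCreateCycle(&a, &b));  // setting b.__proto__ = a is fine
      assert(WouldCreateCycle(&b, &a));   // a.__proto__ = b would loop
    }
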
@@ -15767,6 +15894,9 @@ static ElementsKind BestFittingFastElementsKind(JSObject* object) {
if (object->HasSloppyArgumentsElements()) {
return FAST_SLOPPY_ARGUMENTS_ELEMENTS;
}
+ if (object->HasStringWrapperElements()) {
+ return FAST_STRING_WRAPPER_ELEMENTS;
+ }
DCHECK(object->HasDictionaryElements());
SeededNumberDictionary* dictionary = object->element_dictionary();
ElementsKind kind = FAST_HOLEY_SMI_ELEMENTS;
@@ -15850,6 +15980,8 @@ Maybe<bool> JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
if (IsSloppyArgumentsElements(kind)) {
elements = FixedArrayBase::cast(FixedArray::cast(elements)->get(1));
dictionary_kind = SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
+ } else if (IsStringWrapperElementsKind(kind)) {
+ dictionary_kind = SLOW_STRING_WRAPPER_ELEMENTS;
}
if (attributes != NONE) {
@@ -16037,7 +16169,8 @@ void JSObject::UpdateAllocationSite(Handle<JSObject> object,
{
DisallowHeapAllocation no_allocation;
- AllocationMemento* memento = heap->FindAllocationMemento(*object);
+ AllocationMemento* memento =
+ heap->FindAllocationMemento<Heap::kForRuntime>(*object);
if (memento == NULL) return;
// Walk through to the Allocation Site
@@ -16144,13 +16277,16 @@ int JSObject::GetFastElementsUsage() {
// Fall through.
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS:
return FastHoleyElementsUsage(this, FixedArray::cast(store));
case FAST_HOLEY_DOUBLE_ELEMENTS:
if (elements()->length() == 0) return 0;
return FastHoleyElementsUsage(this, FixedDoubleArray::cast(store));
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS:
case DICTIONARY_ELEMENTS:
+ case NO_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case TYPE##_ELEMENTS: \
@@ -16230,7 +16366,8 @@ MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(LookupIterator* it,
Handle<JSObject> holder = it->GetHolder<JSObject>();
v8::Local<v8::Value> result;
PropertyCallbackArguments args(isolate, interceptor->data(),
- *it->GetReceiver(), *holder);
+ *it->GetReceiver(), *holder,
+ Object::DONT_THROW);
if (it->IsElement()) {
uint32_t index = it->index();
@@ -16421,7 +16558,7 @@ void JSObject::CollectOwnPropertyNames(KeyAccumulator* keys,
}
Name* key = descs->GetKey(i);
if (key->FilterKey(filter)) continue;
- keys->AddKey(key);
+ keys->AddKey(key, DO_NOT_CONVERT);
}
} else if (IsJSGlobalObject()) {
GlobalDictionary::CollectKeysTo(handle(global_dictionary()), keys, filter);
@@ -16450,21 +16587,6 @@ void JSObject::CollectOwnElementKeys(Handle<JSObject> object,
KeyAccumulator* keys,
PropertyFilter filter) {
if (filter & SKIP_STRINGS) return;
- uint32_t string_keys = 0;
-
- // If this is a String wrapper, add the string indices first,
- // as they're guaranteed to precede the elements in numerical order
- // and ascending order is required by ECMA-262, 6th, 9.1.12.
- if (object->IsJSValue()) {
- Object* val = JSValue::cast(*object)->value();
- if (val->IsString() && (filter & ONLY_ALL_CAN_READ) == 0) {
- String* str = String::cast(val);
- string_keys = str->length();
- for (uint32_t i = 0; i < string_keys; i++) {
- keys->AddKey(i);
- }
- }
- }
ElementsAccessor* accessor = object->GetElementsAccessor();
accessor->CollectElementIndices(object, keys, kMaxUInt32, filter, 0);
}
@@ -16493,7 +16615,8 @@ int JSObject::GetOwnElementKeys(FixedArray* storage, PropertyFilter filter) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_STRING_WRAPPER_ELEMENTS: {
int length = IsJSArray() ?
Smi::cast(JSArray::cast(this)->length())->value() :
FixedArray::cast(elements())->length();
@@ -16542,7 +16665,8 @@ int JSObject::GetOwnElementKeys(FixedArray* storage, PropertyFilter filter) {
break;
}
- case DICTIONARY_ELEMENTS: {
+ case DICTIONARY_ELEMENTS:
+ case SLOW_STRING_WRAPPER_ELEMENTS: {
if (storage != NULL) {
element_dictionary()->CopyKeysTo(storage, counter, filter,
SeededNumberDictionary::SORTED);
@@ -16592,6 +16716,8 @@ int JSObject::GetOwnElementKeys(FixedArray* storage, PropertyFilter filter) {
}
break;
}
+ case NO_ELEMENTS:
+ break;
}
DCHECK(!storage || storage->length() == counter);
@@ -16604,8 +16730,8 @@ MaybeHandle<String> Object::ObjectProtoToString(Isolate* isolate,
if (object->IsUndefined()) return isolate->factory()->undefined_to_string();
if (object->IsNull()) return isolate->factory()->null_to_string();
- Handle<JSReceiver> receiver;
- CHECK(Object::ToObject(isolate, object).ToHandle(&receiver));
+ Handle<JSReceiver> receiver =
+ Object::ToObject(isolate, object).ToHandleChecked();
Handle<String> tag;
if (FLAG_harmony_tostring) {
@@ -17550,6 +17676,11 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
return handle(Smi::FromInt(-1), isolate);
}
+ if (object->HasStringWrapperElements()) {
+ int len = String::cast(Handle<JSValue>::cast(object)->value())->length();
+ return handle(Smi::FromInt(len), isolate);
+ }
+
if (object->HasDictionaryElements()) {
// Convert to fast elements containing only the existing properties.
// Ordering is irrelevant, since we are going to sort anyway.
@@ -18489,7 +18620,7 @@ void Dictionary<Derived, Shape, Key>::CollectKeysTo(
for (int i = 0; i < array_size; i++) {
int index = Smi::cast(array->get(i))->value();
- keys->AddKey(dictionary->KeyAt(index));
+ keys->AddKey(dictionary->KeyAt(index), DO_NOT_CONVERT);
}
}
@@ -19040,35 +19171,31 @@ bool JSWeakCollection::Delete(Handle<JSWeakCollection> weak_collection,
return was_present;
}
-
-// Check if there is a break point at this code position.
-bool DebugInfo::HasBreakPoint(int code_position) {
- // Get the break point info object for this code position.
- Object* break_point_info = GetBreakPointInfo(code_position);
+// Check if there is a break point at this code offset.
+bool DebugInfo::HasBreakPoint(int code_offset) {
+ // Get the break point info object for this code offset.
+ Object* break_point_info = GetBreakPointInfo(code_offset);
// If there is no break point info object or no break points in the break
- // point info object there is no break point at this code position.
+  // point info object, there is no break point at this code offset.
if (break_point_info->IsUndefined()) return false;
return BreakPointInfo::cast(break_point_info)->GetBreakPointCount() > 0;
}
-
-// Get the break point info object for this code position.
-Object* DebugInfo::GetBreakPointInfo(int code_position) {
- // Find the index of the break point info object for this code position.
- int index = GetBreakPointInfoIndex(code_position);
+// Get the break point info object for this code offset.
+Object* DebugInfo::GetBreakPointInfo(int code_offset) {
+ // Find the index of the break point info object for this code offset.
+ int index = GetBreakPointInfoIndex(code_offset);
// Return the break point info object if any.
if (index == kNoBreakPointInfo) return GetHeap()->undefined_value();
return BreakPointInfo::cast(break_points()->get(index));
}
-
-// Clear a break point at the specified code position.
-void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
- int code_position,
+// Clear a break point at the specified code offset.
+void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
Handle<Object> break_point_object) {
- Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position),
+ Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_offset),
debug_info->GetIsolate());
if (break_point_info->IsUndefined()) return;
BreakPointInfo::ClearBreakPoint(
@@ -19076,14 +19203,11 @@ void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
break_point_object);
}
-
-void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
- int code_position,
- int source_position,
- int statement_position,
+void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
+ int source_position, int statement_position,
Handle<Object> break_point_object) {
Isolate* isolate = debug_info->GetIsolate();
- Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position),
+ Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_offset),
isolate);
if (!break_point_info->IsUndefined()) {
BreakPointInfo::SetBreakPoint(
@@ -19092,7 +19216,7 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
return;
}
- // Adding a new break point for a code position which did not have any
+ // Adding a new break point for a code offset which did not have any
// break points before. Try to find a free slot.
int index = kNoBreakPointInfo;
for (int i = 0; i < debug_info->break_points()->length(); i++) {
@@ -19121,7 +19245,7 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
// Allocate new BreakPointInfo object and set the break point.
Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
isolate->factory()->NewStruct(BREAK_POINT_INFO_TYPE));
- new_break_point_info->set_code_position(code_position);
+ new_break_point_info->set_code_offset(code_offset);
new_break_point_info->set_source_position(source_position);
new_break_point_info->set_statement_position(statement_position);
new_break_point_info->set_break_point_objects(
@@ -19130,10 +19254,9 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
debug_info->break_points()->set(index, *new_break_point_info);
}
-
-// Get the break point objects for a code position.
-Handle<Object> DebugInfo::GetBreakPointObjects(int code_position) {
- Object* break_point_info = GetBreakPointInfo(code_position);
+// Get the break point objects for a code offset.
+Handle<Object> DebugInfo::GetBreakPointObjects(int code_offset) {
+ Object* break_point_info = GetBreakPointInfo(code_offset);
if (break_point_info->IsUndefined()) {
return GetIsolate()->factory()->undefined_value();
}
@@ -19179,13 +19302,13 @@ Handle<Object> DebugInfo::FindBreakPointInfo(
// Find the index of the break point info object for the specified code
// position.
-int DebugInfo::GetBreakPointInfoIndex(int code_position) {
+int DebugInfo::GetBreakPointInfoIndex(int code_offset) {
if (break_points()->IsUndefined()) return kNoBreakPointInfo;
for (int i = 0; i < break_points()->length(); i++) {
if (!break_points()->get(i)->IsUndefined()) {
BreakPointInfo* break_point_info =
BreakPointInfo::cast(break_points()->get(i));
- if (break_point_info->code_position() == code_position) {
+ if (break_point_info->code_offset() == code_offset) {
return i;
}
}
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index c55c5c9780..a3d6a72882 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -7,7 +7,6 @@
#include <iosfwd>
-#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/bailout-reason.h"
#include "src/base/bits.h"
@@ -127,6 +126,7 @@
// - Cell
// - PropertyCell
// - Code
+// - AbstractCode, a wrapper around Code or BytecodeArray
// - Map
// - Oddball
// - Foreign
@@ -134,7 +134,6 @@
// - Struct
// - Box
// - AccessorInfo
-// - ExecutableAccessorInfo
// - AccessorPair
// - AccessCheckInfo
// - InterceptorInfo
@@ -384,9 +383,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
\
V(FILLER_TYPE) \
\
- V(DECLARED_ACCESSOR_DESCRIPTOR_TYPE) \
- V(DECLARED_ACCESSOR_INFO_TYPE) \
- V(EXECUTABLE_ACCESSOR_INFO_TYPE) \
+ V(ACCESSOR_INFO_TYPE) \
V(ACCESSOR_PAIR_TYPE) \
V(ACCESS_CHECK_INFO_TYPE) \
V(INTERCEPTOR_INFO_TYPE) \
@@ -431,7 +428,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_MAP_TYPE) \
V(JS_SET_ITERATOR_TYPE) \
V(JS_MAP_ITERATOR_TYPE) \
- V(JS_ITERATOR_RESULT_TYPE) \
V(JS_WEAK_MAP_TYPE) \
V(JS_WEAK_SET_TYPE) \
V(JS_PROMISE_TYPE) \
@@ -505,8 +501,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
// manually.
#define STRUCT_LIST(V) \
V(BOX, Box, box) \
- V(EXECUTABLE_ACCESSOR_INFO, ExecutableAccessorInfo, \
- executable_accessor_info) \
+ V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
@@ -676,9 +671,7 @@ enum InstanceType {
FILLER_TYPE, // LAST_DATA_TYPE
// Structs.
- DECLARED_ACCESSOR_DESCRIPTOR_TYPE,
- DECLARED_ACCESSOR_INFO_TYPE,
- EXECUTABLE_ACCESSOR_INFO_TYPE,
+ ACCESSOR_INFO_TYPE,
ACCESSOR_PAIR_TYPE,
ACCESS_CHECK_INFO_TYPE,
INTERCEPTOR_INFO_TYPE,
@@ -728,7 +721,6 @@ enum InstanceType {
JS_MAP_TYPE,
JS_SET_ITERATOR_TYPE,
JS_MAP_ITERATOR_TYPE,
- JS_ITERATOR_RESULT_TYPE,
JS_WEAK_MAP_TYPE,
JS_WEAK_SET_TYPE,
JS_PROMISE_TYPE,
@@ -846,6 +838,7 @@ class KeyAccumulator;
class LayoutDescriptor;
class LiteralsArray;
class LookupIterator;
+class FieldType;
class ObjectHashTable;
class ObjectVisitor;
class PropertyCell;
@@ -858,11 +851,6 @@ class TypeFeedbackVector;
class WeakCell;
class TransitionArray;
-// We cannot just say "class HeapType;" if it is created from a template... =8-?
-template<class> class TypeImpl;
-struct HeapTypeConfig;
-typedef TypeImpl<HeapTypeConfig> HeapType;
-
// A template-ized version of the IsXXX functions.
template <class C> inline bool Is(Object* obj);
@@ -879,10 +867,11 @@ template <class C> inline bool Is(Object* obj);
#define DECLARE_PRINTER(Name)
#endif
-
#define OBJECT_TYPE_LIST(V) \
V(Smi) \
+ V(LayoutDescriptor) \
V(HeapObject) \
+ V(Primitive) \
V(Number)
#define HEAP_OBJECT_TYPE_LIST(V) \
@@ -931,7 +920,6 @@ template <class C> inline bool Is(Object* obj);
V(JSContextExtensionObject) \
V(JSGeneratorObject) \
V(JSModule) \
- V(LayoutDescriptor) \
V(Map) \
V(DescriptorArray) \
V(TransitionArray) \
@@ -953,6 +941,7 @@ template <class C> inline bool Is(Object* obj);
V(JSBoundFunction) \
V(JSFunction) \
V(Code) \
+ V(AbstractCode) \
V(Oddball) \
V(SharedFunctionInfo) \
V(JSValue) \
@@ -971,7 +960,6 @@ template <class C> inline bool Is(Object* obj);
V(JSMap) \
V(JSSetIterator) \
V(JSMapIterator) \
- V(JSIteratorResult) \
V(JSWeakCollection) \
V(JSWeakMap) \
V(JSWeakSet) \
@@ -984,11 +972,18 @@ template <class C> inline bool Is(Object* obj);
V(CodeCacheHashTable) \
V(PolymorphicCodeCacheHashTable) \
V(MapCache) \
- V(Primitive) \
V(JSGlobalObject) \
V(JSGlobalProxy) \
V(UndetectableObject) \
V(AccessCheckNeeded) \
+ V(Callable) \
+ V(Function) \
+ V(Constructor) \
+ V(TemplateInfo) \
+ V(Filler) \
+ V(FixedArrayBase) \
+ V(External) \
+ V(Struct) \
V(Cell) \
V(PropertyCell) \
V(WeakCell) \
@@ -996,6 +991,16 @@ template <class C> inline bool Is(Object* obj);
V(WeakHashTable) \
V(OrderedHashTable)
+#define ODDBALL_LIST(V) \
+ V(Undefined) \
+ V(Null) \
+ V(TheHole) \
+ V(Exception) \
+ V(Uninitialized) \
+ V(True) \
+ V(False) \
+ V(ArgumentsMarker)
+
// The element types selection for CreateListFromArrayLike.
enum class ElementTypes { kAll, kStringAndSymbol };
@@ -1013,6 +1018,7 @@ class Object {
#define IS_TYPE_FUNCTION_DECL(type_) INLINE(bool Is##type_() const);
OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+ ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
// A non-keyed store is of the form a.x = foo or a["x"] = foo whereas
@@ -1041,11 +1047,6 @@ class Object {
#define MAYBE_RETURN_NULL(call) MAYBE_RETURN(call, MaybeHandle<Object>())
- INLINE(bool IsFixedArrayBase() const);
- INLINE(bool IsExternal() const);
- INLINE(bool IsAccessorInfo() const);
-
- INLINE(bool IsStruct() const);
#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) \
INLINE(bool Is##Name() const);
STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
@@ -1054,16 +1055,6 @@ class Object {
// ES6, section 7.2.2 IsArray. NOT to be confused with %_IsArray.
MUST_USE_RESULT static Maybe<bool> IsArray(Handle<Object> object);
- // Test for JSBoundFunction or JSFunction.
- INLINE(bool IsFunction() const);
-
- // ES6, section 7.2.3 IsCallable.
- INLINE(bool IsCallable() const);
-
- // ES6, section 7.2.4 IsConstructor.
- INLINE(bool IsConstructor() const);
-
- INLINE(bool IsTemplateInfo()) const;
INLINE(bool IsNameDictionary() const);
INLINE(bool IsGlobalDictionary() const);
INLINE(bool IsSeededNumberDictionary() const);
@@ -1072,19 +1063,6 @@ class Object {
INLINE(bool IsOrderedHashMap() const);
static bool IsPromise(Handle<Object> object);
- // Oddball testing.
- INLINE(bool IsUndefined() const);
- INLINE(bool IsNull() const);
- INLINE(bool IsTheHole() const);
- INLINE(bool IsException() const);
- INLINE(bool IsUninitialized() const);
- INLINE(bool IsTrue() const);
- INLINE(bool IsFalse() const);
- INLINE(bool IsArgumentsMarker() const);
-
- // Filler objects (fillers and free space objects).
- INLINE(bool IsFiller() const);
-
// Extract the number.
inline double Number() const;
INLINE(bool IsNaN() const);
@@ -1105,7 +1083,8 @@ class Object {
inline bool FilterKey(PropertyFilter filter);
- Handle<HeapType> OptimalType(Isolate* isolate, Representation representation);
+ Handle<FieldType> OptimalType(Isolate* isolate,
+ Representation representation);
inline static Handle<Object> NewStorageFor(Isolate* isolate,
Handle<Object> object,
@@ -1124,8 +1103,8 @@ class Object {
bool BooleanValue(); // ECMA-262 9.2.
// ES6 section 7.2.11 Abstract Relational Comparison
- MUST_USE_RESULT static Maybe<ComparisonResult> Compare(
- Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static Maybe<ComparisonResult> Compare(Handle<Object> x,
+ Handle<Object> y);
// ES6 section 7.2.12 Abstract Equality Comparison
MUST_USE_RESULT static Maybe<bool> Equals(Handle<Object> x, Handle<Object> y);
@@ -1135,8 +1114,8 @@ class Object {
// Convert to a JSObject if needed.
// native_context is used when creating wrapper object.
- static inline MaybeHandle<JSReceiver> ToObject(Isolate* isolate,
- Handle<Object> object);
+ MUST_USE_RESULT static inline MaybeHandle<JSReceiver> ToObject(
+ Isolate* isolate, Handle<Object> object);
MUST_USE_RESULT static MaybeHandle<JSReceiver> ToObject(
Isolate* isolate, Handle<Object> object, Handle<Context> context);
@@ -1186,58 +1165,56 @@ class Object {
static Handle<String> TypeOf(Isolate* isolate, Handle<Object> object);
// ES6 section 12.6 Multiplicative Operators
- MUST_USE_RESULT static MaybeHandle<Object> Multiply(
- Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
- Strength strength = Strength::WEAK);
- MUST_USE_RESULT static MaybeHandle<Object> Divide(
- Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
- Strength strength = Strength::WEAK);
- MUST_USE_RESULT static MaybeHandle<Object> Modulus(
- Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
- Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static MaybeHandle<Object> Multiply(Isolate* isolate,
+ Handle<Object> lhs,
+ Handle<Object> rhs);
+ MUST_USE_RESULT static MaybeHandle<Object> Divide(Isolate* isolate,
+ Handle<Object> lhs,
+ Handle<Object> rhs);
+ MUST_USE_RESULT static MaybeHandle<Object> Modulus(Isolate* isolate,
+ Handle<Object> lhs,
+ Handle<Object> rhs);
// ES6 section 12.7 Additive Operators
- MUST_USE_RESULT static MaybeHandle<Object> Add(
- Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
- Strength strength = Strength::WEAK);
- MUST_USE_RESULT static MaybeHandle<Object> Subtract(
- Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
- Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static MaybeHandle<Object> Add(Isolate* isolate,
+ Handle<Object> lhs,
+ Handle<Object> rhs);
+ MUST_USE_RESULT static MaybeHandle<Object> Subtract(Isolate* isolate,
+ Handle<Object> lhs,
+ Handle<Object> rhs);
// ES6 section 12.8 Bitwise Shift Operators
- MUST_USE_RESULT static MaybeHandle<Object> ShiftLeft(
- Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
- Strength strength = Strength::WEAK);
- MUST_USE_RESULT static MaybeHandle<Object> ShiftRight(
- Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
- Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static MaybeHandle<Object> ShiftLeft(Isolate* isolate,
+ Handle<Object> lhs,
+ Handle<Object> rhs);
+ MUST_USE_RESULT static MaybeHandle<Object> ShiftRight(Isolate* isolate,
+ Handle<Object> lhs,
+ Handle<Object> rhs);
MUST_USE_RESULT static MaybeHandle<Object> ShiftRightLogical(
- Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
- Strength strength = Strength::WEAK);
+ Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs);
// ES6 section 12.9 Relational Operators
- MUST_USE_RESULT static inline Maybe<bool> GreaterThan(
- Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static inline Maybe<bool> GreaterThan(Handle<Object> x,
+ Handle<Object> y);
MUST_USE_RESULT static inline Maybe<bool> GreaterThanOrEqual(
- Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
- MUST_USE_RESULT static inline Maybe<bool> LessThan(
- Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
- MUST_USE_RESULT static inline Maybe<bool> LessThanOrEqual(
- Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
+ Handle<Object> x, Handle<Object> y);
+ MUST_USE_RESULT static inline Maybe<bool> LessThan(Handle<Object> x,
+ Handle<Object> y);
+ MUST_USE_RESULT static inline Maybe<bool> LessThanOrEqual(Handle<Object> x,
+ Handle<Object> y);
// ES6 section 12.11 Binary Bitwise Operators
- MUST_USE_RESULT static MaybeHandle<Object> BitwiseAnd(
- Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
- Strength strength = Strength::WEAK);
- MUST_USE_RESULT static MaybeHandle<Object> BitwiseOr(
- Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
- Strength strength = Strength::WEAK);
- MUST_USE_RESULT static MaybeHandle<Object> BitwiseXor(
- Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
- Strength strength = Strength::WEAK);
-
- MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
- LookupIterator* it, LanguageMode language_mode = SLOPPY);
+ MUST_USE_RESULT static MaybeHandle<Object> BitwiseAnd(Isolate* isolate,
+ Handle<Object> lhs,
+ Handle<Object> rhs);
+ MUST_USE_RESULT static MaybeHandle<Object> BitwiseOr(Isolate* isolate,
+ Handle<Object> lhs,
+ Handle<Object> rhs);
+ MUST_USE_RESULT static MaybeHandle<Object> BitwiseXor(Isolate* isolate,
+ Handle<Object> lhs,
+ Handle<Object> rhs);
+
+ MUST_USE_RESULT static MaybeHandle<Object> GetProperty(LookupIterator* it);
// ES6 [[Set]] (when passed DONT_THROW)
// Invariants for this and related functions (unless stated otherwise):
@@ -1260,10 +1237,9 @@ class Object {
StoreFromKeyed store_mode);
MUST_USE_RESULT static MaybeHandle<Object> ReadAbsentProperty(
- LookupIterator* it, LanguageMode language_mode);
+ LookupIterator* it);
MUST_USE_RESULT static MaybeHandle<Object> ReadAbsentProperty(
- Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
- LanguageMode language_mode);
+ Isolate* isolate, Handle<Object> receiver, Handle<Object> name);
MUST_USE_RESULT static Maybe<bool> CannotCreateProperty(
Isolate* isolate, Handle<Object> receiver, Handle<Object> name,
Handle<Object> value, ShouldThrow should_throw);
@@ -1281,20 +1257,16 @@ class Object {
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
ShouldThrow should_throw, StoreFromKeyed store_mode);
MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
- Handle<Object> object, Handle<Name> name,
- LanguageMode language_mode = SLOPPY);
+ Handle<Object> object, Handle<Name> name);
MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
- Handle<JSReceiver> holder, Handle<Name> name, Handle<Object> receiver,
- LanguageMode language_mode = SLOPPY);
+ Handle<Object> receiver, Handle<Name> name, Handle<JSReceiver> holder);
MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
- Isolate* isolate, Handle<Object> object, const char* key,
- LanguageMode language_mode = SLOPPY);
+ Isolate* isolate, Handle<Object> object, const char* key);
MUST_USE_RESULT static inline MaybeHandle<Object> GetProperty(
- Handle<Object> object, Handle<Name> name,
- LanguageMode language_mode = SLOPPY);
+ Handle<Object> object, Handle<Name> name);
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithAccessor(
- LookupIterator* it, LanguageMode language_mode);
+ LookupIterator* it);
MUST_USE_RESULT static Maybe<bool> SetPropertyWithAccessor(
LookupIterator* it, Handle<Object> value, ShouldThrow should_throw);
@@ -1306,21 +1278,12 @@ class Object {
ShouldThrow should_throw);
MUST_USE_RESULT static inline MaybeHandle<Object> GetElement(
- Isolate* isolate, Handle<Object> object, uint32_t index,
- LanguageMode language_mode = SLOPPY);
+ Isolate* isolate, Handle<Object> object, uint32_t index);
MUST_USE_RESULT static inline MaybeHandle<Object> SetElement(
Isolate* isolate, Handle<Object> object, uint32_t index,
Handle<Object> value, LanguageMode language_mode);
- // Get the first non-hidden prototype.
- static inline MaybeHandle<Object> GetPrototype(Isolate* isolate,
- Handle<Object> receiver);
-
- MUST_USE_RESULT static Maybe<bool> HasInPrototypeChain(Isolate* isolate,
- Handle<Object> object,
- Handle<Object> proto);
-
// Returns the permanent hash code associated with this object. May return
// undefined if not yet created.
Object* GetHash();
@@ -1359,10 +1322,6 @@ class Object {
// allow kMaxUInt32.
inline bool ToArrayIndex(uint32_t* index);
- // Returns true if this is a JSValue containing a string and the index is
- // < the length of the string. Used to implement [] on strings.
- inline bool IsStringObjectWithCharacterAt(uint32_t index);
-
DECLARE_VERIFIER(Object)
#ifdef VERIFY_HEAP
// Verify a pointer is a valid object pointer.
@@ -1401,7 +1360,7 @@ class Object {
private:
friend class LookupIterator;
- friend class PrototypeIterator;
+ friend class StringStream;
// Return the map of the root of object's prototype chain.
Map* GetRootMap(Isolate* isolate);
@@ -1555,6 +1514,15 @@ class HeapObject: public Object {
// Convenience method to get current isolate.
inline Isolate* GetIsolate() const;
+#define IS_TYPE_FUNCTION_DECL(type_) INLINE(bool Is##type_() const);
+ HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
+ ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
+#undef IS_TYPE_FUNCTION_DECL
+#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) \
+ INLINE(bool Is##Name() const);
+ STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
+#undef DECLARE_STRUCT_PREDICATE
+
// Converts an address to a HeapObject pointer.
static inline HeapObject* FromAddress(Address address) {
DCHECK_TAG_ALIGNED(address);
@@ -1797,6 +1765,7 @@ enum AccessorComponent {
enum GetKeysConversion { KEEP_NUMBERS, CONVERT_TO_STRING };
+enum KeyCollectionType { OWN_ONLY, INCLUDE_PROTOS };
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
@@ -1826,6 +1795,13 @@ class JSReceiver: public HeapObject {
static MaybeHandle<Context> GetFunctionRealm(Handle<JSReceiver> receiver);
+ // Get the first non-hidden prototype.
+ static inline MaybeHandle<Object> GetPrototype(Isolate* isolate,
+ Handle<JSReceiver> receiver);
+
+ MUST_USE_RESULT static Maybe<bool> HasInPrototypeChain(
+ Isolate* isolate, Handle<JSReceiver> object, Handle<Object> proto);
+
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
MUST_USE_RESULT static Maybe<bool> HasProperty(LookupIterator* it);
MUST_USE_RESULT static inline Maybe<bool> HasProperty(
@@ -1958,13 +1934,10 @@ class JSReceiver: public HeapObject {
inline static Handle<Smi> GetOrCreateIdentityHash(
Handle<JSReceiver> object);
- enum KeyCollectionType { OWN_ONLY, INCLUDE_PROTOS };
-
// ES6 [[OwnPropertyKeys]] (modulo return type)
MUST_USE_RESULT static MaybeHandle<FixedArray> OwnPropertyKeys(
Handle<JSReceiver> object) {
- return GetKeys(object, JSReceiver::OWN_ONLY, ALL_PROPERTIES,
- CONVERT_TO_STRING);
+ return GetKeys(object, OWN_ONLY, ALL_PROPERTIES, CONVERT_TO_STRING);
}
// Computes the enumerable keys for a JSObject. Used for implementing
@@ -1973,6 +1946,12 @@ class JSReceiver: public HeapObject {
Handle<JSReceiver> object, KeyCollectionType type, PropertyFilter filter,
GetKeysConversion keys_conversion = KEEP_NUMBERS);
+ MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnValues(
+ Handle<JSReceiver> object, PropertyFilter filter);
+
+ MUST_USE_RESULT static MaybeHandle<FixedArray> GetOwnEntries(
+ Handle<JSReceiver> object, PropertyFilter filter);
+
// Layout description.
static const int kPropertiesOffset = HeapObject::kHeaderSize;
static const int kHeaderSize = HeapObject::kHeaderSize + kPointerSize;
@@ -2038,6 +2017,7 @@ class JSObject: public JSReceiver {
// ElementsKind.
inline bool HasFastHoleyElements();
inline bool HasSloppyArgumentsElements();
+ inline bool HasStringWrapperElements();
inline bool HasDictionaryElements();
inline bool HasFixedTypedArrayElements();
@@ -2055,6 +2035,8 @@ class JSObject: public JSReceiver {
inline bool HasFastArgumentsElements();
inline bool HasSlowArgumentsElements();
+ inline bool HasFastStringWrapperElements();
+ inline bool HasSlowStringWrapperElements();
inline SeededNumberDictionary* element_dictionary(); // Gets slow elements.
// Requires: HasFastElements().
@@ -2073,38 +2055,37 @@ class JSObject: public JSReceiver {
uint32_t limit);
MUST_USE_RESULT static Maybe<bool> SetPropertyWithInterceptor(
- LookupIterator* it, Handle<Object> value);
+ LookupIterator* it, ShouldThrow should_throw, Handle<Object> value);
- // SetLocalPropertyIgnoreAttributes converts callbacks to fields. We need to
- // grant an exemption to ExecutableAccessor callbacks in some cases.
- enum ExecutableAccessorInfoHandling { DEFAULT_HANDLING, DONT_FORCE_FIELD };
+ // The API currently still wants DefineOwnPropertyIgnoreAttributes to convert
+ // AccessorInfo objects to data fields. We allow FORCE_FIELD as an exception
+ // to the default behavior that calls the setter.
+ enum AccessorInfoHandling { FORCE_FIELD, DONT_FORCE_FIELD };
MUST_USE_RESULT static MaybeHandle<Object> DefineOwnPropertyIgnoreAttributes(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
- ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+ AccessorInfoHandling handling = DONT_FORCE_FIELD);
MUST_USE_RESULT static Maybe<bool> DefineOwnPropertyIgnoreAttributes(
LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
ShouldThrow should_throw,
- ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+ AccessorInfoHandling handling = DONT_FORCE_FIELD);
MUST_USE_RESULT static MaybeHandle<Object> SetOwnPropertyIgnoreAttributes(
Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
- PropertyAttributes attributes,
- ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+ PropertyAttributes attributes);
MUST_USE_RESULT static MaybeHandle<Object> SetOwnElementIgnoreAttributes(
Handle<JSObject> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes,
- ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+ PropertyAttributes attributes);
// Equivalent to one of the above depending on whether |name| can be converted
// to an array index.
MUST_USE_RESULT static MaybeHandle<Object>
- DefinePropertyOrElementIgnoreAttributes(
- Handle<JSObject> object, Handle<Name> name, Handle<Object> value,
- PropertyAttributes attributes = NONE,
- ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
+ DefinePropertyOrElementIgnoreAttributes(Handle<JSObject> object,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes = NONE);
// Adds or reconfigures a property to attributes NONE. It will fail when it
// cannot.
@@ -2204,12 +2185,11 @@ class JSObject: public JSReceiver {
// Accessors for hidden properties object.
//
- // Hidden properties are not own properties of the object itself.
- // Instead they are stored in an auxiliary structure kept as an own
- // property with a special name Heap::hidden_string(). But if the
- // receiver is a JSGlobalProxy then the auxiliary object is a property
- // of its prototype, and if it's a detached proxy, then you can't have
- // hidden properties.
+ // Hidden properties are not own properties of the object itself. Instead
+ // they are stored in an auxiliary structure kept as an own property with a
+ // special name Heap::hidden_properties_symbol(). But if the receiver is a
+ // JSGlobalProxy then the auxiliary object is a property of its prototype, and
+ // if it's a detached proxy, then you can't have hidden properties.
// Sets a hidden property on this object. Returns this object if successful,
// undefined if called on a detached proxy.
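
The comment above describes the whole mechanism: hidden properties are ordinary entries in a side table, and that side table hangs off the object under one reserved key (the private hidden_properties_symbol). A minimal sketch of the shape, using std::map stand-ins instead of V8's heap dictionaries; JSObjectStub and the string key are inventions of this sketch:

#include <cstdio>
#include <map>
#include <string>

// The reserved key; in V8 this is a private symbol, not a string.
static const std::string kHiddenPropertiesKey = "<hidden_properties_symbol>";

struct JSObjectStub {
  std::map<std::string, std::string> properties;
  // Auxiliary table stored "as an own property" under the reserved key.
  std::map<std::string, std::string> hidden;

  void SetHiddenProperty(const std::string& key, const std::string& value) {
    properties[kHiddenPropertiesKey] = "<auxiliary table>";
    hidden[key] = value;
  }
};

int main() {
  JSObjectStub o;
  o.SetHiddenProperty("v8::identity_hash", "42");
  // Only the single reserved key is visible as an own property;
  // the hidden entries themselves are not.
  for (auto& p : o.properties) std::printf("own: %s\n", p.first.c_str());
  std::printf("hidden: %s\n", o.hidden.at("v8::identity_hash").c_str());
  return 0;
}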
@@ -2307,8 +2287,7 @@ class JSObject: public JSReceiver {
KeyAccumulator* keys,
PropertyFilter filter);
- static Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
- bool cache_result);
+ static Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object);
// Returns a new map with all transitions dropped from the object's current
// map and the ElementsKind set.
@@ -2355,6 +2334,8 @@ class JSObject: public JSReceiver {
inline void FastPropertyAtPut(FieldIndex index, Object* value);
inline void RawFastPropertyAtPut(FieldIndex index, Object* value);
inline void RawFastDoublePropertyAtPut(FieldIndex index, double value);
+ inline void WriteToField(int descriptor, PropertyDetails details,
+ Object* value);
inline void WriteToField(int descriptor, Object* value);
// Access to in object properties.
@@ -2397,6 +2378,10 @@ class JSObject: public JSReceiver {
Handle<JSObject> object,
AllocationSiteUsageContext* site_context,
DeepCopyHints hints = kNoHints);
+  // Deep copies the given object with special handling for JSFunctions which
+ // 1) must be Api functions and 2) are not copied but left as is.
+ MUST_USE_RESULT static MaybeHandle<JSObject> DeepCopyApiBoilerplate(
+ Handle<JSObject> object);
MUST_USE_RESULT static MaybeHandle<JSObject> DeepWalk(
Handle<JSObject> object,
AllocationSiteCreationContext* site_context);
@@ -2505,11 +2490,6 @@ class JSObject: public JSReceiver {
friend class JSReceiver;
friend class Object;
- static void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map);
- static void MigrateFastToSlow(Handle<JSObject> object,
- Handle<Map> new_map,
- int expected_additional_properties);
-
// Used from Object::GetProperty().
MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithFailedAccessCheck(
LookupIterator* it);
@@ -2524,7 +2504,7 @@ class JSObject: public JSReceiver {
PropertyAttributes attributes);
MUST_USE_RESULT static Maybe<bool> DeletePropertyWithInterceptor(
- LookupIterator* it);
+ LookupIterator* it, ShouldThrow should_throw);
bool ReferencesObjectFromElements(FixedArray* elements,
ElementsKind kind,
@@ -2566,6 +2546,111 @@ class JSObject: public JSReceiver {
};
+// JSAccessorPropertyDescriptor is just a JSObject with a specific initial
+// map. This initial map adds in-object properties for "get", "set",
+// "enumerable" and "configurable" properties, as assigned by the
+// FromPropertyDescriptor function for regular accessor properties.
+class JSAccessorPropertyDescriptor: public JSObject {
+ public:
+ // Offsets of object fields.
+ static const int kGetOffset = JSObject::kHeaderSize;
+ static const int kSetOffset = kGetOffset + kPointerSize;
+ static const int kEnumerableOffset = kSetOffset + kPointerSize;
+ static const int kConfigurableOffset = kEnumerableOffset + kPointerSize;
+ static const int kSize = kConfigurableOffset + kPointerSize;
+ // Indices of in-object properties.
+ static const int kGetIndex = 0;
+ static const int kSetIndex = 1;
+ static const int kEnumerableIndex = 2;
+ static const int kConfigurableIndex = 3;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSAccessorPropertyDescriptor);
+};
+
+
+// JSDataPropertyDescriptor is just a JSObject with a specific initial map.
+// This initial map adds in-object properties for "value", "writable",
+// "enumerable" and "configurable" properties, as assigned by the
+// FromPropertyDescriptor function for regular data properties.
+class JSDataPropertyDescriptor: public JSObject {
+ public:
+ // Offsets of object fields.
+ static const int kValueOffset = JSObject::kHeaderSize;
+ static const int kWritableOffset = kValueOffset + kPointerSize;
+ static const int kEnumerableOffset = kWritableOffset + kPointerSize;
+ static const int kConfigurableOffset = kEnumerableOffset + kPointerSize;
+ static const int kSize = kConfigurableOffset + kPointerSize;
+ // Indices of in-object properties.
+ static const int kValueIndex = 0;
+ static const int kWritableIndex = 1;
+ static const int kEnumerableIndex = 2;
+ static const int kConfigurableIndex = 3;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSDataPropertyDescriptor);
+};
+
+
+// JSIteratorResult is just a JSObject with a specific initial map.
+// This initial map adds in-object properties for "done" and "value",
+// as specified by ES6 section 25.1.1.3 The IteratorResult Interface
+class JSIteratorResult: public JSObject {
+ public:
+ // Offsets of object fields.
+ static const int kValueOffset = JSObject::kHeaderSize;
+ static const int kDoneOffset = kValueOffset + kPointerSize;
+ static const int kSize = kDoneOffset + kPointerSize;
+ // Indices of in-object properties.
+ static const int kValueIndex = 0;
+ static const int kDoneIndex = 1;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSIteratorResult);
+};
+
+
+// Common superclass for JSSloppyArgumentsObject and JSStrictArgumentsObject.
+class JSArgumentsObject: public JSObject {
+ public:
+ // Offsets of object fields.
+ static const int kLengthOffset = JSObject::kHeaderSize;
+ static const int kHeaderSize = kLengthOffset + kPointerSize;
+ // Indices of in-object properties.
+ static const int kLengthIndex = 0;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSArgumentsObject);
+};
+
+
+// JSSloppyArgumentsObject is just a JSObject with specific initial map.
+// This initial map adds in-object properties for "length" and "callee".
+class JSSloppyArgumentsObject: public JSArgumentsObject {
+ public:
+ // Offsets of object fields.
+ static const int kCalleeOffset = JSArgumentsObject::kHeaderSize;
+ static const int kSize = kCalleeOffset + kPointerSize;
+ // Indices of in-object properties.
+ static const int kCalleeIndex = 1;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSSloppyArgumentsObject);
+};
+
+
+// JSStrictArgumentsObject is just a JSObject with specific initial map.
+// This initial map adds an in-object property for "length".
+class JSStrictArgumentsObject: public JSArgumentsObject {
+ public:
+ // Offsets of object fields.
+ static const int kSize = JSArgumentsObject::kHeaderSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSStrictArgumentsObject);
+};
+
+
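
All of the classes added above (the property descriptors, the iterator result, and the arguments objects) follow one pattern: each named in-object property sits at a fixed pointer-sized offset past the JSObject header, and the k*Index constants are the same positions expressed as in-object property indices. A standalone sketch of the arithmetic, assuming 8-byte pointers and an 8-byte header purely for illustration:

#include <cstdio>

// Hypothetical sizes; V8 derives these from the actual heap layout.
constexpr int kPointerSize = 8;
constexpr int kJSObjectHeaderSize = 8;

// Mirrors JSDataPropertyDescriptor: fields are laid out back to back.
constexpr int kValueOffset = kJSObjectHeaderSize;
constexpr int kWritableOffset = kValueOffset + kPointerSize;
constexpr int kEnumerableOffset = kWritableOffset + kPointerSize;
constexpr int kConfigurableOffset = kEnumerableOffset + kPointerSize;
constexpr int kSize = kConfigurableOffset + kPointerSize;

// Index of an in-object property = (offset - header) / pointer size.
constexpr int IndexOf(int offset) {
  return (offset - kJSObjectHeaderSize) / kPointerSize;
}

int main() {
  std::printf("value index: %d\n", IndexOf(kValueOffset));        // 0
  std::printf("writable index: %d\n", IndexOf(kWritableOffset));  // 1
  std::printf("object size: %d bytes\n", kSize);                  // 40
  return 0;
}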
// Common superclass for FixedArrays that allow implementations to share
// common accessors and some code paths.
class FixedArrayBase: public HeapObject {
@@ -2596,7 +2681,8 @@ class FixedArray: public FixedArrayBase {
public:
// Setter and getter for elements.
inline Object* get(int index) const;
- static inline Handle<Object> get(Handle<FixedArray> array, int index);
+ static inline Handle<Object> get(FixedArray* array, int index,
+ Isolate* isolate);
// Setter that uses write barrier.
inline void set(int index, Object* value);
inline bool is_the_hole(int index);
@@ -2683,7 +2769,8 @@ class FixedDoubleArray: public FixedArrayBase {
// Setter and getter for elements.
inline double get_scalar(int index);
inline uint64_t get_representation(int index);
- static inline Handle<Object> get(Handle<FixedDoubleArray> array, int index);
+ static inline Handle<Object> get(FixedDoubleArray* array, int index,
+ Isolate* isolate);
inline void set(int index, double value);
inline void set_the_hole(int index);
@@ -2864,8 +2951,6 @@ class DescriptorArray: public FixedArray {
Isolate* isolate, Handle<FixedArray> new_cache,
Handle<FixedArray> new_index_cache);
- bool CanHoldValue(int descriptor, Object* value);
-
// Accessors for fetching instance descriptor at descriptor number.
inline Name* GetKey(int descriptor_number);
inline Object** GetKeySlot(int descriptor_number);
@@ -2878,7 +2963,7 @@ class DescriptorArray: public FixedArray {
inline PropertyDetails GetDetails(int descriptor_number);
inline PropertyType GetType(int descriptor_number);
inline int GetFieldIndex(int descriptor_number);
- inline HeapType* GetFieldType(int descriptor_number);
+ FieldType* GetFieldType(int descriptor_number);
inline Object* GetConstant(int descriptor_number);
inline Object* GetCallbacksObject(int descriptor_number);
inline AccessorDescriptor* GetCallbacks(int descriptor_number);
@@ -2917,15 +3002,15 @@ class DescriptorArray: public FixedArray {
// As the above, but uses DescriptorLookupCache and updates it when
// necessary.
- INLINE(int SearchWithCache(Name* name, Map* map));
+ INLINE(int SearchWithCache(Isolate* isolate, Name* name, Map* map));
bool IsEqualUpTo(DescriptorArray* desc, int nof_descriptors);
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
- static Handle<DescriptorArray> Allocate(Isolate* isolate,
- int number_of_descriptors,
- int slack = 0);
+ static Handle<DescriptorArray> Allocate(
+ Isolate* isolate, int number_of_descriptors, int slack,
+ PretenureFlag pretenure = NOT_TENURED);
DECLARE_CAST(DescriptorArray)
@@ -4086,10 +4171,6 @@ class ScopeInfo : public FixedArray {
// exposed to the user in a debugger.
bool LocalIsSynthetic(int var);
- String* StrongModeFreeVariableName(int var);
- int StrongModeFreeVariableStartPosition(int var);
- int StrongModeFreeVariableEndPosition(int var);
-
// Lookup support for serialized scope info. Returns the
// stack slot index for a given slot name if the slot is
// present; otherwise returns a value < 0. The name must be an internalized
@@ -4159,8 +4240,7 @@ class ScopeInfo : public FixedArray {
V(ParameterCount) \
V(StackLocalCount) \
V(ContextLocalCount) \
- V(ContextGlobalCount) \
- V(StrongModeFreeVariableCount)
+ V(ContextGlobalCount)
#define FIELD_ACCESSORS(name) \
inline void Set##name(int value); \
@@ -4202,15 +4282,10 @@ class ScopeInfo : public FixedArray {
// the context locals in ContextLocalNameEntries. One slot is used per
// context local, so in total this part occupies ContextLocalCount()
// slots in the array.
- // 6. StrongModeFreeVariableNameEntries:
- // Stores the names of strong mode free variables.
- // 7. StrongModeFreeVariablePositionEntries:
- // Stores the locations (start and end position) of strong mode free
- // variables.
- // 8. RecieverEntryIndex:
+ // 6. ReceiverEntryIndex:
// If the scope binds a "this" value, one slot is reserved to hold the
// context or stack slot index for the variable.
- // 9. FunctionNameEntryIndex:
+ // 7. FunctionNameEntryIndex:
// If the scope belongs to a named function expression this part contains
// information about the function variable. It always occupies two array
// slots: a. The name of the function variable.
@@ -4222,8 +4297,6 @@ class ScopeInfo : public FixedArray {
int ContextGlobalNameEntriesIndex();
int ContextLocalInfoEntriesIndex();
int ContextGlobalInfoEntriesIndex();
- int StrongModeFreeVariableNameEntriesIndex();
- int StrongModeFreeVariablePositionEntriesIndex();
int ReceiverEntryIndex();
int FunctionNameEntryIndex();
@@ -4286,7 +4359,7 @@ class NormalizedMapCache: public FixedArray {
DECLARE_CAST(NormalizedMapCache)
- static inline bool IsNormalizedMapCache(const Object* obj);
+ static inline bool IsNormalizedMapCache(const HeapObject* obj);
DECLARE_VERIFIER(NormalizedMapCache)
private:
@@ -4377,26 +4450,46 @@ class BytecodeArray : public FixedArrayBase {
inline int parameter_count() const;
inline void set_parameter_count(int number_of_parameters);
+ // Accessors for profiling count.
+ inline int interrupt_budget() const;
+ inline void set_interrupt_budget(int interrupt_budget);
+
// Accessors for the constant pool.
DECL_ACCESSORS(constant_pool, FixedArray)
+ // Accessors for handler table containing offsets of exception handlers.
+ DECL_ACCESSORS(handler_table, FixedArray)
+
+ // Accessors for source position table containing mappings between byte code
+ // offset and source position.
+ DECL_ACCESSORS(source_position_table, FixedArray)
+
DECLARE_CAST(BytecodeArray)
// Dispatched behavior.
inline int BytecodeArraySize();
+ inline int instruction_size();
+
+ int SourcePosition(int offset);
+ int SourceStatementPosition(int offset);
+
DECLARE_PRINTER(BytecodeArray)
DECLARE_VERIFIER(BytecodeArray)
void Disassemble(std::ostream& os);
+ void CopyBytecodesTo(BytecodeArray* to);
+
// Layout description.
- static const int kFrameSizeOffset = FixedArrayBase::kHeaderSize;
+ static const int kConstantPoolOffset = FixedArrayBase::kHeaderSize;
+ static const int kHandlerTableOffset = kConstantPoolOffset + kPointerSize;
+ static const int kSourcePositionTableOffset =
+ kHandlerTableOffset + kPointerSize;
+ static const int kFrameSizeOffset = kSourcePositionTableOffset + kPointerSize;
static const int kParameterSizeOffset = kFrameSizeOffset + kIntSize;
- static const int kConstantPoolOffset = kParameterSizeOffset + kIntSize;
- static const int kHeaderSize = kConstantPoolOffset + kPointerSize;
-
- static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
+ static const int kInterruptBudgetOffset = kParameterSizeOffset + kIntSize;
+ static const int kHeaderSize = kInterruptBudgetOffset + kIntSize;
// Maximal memory consumption for a single BytecodeArray.
static const int kMaxSize = 512 * MB;
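
The new interrupt_budget field is a profiling counter ("Accessors for profiling count" above). A plausible reading, sketched below under assumed semantics: the interpreter charges a cost against the budget as bytecodes execute and raises a profiling interrupt when it is exhausted. BytecodeArrayStub, ConsumeBudget, and the reset value 1000 are all hypothetical:

#include <cstdio>

struct BytecodeArrayStub {
  int interrupt_budget;
};

bool ConsumeBudget(BytecodeArrayStub* a, int cost) {
  a->interrupt_budget -= cost;
  if (a->interrupt_budget <= 0) {
    a->interrupt_budget = 1000;  // hypothetical reset value
    return true;                 // caller should profile / consider tiering up
  }
  return false;
}

int main() {
  BytecodeArrayStub a{2500};
  int interrupts = 0;
  for (int i = 0; i < 100; i++) {
    if (ConsumeBudget(&a, 100)) interrupts++;
  }
  std::printf("interrupts: %d\n", interrupts);
  return 0;
}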
@@ -4509,7 +4602,7 @@ class FixedTypedArray: public FixedTypedArrayBase {
DECLARE_CAST(FixedTypedArray<Traits>)
inline ElementType get_scalar(int index);
- static inline Handle<Object> get(Handle<FixedTypedArray> array, int index);
+ static inline Handle<Object> get(FixedTypedArray* array, int index);
inline void set(int index, ElementType value);
static inline ElementType from_int(int value);
@@ -4653,7 +4746,7 @@ class DeoptimizationOutputData: public FixedArray {
DECLARE_CAST(DeoptimizationOutputData)
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+#ifdef ENABLE_DISASSEMBLER
void DeoptimizationOutputDataPrint(std::ostream& os); // NOLINT
#endif
};
@@ -4698,7 +4791,7 @@ class LiteralsArray : public FixedArray {
// 1) Based on ranges: Used for unoptimized code. Contains one entry per
// exception handler and a range representing the try-block covered by that
// handler. Layout looks as follows:
-// [ range-start , range-end , handler-offset , stack-depth ]
+// [ range-start , range-end , handler-offset , handler-data ]
// 2) Based on return addresses: Used for turbofanned code. Contains one entry
// per call-site that could throw an exception. Layout looks as follows:
// [ return-address-offset , handler-offset ]
@@ -4709,29 +4802,41 @@ class HandlerTable : public FixedArray {
// undecidable it is merely an approximation (e.g. useful for debugger).
enum CatchPrediction { UNCAUGHT, CAUGHT };
- // Accessors for handler table based on ranges.
+ // Getters for handler table based on ranges.
+ inline int GetRangeStart(int index) const;
+ inline int GetRangeEnd(int index) const;
+ inline int GetRangeHandler(int index) const;
+ inline int GetRangeData(int index) const;
+
+ // Setters for handler table based on ranges.
inline void SetRangeStart(int index, int value);
inline void SetRangeEnd(int index, int value);
inline void SetRangeHandler(int index, int offset, CatchPrediction pred);
- inline void SetRangeDepth(int index, int value);
+ inline void SetRangeData(int index, int value);
- // Accessors for handler table based on return addresses.
+ // Setters for handler table based on return addresses.
inline void SetReturnOffset(int index, int value);
inline void SetReturnHandler(int index, int offset, CatchPrediction pred);
// Lookup handler in a table based on ranges.
- int LookupRange(int pc_offset, int* stack_depth, CatchPrediction* prediction);
+ int LookupRange(int pc_offset, int* data, CatchPrediction* prediction);
// Lookup handler in a table based on return addresses.
int LookupReturn(int pc_offset, CatchPrediction* prediction);
+  // Returns the conservative catch prediction.
+ inline CatchPrediction GetRangePrediction(int index) const;
+
+ // Returns the number of entries in the table.
+ inline int NumberOfRangeEntries() const;
+
// Returns the required length of the underlying fixed array.
static int LengthForRange(int entries) { return entries * kRangeEntrySize; }
static int LengthForReturn(int entries) { return entries * kReturnEntrySize; }
DECLARE_CAST(HandlerTable)
-#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+#ifdef ENABLE_DISASSEMBLER
void HandlerTableRangePrint(std::ostream& os); // NOLINT
void HandlerTableReturnPrint(std::ostream& os); // NOLINT
#endif
@@ -4741,7 +4846,7 @@ class HandlerTable : public FixedArray {
static const int kRangeStartIndex = 0;
static const int kRangeEndIndex = 1;
static const int kRangeHandlerIndex = 2;
- static const int kRangeDepthIndex = 3;
+ static const int kRangeDataIndex = 3;
static const int kRangeEntrySize = 4;
// Layout description for handler table based on return addresses.
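
For the range-based layout, each table entry occupies kRangeEntrySize = 4 slots in the order [range-start, range-end, handler-offset, handler-data], so LookupRange is a scan over 4-slot strides. A self-contained sketch of that scan, simplified in two ways: no catch-prediction bits are packed into the handler slot, and the last matching entry wins, which models inner try ranges listed after outer ones (an assumption of this sketch):

#include <cstdio>
#include <vector>

constexpr int kRangeStartIndex = 0;
constexpr int kRangeEndIndex = 1;
constexpr int kRangeHandlerIndex = 2;
constexpr int kRangeDataIndex = 3;
constexpr int kRangeEntrySize = 4;

// Returns the handler offset whose range covers pc_offset, or -1.
int LookupRange(const std::vector<int>& table, int pc_offset, int* data) {
  int handler = -1;
  for (size_t i = 0; i + kRangeEntrySize <= table.size();
       i += kRangeEntrySize) {
    int start = table[i + kRangeStartIndex];
    int end = table[i + kRangeEndIndex];
    if (pc_offset >= start && pc_offset < end) {
      handler = table[i + kRangeHandlerIndex];
      *data = table[i + kRangeDataIndex];
    }
  }
  return handler;
}

int main() {
  // Two nested try ranges: [0,100) -> handler 200, [10,20) -> handler 300.
  std::vector<int> table = {0, 100, 200, 0, 10, 20, 300, 1};
  int data = 0;
  std::printf("handler: %d data: %d\n", LookupRange(table, 15, &data), data);
  return 0;
}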
@@ -4897,7 +5002,6 @@ class Code: public HeapObject {
inline bool is_to_boolean_ic_stub();
inline bool is_keyed_stub();
inline bool is_optimized_code();
- inline bool is_interpreter_entry_trampoline();
inline bool embeds_maps_weakly();
inline bool IsCodeStubOrIC();
@@ -4906,6 +5010,10 @@ class Code: public HeapObject {
inline void set_raw_kind_specific_flags1(int value);
inline void set_raw_kind_specific_flags2(int value);
+ // Testers for interpreter builtins.
+ inline bool is_interpreter_entry_trampoline();
+ inline bool is_interpreter_enter_bytecode_dispatch();
+
// [is_crankshafted]: For kind STUB or ICs, tells whether or not a code
// object was generated by either the hydrogen or the TurboFan optimizing
// compiler (but it may not be an optimized function).
@@ -5096,8 +5204,8 @@ class Code: public HeapObject {
inline int ExecutableSize();
// Locating source position.
- int SourcePosition(Address pc);
- int SourceStatementPosition(Address pc);
+ int SourcePosition(int code_offset);
+ int SourceStatementPosition(int code_offset);
DECLARE_CAST(Code)
@@ -5294,6 +5402,16 @@ class Code: public HeapObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
};
+class AbstractCode : public HeapObject {
+ public:
+ int SourcePosition(int offset);
+ int SourceStatementPosition(int offset);
+
+ DECLARE_CAST(AbstractCode)
+ inline int Size();
+ inline Code* GetCode();
+ inline BytecodeArray* GetBytecodeArray();
+};
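
AbstractCode gives one SourcePosition/Size interface over both machine Code and interpreter BytecodeArray, which is also why Code::SourcePosition switched from pc addresses to code offsets above. A sketch of the dispatch idea using a plain tagged union rather than V8's heap-object casts; every type here is a stand-in:

#include <cstdio>

struct Code { int Size() const { return 4096; } };
struct BytecodeArray { int Size() const { return 128; } };

// A stand-in for AbstractCode: one interface over two code kinds.
struct AbstractCode {
  enum Kind { kCode, kBytecode } kind;
  union {
    const Code* code;
    const BytecodeArray* bytecode;
  };
  int Size() const {
    return kind == kCode ? code->Size() : bytecode->Size();
  }
};

int main() {
  Code c;
  BytecodeArray b;
  AbstractCode a1;
  a1.kind = AbstractCode::kCode;
  a1.code = &c;
  AbstractCode a2;
  a2.kind = AbstractCode::kBytecode;
  a2.bytecode = &b;
  std::printf("%d %d\n", a1.Size(), a2.Size());
  return 0;
}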
// Dependent code is a singly linked list of fixed arrays. Each array contains
// code objects in weak cells for one dependent group. The suffix of the array
@@ -5475,7 +5593,7 @@ class Map: public HeapObject {
STATIC_ASSERT(kDescriptorIndexBitCount + kDescriptorIndexBitCount == 20);
class DictionaryMap : public BitField<bool, 20, 1> {};
class OwnsDescriptors : public BitField<bool, 21, 1> {};
- class IsHiddenPrototype : public BitField<bool, 22, 1> {};
+ class HasHiddenPrototype : public BitField<bool, 22, 1> {};
class Deprecated : public BitField<bool, 23, 1> {};
class IsUnstable : public BitField<bool, 24, 1> {};
class IsMigrationTarget : public BitField<bool, 25, 1> {};
@@ -5487,7 +5605,11 @@ class Map: public HeapObject {
// Builtins::kJSConstructStubGeneric stub.
// This counter is used for in-object slack tracking.
// The in-object slack tracking is considered enabled when the counter is
- // non zero.
+  // non-zero. The counter only has a valid count for initial maps. For
+ // transitioned maps only kNoSlackTracking has a meaning, namely that inobject
+ // slack tracking already finished for the transition tree. Any other value
+ // indicates that either inobject slack tracking is still in progress, or that
+ // the map isn't part of the transition tree anymore.
class ConstructionCounter : public BitField<int, 29, 3> {};
static const int kSlackTrackingCounterStart = 7;
static const int kSlackTrackingCounterEnd = 1;
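
HasHiddenPrototype, ConstructionCounter, and the neighbouring classes are instantiations of a BitField<Type, shift, size> template that packs typed values into a single 32-bit word. An approximation of that template, close to but not exactly V8's definition:

#include <cstdint>
#include <cstdio>

template <class T, int shift, int size>
struct BitField {
  static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << shift) & kMask;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> shift);
  }
  static uint32_t update(uint32_t word, T value) {
    return (word & ~kMask) | encode(value);
  }
};

// Mirrors the layout in the hunk above: a bool at bit 22, a 3-bit
// counter at bits 29..31.
using HasHiddenPrototype = BitField<bool, 22, 1>;
using ConstructionCounter = BitField<int, 29, 3>;

int main() {
  uint32_t bits = 0;
  bits = HasHiddenPrototype::update(bits, true);
  bits = ConstructionCounter::update(bits, 7);  // kSlackTrackingCounterStart
  std::printf("hidden=%d counter=%d\n", HasHiddenPrototype::decode(bits),
              ConstructionCounter::decode(bits));
  return 0;
}

With this layout the 3-bit counter can count down from kSlackTrackingCounterStart = 7 without disturbing the flag bits below it.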
@@ -5551,13 +5673,12 @@ class Map: public HeapObject {
// Tells whether the instance has a [[Construct]] internal method.
// This property is implemented according to ES6, section 7.2.4.
- inline void set_is_constructor();
+ inline void set_is_constructor(bool value);
inline bool is_constructor() const;
- // Tells whether the instance with this map should be ignored by the
- // Object.getPrototypeOf() function and the __proto__ accessor.
- inline void set_is_hidden_prototype();
- inline bool is_hidden_prototype() const;
+ // Tells whether the instance with this map has a hidden prototype.
+ inline void set_has_hidden_prototype(bool value);
+ inline bool has_hidden_prototype() const;
// Records and queries whether the instance has a named interceptor.
inline void set_has_named_interceptor();
@@ -5606,6 +5727,7 @@ class Map: public HeapObject {
inline bool has_fast_double_elements();
inline bool has_fast_elements();
inline bool has_sloppy_arguments_elements();
+ inline bool has_fast_string_wrapper_elements();
inline bool has_fixed_typed_array_elements();
inline bool has_dictionary_elements();
@@ -5656,17 +5778,17 @@ class Map: public HeapObject {
int* old_number_of_fields);
// TODO(ishell): moveit!
static Handle<Map> GeneralizeAllFieldRepresentations(Handle<Map> map);
- MUST_USE_RESULT static Handle<HeapType> GeneralizeFieldType(
- Representation rep1, Handle<HeapType> type1, Representation rep2,
- Handle<HeapType> type2, Isolate* isolate);
+ MUST_USE_RESULT static Handle<FieldType> GeneralizeFieldType(
+ Representation rep1, Handle<FieldType> type1, Representation rep2,
+ Handle<FieldType> type2, Isolate* isolate);
static void GeneralizeFieldType(Handle<Map> map, int modify_index,
Representation new_representation,
- Handle<HeapType> new_field_type);
+ Handle<FieldType> new_field_type);
static Handle<Map> ReconfigureProperty(Handle<Map> map, int modify_index,
PropertyKind new_kind,
PropertyAttributes new_attributes,
Representation new_representation,
- Handle<HeapType> new_field_type,
+ Handle<FieldType> new_field_type,
StoreMode store_mode);
static Handle<Map> CopyGeneralizeAllRepresentations(
Handle<Map> map, int modify_index, StoreMode store_mode,
@@ -5752,6 +5874,10 @@ class Map: public HeapObject {
inline Cell* RetrieveDescriptorsPointer();
+ // Checks whether all properties are stored either in the map or on the object
+ // (inobject, properties, or elements backing store), requiring no special
+ // checks.
+ bool OnlyHasSimpleProperties();
inline int EnumLength();
inline void SetEnumLength(int length);
@@ -5788,11 +5914,8 @@ class Map: public HeapObject {
TransitionFlag flag);
MUST_USE_RESULT static MaybeHandle<Map> CopyWithField(
- Handle<Map> map,
- Handle<Name> name,
- Handle<HeapType> type,
- PropertyAttributes attributes,
- Representation representation,
+ Handle<Map> map, Handle<Name> name, Handle<FieldType> type,
+ PropertyAttributes attributes, Representation representation,
TransitionFlag flag);
MUST_USE_RESULT static MaybeHandle<Map> CopyWithConstant(
@@ -6126,16 +6249,14 @@ class Map: public HeapObject {
void PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
PropertyAttributes attributes);
- void PrintGeneralization(FILE* file,
- const char* reason,
- int modify_index,
- int split,
- int descriptors,
- bool constant_to_field,
+ void PrintGeneralization(FILE* file, const char* reason, int modify_index,
+ int split, int descriptors, bool constant_to_field,
Representation old_representation,
Representation new_representation,
- HeapType* old_field_type,
- HeapType* new_field_type);
+ MaybeHandle<FieldType> old_field_type,
+ MaybeHandle<Object> old_value,
+ MaybeHandle<FieldType> new_field_type,
+ MaybeHandle<Object> new_value);
static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 128;
@@ -6287,7 +6408,7 @@ class Script: public Struct {
// [line_ends]: FixedArray of line ends positions.
DECL_ACCESSORS(line_ends, Object)
- // [eval_from_shared]: for eval scripts the shared funcion info for the
+ // [eval_from_shared]: for eval scripts the shared function info for the
// function from which eval was called.
DECL_ACCESSORS(eval_from_shared, Object)
@@ -6335,17 +6456,17 @@ class Script: public Struct {
// resource is accessible. Otherwise, always return true.
inline bool HasValidSource();
- // Convert code position into column number.
- static int GetColumnNumber(Handle<Script> script, int code_pos);
+ // Convert code offset into column number.
+ static int GetColumnNumber(Handle<Script> script, int code_offset);
- // Convert code position into (zero-based) line number.
+ // Convert code offset into (zero-based) line number.
// The non-handlified version does not allocate, but may be much slower.
- static int GetLineNumber(Handle<Script> script, int code_pos);
+ static int GetLineNumber(Handle<Script> script, int code_offset);
int GetLineNumber(int code_pos);
static Handle<Object> GetNameOrSourceURL(Handle<Script> script);
- // Init line_ends array with code positions of line ends inside script source.
+ // Init line_ends array with source code positions of line ends.
static void InitLineEnds(Handle<Script> script);
// Get the JS object wrapping the given script; create it if none exists.
@@ -6414,37 +6535,40 @@ class Script: public Struct {
//
// Installation of ids for the selected builtin functions is handled
// by the bootstrapper.
-#define FUNCTIONS_WITH_ID_LIST(V) \
- V(Array.prototype, indexOf, ArrayIndexOf) \
- V(Array.prototype, lastIndexOf, ArrayLastIndexOf) \
- V(Array.prototype, push, ArrayPush) \
- V(Array.prototype, pop, ArrayPop) \
- V(Array.prototype, shift, ArrayShift) \
- V(Function.prototype, apply, FunctionApply) \
- V(Function.prototype, call, FunctionCall) \
- V(String.prototype, charCodeAt, StringCharCodeAt) \
- V(String.prototype, charAt, StringCharAt) \
- V(String, fromCharCode, StringFromCharCode) \
- V(Math, random, MathRandom) \
- V(Math, floor, MathFloor) \
- V(Math, round, MathRound) \
- V(Math, ceil, MathCeil) \
- V(Math, abs, MathAbs) \
- V(Math, log, MathLog) \
- V(Math, exp, MathExp) \
- V(Math, sqrt, MathSqrt) \
- V(Math, pow, MathPow) \
- V(Math, max, MathMax) \
- V(Math, min, MathMin) \
- V(Math, cos, MathCos) \
- V(Math, sin, MathSin) \
- V(Math, tan, MathTan) \
- V(Math, acos, MathAcos) \
- V(Math, asin, MathAsin) \
- V(Math, atan, MathAtan) \
- V(Math, atan2, MathAtan2) \
- V(Math, imul, MathImul) \
- V(Math, clz32, MathClz32) \
+#define FUNCTIONS_WITH_ID_LIST(V) \
+ V(Array.prototype, indexOf, ArrayIndexOf) \
+ V(Array.prototype, lastIndexOf, ArrayLastIndexOf) \
+ V(Array.prototype, push, ArrayPush) \
+ V(Array.prototype, pop, ArrayPop) \
+ V(Array.prototype, shift, ArrayShift) \
+ V(Function.prototype, apply, FunctionApply) \
+ V(Function.prototype, call, FunctionCall) \
+ V(String.prototype, charCodeAt, StringCharCodeAt) \
+ V(String.prototype, charAt, StringCharAt) \
+ V(String.prototype, concat, StringConcat) \
+ V(String.prototype, toLowerCase, StringToLowerCase) \
+ V(String.prototype, toUpperCase, StringToUpperCase) \
+ V(String, fromCharCode, StringFromCharCode) \
+ V(Math, random, MathRandom) \
+ V(Math, floor, MathFloor) \
+ V(Math, round, MathRound) \
+ V(Math, ceil, MathCeil) \
+ V(Math, abs, MathAbs) \
+ V(Math, log, MathLog) \
+ V(Math, exp, MathExp) \
+ V(Math, sqrt, MathSqrt) \
+ V(Math, pow, MathPow) \
+ V(Math, max, MathMax) \
+ V(Math, min, MathMin) \
+ V(Math, cos, MathCos) \
+ V(Math, sin, MathSin) \
+ V(Math, tan, MathTan) \
+ V(Math, acos, MathAcos) \
+ V(Math, asin, MathAsin) \
+ V(Math, atan, MathAtan) \
+ V(Math, atan2, MathAtan2) \
+ V(Math, imul, MathImul) \
+ V(Math, clz32, MathClz32) \
V(Math, fround, MathFround)
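
FUNCTIONS_WITH_ID_LIST is an X-macro: each V(holder, name, id) row gets expanded several times with different definitions of V (for the builtin-id enum, for installation by the bootstrapper, and so on). A compact standalone illustration of the technique, using a hypothetical three-entry list:

#include <cstdio>

// Hypothetical list in the same V(holder, name, id) shape.
#define DEMO_FUNCTIONS_WITH_ID_LIST(V) \
  V(Math, floor, MathFloor)            \
  V(Math, sqrt, MathSqrt)              \
  V(String, fromCharCode, StringFromCharCode)

// Expansion 1: an enum of ids.
enum FunctionId {
#define DECLARE_ID(holder, name, id) k##id,
  DEMO_FUNCTIONS_WITH_ID_LIST(DECLARE_ID)
#undef DECLARE_ID
  kFunctionIdCount
};

// Expansion 2: a matching table of "holder.name" strings.
static const char* const kNames[] = {
#define DECLARE_NAME(holder, name, id) #holder "." #name,
    DEMO_FUNCTIONS_WITH_ID_LIST(DECLARE_NAME)
#undef DECLARE_NAME
};

int main() {
  for (int i = 0; i < kFunctionIdCount; i++)
    std::printf("%d -> %s\n", i, kNames[i]);
  return 0;
}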
#define ATOMIC_FUNCTIONS_WITH_ID_LIST(V) \
@@ -6481,6 +6605,7 @@ class SharedFunctionInfo: public HeapObject {
// [code]: Function code.
DECL_ACCESSORS(code, Code)
+
inline void ReplaceCode(Code* code);
// [optimized_code_map]: Map from native context to optimized code
@@ -6657,8 +6782,8 @@ class SharedFunctionInfo: public HeapObject {
inline int end_position() const;
inline void set_end_position(int end_position);
- // Is this function a function expression in the source code.
- DECL_BOOLEAN_ACCESSORS(is_expression)
+ // Is this function a named function expression in the source code.
+ DECL_BOOLEAN_ACCESSORS(is_named_expression)
// Is this function a top-level function (scripts, evals).
DECL_BOOLEAN_ACCESSORS(is_toplevel)
@@ -6726,9 +6851,10 @@ class SharedFunctionInfo: public HeapObject {
// see a binding for it.
DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
- // Indicates that the function is anonymous (the name field can be set
- // through the API, which does not change this flag).
- DECL_BOOLEAN_ACCESSORS(is_anonymous)
+ // Indicates that the function is either an anonymous expression
+ // or an arrow function (the name field can be set through the API,
+ // which does not change this flag).
+ DECL_BOOLEAN_ACCESSORS(is_anonymous_expression)
// Is this a function or top-level/eval code.
DECL_BOOLEAN_ACCESSORS(is_function)
@@ -6748,8 +6874,11 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that this function is a concise method.
DECL_BOOLEAN_ACCESSORS(is_concise_method)
- // Indicates that this function is an accessor (getter or setter).
- DECL_BOOLEAN_ACCESSORS(is_accessor_function)
+ // Indicates that this function is a getter.
+ DECL_BOOLEAN_ACCESSORS(is_getter_function)
+
+ // Indicates that this function is a setter.
+ DECL_BOOLEAN_ACCESSORS(is_setter_function)
// Indicates that this function is a default constructor.
DECL_BOOLEAN_ACCESSORS(is_default_constructor)
@@ -6763,6 +6892,9 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that the shared function info has never been compiled before.
DECL_BOOLEAN_ACCESSORS(never_compiled)
+ // Whether this function was created from a FunctionDeclaration.
+ DECL_BOOLEAN_ACCESSORS(is_declaration)
+
inline FunctionKind kind();
inline void set_kind(FunctionKind kind);
@@ -6996,10 +7128,10 @@ class SharedFunctionInfo: public HeapObject {
// Bit positions in start_position_and_type.
// The source code start position is in the 30 most significant bits of
// the start_position_and_type field.
- static const int kIsExpressionBit = 0;
- static const int kIsTopLevelBit = 1;
+ static const int kIsNamedExpressionBit = 0;
+ static const int kIsTopLevelBit = 1;
static const int kStartPositionShift = 2;
- static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
+ static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1);
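
The two flag bits share a word with the 30-bit source start position: flags occupy bits 0..1 and the position is shifted up by kStartPositionShift. A quick sketch of the packing arithmetic; Pack and StartPosition are illustrative helpers, not V8 functions:

#include <cstdio>

constexpr int kIsNamedExpressionBit = 0;
constexpr int kIsTopLevelBit = 1;
constexpr int kStartPositionShift = 2;
constexpr int kStartPositionMask = ~((1 << kStartPositionShift) - 1);

int Pack(int start_position, bool named, bool toplevel) {
  return (start_position << kStartPositionShift) |
         (named << kIsNamedExpressionBit) | (toplevel << kIsTopLevelBit);
}

int StartPosition(int field) {
  // Masking first is redundant for positive values but mirrors how the
  // mask is meant to be used.
  return (field & kStartPositionMask) >> kStartPositionShift;
}

int main() {
  int field = Pack(1234, true, false);
  std::printf("pos=%d named=%d\n", StartPosition(field),
              (field >> kIsNamedExpressionBit) & 1);
  return 0;
}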
// Bit positions in compiler_hints.
enum CompilerHints {
@@ -7016,7 +7148,7 @@ class SharedFunctionInfo: public HeapObject {
kHasDuplicateParameters,
kForceInline,
kIsAsmFunction,
- kIsAnonymous,
+ kIsAnonymousExpression,
kNameShouldPrintAsAnonymous,
kIsFunction,
kDontCrankshaft,
@@ -7026,14 +7158,15 @@ class SharedFunctionInfo: public HeapObject {
kIsArrow = kFunctionKind,
kIsGenerator,
kIsConciseMethod,
- kIsAccessorFunction,
kIsDefaultConstructor,
kIsSubclassConstructor,
kIsBaseConstructor,
- kIsInObjectLiteral,
+ kIsGetterFunction,
+ kIsSetterFunction,
// byte 3
kDeserialized,
kNeverCompiled,
+ kIsDeclaration,
kCompilerHintsCount, // Pseudo entry
};
// Add hints for other modes when they're added.
@@ -7047,11 +7180,11 @@ class SharedFunctionInfo: public HeapObject {
ASSERT_FUNCTION_KIND_ORDER(kArrowFunction, kIsArrow);
ASSERT_FUNCTION_KIND_ORDER(kGeneratorFunction, kIsGenerator);
ASSERT_FUNCTION_KIND_ORDER(kConciseMethod, kIsConciseMethod);
- ASSERT_FUNCTION_KIND_ORDER(kAccessorFunction, kIsAccessorFunction);
ASSERT_FUNCTION_KIND_ORDER(kDefaultConstructor, kIsDefaultConstructor);
ASSERT_FUNCTION_KIND_ORDER(kSubclassConstructor, kIsSubclassConstructor);
ASSERT_FUNCTION_KIND_ORDER(kBaseConstructor, kIsBaseConstructor);
- ASSERT_FUNCTION_KIND_ORDER(kInObjectLiteral, kIsInObjectLiteral);
+ ASSERT_FUNCTION_KIND_ORDER(kGetterFunction, kIsGetterFunction);
+ ASSERT_FUNCTION_KIND_ORDER(kSetterFunction, kIsSetterFunction);
#undef ASSERT_FUNCTION_KIND_ORDER
class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 8> {};
@@ -7159,6 +7292,9 @@ class JSGeneratorObject: public JSObject {
// [receiver]: The receiver of the suspended computation.
DECL_ACCESSORS(receiver, Object)
+ // [input]: The most recent input value.
+ DECL_ACCESSORS(input, Object)
+
// [continuation]: Offset into code of continuation.
//
// A positive offset indicates a suspended generator. The special
@@ -7187,12 +7323,13 @@ class JSGeneratorObject: public JSObject {
static const int kFunctionOffset = JSObject::kHeaderSize;
static const int kContextOffset = kFunctionOffset + kPointerSize;
static const int kReceiverOffset = kContextOffset + kPointerSize;
- static const int kContinuationOffset = kReceiverOffset + kPointerSize;
+ static const int kInputOffset = kReceiverOffset + kPointerSize;
+ static const int kContinuationOffset = kInputOffset + kPointerSize;
static const int kOperandStackOffset = kContinuationOffset + kPointerSize;
static const int kSize = kOperandStackOffset + kPointerSize;
// Resume mode, for use by runtime functions.
- enum ResumeMode { NEXT, THROW };
+ enum ResumeMode { NEXT, RETURN, THROW };
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
@@ -7244,12 +7381,6 @@ class JSBoundFunction : public JSObject {
// arguments to any call to the wrapped function.
DECL_ACCESSORS(bound_arguments, FixedArray)
- // [creation_context]: The native context in which the function was bound.
- // TODO(bmeurer, verwaest): Can we (mis)use (unused) constructor field in
- // the Map instead of putting this into the object? Only required for
- // JSReceiver::GetCreationContext() anyway.
- DECL_ACCESSORS(creation_context, Context)
-
static MaybeHandle<Context> GetFunctionRealm(
Handle<JSBoundFunction> function);
@@ -7267,9 +7398,7 @@ class JSBoundFunction : public JSObject {
static const int kBoundTargetFunctionOffset = JSObject::kHeaderSize;
static const int kBoundThisOffset = kBoundTargetFunctionOffset + kPointerSize;
static const int kBoundArgumentsOffset = kBoundThisOffset + kPointerSize;
- static const int kCreationContextOffset =
- kBoundArgumentsOffset + kPointerSize;
- static const int kLengthOffset = kCreationContextOffset + kPointerSize;
+ static const int kLengthOffset = kBoundArgumentsOffset + kPointerSize;
static const int kNameOffset = kLengthOffset + kPointerSize;
static const int kSize = kNameOffset + kPointerSize;
@@ -7431,6 +7560,12 @@ class JSFunction: public JSObject {
// debug name.
static Handle<String> GetName(Handle<JSFunction> function);
+ // ES6 section 9.2.11 SetFunctionName
+ // Because of the way this abstract operation is used in the spec,
+ // it should never fail.
+ static void SetName(Handle<JSFunction> function, Handle<Name> name,
+ Handle<String> prefix);
+
// The function's displayName if it is set, otherwise name if it is
// configured, otherwise shared function info
// debug name.
@@ -8233,9 +8368,8 @@ class AllocationSite: public Struct {
static const int kPointerFieldsEndOffset = kWeakNextOffset;
// For other visitors, use the fixed body descriptor below.
- typedef FixedBodyDescriptor<HeapObject::kHeaderSize,
- kDependentCodeOffset + kPointerSize,
- kSize> BodyDescriptor;
+ typedef FixedBodyDescriptor<HeapObject::kHeaderSize, kSize, kSize>
+ BodyDescriptor;
private:
inline bool PretenuringDecisionMade();
@@ -8253,6 +8387,7 @@ class AllocationMemento: public Struct {
inline bool IsValid();
inline AllocationSite* GetAllocationSite();
+ inline Address GetAllocationSiteUnchecked();
DECLARE_PRINTER(AllocationMemento)
DECLARE_VERIFIER(AllocationMemento)
@@ -8440,10 +8575,7 @@ class Name: public HeapObject {
// If the name is private, it can only name own properties.
inline bool IsPrivate();
- // If the name is a non-flat string, this method returns a flat version of the
- // string. Otherwise it'll just return the input.
- static inline Handle<Name> Flatten(Handle<Name> name,
- PretenureFlag pretenure = NOT_TENURED);
+ inline bool IsUniqueName() const;
// Return a string version of this name that is converted according to the
// rules described in ES6 section 9.2.11.
@@ -8836,9 +8968,7 @@ class String: public Name {
static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
static const int kMaxUtf16CodeUnit = 0xffff;
static const uint32_t kMaxUtf16CodeUnitU = kMaxUtf16CodeUnit;
-
- // Value of hash field containing computed hash equal to zero.
- static const int kEmptyStringHash = kIsNotArrayIndexMask;
+ static const uc32 kMaxCodePoint = 0x10ffff;
// Maximal string length.
static const int kMaxLength = (1 << 28) - 16;
@@ -9411,7 +9541,7 @@ class Oddball: public HeapObject {
static const byte kNotBooleanMask = ~1;
static const byte kTheHole = 2;
static const byte kNull = 3;
- static const byte kArgumentMarker = 4;
+ static const byte kArgumentsMarker = 4;
static const byte kUndefined = 5;
static const byte kUninitialized = 6;
static const byte kOther = 7;
@@ -9604,7 +9734,7 @@ class JSProxy: public JSReceiver {
// ES6 9.5.8
MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
Isolate* isolate, Handle<JSProxy> proxy, Handle<Name> name,
- Handle<Object> receiver, LanguageMode language_mode);
+ Handle<Object> receiver);
// ES6 9.5.9
MUST_USE_RESULT static Maybe<bool> SetProperty(Handle<JSProxy> proxy,
@@ -9617,12 +9747,6 @@ class JSProxy: public JSReceiver {
MUST_USE_RESULT static Maybe<bool> DeletePropertyOrElement(
Handle<JSProxy> proxy, Handle<Name> name, LanguageMode language_mode);
- // ES6 9.5.11
- MUST_USE_RESULT static Maybe<bool> Enumerate(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<JSProxy> proxy,
- KeyAccumulator* accumulator);
-
// ES6 9.5.12
MUST_USE_RESULT static Maybe<bool> OwnPropertyKeys(
Isolate* isolate, Handle<JSReceiver> receiver, Handle<JSProxy> proxy,
@@ -9648,12 +9772,12 @@ class JSProxy: public JSReceiver {
static Handle<Smi> GetOrCreateIdentityHash(Handle<JSProxy> proxy);
- private:
- static Maybe<bool> AddPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
+ static Maybe<bool> SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
Handle<Symbol> private_name,
PropertyDescriptor* desc,
ShouldThrow should_throw);
+ private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSProxy);
};
@@ -9811,40 +9935,6 @@ class JSMapIterator: public OrderedHashTableIterator<JSMapIterator,
};
-// ES6 section 25.1.1.3 The IteratorResult Interface
-class JSIteratorResult final : public JSObject {
- public:
- // [done]: This is the result status of an iterator next method call. If the
- // end of the iterator was reached done is true. If the end was not reached
- // done is false and a [value] is available.
- DECL_ACCESSORS(done, Object)
-
- // [value]: If [done] is false, this is the current iteration element value.
- // If [done] is true, this is the return value of the iterator, if it supplied
- // one. If the iterator does not have a return value, value is undefined.
- // In that case, the value property may be absent from the conforming object
- // if it does not inherit an explicit value property.
- DECL_ACCESSORS(value, Object)
-
- // Dispatched behavior.
- DECLARE_PRINTER(JSIteratorResult)
- DECLARE_VERIFIER(JSIteratorResult)
-
- DECLARE_CAST(JSIteratorResult)
-
- static const int kValueOffset = JSObject::kHeaderSize;
- static const int kDoneOffset = kValueOffset + kPointerSize;
- static const int kSize = kDoneOffset + kPointerSize;
-
- // Indices of in-object properties.
- static const int kValueIndex = 0;
- static const int kDoneIndex = 1;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSIteratorResult);
-};
-
-
// Base class for both JSWeakMap and JSWeakSet
class JSWeakCollection: public JSObject {
public:
@@ -10195,11 +10285,26 @@ class JSRegExpResult: public JSArray {
};
+// An accessor must have a getter, but can have no setter.
+//
+// When setting a property, V8 searches accessors in prototypes.
+// If an accessor was found and it does not have a setter,
+// the request is ignored.
+//
+// If the accessor in the prototype has the READ_ONLY property attribute, then
+// a new value is added to the derived object when the property is set.
+// This shadows the accessor in the prototype.
class AccessorInfo: public Struct {
public:
DECL_ACCESSORS(name, Object)
DECL_INT_ACCESSORS(flag)
DECL_ACCESSORS(expected_receiver_type, Object)
+ DECL_ACCESSORS(getter, Object)
+ DECL_ACCESSORS(setter, Object)
+ DECL_ACCESSORS(data, Object)
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(AccessorInfo)
inline bool all_can_read();
inline void set_all_can_read(bool value);
@@ -10233,7 +10338,11 @@ class AccessorInfo: public Struct {
static const int kNameOffset = HeapObject::kHeaderSize;
static const int kFlagOffset = kNameOffset + kPointerSize;
static const int kExpectedReceiverTypeOffset = kFlagOffset + kPointerSize;
- static const int kSize = kExpectedReceiverTypeOffset + kPointerSize;
+ static const int kGetterOffset = kExpectedReceiverTypeOffset + kPointerSize;
+ static const int kSetterOffset = kGetterOffset + kPointerSize;
+ static const int kDataOffset = kSetterOffset + kPointerSize;
+ static const int kSize = kDataOffset + kPointerSize;
+
private:
inline bool HasExpectedReceiverType();
@@ -10248,39 +10357,6 @@ class AccessorInfo: public Struct {
};
-// An accessor must have a getter, but can have no setter.
-//
-// When setting a property, V8 searches accessors in prototypes.
-// If an accessor was found and it does not have a setter,
-// the request is ignored.
-//
-// If the accessor in the prototype has the READ_ONLY property attribute, then
-// a new value is added to the derived object when the property is set.
-// This shadows the accessor in the prototype.
-class ExecutableAccessorInfo: public AccessorInfo {
- public:
- DECL_ACCESSORS(getter, Object)
- DECL_ACCESSORS(setter, Object)
- DECL_ACCESSORS(data, Object)
-
- DECLARE_CAST(ExecutableAccessorInfo)
-
- // Dispatched behavior.
- DECLARE_PRINTER(ExecutableAccessorInfo)
- DECLARE_VERIFIER(ExecutableAccessorInfo)
-
- static const int kGetterOffset = AccessorInfo::kSize;
- static const int kSetterOffset = kGetterOffset + kPointerSize;
- static const int kDataOffset = kSetterOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
-
- static void ClearSetter(Handle<ExecutableAccessorInfo> info);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ExecutableAccessorInfo);
-};
-
-
// Support for JavaScript accessors: A pair of a getter and a setter. Each
// accessor can either be
// * a pointer to a JavaScript function or proxy: a real accessor
@@ -10300,7 +10376,8 @@ class AccessorPair: public Struct {
inline void set(AccessorComponent component, Object* value);
// Note: Returns undefined instead in case of a hole.
- Object* GetComponent(AccessorComponent component);
+ static Handle<Object> GetComponent(Handle<AccessorPair> accessor_pair,
+ AccessorComponent component);
// Set both components, skipping arguments which are a JavaScript null.
inline void SetComponents(Object* getter, Object* setter);
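
An AccessorPair holds a getter slot and a setter slot; a missing component is stored as the hole and surfaced to callers as undefined, which the new handle-returning GetComponent preserves. A sketch of the component logic with plain pointers standing in for heap values; Value, the_hole, and undefined here are stand-ins:

#include <cstdio>

enum AccessorComponent { ACCESSOR_GETTER, ACCESSOR_SETTER };

struct Value { const char* name; };
static Value the_hole{"<hole>"};
static Value undefined{"undefined"};

struct AccessorPair {
  Value* getter = &the_hole;
  Value* setter = &the_hole;

  Value* get(AccessorComponent c) const {
    return c == ACCESSOR_GETTER ? getter : setter;
  }
  // Note: returns undefined instead in case of a hole.
  static Value* GetComponent(const AccessorPair* pair, AccessorComponent c) {
    Value* v = pair->get(c);
    return v == &the_hole ? &undefined : v;
  }
};

int main() {
  Value get_fn{"function get x()"};
  AccessorPair pair;
  pair.getter = &get_fn;
  std::printf("getter: %s\n",
              AccessorPair::GetComponent(&pair, ACCESSOR_GETTER)->name);
  std::printf("setter: %s\n",
              AccessorPair::GetComponent(&pair, ACCESSOR_SETTER)->name);
  return 0;
}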
@@ -10418,15 +10495,16 @@ class CallHandlerInfo: public Struct {
class TemplateInfo: public Struct {
public:
DECL_ACCESSORS(tag, Object)
- inline int number_of_properties() const;
- inline void set_number_of_properties(int value);
+ DECL_ACCESSORS(serial_number, Object)
+ DECL_INT_ACCESSORS(number_of_properties)
DECL_ACCESSORS(property_list, Object)
DECL_ACCESSORS(property_accessors, Object)
DECLARE_VERIFIER(TemplateInfo)
static const int kTagOffset = HeapObject::kHeaderSize;
- static const int kNumberOfProperties = kTagOffset + kPointerSize;
+ static const int kSerialNumberOffset = kTagOffset + kPointerSize;
+ static const int kNumberOfProperties = kSerialNumberOffset + kPointerSize;
static const int kPropertyListOffset = kNumberOfProperties + kPointerSize;
static const int kPropertyAccessorsOffset =
kPropertyListOffset + kPointerSize;
@@ -10441,7 +10519,6 @@ class TemplateInfo: public Struct {
class FunctionTemplateInfo: public TemplateInfo {
public:
- DECL_ACCESSORS(serial_number, Object)
DECL_ACCESSORS(call_code, Object)
DECL_ACCESSORS(prototype_template, Object)
DECL_ACCESSORS(parent_template, Object)
@@ -10475,8 +10552,7 @@ class FunctionTemplateInfo: public TemplateInfo {
DECLARE_PRINTER(FunctionTemplateInfo)
DECLARE_VERIFIER(FunctionTemplateInfo)
- static const int kSerialNumberOffset = TemplateInfo::kHeaderSize;
- static const int kCallCodeOffset = kSerialNumberOffset + kPointerSize;
+ static const int kCallCodeOffset = TemplateInfo::kHeaderSize;
static const int kPrototypeTemplateOffset =
kCallCodeOffset + kPointerSize;
static const int kParentTemplateOffset =
@@ -10545,30 +10621,33 @@ class DebugInfo: public Struct {
DECL_ACCESSORS(shared, SharedFunctionInfo)
// Code object for the patched code. This code object is the code object
// currently active for the function.
- DECL_ACCESSORS(code, Code)
+ DECL_ACCESSORS(abstract_code, AbstractCode)
// Fixed array holding status information for each active break point.
DECL_ACCESSORS(break_points, FixedArray)
- // Check if there is a break point at a code position.
- bool HasBreakPoint(int code_position);
- // Get the break point info object for a code position.
- Object* GetBreakPointInfo(int code_position);
+ // Check if there is a break point at a code offset.
+ bool HasBreakPoint(int code_offset);
+ // Get the break point info object for a code offset.
+ Object* GetBreakPointInfo(int code_offset);
// Clear a break point.
- static void ClearBreakPoint(Handle<DebugInfo> debug_info,
- int code_position,
+ static void ClearBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
Handle<Object> break_point_object);
// Set a break point.
- static void SetBreakPoint(Handle<DebugInfo> debug_info, int code_position,
+ static void SetBreakPoint(Handle<DebugInfo> debug_info, int code_offset,
int source_position, int statement_position,
Handle<Object> break_point_object);
- // Get the break point objects for a code position.
- Handle<Object> GetBreakPointObjects(int code_position);
+ // Get the break point objects for a code offset.
+ Handle<Object> GetBreakPointObjects(int code_offset);
// Find the break point info holding this break point object.
static Handle<Object> FindBreakPointInfo(Handle<DebugInfo> debug_info,
Handle<Object> break_point_object);
// Get the number of break points for this function.
int GetBreakPointCount();
+ static Smi* uninitialized() { return Smi::FromInt(0); }
+
+ inline BytecodeArray* original_bytecode_array();
+
DECLARE_CAST(DebugInfo)
// Dispatched behavior.
@@ -10576,8 +10655,8 @@ class DebugInfo: public Struct {
DECLARE_VERIFIER(DebugInfo)
static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
- static const int kCodeIndex = kSharedFunctionInfoIndex + kPointerSize;
- static const int kBreakPointsStateIndex = kCodeIndex + kPointerSize;
+ static const int kAbstractCodeIndex = kSharedFunctionInfoIndex + kPointerSize;
+ static const int kBreakPointsStateIndex = kAbstractCodeIndex + kPointerSize;
static const int kSize = kBreakPointsStateIndex + kPointerSize;
static const int kEstimatedNofBreakPointsInFunction = 16;
@@ -10585,8 +10664,8 @@ class DebugInfo: public Struct {
private:
static const int kNoBreakPointInfo = -1;
- // Lookup the index in the break_points array for a code position.
- int GetBreakPointInfoIndex(int code_position);
+ // Lookup the index in the break_points array for a code offset.
+ int GetBreakPointInfoIndex(int code_offset);
DISALLOW_IMPLICIT_CONSTRUCTORS(DebugInfo);
};
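
The break_points array maps code offsets to BreakPointInfo entries, and the private GetBreakPointInfoIndex returns kNoBreakPointInfo (-1) when no entry exists for an offset. A reduced sketch of that lookup over a std::vector stand-in:

#include <cstdio>
#include <vector>

constexpr int kNoBreakPointInfo = -1;

// Stand-in for the BreakPointInfo struct: each entry records the code
// offset it applies to.
struct BreakPointInfoStub {
  int code_offset;
  int source_position;
};

int GetBreakPointInfoIndex(const std::vector<BreakPointInfoStub>& break_points,
                           int code_offset) {
  for (size_t i = 0; i < break_points.size(); i++) {
    if (break_points[i].code_offset == code_offset)
      return static_cast<int>(i);
  }
  return kNoBreakPointInfo;
}

int main() {
  std::vector<BreakPointInfoStub> points = {{12, 100}, {40, 230}};
  std::printf("%d %d\n", GetBreakPointInfoIndex(points, 40),
              GetBreakPointInfoIndex(points, 7));  // 1 -1
  return 0;
}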
@@ -10597,8 +10676,8 @@ class DebugInfo: public Struct {
// position with one or more break points.
class BreakPointInfo: public Struct {
public:
- // The position in the code for the break point.
- DECL_INT_ACCESSORS(code_position)
+ // The code offset for the break point.
+ DECL_INT_ACCESSORS(code_offset)
// The position in the source for the break position.
DECL_INT_ACCESSORS(source_position)
// The position in the source for the last statement before this break
@@ -10616,7 +10695,7 @@ class BreakPointInfo: public Struct {
// Check if break point info has this break point object.
static bool HasBreakPointObject(Handle<BreakPointInfo> info,
Handle<Object> break_point_object);
- // Get the number of break points for this code position.
+ // Get the number of break points for this code offset.
int GetBreakPointCount();
DECLARE_CAST(BreakPointInfo)
@@ -10625,8 +10704,8 @@ class BreakPointInfo: public Struct {
DECLARE_PRINTER(BreakPointInfo)
DECLARE_VERIFIER(BreakPointInfo)
- static const int kCodePositionIndex = Struct::kHeaderSize;
- static const int kSourcePositionIndex = kCodePositionIndex + kPointerSize;
+ static const int kCodeOffsetIndex = Struct::kHeaderSize;
+ static const int kSourcePositionIndex = kCodeOffsetIndex + kPointerSize;
static const int kStatementPositionIndex =
kSourcePositionIndex + kPointerSize;
static const int kBreakPointObjectsIndex =
@@ -10654,6 +10733,7 @@ class BreakPointInfo: public Struct {
V(kDebug, "debug", "(Debugger)") \
V(kCompilationCache, "compilationcache", "(Compilation cache)") \
V(kHandleScope, "handlescope", "(Handle scope)") \
+ V(kDispatchTable, "dispatchtable", "(Dispatch table)") \
V(kBuiltins, "builtins", "(Builtins)") \
V(kGlobalHandles, "globalhandles", "(Global handles)") \
V(kEternalHandles, "eternalhandles", "(Eternal handles)") \
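
The objects.h hunks above retarget the debug API from code positions to code offsets and from Code to AbstractCode, so the same DebugInfo machinery can serve bytecode. A minimal sketch of a lookup against the offset-based interface, assuming the debugger already holds a DebugInfo handle and a code_offset (both hypothetical inputs here):

  // Sketch only; debug_info and code_offset are assumed inputs.
  if (debug_info->HasBreakPoint(code_offset)) {
    Handle<Object> break_points =
        debug_info->GetBreakPointObjects(code_offset);
    // ... dispatch to each attached break point object ...
  }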
diff --git a/deps/v8/src/optimizing-compile-dispatcher.cc b/deps/v8/src/optimizing-compile-dispatcher.cc
index 7062db640d..4836b9bebb 100644
--- a/deps/v8/src/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/optimizing-compile-dispatcher.cc
@@ -7,6 +7,7 @@
#include "src/base/atomicops.h"
#include "src/full-codegen/full-codegen.h"
#include "src/isolate.h"
+#include "src/tracing/trace-event.h"
#include "src/v8.h"
namespace v8 {
@@ -59,6 +60,7 @@ class OptimizingCompileDispatcher::CompileTask : public v8::Task {
isolate_->optimizing_compile_dispatcher();
{
TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
+ TRACE_EVENT0("v8", "V8.RecompileConcurrent");
if (dispatcher->recompilation_delay_ != 0) {
base::OS::Sleep(base::TimeDelta::FromMilliseconds(
@@ -244,9 +246,9 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
}
DisposeOptimizedCompileJob(job, false);
} else {
- Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
+ MaybeHandle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
function->ReplaceCode(code.is_null() ? function->shared()->code()
- : *code);
+ : *code.ToHandleChecked());
}
}
}
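
Since GetConcurrentlyOptimizedCode now yields a MaybeHandle<Code>, the caller has to unwrap it explicitly. The ternary in the hunk above is equivalent to this sketch of the usual ToHandle idiom (same names as in the hunk):

  MaybeHandle<Code> maybe_code = Compiler::GetConcurrentlyOptimizedCode(job);
  Handle<Code> code;
  if (maybe_code.ToHandle(&code)) {
    function->ReplaceCode(*code);  // optimized code was produced
  } else {
    // Compilation bailed out; fall back to the unoptimized shared code.
    function->ReplaceCode(function->shared()->code());
  }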
diff --git a/deps/v8/src/ostreams.cc b/deps/v8/src/ostreams.cc
index a7a67f5d2f..120db257cd 100644
--- a/deps/v8/src/ostreams.cc
+++ b/deps/v8/src/ostreams.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/ostreams.h"
+#include "src/objects.h"
#if V8_OS_WIN
#if _MSC_VER < 1900
@@ -60,6 +61,16 @@ std::ostream& PrintUC16(std::ostream& os, uint16_t c, bool (*pred)(uint16_t)) {
return os << buf;
}
+
+std::ostream& PrintUC32(std::ostream& os, int32_t c, bool (*pred)(uint16_t)) {
+ if (c <= String::kMaxUtf16CodeUnit) {
+ return PrintUC16(os, static_cast<uint16_t>(c), pred);
+ }
+ char buf[13];
+ snprintf(buf, sizeof(buf), "\\u{%06x}", c);
+ return os << buf;
+}
+
} // namespace
@@ -81,5 +92,10 @@ std::ostream& operator<<(std::ostream& os, const AsUC16& c) {
return PrintUC16(os, c.value, IsPrint);
}
+
+std::ostream& operator<<(std::ostream& os, const AsUC32& c) {
+ return PrintUC32(os, c.value, IsPrint);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ostreams.h b/deps/v8/src/ostreams.h
index 56f4aa7e45..1c2f38a153 100644
--- a/deps/v8/src/ostreams.h
+++ b/deps/v8/src/ostreams.h
@@ -50,6 +50,12 @@ struct AsUC16 {
};
+struct AsUC32 {
+ explicit AsUC32(int32_t v) : value(v) {}
+ int32_t value;
+};
+
+
struct AsReversiblyEscapedUC16 {
explicit AsReversiblyEscapedUC16(uint16_t v) : value(v) {}
uint16_t value;
@@ -73,6 +79,10 @@ std::ostream& operator<<(std::ostream& os, const AsEscapedUC16ForJSON& c);
// of printable ASCII range.
std::ostream& operator<<(std::ostream& os, const AsUC16& c);
+// Writes the given 32-bit code point to the output, escaping everything
+// outside of the printable ASCII range.
+std::ostream& operator<<(std::ostream& os, const AsUC32& c);
+
} // namespace internal
} // namespace v8
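
AsUC32 widens the AsUC16 escaping helper from UTF-16 code units to whole code points: anything up to String::kMaxUtf16CodeUnit defers to PrintUC16, while supplementary-plane values are emitted as a brace-delimited escape. A small usage sketch, assuming an ostream os:

  os << AsUC32('A');      // printable ASCII is passed through: A
  os << AsUC32(0x1F600);  // beyond the BMP: printed as \u{01f600}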
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index fbab2056f8..a5daeb3b72 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -4,3 +4,4 @@ adamk@chromium.org
littledan@chromium.org
marja@chromium.org
rossberg@chromium.org
+
diff --git a/deps/v8/src/parsing/expression-classifier.h b/deps/v8/src/parsing/expression-classifier.h
index 96ccf871f4..fa1a2f97a4 100644
--- a/deps/v8/src/parsing/expression-classifier.h
+++ b/deps/v8/src/parsing/expression-classifier.h
@@ -13,6 +13,7 @@ namespace v8 {
namespace internal {
+template <typename Traits>
class ExpressionClassifier {
public:
struct Error {
@@ -55,15 +56,25 @@ class ExpressionClassifier {
enum FunctionProperties { NonSimpleParameter = 1 << 0 };
- ExpressionClassifier()
- : invalid_productions_(0),
+ explicit ExpressionClassifier(const Traits* t)
+ : zone_(t->zone()),
+ non_patterns_to_rewrite_(t->GetNonPatternList()),
+ invalid_productions_(0),
function_properties_(0),
- duplicate_finder_(nullptr) {}
+ duplicate_finder_(nullptr) {
+ non_pattern_begin_ = non_patterns_to_rewrite_->length();
+ }
- explicit ExpressionClassifier(DuplicateFinder* duplicate_finder)
- : invalid_productions_(0),
+ ExpressionClassifier(const Traits* t, DuplicateFinder* duplicate_finder)
+ : zone_(t->zone()),
+ non_patterns_to_rewrite_(t->GetNonPatternList()),
+ invalid_productions_(0),
function_properties_(0),
- duplicate_finder_(duplicate_finder) {}
+ duplicate_finder_(duplicate_finder) {
+ non_pattern_begin_ = non_patterns_to_rewrite_->length();
+ }
+
+ ~ExpressionClassifier() { Discard(); }
bool is_valid(unsigned productions) const {
return (invalid_productions_ & productions) == 0;
@@ -281,12 +292,14 @@ class ExpressionClassifier {
assignment_pattern_error_ = Error();
}
- void Accumulate(const ExpressionClassifier& inner,
- unsigned productions = StandardProductions) {
+ void Accumulate(ExpressionClassifier* inner,
+ unsigned productions = StandardProductions,
+ bool merge_non_patterns = true) {
+ if (merge_non_patterns) MergeNonPatterns(inner);
// Propagate errors from inner, but don't overwrite already recorded
// errors.
unsigned non_arrow_inner_invalid_productions =
- inner.invalid_productions_ & ~ArrowFormalParametersProduction;
+ inner->invalid_productions_ & ~ArrowFormalParametersProduction;
if (non_arrow_inner_invalid_productions == 0) return;
unsigned non_arrow_productions =
productions & ~ArrowFormalParametersProduction;
@@ -296,27 +309,27 @@ class ExpressionClassifier {
if (errors != 0) {
invalid_productions_ |= errors;
if (errors & ExpressionProduction)
- expression_error_ = inner.expression_error_;
+ expression_error_ = inner->expression_error_;
if (errors & FormalParameterInitializerProduction)
formal_parameter_initializer_error_ =
- inner.formal_parameter_initializer_error_;
+ inner->formal_parameter_initializer_error_;
if (errors & BindingPatternProduction)
- binding_pattern_error_ = inner.binding_pattern_error_;
+ binding_pattern_error_ = inner->binding_pattern_error_;
if (errors & AssignmentPatternProduction)
- assignment_pattern_error_ = inner.assignment_pattern_error_;
+ assignment_pattern_error_ = inner->assignment_pattern_error_;
if (errors & DistinctFormalParametersProduction)
duplicate_formal_parameter_error_ =
- inner.duplicate_formal_parameter_error_;
+ inner->duplicate_formal_parameter_error_;
if (errors & StrictModeFormalParametersProduction)
strict_mode_formal_parameter_error_ =
- inner.strict_mode_formal_parameter_error_;
+ inner->strict_mode_formal_parameter_error_;
if (errors & StrongModeFormalParametersProduction)
strong_mode_formal_parameter_error_ =
- inner.strong_mode_formal_parameter_error_;
+ inner->strong_mode_formal_parameter_error_;
if (errors & LetPatternProduction)
- let_pattern_error_ = inner.let_pattern_error_;
+ let_pattern_error_ = inner->let_pattern_error_;
if (errors & CoverInitializedNameProduction)
- cover_initialized_name_error_ = inner.cover_initialized_name_error_;
+ cover_initialized_name_error_ = inner->cover_initialized_name_error_;
}
// As an exception to the above, the result continues to be a valid arrow
@@ -325,16 +338,31 @@ class ExpressionClassifier {
is_valid_arrow_formal_parameters()) {
// Also copy function properties if expecting an arrow function
// parameter.
- function_properties_ |= inner.function_properties_;
+ function_properties_ |= inner->function_properties_;
- if (!inner.is_valid_binding_pattern()) {
+ if (!inner->is_valid_binding_pattern()) {
invalid_productions_ |= ArrowFormalParametersProduction;
- arrow_formal_parameters_error_ = inner.binding_pattern_error_;
+ arrow_formal_parameters_error_ = inner->binding_pattern_error_;
}
}
}
+ V8_INLINE int GetNonPatternBegin() const { return non_pattern_begin_; }
+
+ V8_INLINE void Discard() {
+ DCHECK_LE(non_pattern_begin_, non_patterns_to_rewrite_->length());
+ non_patterns_to_rewrite_->Rewind(non_pattern_begin_);
+ }
+
+ V8_INLINE void MergeNonPatterns(ExpressionClassifier* inner) {
+ DCHECK_LE(non_pattern_begin_, inner->non_pattern_begin_);
+ inner->non_pattern_begin_ = inner->non_patterns_to_rewrite_->length();
+ }
+
private:
+ Zone* zone_;
+ ZoneList<typename Traits::Type::Expression>* non_patterns_to_rewrite_;
+ int non_pattern_begin_;
unsigned invalid_productions_;
unsigned function_properties_;
Error expression_error_;
@@ -350,6 +378,7 @@ class ExpressionClassifier {
DuplicateFinder* duplicate_finder_;
};
+
} // namespace internal
} // namespace v8
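
The reworked classifier threads a watermark (non_pattern_begin_) into the parser's shared non_patterns_to_rewrite_ list. Accumulate(inner) merges the inner classifier's pending entries into the outer scope by default, and the new destructor rewinds whatever was never merged. A rough sketch of the intended nesting discipline (not taken verbatim from the patch):

  ExpressionClassifier outer(this);   // watermark = current list length
  {
    ExpressionClassifier inner(this);
    // ... parse a subexpression; it may queue non-patterns ...
    outer.Accumulate(&inner);         // merges errors and, by default,
                                      // adopts inner's pending non-patterns
  }                                   // ~inner runs Discard(): any entries
                                      // still above its watermark are dropped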
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index d9da445977..6be19b397c 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -88,6 +88,7 @@ class ParserBase : public Traits {
typedef typename Traits::Type::Literal LiteralT;
typedef typename Traits::Type::ObjectLiteralProperty ObjectLiteralPropertyT;
typedef typename Traits::Type::StatementList StatementListT;
+ typedef typename Traits::Type::ExpressionClassifier ExpressionClassifier;
ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
v8::Extension* extension, AstValueFactory* ast_value_factory,
@@ -116,7 +117,8 @@ class ParserBase : public Traits {
allow_strong_mode_(false),
allow_legacy_const_(true),
allow_harmony_do_expressions_(false),
- allow_harmony_function_name_(false) {}
+ allow_harmony_function_name_(false),
+ allow_harmony_function_sent_(false) {}
#define ALLOW_ACCESSORS(name) \
bool allow_##name() const { return allow_##name##_; } \
@@ -134,6 +136,7 @@ class ParserBase : public Traits {
ALLOW_ACCESSORS(legacy_const);
ALLOW_ACCESSORS(harmony_do_expressions);
ALLOW_ACCESSORS(harmony_function_name);
+ ALLOW_ACCESSORS(harmony_function_sent);
#undef ALLOW_ACCESSORS
uintptr_t stack_limit() const { return stack_limit_; }
@@ -242,11 +245,37 @@ class ParserBase : public Traits {
return destructuring_assignments_to_rewrite_;
}
+ List<ExpressionT>& expressions_in_tail_position() {
+ return expressions_in_tail_position_;
+ }
+ void AddExpressionInTailPosition(ExpressionT expression) {
+ if (collect_expressions_in_tail_position_) {
+ expressions_in_tail_position_.Add(expression);
+ }
+ }
+
+ bool collect_expressions_in_tail_position() const {
+ return collect_expressions_in_tail_position_;
+ }
+ void set_collect_expressions_in_tail_position(bool collect) {
+ collect_expressions_in_tail_position_ = collect;
+ }
+
+ ZoneList<ExpressionT>* non_patterns_to_rewrite() {
+ return &non_patterns_to_rewrite_;
+ }
+
+ private:
void AddDestructuringAssignment(DestructuringAssignment pair) {
destructuring_assignments_to_rewrite_.Add(pair);
}
- private:
+ V8_INLINE Scope* scope() { return *scope_stack_; }
+
+ void AddNonPatternForRewriting(ExpressionT expr) {
+ non_patterns_to_rewrite_.Add(expr, (*scope_stack_)->zone());
+ }
+
// Used to assign an index to each literal that needs materialization in
// the function. Includes regexp literals, and boilerplate for object and
// array literals.
@@ -276,12 +305,14 @@ class ParserBase : public Traits {
Scope* outer_scope_;
List<DestructuringAssignment> destructuring_assignments_to_rewrite_;
-
- void RewriteDestructuringAssignments();
+ List<ExpressionT> expressions_in_tail_position_;
+ bool collect_expressions_in_tail_position_;
+ ZoneList<ExpressionT> non_patterns_to_rewrite_;
typename Traits::Type::Factory* factory_;
friend class ParserTraits;
+ friend class PreParserTraits;
friend class Checkpoint;
};
@@ -436,6 +467,9 @@ class ParserBase : public Traits {
scanner()->is_next_contextual_keyword(keyword);
}
+ void ExpectMetaProperty(Vector<const char> property_name,
+ const char* full_name, int pos, bool* ok);
+
void ExpectContextualKeyword(Vector<const char> keyword, bool* ok) {
Expect(Token::IDENTIFIER, ok);
if (!*ok) return;
@@ -461,6 +495,10 @@ class ParserBase : public Traits {
return false;
}
+ bool PeekInOrOf() {
+ return peek() == Token::IN || PeekContextualKeyword(CStrVector("of"));
+ }
+
// Checks whether an octal literal was last seen between beg_pos and end_pos.
// If so, reports an error. Only called for strict mode and template strings.
void CheckOctalLiteral(int beg_pos, int end_pos,
@@ -563,8 +601,8 @@ class ParserBase : public Traits {
Scanner::Location location, Token::Value token,
MessageTemplate::Template message = MessageTemplate::kUnexpectedToken);
-
- void ReportClassifierError(const ExpressionClassifier::Error& error) {
+ void ReportClassifierError(
+ const typename ExpressionClassifier::Error& error) {
Traits::ReportMessageAt(error.location, error.message, error.arg,
error.type);
}
@@ -642,7 +680,7 @@ class ParserBase : public Traits {
// neither a valid binding pattern nor a valid parenthesized formal
// parameter list, show the "arrow formal parameters" error if the formals
// started with a parenthesis, and the binding pattern error otherwise.
- const ExpressionClassifier::Error& error =
+ const typename ExpressionClassifier::Error& error =
parenthesized_formals ? classifier->arrow_formal_parameters_error()
: classifier->binding_pattern_error();
ReportClassifierError(error);
@@ -715,10 +753,6 @@ class ParserBase : public Traits {
}
IdentifierT ParseIdentifierName(bool* ok);
- // Parses an identifier and determines whether or not it is 'get' or 'set'.
- IdentifierT ParseIdentifierNameOrGetOrSet(bool* is_get, bool* is_set,
- bool* ok);
-
ExpressionT ParseRegExpLiteral(bool seen_equal,
ExpressionClassifier* classifier, bool* ok);
@@ -728,12 +762,9 @@ class ParserBase : public Traits {
ExpressionT ParseExpression(bool accept_IN, bool* ok);
ExpressionT ParseExpression(bool accept_IN, ExpressionClassifier* classifier,
bool* ok);
- ExpressionT ParseExpression(bool accept_IN, int flags,
- ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseArrayLiteral(ExpressionClassifier* classifier, bool* ok);
ExpressionT ParsePropertyName(IdentifierT* name, bool* is_get, bool* is_set,
- bool* is_static, bool* is_computed_name,
- bool* is_identifier, bool* is_escaped_keyword,
+ bool* is_computed_name,
ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseObjectLiteral(ExpressionClassifier* classifier, bool* ok);
ObjectLiteralPropertyT ParsePropertyDefinition(
@@ -744,21 +775,9 @@ class ParserBase : public Traits {
Scanner::Location* first_spread_pos, ExpressionClassifier* classifier,
bool* ok);
- enum AssignmentExpressionFlags {
- kIsNormalAssignment = 0,
- kIsPossiblePatternElement = 1 << 0,
- kIsPossibleArrowFormals = 1 << 1
- };
-
- ExpressionT ParseAssignmentExpression(bool accept_IN, int flags,
- ExpressionClassifier* classifier,
- bool* ok);
ExpressionT ParseAssignmentExpression(bool accept_IN,
ExpressionClassifier* classifier,
- bool* ok) {
- return ParseAssignmentExpression(accept_IN, kIsNormalAssignment, classifier,
- ok);
- }
+ bool* ok);
ExpressionT ParseYieldExpression(ExpressionClassifier* classifier, bool* ok);
ExpressionT ParseConditionalExpression(bool accept_IN,
ExpressionClassifier* classifier,
@@ -794,9 +813,9 @@ class ParserBase : public Traits {
ExpressionClassifier* classifier, bool* ok);
void ParseFormalParameterList(FormalParametersT* parameters,
ExpressionClassifier* classifier, bool* ok);
- void CheckArityRestrictions(
- int param_count, FunctionLiteral::ArityRestriction arity_restriction,
- bool has_rest, int formals_start_pos, int formals_end_pos, bool* ok);
+ void CheckArityRestrictions(int param_count, FunctionKind function_type,
+ bool has_rest, int formals_start_pos,
+ int formals_end_pos, bool* ok);
bool IsNextLetKeyword();
@@ -829,6 +848,10 @@ class ParserBase : public Traits {
return true;
}
+ bool IsValidPattern(ExpressionT expression) {
+ return expression->IsObjectLiteral() || expression->IsArrayLiteral();
+ }
+
// Keep track of eval() calls since they disable all local variable
// optimizations. This checks if expression is an eval call, and if yes,
// forwards the information to scope.
@@ -932,9 +955,9 @@ class ParserBase : public Traits {
bool allow_legacy_const_;
bool allow_harmony_do_expressions_;
bool allow_harmony_function_name_;
+ bool allow_harmony_function_sent_;
};
-
template <class Traits>
ParserBase<Traits>::FunctionState::FunctionState(
FunctionState** function_state_stack, Scope** scope_stack, Scope* scope,
@@ -950,6 +973,8 @@ ParserBase<Traits>::FunctionState::FunctionState(
outer_function_state_(*function_state_stack),
scope_stack_(scope_stack),
outer_scope_(*scope_stack),
+ collect_expressions_in_tail_position_(true),
+ non_patterns_to_rewrite_(0, scope->zone()),
factory_(factory) {
*scope_stack_ = scope;
*function_state_stack = this;
@@ -967,7 +992,6 @@ template <class Traits>
void ParserBase<Traits>::GetUnexpectedTokenMessage(
Token::Value token, MessageTemplate::Template* message, const char** arg,
MessageTemplate::Template default_) {
- // Four of the tokens are treated specially
switch (token) {
case Token::EOS:
*message = MessageTemplate::kUnexpectedEOS;
@@ -1037,7 +1061,7 @@ void ParserBase<Traits>::ReportUnexpectedTokenAt(
template <class Traits>
typename ParserBase<Traits>::IdentifierT ParserBase<Traits>::ParseIdentifier(
AllowRestrictedIdentifiers allow_restricted_identifiers, bool* ok) {
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
auto result = ParseAndClassifyIdentifier(&classifier, ok);
if (!*ok) return Traits::EmptyIdentifier();
@@ -1091,10 +1115,8 @@ ParserBase<Traits>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
scanner()->location(), MessageTemplate::kStrongUndefined);
if (is_strong(language_mode())) {
// TODO(dslomov): allow 'undefined' in nested patterns.
- classifier->RecordBindingPatternError(
- scanner()->location(), MessageTemplate::kStrongUndefined);
- classifier->RecordAssignmentPatternError(
- scanner()->location(), MessageTemplate::kStrongUndefined);
+ classifier->RecordPatternError(scanner()->location(),
+ MessageTemplate::kStrongUndefined);
}
}
@@ -1116,7 +1138,9 @@ ParserBase<Traits>::ParseAndClassifyIdentifier(ExpressionClassifier* classifier,
*ok = false;
return Traits::EmptyIdentifier();
}
- if (next == Token::LET) {
+ if (next == Token::LET ||
+ (next == Token::ESCAPED_STRICT_RESERVED_WORD &&
+ scanner()->is_literal_contextual_keyword(CStrVector("let")))) {
classifier->RecordLetPatternError(scanner()->location(),
MessageTemplate::kLetInLexicalBinding);
}
@@ -1172,18 +1196,6 @@ ParserBase<Traits>::ParseIdentifierName(bool* ok) {
template <class Traits>
-typename ParserBase<Traits>::IdentifierT
-ParserBase<Traits>::ParseIdentifierNameOrGetOrSet(bool* is_get,
- bool* is_set,
- bool* ok) {
- IdentifierT result = ParseIdentifierName(ok);
- if (!*ok) return Traits::EmptyIdentifier();
- scanner()->IsGetOrSet(is_get, is_set);
- return result;
-}
-
-
-template <class Traits>
typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseRegExpLiteral(
bool seen_equal, ExpressionClassifier* classifier, bool* ok) {
int pos = peek_position();
@@ -1269,8 +1281,7 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
return this->ExpressionFromLiteral(Next(), beg_pos, scanner(), factory());
case Token::SMI:
case Token::NUMBER:
- classifier->RecordBindingPatternError(
- scanner()->peek_location(), MessageTemplate::kUnexpectedTokenNumber);
+ BindingPatternUnexpectedToken(classifier);
return this->ExpressionFromLiteral(Next(), beg_pos, scanner(), factory());
case Token::IDENTIFIER:
@@ -1286,8 +1297,7 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
}
case Token::STRING: {
- classifier->RecordBindingPatternError(
- scanner()->peek_location(), MessageTemplate::kUnexpectedTokenString);
+ BindingPatternUnexpectedToken(classifier);
Consume(Token::STRING);
return this->ExpressionFromString(beg_pos, scanner(), factory());
}
@@ -1323,7 +1333,9 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
if (!classifier->is_valid_binding_pattern()) {
ArrowFormalParametersUnexpectedToken(classifier);
}
- BindingPatternUnexpectedToken(classifier);
+ classifier->RecordPatternError(scanner()->peek_location(),
+ MessageTemplate::kUnexpectedToken,
+ Token::String(Token::LPAREN));
Consume(Token::LPAREN);
if (Check(Token::RPAREN)) {
// ()=>x. The continuation that looks for the => is in
@@ -1331,20 +1343,23 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
classifier->RecordExpressionError(scanner()->location(),
MessageTemplate::kUnexpectedToken,
Token::String(Token::RPAREN));
- classifier->RecordBindingPatternError(scanner()->location(),
- MessageTemplate::kUnexpectedToken,
- Token::String(Token::RPAREN));
return factory()->NewEmptyParentheses(beg_pos);
} else if (Check(Token::ELLIPSIS)) {
// (...x)=>x. The continuation that looks for the => is in
// ParseAssignmentExpression.
int ellipsis_pos = position();
+ int expr_pos = peek_position();
classifier->RecordExpressionError(scanner()->location(),
MessageTemplate::kUnexpectedToken,
Token::String(Token::ELLIPSIS));
classifier->RecordNonSimpleParameter();
ExpressionT expr =
this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+ if (!this->IsIdentifier(expr) && !IsValidPattern(expr)) {
+ classifier->RecordArrowFormalParametersError(
+ Scanner::Location(ellipsis_pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidRestParameter);
+ }
if (peek() == Token::COMMA) {
ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kParamAfterRest);
@@ -1352,17 +1367,13 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
return this->EmptyExpression();
}
Expect(Token::RPAREN, CHECK_OK);
- return factory()->NewSpread(expr, ellipsis_pos);
+ return factory()->NewSpread(expr, ellipsis_pos, expr_pos);
}
// Heuristically try to detect immediately called functions before
// seeing the call parentheses.
parenthesized_function_ = (peek() == Token::FUNCTION);
- ExpressionT expr = this->ParseExpression(true, kIsPossibleArrowFormals,
- classifier, CHECK_OK);
+ ExpressionT expr = this->ParseExpression(true, classifier, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- if (peek() != Token::ARROW) {
- expr->set_is_parenthesized();
- }
return expr;
}
@@ -1390,9 +1401,7 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL:
- classifier->RecordBindingPatternError(
- scanner()->peek_location(),
- MessageTemplate::kUnexpectedTemplateString);
+ BindingPatternUnexpectedToken(classifier);
return this->ParseTemplateLiteral(Traits::NoTemplateTag(), beg_pos,
classifier, ok);
@@ -1423,9 +1432,9 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
template <class Traits>
typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
bool accept_IN, bool* ok) {
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
ExpressionT result = ParseExpression(accept_IN, &classifier, CHECK_OK);
- result = Traits::RewriteNonPattern(result, &classifier, CHECK_OK);
+ Traits::RewriteNonPattern(&classifier, CHECK_OK);
return result;
}
@@ -1433,21 +1442,14 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
template <class Traits>
typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
bool accept_IN, ExpressionClassifier* classifier, bool* ok) {
- return ParseExpression(accept_IN, kIsNormalAssignment, classifier, ok);
-}
-
-
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
- bool accept_IN, int flags, ExpressionClassifier* classifier, bool* ok) {
// Expression ::
// AssignmentExpression
// Expression ',' AssignmentExpression
- ExpressionClassifier binding_classifier;
- ExpressionT result = this->ParseAssignmentExpression(
- accept_IN, flags, &binding_classifier, CHECK_OK);
- classifier->Accumulate(binding_classifier,
+ ExpressionClassifier binding_classifier(this);
+ ExpressionT result =
+ this->ParseAssignmentExpression(accept_IN, &binding_classifier, CHECK_OK);
+ classifier->Accumulate(&binding_classifier,
ExpressionClassifier::AllProductions);
bool is_simple_parameter_list = this->IsIdentifier(result);
bool seen_rest = false;
@@ -1469,14 +1471,21 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
Consume(Token::ELLIPSIS);
seen_rest = is_rest = true;
}
- int pos = position();
+ int pos = position(), expr_pos = peek_position();
ExpressionT right = this->ParseAssignmentExpression(
- accept_IN, flags, &binding_classifier, CHECK_OK);
- if (is_rest) right = factory()->NewSpread(right, pos);
+ accept_IN, &binding_classifier, CHECK_OK);
+ classifier->Accumulate(&binding_classifier,
+ ExpressionClassifier::AllProductions);
+ if (is_rest) {
+ if (!this->IsIdentifier(right) && !IsValidPattern(right)) {
+ classifier->RecordArrowFormalParametersError(
+ Scanner::Location(pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidRestParameter);
+ }
+ right = factory()->NewSpread(right, pos, expr_pos);
+ }
is_simple_parameter_list =
is_simple_parameter_list && this->IsIdentifier(right);
- classifier->Accumulate(binding_classifier,
- ExpressionClassifier::AllProductions);
result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
}
if (!is_simple_parameter_list || seen_rest) {
@@ -1511,9 +1520,10 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
} else if (peek() == Token::ELLIPSIS) {
int start_pos = peek_position();
Consume(Token::ELLIPSIS);
+ int expr_pos = peek_position();
ExpressionT argument =
this->ParseAssignmentExpression(true, classifier, CHECK_OK);
- elem = factory()->NewSpread(argument, start_pos);
+ elem = factory()->NewSpread(argument, start_pos, expr_pos);
if (first_spread_index < 0) {
first_spread_index = values->length();
@@ -1534,8 +1544,10 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
MessageTemplate::kElementAfterRest);
}
} else {
- elem = this->ParseAssignmentExpression(true, kIsPossiblePatternElement,
- classifier, CHECK_OK);
+ int beg_pos = peek_position();
+ elem = this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+ CheckDestructuringElement(elem, classifier, beg_pos,
+ scanner()->location().end_pos);
}
values->Add(elem, zone_);
if (peek() != Token::RBRACK) {
@@ -1547,15 +1559,20 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
// Update the scope information before the pre-parsing bailout.
int literal_index = function_state_->NextMaterializedLiteralIndex();
- return factory()->NewArrayLiteral(values, first_spread_index, literal_index,
- is_strong(language_mode()), pos);
+ ExpressionT result =
+ factory()->NewArrayLiteral(values, first_spread_index, literal_index,
+ is_strong(language_mode()), pos);
+ if (first_spread_index >= 0) {
+ result = factory()->NewRewritableExpression(result);
+ Traits::QueueNonPatternForRewriting(result);
+ }
+ return result;
}
template <class Traits>
typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
- IdentifierT* name, bool* is_get, bool* is_set, bool* is_static,
- bool* is_computed_name, bool* is_identifier, bool* is_escaped_keyword,
+ IdentifierT* name, bool* is_get, bool* is_set, bool* is_computed_name,
ExpressionClassifier* classifier, bool* ok) {
Token::Value token = peek();
int pos = peek_position();
@@ -1588,29 +1605,19 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
case Token::LBRACK: {
*is_computed_name = true;
Consume(Token::LBRACK);
- ExpressionClassifier computed_name_classifier;
+ ExpressionClassifier computed_name_classifier(this);
ExpressionT expression =
ParseAssignmentExpression(true, &computed_name_classifier, CHECK_OK);
- expression = Traits::RewriteNonPattern(
- expression, &computed_name_classifier, CHECK_OK);
- classifier->Accumulate(computed_name_classifier,
+ Traits::RewriteNonPattern(&computed_name_classifier, CHECK_OK);
+ classifier->Accumulate(&computed_name_classifier,
ExpressionClassifier::ExpressionProductions);
Expect(Token::RBRACK, CHECK_OK);
return expression;
}
- case Token::ESCAPED_KEYWORD:
- *is_escaped_keyword = true;
- *name = ParseIdentifierNameOrGetOrSet(is_get, is_set, CHECK_OK);
- break;
-
- case Token::STATIC:
- *is_static = true;
-
- // Fall through.
default:
- *is_identifier = true;
- *name = ParseIdentifierNameOrGetOrSet(is_get, is_set, CHECK_OK);
+ *name = ParseIdentifierName(CHECK_OK);
+ scanner()->IsGetOrSet(is_get, is_set);
break;
}
@@ -1631,27 +1638,19 @@ ParserBase<Traits>::ParsePropertyDefinition(
ExpressionT value = this->EmptyExpression();
bool is_get = false;
bool is_set = false;
- bool name_is_static = false;
bool is_generator = Check(Token::MUL);
Token::Value name_token = peek();
int next_beg_pos = scanner()->peek_location().beg_pos;
int next_end_pos = scanner()->peek_location().end_pos;
- bool is_identifier = false;
- bool is_escaped_keyword = false;
- ExpressionT name_expression = ParsePropertyName(
- name, &is_get, &is_set, &name_is_static, is_computed_name, &is_identifier,
- &is_escaped_keyword, classifier,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ ExpressionT name_expression =
+ ParsePropertyName(name, &is_get, &is_set, is_computed_name, classifier,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
if (fni_ != nullptr && !*is_computed_name) {
this->PushLiteralName(fni_, *name);
}
- bool escaped_static =
- is_escaped_keyword &&
- scanner()->is_literal_contextual_keyword(CStrVector("static"));
-
if (!in_class && !is_generator) {
DCHECK(!is_static);
@@ -1663,15 +1662,18 @@ ParserBase<Traits>::ParsePropertyDefinition(
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
}
Consume(Token::COLON);
+ int beg_pos = peek_position();
value = this->ParseAssignmentExpression(
- true, kIsPossiblePatternElement, classifier,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ true, classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ CheckDestructuringElement(value, classifier, beg_pos,
+ scanner()->location().end_pos);
return factory()->NewObjectLiteralProperty(name_expression, value, false,
*is_computed_name);
}
- if ((is_identifier || is_escaped_keyword) &&
+ if (Token::IsIdentifier(name_token, language_mode(),
+ this->is_generator()) &&
(peek() == Token::COMMA || peek() == Token::RBRACE ||
peek() == Token::ASSIGN)) {
// PropertyDefinition
@@ -1680,14 +1682,6 @@ ParserBase<Traits>::ParsePropertyDefinition(
//
// CoverInitializedName
// IdentifierReference Initializer?
- if (!Token::IsIdentifier(name_token, language_mode(),
- this->is_generator())) {
- if (!escaped_static) {
- ReportUnexpectedTokenAt(scanner()->location(), name_token);
- *ok = false;
- return this->EmptyObjectLiteralProperty();
- }
- }
if (classifier->duplicate_finder() != nullptr &&
scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
classifier->RecordDuplicateFormalParameterError(scanner()->location());
@@ -1703,18 +1697,22 @@ ParserBase<Traits>::ParsePropertyDefinition(
if (peek() == Token::ASSIGN) {
Consume(Token::ASSIGN);
- ExpressionClassifier rhs_classifier;
+ ExpressionClassifier rhs_classifier(this);
ExpressionT rhs = this->ParseAssignmentExpression(
true, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- rhs = Traits::RewriteNonPattern(
- rhs, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- classifier->Accumulate(rhs_classifier,
+ Traits::RewriteNonPattern(&rhs_classifier,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ classifier->Accumulate(&rhs_classifier,
ExpressionClassifier::ExpressionProductions);
value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
RelocInfo::kNoPosition);
classifier->RecordCoverInitializedNameError(
Scanner::Location(next_beg_pos, scanner()->location().end_pos),
MessageTemplate::kInvalidCoverInitializedName);
+
+ if (allow_harmony_function_name()) {
+ Traits::SetFunctionNameFromIdentifierRef(rhs, lhs);
+ }
} else {
value = lhs;
}
@@ -1725,12 +1723,6 @@ ParserBase<Traits>::ParsePropertyDefinition(
}
}
- if (in_class && escaped_static && !is_static) {
- ReportUnexpectedTokenAt(scanner()->location(), name_token);
- *ok = false;
- return this->EmptyObjectLiteralProperty();
- }
-
// Method definitions are never valid in patterns.
classifier->RecordPatternError(
Scanner::Location(next_beg_pos, scanner()->location().end_pos),
@@ -1755,28 +1747,24 @@ ParserBase<Traits>::ParsePropertyDefinition(
: FunctionKind::kBaseConstructor;
}
- if (!in_class) kind = WithObjectLiteralBit(kind);
-
value = this->ParseFunctionLiteral(
*name, scanner()->location(), kSkipFunctionNameCheck, kind,
- RelocInfo::kNoPosition, FunctionLiteral::kAnonymousExpression,
- FunctionLiteral::kNormalArity, language_mode(),
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ RelocInfo::kNoPosition, FunctionLiteral::kAccessorOrMethod,
+ language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
return factory()->NewObjectLiteralProperty(name_expression, value,
ObjectLiteralProperty::COMPUTED,
is_static, *is_computed_name);
}
- if (in_class && name_is_static && !is_static) {
+ if (in_class && name_token == Token::STATIC && !is_static) {
// ClassElement (static)
// 'static' MethodDefinition
*name = this->EmptyIdentifier();
ObjectLiteralPropertyT property = ParsePropertyDefinition(
checker, true, has_extends, true, is_computed_name, nullptr, classifier,
name, ok);
- property = Traits::RewriteNonPatternObjectLiteralProperty(property,
- classifier, ok);
+ Traits::RewriteNonPattern(classifier, ok);
return property;
}
@@ -1789,8 +1777,8 @@ ParserBase<Traits>::ParsePropertyDefinition(
name_token = peek();
name_expression = ParsePropertyName(
- name, &dont_care, &dont_care, &dont_care, is_computed_name, &dont_care,
- &dont_care, classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ name, &dont_care, &dont_care, is_computed_name, classifier,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
if (!*is_computed_name) {
checker->CheckProperty(name_token, kAccessorProperty, is_static,
@@ -1798,12 +1786,10 @@ ParserBase<Traits>::ParsePropertyDefinition(
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
}
- FunctionKind kind = FunctionKind::kAccessorFunction;
- if (!in_class) kind = WithObjectLiteralBit(kind);
typename Traits::Type::FunctionLiteral value = this->ParseFunctionLiteral(
- *name, scanner()->location(), kSkipFunctionNameCheck, kind,
- RelocInfo::kNoPosition, FunctionLiteral::kAnonymousExpression,
- is_get ? FunctionLiteral::kGetterArity : FunctionLiteral::kSetterArity,
+ *name, scanner()->location(), kSkipFunctionNameCheck,
+ is_get ? FunctionKind::kGetterFunction : FunctionKind::kSetterFunction,
+ RelocInfo::kNoPosition, FunctionLiteral::kAccessorOrMethod,
language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
// Make sure the name expression is a string since we need a Name for
@@ -1913,17 +1899,17 @@ typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
while (!done) {
int start_pos = peek_position();
bool is_spread = Check(Token::ELLIPSIS);
+ int expr_pos = peek_position();
ExpressionT argument = this->ParseAssignmentExpression(
true, classifier, CHECK_OK_CUSTOM(NullExpressionList));
- argument = Traits::RewriteNonPattern(argument, classifier,
- CHECK_OK_CUSTOM(NullExpressionList));
+ Traits::RewriteNonPattern(classifier, CHECK_OK_CUSTOM(NullExpressionList));
if (is_spread) {
if (!spread_arg.IsValid()) {
spread_arg.beg_pos = start_pos;
spread_arg.end_pos = peek_position();
}
- argument = factory()->NewSpread(argument, start_pos);
+ argument = factory()->NewSpread(argument, start_pos, expr_pos);
}
result->Add(argument, zone_);
@@ -1967,7 +1953,7 @@ typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
// Precedence = 2
template <class Traits>
typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, int flags,
+ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
ExpressionClassifier* classifier,
bool* ok) {
// AssignmentExpression ::
@@ -1975,8 +1961,6 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, int flags,
// ArrowFunction
// YieldExpression
// LeftHandSideExpression AssignmentOperator AssignmentExpression
- bool maybe_pattern_element = flags & kIsPossiblePatternElement;
- bool maybe_arrow_formals = flags & kIsPossibleArrowFormals;
bool is_destructuring_assignment = false;
int lhs_beg_pos = peek_position();
@@ -1986,7 +1970,8 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, int flags,
FuncNameInferrer::State fni_state(fni_);
ParserBase<Traits>::Checkpoint checkpoint(this);
- ExpressionClassifier arrow_formals_classifier(classifier->duplicate_finder());
+ ExpressionClassifier arrow_formals_classifier(this,
+ classifier->duplicate_finder());
bool parenthesized_formals = peek() == Token::LPAREN;
if (!parenthesized_formals) {
ArrowFormalParametersUnexpectedToken(&arrow_formals_classifier);
@@ -1994,7 +1979,9 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, int flags,
ExpressionT expression = this->ParseConditionalExpression(
accept_IN, &arrow_formals_classifier, CHECK_OK);
if (peek() == Token::ARROW) {
- BindingPatternUnexpectedToken(classifier);
+ classifier->RecordPatternError(scanner()->peek_location(),
+ MessageTemplate::kUnexpectedToken,
+ Token::String(Token::ARROW));
ValidateArrowFormalParameters(&arrow_formals_classifier, expression,
parenthesized_formals, CHECK_OK);
Scanner::Location loc(lhs_beg_pos, scanner()->location().end_pos);
@@ -2022,11 +2009,6 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, int flags,
}
expression = this->ParseArrowFunctionLiteral(
accept_IN, parameters, arrow_formals_classifier, CHECK_OK);
- if (maybe_pattern_element) {
- classifier->RecordPatternError(
- Scanner::Location(lhs_beg_pos, scanner()->location().end_pos),
- MessageTemplate::kInvalidDestructuringTarget);
- }
if (fni_ != nullptr) fni_->Infer();
@@ -2039,44 +2021,42 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, int flags,
// "expression" was not itself an arrow function parameter list, but it might
// form part of one. Propagate speculative formal parameter error locations.
+ // Do not merge pending non-pattern expressions yet!
classifier->Accumulate(
- arrow_formals_classifier,
+ &arrow_formals_classifier,
ExpressionClassifier::StandardProductions |
- ExpressionClassifier::FormalParametersProductions |
- ExpressionClassifier::CoverInitializedNameProduction);
-
- bool maybe_pattern =
- (expression->IsObjectLiteral() || expression->IsArrayLiteral()) &&
- !expression->is_parenthesized();
+ ExpressionClassifier::FormalParametersProductions |
+ ExpressionClassifier::CoverInitializedNameProduction,
+ false);
if (!Token::IsAssignmentOp(peek())) {
// Parsed conditional expression only (no assignment).
- if (maybe_pattern_element) {
- CheckDestructuringElement(expression, classifier, lhs_beg_pos,
- scanner()->location().end_pos);
- }
+ // Now pending non-pattern expressions must be merged.
+ classifier->MergeNonPatterns(&arrow_formals_classifier);
return expression;
}
+ // Now pending non-pattern expressions must be discarded.
+ arrow_formals_classifier.Discard();
+
if (!(allow_harmony_destructuring_bind() ||
allow_harmony_default_parameters())) {
BindingPatternUnexpectedToken(classifier);
}
- if (allow_harmony_destructuring_assignment() && maybe_pattern &&
+ if (allow_harmony_destructuring_assignment() && IsValidPattern(expression) &&
peek() == Token::ASSIGN) {
classifier->ForgiveCoverInitializedNameError();
ValidateAssignmentPattern(classifier, CHECK_OK);
is_destructuring_assignment = true;
- } else if (maybe_arrow_formals) {
+ } else if (allow_harmony_default_parameters() &&
+ !allow_harmony_destructuring_assignment()) {
+ // TODO(adamk): This branch should be removed once the destructuring
+ // assignment and default parameter flags are removed.
expression = this->ClassifyAndRewriteReferenceExpression(
classifier, expression, lhs_beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInAssignment);
} else {
- if (maybe_pattern_element) {
- CheckDestructuringElement(expression, classifier, lhs_beg_pos,
- scanner()->location().end_pos);
- }
expression = this->CheckAndRewriteReferenceExpression(
expression, lhs_beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInAssignment, CHECK_OK);
@@ -2086,20 +2066,20 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, int flags,
Token::Value op = Next(); // Get assignment operator.
if (op != Token::ASSIGN) {
- classifier->RecordBindingPatternError(scanner()->location(),
- MessageTemplate::kUnexpectedToken,
- Token::String(op));
+ classifier->RecordPatternError(scanner()->location(),
+ MessageTemplate::kUnexpectedToken,
+ Token::String(op));
}
int pos = position();
- ExpressionClassifier rhs_classifier;
+ ExpressionClassifier rhs_classifier(this);
ExpressionT right =
this->ParseAssignmentExpression(accept_IN, &rhs_classifier, CHECK_OK);
- right = Traits::RewriteNonPattern(right, &rhs_classifier, CHECK_OK);
+ Traits::RewriteNonPattern(&rhs_classifier, CHECK_OK);
classifier->Accumulate(
- rhs_classifier, ExpressionClassifier::ExpressionProductions |
- ExpressionClassifier::CoverInitializedNameProduction);
+ &rhs_classifier, ExpressionClassifier::ExpressionProductions |
+ ExpressionClassifier::CoverInitializedNameProduction);
// TODO(1231235): We try to estimate the set of properties set by
// constructors. We define a new property whenever there is an
@@ -2110,12 +2090,6 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, int flags,
function_state_->AddProperty();
}
- if (op != Token::ASSIGN && maybe_pattern_element) {
- classifier->RecordAssignmentPatternError(
- Scanner::Location(lhs_beg_pos, scanner()->location().end_pos),
- MessageTemplate::kInvalidDestructuringTarget);
- }
-
this->CheckAssigningFunctionLiteralToProperty(expression, right);
if (fni_ != NULL) {
@@ -2137,7 +2111,7 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, int flags,
ExpressionT result = factory()->NewAssignment(op, expression, right, pos);
if (is_destructuring_assignment) {
- result = factory()->NewRewritableAssignmentExpression(result);
+ result = factory()->NewRewritableExpression(result);
Traits::QueueDestructuringAssignmentForRewriting(result);
}
@@ -2179,16 +2153,12 @@ ParserBase<Traits>::ParseYieldExpression(ExpressionClassifier* classifier,
// Delegating yields require an RHS; fall through.
default:
expression = ParseAssignmentExpression(false, classifier, CHECK_OK);
- expression =
- Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
break;
}
}
if (kind == Yield::kDelegating) {
- // var iterator = subject[Symbol.iterator]();
- // Hackily disambiguate o from o.next and o [Symbol.iterator]().
- // TODO(verwaest): Come up with a better solution.
- expression = this->GetIterator(expression, factory(), pos + 1);
+ return Traits::RewriteYieldStar(generator_object, expression, pos);
}
// Hackily disambiguate o from o.next and o [Symbol.iterator]().
// TODO(verwaest): Come up with a better solution.
@@ -2213,7 +2183,7 @@ ParserBase<Traits>::ParseConditionalExpression(bool accept_IN,
ExpressionT expression =
this->ParseBinaryExpression(4, accept_IN, classifier, CHECK_OK);
if (peek() != Token::CONDITIONAL) return expression;
- expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
ArrowFormalParametersUnexpectedToken(classifier);
BindingPatternUnexpectedToken(classifier);
Consume(Token::CONDITIONAL);
@@ -2221,11 +2191,11 @@ ParserBase<Traits>::ParseConditionalExpression(bool accept_IN,
// expressions we always accept the 'in' keyword; see ECMA-262,
// section 11.12, page 58.
ExpressionT left = ParseAssignmentExpression(true, classifier, CHECK_OK);
- left = Traits::RewriteNonPattern(left, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
Expect(Token::COLON, CHECK_OK);
ExpressionT right =
ParseAssignmentExpression(accept_IN, classifier, CHECK_OK);
- right = Traits::RewriteNonPattern(right, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
return factory()->NewConditional(expression, left, right, pos);
}
@@ -2241,7 +2211,7 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
// prec1 >= 4
while (Precedence(peek(), accept_IN) == prec1) {
- x = Traits::RewriteNonPattern(x, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
Token::Value op = Next();
@@ -2249,7 +2219,7 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
int pos = position();
ExpressionT y =
ParseBinaryExpression(prec1 + 1, accept_IN, classifier, CHECK_OK);
- y = Traits::RewriteNonPattern(y, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
if (this->ShortcutNumericLiteralBinaryExpression(&x, y, op, pos,
factory())) {
@@ -2271,13 +2241,15 @@ ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN,
ReportMessageAt(op_location, MessageTemplate::kStrongEqual);
*ok = false;
return this->EmptyExpression();
+ } else if (FLAG_harmony_instanceof && cmp == Token::INSTANCEOF) {
+ x = Traits::RewriteInstanceof(x, y, pos);
+ } else {
+ x = factory()->NewCompareOperation(cmp, x, y, pos);
+ if (cmp != op) {
+ // The comparison was negated - add a NOT.
+ x = factory()->NewUnaryOperation(Token::NOT, x, pos);
+ }
}
- x = factory()->NewCompareOperation(cmp, x, y, pos);
- if (cmp != op) {
- // The comparison was negated - add a NOT.
- x = factory()->NewUnaryOperation(Token::NOT, x, pos);
- }
-
} else {
// We have a "normal" binary operation.
x = factory()->NewBinaryOperation(op, x, y, pos);
@@ -2312,7 +2284,7 @@ ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
op = Next();
int pos = position();
ExpressionT expression = ParseUnaryExpression(classifier, CHECK_OK);
- expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
if (op == Token::DELETE && is_strict(language_mode())) {
if (is_strong(language_mode())) {
@@ -2339,7 +2311,7 @@ ParserBase<Traits>::ParseUnaryExpression(ExpressionClassifier* classifier,
expression, beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
this->MarkExpressionAsAssigned(expression);
- expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
return factory()->NewCountOperation(op,
true /* prefix */,
@@ -2371,7 +2343,7 @@ ParserBase<Traits>::ParsePostfixExpression(ExpressionClassifier* classifier,
expression, lhs_beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPostfixOp, CHECK_OK);
expression = this->MarkExpressionAsAssigned(expression);
- expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
Token::Value next = Next();
expression =
@@ -2397,19 +2369,20 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
while (true) {
switch (peek()) {
case Token::LBRACK: {
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = ParseExpression(true, classifier, CHECK_OK);
- index = Traits::RewriteNonPattern(index, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
result = factory()->NewProperty(result, index, pos);
Expect(Token::RBRACK, CHECK_OK);
break;
}
case Token::LPAREN: {
- result = Traits::RewriteNonPattern(result, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
@@ -2473,6 +2446,7 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
}
case Token::PERIOD: {
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
Consume(Token::PERIOD);
@@ -2486,6 +2460,7 @@ ParserBase<Traits>::ParseLeftHandSideExpression(
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL: {
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
result = ParseTemplateLiteral(result, position(), classifier, CHECK_OK);
@@ -2537,7 +2512,7 @@ ParserBase<Traits>::ParseMemberWithNewPrefixesExpression(
} else {
result = this->ParseMemberWithNewPrefixesExpression(classifier, CHECK_OK);
}
- result = Traits::RewriteNonPattern(result, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
if (peek() == Token::LPAREN) {
// NewExpression with arguments.
Scanner::Location spread_pos;
@@ -2584,6 +2559,23 @@ ParserBase<Traits>::ParseMemberExpression(ExpressionClassifier* classifier,
Consume(Token::FUNCTION);
int function_token_position = position();
+
+ if (allow_harmony_function_sent() && peek() == Token::PERIOD) {
+ // function.sent
+ int pos = position();
+ ExpectMetaProperty(CStrVector("sent"), "function.sent", pos, CHECK_OK);
+
+ if (!is_generator()) {
+ // TODO(neis): allow escaping into closures?
+ ReportMessageAt(scanner()->location(),
+ MessageTemplate::kUnexpectedFunctionSent);
+ *ok = false;
+ return this->EmptyExpression();
+ }
+
+ return this->FunctionSentExpression(scope_, factory(), pos);
+ }
+
bool is_generator = Check(Token::MUL);
IdentifierT name = this->EmptyIdentifier();
bool is_strict_reserved_name = false;
@@ -2602,8 +2594,7 @@ ParserBase<Traits>::ParseMemberExpression(ExpressionClassifier* classifier,
: kFunctionNameValidityUnknown,
is_generator ? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction,
- function_token_position, function_type, FunctionLiteral::kNormalArity,
- language_mode(), CHECK_OK);
+ function_token_position, function_type, language_mode(), CHECK_OK);
} else if (peek() == Token::SUPER) {
const bool is_new = false;
result = ParseSuperExpression(is_new, classifier, CHECK_OK);
@@ -2637,7 +2628,7 @@ ParserBase<Traits>::ParseStrongInitializationExpression(
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = this->ParseExpression(true, classifier, CHECK_OK);
- index = Traits::RewriteNonPattern(index, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
left = factory()->NewProperty(this_expr, index, pos);
if (fni_ != NULL) {
this->PushPropertyName(fni_, index);
@@ -2673,7 +2664,7 @@ ParserBase<Traits>::ParseStrongInitializationExpression(
ExpressionT right =
this->ParseAssignmentExpression(true, classifier, CHECK_OK);
- right = Traits::RewriteNonPattern(right, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
this->CheckAssigningFunctionLiteralToProperty(left, right);
function_state_->AddProperty();
if (fni_ != NULL) {
@@ -2796,13 +2787,26 @@ ParserBase<Traits>::ParseSuperExpression(bool is_new,
return this->EmptyExpression();
}
+template <class Traits>
+void ParserBase<Traits>::ExpectMetaProperty(Vector<const char> property_name,
+ const char* full_name, int pos,
+ bool* ok) {
+ Consume(Token::PERIOD);
+ ExpectContextualKeyword(property_name, ok);
+ if (!*ok) return;
+ if (scanner()->literal_contains_escapes()) {
+ Traits::ReportMessageAt(
+ Scanner::Location(pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidEscapedMetaProperty, full_name);
+ *ok = false;
+ }
+}
template <class Traits>
typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseNewTargetExpression(bool* ok) {
int pos = position();
- Consume(Token::PERIOD);
- ExpectContextualKeyword(CStrVector("target"), CHECK_OK);
+ ExpectMetaProperty(CStrVector("target"), "new.target", pos, CHECK_OK);
if (!scope_->ReceiverScope()->is_function_scope()) {
ReportMessageAt(scanner()->location(),
@@ -2824,13 +2828,14 @@ ParserBase<Traits>::ParseMemberExpressionContinuation(
while (true) {
switch (peek()) {
case Token::LBRACK: {
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
Consume(Token::LBRACK);
int pos = position();
ExpressionT index = this->ParseExpression(true, classifier, CHECK_OK);
- index = Traits::RewriteNonPattern(index, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
expression = factory()->NewProperty(expression, index, pos);
if (fni_ != NULL) {
this->PushPropertyName(fni_, index);
@@ -2839,6 +2844,7 @@ ParserBase<Traits>::ParseMemberExpressionContinuation(
break;
}
case Token::PERIOD: {
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
@@ -2854,6 +2860,7 @@ ParserBase<Traits>::ParseMemberExpressionContinuation(
}
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL: {
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
BindingPatternUnexpectedToken(classifier);
ArrowFormalParametersUnexpectedToken(classifier);
int pos;
@@ -2908,14 +2915,19 @@ void ParserBase<Traits>::ParseFormalParameter(
ExpressionT initializer = Traits::EmptyExpression();
if (!is_rest && allow_harmony_default_parameters() && Check(Token::ASSIGN)) {
- ExpressionClassifier init_classifier;
+ ExpressionClassifier init_classifier(this);
initializer = ParseAssignmentExpression(true, &init_classifier, ok);
if (!*ok) return;
- initializer = Traits::RewriteNonPattern(initializer, &init_classifier, ok);
+ Traits::RewriteNonPattern(&init_classifier, ok);
ValidateFormalParameterInitializer(&init_classifier, ok);
if (!*ok) return;
parameters->is_simple = false;
+ init_classifier.Discard();
classifier->RecordNonSimpleParameter();
+
+ if (allow_harmony_function_name()) {
+ Traits::SetFunctionNameFromIdentifierRef(initializer, pattern);
+ }
}
Traits::AddFormalParameter(parameters, pattern, initializer,
@@ -2972,33 +2984,29 @@ void ParserBase<Traits>::ParseFormalParameterList(
}
}
-
template <class Traits>
-void ParserBase<Traits>::CheckArityRestrictions(
- int param_count, FunctionLiteral::ArityRestriction arity_restriction,
- bool has_rest, int formals_start_pos, int formals_end_pos, bool* ok) {
- switch (arity_restriction) {
- case FunctionLiteral::kGetterArity:
- if (param_count != 0) {
- ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
- MessageTemplate::kBadGetterArity);
- *ok = false;
- }
- break;
- case FunctionLiteral::kSetterArity:
- if (param_count != 1) {
- ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
- MessageTemplate::kBadSetterArity);
- *ok = false;
- }
- if (has_rest) {
- ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
- MessageTemplate::kBadSetterRestParameter);
- *ok = false;
- }
- break;
- default:
- break;
+void ParserBase<Traits>::CheckArityRestrictions(int param_count,
+ FunctionKind function_kind,
+ bool has_rest,
+ int formals_start_pos,
+ int formals_end_pos, bool* ok) {
+ if (IsGetterFunction(function_kind)) {
+ if (param_count != 0) {
+ ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
+ MessageTemplate::kBadGetterArity);
+ *ok = false;
+ }
+ } else if (IsSetterFunction(function_kind)) {
+ if (param_count != 1) {
+ ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
+ MessageTemplate::kBadSetterArity);
+ *ok = false;
+ }
+ if (has_rest) {
+ ReportMessageAt(Scanner::Location(formals_start_pos, formals_end_pos),
+ MessageTemplate::kBadSetterRestParameter);
+ *ok = false;
+ }
}
}
@@ -3082,10 +3090,10 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
// Single-expression body
int pos = position();
parenthesized_function_ = false;
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
ExpressionT expression =
ParseAssignmentExpression(accept_IN, &classifier, CHECK_OK);
- expression = Traits::RewriteNonPattern(expression, &classifier, CHECK_OK);
+ Traits::RewriteNonPattern(&classifier, CHECK_OK);
body = this->NewStatementList(1, zone());
this->AddParameterInitializationBlock(formal_parameters, body, CHECK_OK);
body->Add(factory()->NewReturnStatement(expression, pos), zone());
@@ -3191,7 +3199,7 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start,
int expr_pos = peek_position();
ExpressionT expression = this->ParseExpression(true, classifier, CHECK_OK);
- expression = Traits::RewriteNonPattern(expression, classifier, CHECK_OK);
+ Traits::RewriteNonPattern(classifier, CHECK_OK);
Traits::AddTemplateExpression(&ts, expression);
if (peek() != Token::RBRACE) {
@@ -3245,7 +3253,7 @@ typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::CheckAndRewriteReferenceExpression(
ExpressionT expression, int beg_pos, int end_pos,
MessageTemplate::Template message, ParseErrorType type, bool* ok) {
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
ExpressionT result = ClassifyAndRewriteReferenceExpression(
&classifier, expression, beg_pos, end_pos, message, type);
ValidateExpression(&classifier, ok);
@@ -3299,21 +3307,11 @@ template <typename Traits>
void ParserBase<Traits>::CheckDestructuringElement(
ExpressionT expression, ExpressionClassifier* classifier, int begin,
int end) {
- static const MessageTemplate::Template message =
- MessageTemplate::kInvalidDestructuringTarget;
- const Scanner::Location location(begin, end);
- if (expression->IsArrayLiteral() || expression->IsObjectLiteral() ||
- expression->IsAssignment()) {
- if (expression->is_parenthesized()) {
- classifier->RecordPatternError(location, message);
- }
- return;
- }
-
- if (expression->IsProperty()) {
- classifier->RecordBindingPatternError(location, message);
- } else if (!this->IsAssignableIdentifier(expression)) {
- classifier->RecordPatternError(location, message);
+ if (!IsValidPattern(expression) && !expression->IsAssignment() &&
+ !IsValidReferenceExpression(expression)) {
+ classifier->RecordAssignmentPatternError(
+ Scanner::Location(begin, end),
+ MessageTemplate::kInvalidDestructuringTarget);
}
}
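The collapsed check accepts a valid pattern, an assignment, or a valid
reference expression, and records kInvalidDestructuringTarget otherwise.
Illustrative JavaScript:

    let a, obj = { p: 1 }, arr = [1, 2];
    [a, obj.p] = arr;        // OK: identifier and property reference targets
    [{ p: a } = obj] = [];   // OK: nested pattern with a default
    // [a + 1] = arr;        // SyntaxError: invalid destructuring target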
@@ -3375,6 +3373,8 @@ void ParserBase<Traits>::ClassLiteralChecker::CheckProperty(
return;
}
}
+
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index b1b8c1316b..8005479a32 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -6,6 +6,7 @@
#include "src/api.h"
#include "src/ast/ast.h"
+#include "src/ast/ast-expression-rewriter.h"
#include "src/ast/ast-expression-visitor.h"
#include "src/ast/ast-literal-reindexer.h"
#include "src/ast/scopeinfo.h"
@@ -22,6 +23,7 @@
#include "src/parsing/scanner-character-streams.h"
#include "src/runtime/runtime.h"
#include "src/string-stream.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -178,15 +180,14 @@ void Parser::SetCachedData(ParseInfo* info) {
}
}
-
-FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
+FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
+ bool call_super, Scope* scope,
int pos, int end_pos,
LanguageMode language_mode) {
int materialized_literal_count = -1;
int expected_property_count = -1;
int parameter_count = 0;
- const AstRawString* name = ast_value_factory()->empty_string();
-
+ if (name == nullptr) name = ast_value_factory()->empty_string();
FunctionKind kind = call_super ? FunctionKind::kDefaultSubclassConstructor
: FunctionKind::kDefaultBaseConstructor;
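Passing the name through means a synthesized default constructor is no longer
anonymous. The constructors being synthesized, written out as source (a
sketch):

    class A {}             // behaves like: class A { constructor() {} }
    class B extends A {}   // behaves like:
                           //   class B extends A { constructor(...args) { super(...args); } }
    console.log(A.name, B.name);  // "A" "B"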
@@ -642,10 +643,16 @@ Expression* ParserTraits::NewTargetExpression(Scope* scope,
}
-Expression* ParserTraits::DefaultConstructor(bool call_super, Scope* scope,
- int pos, int end_pos,
- LanguageMode mode) {
- return parser_->DefaultConstructor(call_super, scope, pos, end_pos, mode);
+Expression* ParserTraits::FunctionSentExpression(Scope* scope,
+ AstNodeFactory* factory,
+ int pos) {
+ // We desugar function.sent into %GeneratorGetInput(generator).
+ Zone* zone = parser_->zone();
+ ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(1, zone);
+ VariableProxy* generator = factory->NewVariableProxy(
+ parser_->function_state_->generator_object_variable());
+ args->Add(generator, zone);
+ return factory->NewCallRuntime(Runtime::kGeneratorGetInput, args, pos);
}
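function.sent (behind --harmony-function-sent) evaluates to the value passed
to the next() call that resumed the generator, which makes even the priming
next()'s argument observable. A minimal sketch per the proposal:

    function* g() {
      console.log(function.sent);  // 1: argument of the first next()
      console.log(yield);          // 2: argument of the second next()
    }
    const it = g();
    it.next(1);
    it.next(2);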
@@ -721,11 +728,10 @@ FunctionLiteral* ParserTraits::ParseFunctionLiteral(
const AstRawString* name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok) {
return parser_->ParseFunctionLiteral(
name, function_name_location, function_name_validity, kind,
- function_token_position, type, arity_restriction, language_mode, ok);
+ function_token_position, type, language_mode, ok);
}
@@ -767,6 +773,7 @@ Parser::Parser(ParseInfo* info)
set_allow_legacy_const(FLAG_legacy_const);
set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
set_allow_harmony_function_name(FLAG_harmony_function_name);
+ set_allow_harmony_function_sent(FLAG_harmony_function_sent);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
@@ -789,6 +796,7 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
DCHECK(parsing_on_main_thread_);
HistogramTimerScope timer_scope(isolate->counters()->parse(), true);
+ TRACE_EVENT0("v8", "V8.Parse");
Handle<String> source(String::cast(info->script()->source()));
isolate->counters()->total_parse_size()->Increment(source->length());
base::ElapsedTimer timer;
@@ -935,13 +943,9 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
if (ok) {
ParserTraits::RewriteDestructuringAssignments();
- result = factory()->NewFunctionLiteral(
- ast_value_factory()->empty_string(), scope_, body,
- function_state.materialized_literal_count(),
- function_state.expected_property_count(), 0,
- FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::kGlobalOrEval, FunctionLiteral::kShouldLazyCompile,
- FunctionKind::kNormalFunction, 0);
+ result = factory()->NewScriptOrEvalFunctionLiteral(
+ scope_, body, function_state.materialized_literal_count(),
+ function_state.expected_property_count());
}
}
@@ -957,6 +961,7 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info) {
// called in the main thread.
DCHECK(parsing_on_main_thread_);
HistogramTimerScope timer_scope(isolate->counters()->parse_lazy());
+ TRACE_EVENT0("v8", "V8.ParseLazy");
Handle<String> source(String::cast(info->script()->source()));
isolate->counters()->total_parse_size()->Increment(source->length());
base::ElapsedTimer timer;
@@ -990,6 +995,18 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info) {
return result;
}
+static FunctionLiteral::FunctionType ComputeFunctionType(
+ Handle<SharedFunctionInfo> shared_info) {
+ if (shared_info->is_declaration()) {
+ return FunctionLiteral::kDeclaration;
+ } else if (shared_info->is_named_expression()) {
+ return FunctionLiteral::kNamedExpression;
+ } else if (IsConciseMethod(shared_info->kind()) ||
+ IsAccessorFunction(shared_info->kind())) {
+ return FunctionLiteral::kAccessorOrMethod;
+ }
+ return FunctionLiteral::kAnonymousExpression;
+}
FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
Utf16CharacterStream* source) {
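The source forms ComputeFunctionType distinguishes, as JavaScript (names
arbitrary):

    function decl() {}                            // kDeclaration
    const named = function f() {};                // kNamedExpression
    const o = { m() {}, get p() { return 1; } };  // kAccessorOrMethod
    const anon = function () {};                  // kAnonymousExpression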
@@ -1028,11 +1045,7 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
is_strict(info->language_mode()));
DCHECK(info->language_mode() == shared_info->language_mode());
FunctionLiteral::FunctionType function_type =
- shared_info->is_expression()
- ? (shared_info->is_anonymous()
- ? FunctionLiteral::kAnonymousExpression
- : FunctionLiteral::kNamedExpression)
- : FunctionLiteral::kDeclaration;
+ ComputeFunctionType(shared_info);
bool ok = true;
if (shared_info->is_arrow()) {
@@ -1050,7 +1063,7 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
SetLanguageMode(scope, shared_info->language_mode());
scope->set_start_position(shared_info->start_position());
- ExpressionClassifier formals_classifier;
+ ExpressionClassifier formals_classifier(this);
ParserFormalParameters formals(scope);
Checkpoint checkpoint(this);
{
@@ -1096,15 +1109,15 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
}
}
} else if (shared_info->is_default_constructor()) {
- result = DefaultConstructor(IsSubclassConstructor(shared_info->kind()),
- scope, shared_info->start_position(),
- shared_info->end_position(),
- shared_info->language_mode());
+ result = DefaultConstructor(
+ raw_name, IsSubclassConstructor(shared_info->kind()), scope,
+ shared_info->start_position(), shared_info->end_position(),
+ shared_info->language_mode());
} else {
- result = ParseFunctionLiteral(
- raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck,
- shared_info->kind(), RelocInfo::kNoPosition, function_type,
- FunctionLiteral::kNormalArity, shared_info->language_mode(), &ok);
+ result = ParseFunctionLiteral(raw_name, Scanner::Location::invalid(),
+ kSkipFunctionNameCheck, shared_info->kind(),
+ RelocInfo::kNoPosition, function_type,
+ shared_info->language_mode(), &ok);
}
// Make sure the results agree.
DCHECK(ok == (result != NULL));
@@ -1260,20 +1273,11 @@ Statement* Parser::ParseStatementListItem(bool* ok) {
// Statement
// Declaration
- if (peek() != Token::CLASS) {
- // No more classes follow; reset the start position for the consecutive
- // class declaration group.
- scope_->set_class_declaration_group_start(-1);
- }
-
switch (peek()) {
case Token::FUNCTION:
return ParseFunctionDeclaration(NULL, ok);
case Token::CLASS:
- if (scope_->class_declaration_group_start() < 0) {
- scope_->set_class_declaration_group_start(
- scanner()->peek_location().beg_pos);
- }
+ Consume(Token::CLASS);
return ParseClassDeclaration(NULL, ok);
case Token::CONST:
if (allow_const()) {
@@ -1345,7 +1349,6 @@ void* Parser::ParseModuleItemList(ZoneList<Statement*>* body, bool* ok) {
}
}
- scope_->module()->Freeze();
return NULL;
}
@@ -1558,24 +1561,53 @@ Statement* Parser::ParseExportDefault(bool* ok) {
Expect(Token::DEFAULT, CHECK_OK);
Scanner::Location default_loc = scanner()->location();
+ const AstRawString* default_string = ast_value_factory()->default_string();
ZoneList<const AstRawString*> names(1, zone());
- Statement* result = NULL;
+ Statement* result = nullptr;
+ Expression* default_export = nullptr;
switch (peek()) {
- case Token::FUNCTION:
- // TODO(ES6): Support parsing anonymous function declarations here.
- result = ParseFunctionDeclaration(&names, CHECK_OK);
+ case Token::FUNCTION: {
+ Consume(Token::FUNCTION);
+ int pos = position();
+ bool is_generator = Check(Token::MUL);
+ if (peek() == Token::LPAREN) {
+ // FunctionDeclaration[+Default] ::
+ // 'function' '(' FormalParameters ')' '{' FunctionBody '}'
+ //
+ // GeneratorDeclaration[+Default] ::
+ // 'function' '*' '(' FormalParameters ')' '{' FunctionBody '}'
+ default_export = ParseFunctionLiteral(
+ default_string, Scanner::Location::invalid(),
+ kSkipFunctionNameCheck,
+ is_generator ? FunctionKind::kGeneratorFunction
+ : FunctionKind::kNormalFunction,
+ pos, FunctionLiteral::kDeclaration, language_mode(), CHECK_OK);
+ result = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+ } else {
+ result = ParseFunctionDeclaration(pos, is_generator, &names, CHECK_OK);
+ }
break;
+ }
case Token::CLASS:
- // TODO(ES6): Support parsing anonymous class declarations here.
- result = ParseClassDeclaration(&names, CHECK_OK);
+ Consume(Token::CLASS);
+ if (peek() == Token::EXTENDS || peek() == Token::LBRACE) {
+ // ClassDeclaration[+Default] ::
+ // 'class' ('extends' LeftHandExpression)? '{' ClassBody '}'
+ default_export =
+ ParseClassLiteral(default_string, Scanner::Location::invalid(),
+ false, position(), CHECK_OK);
+ result = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+ } else {
+ result = ParseClassDeclaration(&names, CHECK_OK);
+ }
break;
default: {
int pos = peek_position();
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
Expression* expr = ParseAssignmentExpression(true, &classifier, CHECK_OK);
- expr = ParserTraits::RewriteNonPattern(expr, &classifier, CHECK_OK);
+ RewriteNonPattern(&classifier, CHECK_OK);
ExpectSemicolon(CHECK_OK);
result = factory()->NewExpressionStatement(expr, pos);
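This enables the anonymous default-export forms the old TODOs pointed at. Each
line below stands for a separate module's sole default export (illustrative):

    export default function () {}    // anonymous function declaration
    export default function* () {}   // anonymous generator declaration
    export default class {}          // anonymous class declaration
    export default 40 + 2;           // any AssignmentExpression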
@@ -1583,19 +1615,18 @@ Statement* Parser::ParseExportDefault(bool* ok) {
}
}
- const AstRawString* default_string = ast_value_factory()->default_string();
-
DCHECK_LE(names.length(), 1);
if (names.length() == 1) {
scope_->module()->AddLocalExport(default_string, names.first(), zone(), ok);
if (!*ok) {
ParserTraits::ReportMessageAt(
default_loc, MessageTemplate::kDuplicateExport, default_string);
- return NULL;
+ return nullptr;
}
} else {
// TODO(ES6): Assign result to a const binding with the name "*default*"
// and add an export entry with "*default*" as the local name.
+ USE(default_export);
}
return result;
@@ -1686,6 +1717,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
break;
case Token::CLASS:
+ Consume(Token::CLASS);
result = ParseClassDeclaration(&names, CHECK_OK);
break;
@@ -1921,42 +1953,44 @@ Variable* Parser::Declare(Declaration* declaration,
if (var == NULL) {
// Declare the name.
Variable::Kind kind = Variable::NORMAL;
- int declaration_group_start = -1;
if (is_function_declaration) {
kind = Variable::FUNCTION;
- } else if (declaration->IsVariableDeclaration() &&
- declaration->AsVariableDeclaration()->is_class_declaration()) {
- kind = Variable::CLASS;
- declaration_group_start =
- declaration->AsVariableDeclaration()->declaration_group_start();
}
var = declaration_scope->DeclareLocal(
- name, mode, declaration->initialization(), kind, kNotAssigned,
- declaration_group_start);
- } else if (((IsLexicalVariableMode(mode) ||
- IsLexicalVariableMode(var->mode())) &&
- // Allow duplicate function decls for web compat, see bug 4693.
- (is_strict(language_mode()) || !is_function_declaration ||
- !var->is_function())) ||
- ((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) &&
- !declaration_scope->is_script_scope())) {
- // The name was declared in this scope before; check for conflicting
- // re-declarations. We have a conflict if either of the declarations is
- // not a var (in script scope, we also have to ignore legacy const for
- // compatibility). There is similar code in runtime.cc in the Declare
- // functions. The function CheckConflictingVarDeclarations checks for
- // var and let bindings from different scopes whereas this is a check for
- // conflicting declarations within the same scope. This check also covers
- // the special case
- //
- // function () { let x; { var x; } }
- //
- // because the var declaration is hoisted to the function scope where 'x'
- // is already bound.
- DCHECK(IsDeclaredVariableMode(var->mode()));
- if (is_strict(language_mode()) ||
- (allow_harmony_sloppy() && mode != CONST_LEGACY &&
- var->mode() != CONST_LEGACY)) {
+ name, mode, declaration->initialization(), kind, kNotAssigned);
+ } else if ((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) &&
+ !declaration_scope->is_script_scope()) {
+ // Duplicate legacy const definitions throw at runtime.
+ DCHECK(is_sloppy(language_mode()));
+ Expression* expression = NewThrowSyntaxError(
+ MessageTemplate::kVarRedeclaration, name, declaration->position());
+ declaration_scope->SetIllegalRedeclaration(expression);
+ } else if ((IsLexicalVariableMode(mode) ||
+ IsLexicalVariableMode(var->mode())) &&
+ // Lexical bindings may appear for some parameters in sloppy
+ // mode even with --harmony-sloppy off.
+ (is_strict(language_mode()) || allow_harmony_sloppy())) {
+ // Allow duplicate function decls for web compat, see bug 4693.
+ if (is_sloppy(language_mode()) && is_function_declaration &&
+ var->is_function()) {
+ DCHECK(IsLexicalVariableMode(mode) &&
+ IsLexicalVariableMode(var->mode()));
+ ++use_counts_[v8::Isolate::kSloppyModeBlockScopedFunctionRedefinition];
+ } else {
+ // The name was declared in this scope before; check for conflicting
+ // re-declarations. We have a conflict if either of the declarations
+ // is not a var (in script scope, we also have to ignore legacy const
+ // for compatibility). There is similar code in runtime.cc in the
+ // Declare functions. The function CheckConflictingVarDeclarations
+ // checks for var and let bindings from different scopes whereas this
+ // is a check for conflicting declarations within the same scope. This
+ // check also covers the special case
+ //
+ // function () { let x; { var x; } }
+ //
+ // because the var declaration is hoisted to the function scope where
+ // 'x' is already bound.
+ DCHECK(IsDeclaredVariableMode(var->mode()));
// In harmony we treat re-declarations as early errors. See
// ES5 16 for a definition of early errors.
if (declaration_kind == DeclarationDescriptor::NORMAL) {
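The restructured Declare keeps redeclaration an early error but carves out the
web-compat case from bug 4693: sloppy-mode duplicate function declarations in
the same block are tolerated and use-counted. Illustrative JavaScript:

    {
      function f() { return 1; }
      function f() { return 2; }  // sloppy mode: allowed, counted; strict: SyntaxError
    }
    // { let x; var x; }          // still a SyntaxError in any mode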
@@ -1967,9 +2001,6 @@ Variable* Parser::Declare(Declaration* declaration,
*ok = false;
return nullptr;
}
- Expression* expression = NewThrowSyntaxError(
- MessageTemplate::kVarRedeclaration, name, declaration->position());
- declaration_scope->SetIllegalRedeclaration(expression);
} else if (mode == VAR) {
var->set_maybe_assigned();
}
@@ -2093,14 +2124,22 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) {
Statement* Parser::ParseFunctionDeclaration(
ZoneList<const AstRawString*>* names, bool* ok) {
- // FunctionDeclaration ::
- // 'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
- // GeneratorDeclaration ::
- // 'function' '*' Identifier '(' FormalParameterListopt ')'
- // '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
int pos = position();
bool is_generator = Check(Token::MUL);
+ return ParseFunctionDeclaration(pos, is_generator, names, ok);
+}
+
+
+Statement* Parser::ParseFunctionDeclaration(
+ int pos, bool is_generator, ZoneList<const AstRawString*>* names,
+ bool* ok) {
+ // FunctionDeclaration ::
+ // 'function' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
+ // GeneratorDeclaration ::
+ // 'function' '*' Identifier '(' FormalParameters ')' '{' FunctionBody '}'
+ //
+ // 'function' and '*' (if present) have been consumed by the caller.
bool is_strict_reserved = false;
const AstRawString* name = ParseIdentifierOrStrictReservedWord(
&is_strict_reserved, CHECK_OK);
@@ -2113,8 +2152,7 @@ Statement* Parser::ParseFunctionDeclaration(
: kFunctionNameValidityUnknown,
is_generator ? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction,
- pos, FunctionLiteral::kDeclaration, FunctionLiteral::kNormalArity,
- language_mode(), CHECK_OK);
+ pos, FunctionLiteral::kDeclaration, language_mode(), CHECK_OK);
// Even if we're not at the top-level of the global or a function
// scope, we treat it as such and introduce the function with its
@@ -2151,6 +2189,8 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
// ClassDeclaration ::
// 'class' Identifier ('extends' LeftHandExpression)? '{' ClassBody '}'
//
+ // 'class' is expected to be consumed by the caller.
+ //
// A ClassDeclaration
//
// class C { ... }
@@ -2161,7 +2201,6 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
//
// so rewrite it as such.
- Expect(Token::CLASS, CHECK_OK);
if (!allow_harmony_sloppy() && is_sloppy(language_mode())) {
ReportMessage(MessageTemplate::kSloppyLexical);
*ok = false;
@@ -2177,30 +2216,10 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
VariableMode mode = is_strong(language_mode()) ? CONST : LET;
VariableProxy* proxy = NewUnresolved(name, mode);
- const bool is_class_declaration = true;
- Declaration* declaration = factory()->NewVariableDeclaration(
- proxy, mode, scope_, pos, is_class_declaration,
- scope_->class_declaration_group_start());
- Variable* outer_class_variable =
- Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
+ Declaration* declaration =
+ factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
+ Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
proxy->var()->set_initializer_position(position());
- // This is needed because a class ("class Name { }") creates two bindings (one
- // in the outer scope, and one in the class scope). The method is a function
- // scope inside the inner scope (class scope). The consecutive class
- // declarations are in the outer scope.
- if (value->class_variable_proxy() && value->class_variable_proxy()->var() &&
- outer_class_variable->is_class()) {
- // In some cases, the outer variable is not detected as a class variable;
- // this happens e.g., for lazy methods. They are excluded from strong mode
- // checks for now. TODO(marja, rossberg): re-create variables with the
- // correct Kind and remove this hack.
- value->class_variable_proxy()
- ->var()
- ->AsClassVariable()
- ->set_declaration_group_start(
- outer_class_variable->AsClassVariable()->declaration_group_start());
- }
-
Assignment* assignment =
factory()->NewAssignment(Token::INIT, proxy, value, pos);
Statement* assignment_statement =
@@ -2281,17 +2300,16 @@ Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
// is inside an initializer block, it is ignored.
DeclarationParsingResult parsing_result;
- ParseVariableDeclarations(var_context, &parsing_result, CHECK_OK);
+ Block* result =
+ ParseVariableDeclarations(var_context, &parsing_result, names, CHECK_OK);
ExpectSemicolon(CHECK_OK);
-
- Block* result = parsing_result.BuildInitializationBlock(names, CHECK_OK);
return result;
}
-
-void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
- DeclarationParsingResult* parsing_result,
- bool* ok) {
+Block* Parser::ParseVariableDeclarations(
+ VariableDeclarationContext var_context,
+ DeclarationParsingResult* parsing_result,
+ ZoneList<const AstRawString*>* names, bool* ok) {
// VariableDeclarations ::
// ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
//
@@ -2311,17 +2329,19 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
parsing_result->descriptor.declaration_pos = peek_position();
parsing_result->descriptor.initialization_pos = peek_position();
parsing_result->descriptor.mode = VAR;
- // True if the binding needs initialization. 'let' and 'const' declared
- // bindings are created uninitialized by their declaration nodes and
- // need initialization. 'var' declared bindings are always initialized
- // immediately by their declaration nodes.
- parsing_result->descriptor.needs_init = false;
+
+ Block* init_block = nullptr;
+ if (var_context != kForStatement) {
+ init_block = factory()->NewBlock(
+ NULL, 1, true, parsing_result->descriptor.declaration_pos);
+ }
+
if (peek() == Token::VAR) {
if (is_strong(language_mode())) {
Scanner::Location location = scanner()->peek_location();
ReportMessageAt(location, MessageTemplate::kStrongVar);
*ok = false;
- return;
+ return nullptr;
}
Consume(Token::VAR);
} else if (peek() == Token::CONST && allow_const()) {
@@ -2334,12 +2354,10 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
DCHECK(var_context != kStatement);
parsing_result->descriptor.mode = CONST;
}
- parsing_result->descriptor.needs_init = true;
} else if (peek() == Token::LET && allow_let()) {
Consume(Token::LET);
DCHECK(var_context != kStatement);
parsing_result->descriptor.mode = LET;
- parsing_result->descriptor.needs_init = true;
} else {
UNREACHABLE(); // by current callers
}
@@ -2350,7 +2368,6 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
bool first_declaration = true;
int bindings_start = peek_position();
- bool is_for_iteration_variable;
do {
FuncNameInferrer::State fni_state(fni_);
@@ -2360,27 +2377,20 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
Expression* pattern;
int decl_pos = peek_position();
{
- ExpressionClassifier pattern_classifier;
+ ExpressionClassifier pattern_classifier(this);
Token::Value next = peek();
- pattern = ParsePrimaryExpression(&pattern_classifier, ok);
- if (!*ok) return;
- ValidateBindingPattern(&pattern_classifier, ok);
- if (!*ok) return;
+ pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
+ ValidateBindingPattern(&pattern_classifier, CHECK_OK);
if (IsLexicalVariableMode(parsing_result->descriptor.mode)) {
- ValidateLetPattern(&pattern_classifier, ok);
- if (!*ok) return;
+ ValidateLetPattern(&pattern_classifier, CHECK_OK);
}
if (!allow_harmony_destructuring_bind() && !pattern->IsVariableProxy()) {
ReportUnexpectedToken(next);
*ok = false;
- return;
+ return nullptr;
}
}
- bool is_pattern =
- (pattern->IsObjectLiteral() || pattern->IsArrayLiteral()) &&
- !pattern->is_parenthesized();
-
Scanner::Location variable_loc = scanner()->location();
const AstRawString* single_name =
pattern->IsVariableProxy() ? pattern->AsVariableProxy()->raw_name()
@@ -2389,25 +2399,13 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
if (fni_ != NULL) fni_->PushVariableName(single_name);
}
- is_for_iteration_variable =
- var_context == kForStatement &&
- (peek() == Token::IN || PeekContextualKeyword(CStrVector("of")));
- if (is_for_iteration_variable &&
- (parsing_result->descriptor.mode == CONST ||
- parsing_result->descriptor.mode == CONST_LEGACY)) {
- parsing_result->descriptor.needs_init = false;
- }
-
Expression* value = NULL;
- // Harmony consts have non-optional initializers.
int initializer_position = RelocInfo::kNoPosition;
if (Check(Token::ASSIGN)) {
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
value = ParseAssignmentExpression(var_context != kForStatement,
- &classifier, ok);
- if (!*ok) return;
- value = ParserTraits::RewriteNonPattern(value, &classifier, ok);
- if (!*ok) return;
+ &classifier, CHECK_OK);
+ RewriteNonPattern(&classifier, CHECK_OK);
variable_loc.end_pos = scanner()->location().end_pos;
if (!parsing_result->first_initializer_loc.IsValid()) {
@@ -2424,48 +2422,60 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
}
}
- if (allow_harmony_function_name() && single_name) {
- if (value->IsFunctionLiteral()) {
- auto function_literal = value->AsFunctionLiteral();
- if (function_literal->is_anonymous()) {
- function_literal->set_raw_name(single_name);
- }
- } else if (value->IsClassLiteral()) {
- auto class_literal = value->AsClassLiteral();
- if (class_literal->raw_name() == nullptr) {
- class_literal->set_raw_name(single_name);
- }
- }
+ if (allow_harmony_function_name()) {
+ ParserTraits::SetFunctionNameFromIdentifierRef(value, pattern);
}
// End position of the initializer is after the assignment expression.
initializer_position = scanner()->location().end_pos;
} else {
- if ((parsing_result->descriptor.mode == CONST || is_pattern) &&
- !is_for_iteration_variable) {
- ParserTraits::ReportMessageAt(
- Scanner::Location(decl_pos, scanner()->location().end_pos),
- MessageTemplate::kDeclarationMissingInitializer,
- is_pattern ? "destructuring" : "const");
- *ok = false;
- return;
+ // Initializers may be either required or implied unless this is a
+ // for-in/of iteration variable.
+ if (var_context != kForStatement || !PeekInOrOf()) {
+ // ES6 'const' and binding patterns require initializers.
+ if (parsing_result->descriptor.mode == CONST ||
+ !pattern->IsVariableProxy()) {
+ ParserTraits::ReportMessageAt(
+ Scanner::Location(decl_pos, scanner()->location().end_pos),
+ MessageTemplate::kDeclarationMissingInitializer,
+ !pattern->IsVariableProxy() ? "destructuring" : "const");
+ *ok = false;
+ return nullptr;
+ }
+
+ // 'let x' and (legacy) 'const x' initialize 'x' to undefined.
+ if (parsing_result->descriptor.mode == LET ||
+ parsing_result->descriptor.mode == CONST_LEGACY) {
+ value = GetLiteralUndefined(position());
+ }
}
+
// End position of the initializer is after the variable.
initializer_position = position();
}
- // Make sure that 'const x' and 'let x' initialize 'x' to undefined.
- if (value == NULL && parsing_result->descriptor.needs_init) {
- value = GetLiteralUndefined(position());
+ DeclarationParsingResult::Declaration decl(pattern, initializer_position,
+ value);
+ if (var_context == kForStatement) {
+ // Save the declaration for further handling in ParseForStatement.
+ parsing_result->declarations.Add(decl);
+ } else {
+ // Immediately declare the variable otherwise. This avoids O(N^2)
+ // behavior (where N is the number of variables in a single
+ // declaration) in the PatternRewriter having to do with removing
+ // and adding VariableProxies to the Scope (see bug 4699).
+ DCHECK_NOT_NULL(init_block);
+ PatternRewriter::DeclareAndInitializeVariables(
+ init_block, &parsing_result->descriptor, &decl, names, CHECK_OK);
}
-
- parsing_result->declarations.Add(DeclarationParsingResult::Declaration(
- pattern, initializer_position, value));
first_declaration = false;
} while (peek() == Token::COMMA);
parsing_result->bindings_loc =
Scanner::Location(bindings_start, scanner()->location().end_pos);
+
+ DCHECK(*ok);
+ return init_block;
}
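The initializer rules enforced above, as JavaScript:

    let a;                      // OK: 'let' initializes to undefined
    let [b] = [1];              // OK: patterns carry an initializer
    // let [c];                 // SyntaxError: destructuring declaration needs an initializer
    // const d;                 // SyntaxError: 'const' needs an initializer
    for (const e of [1, 2]) {}  // OK: for-in/of heads are exempt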
@@ -2511,13 +2521,13 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
IsClassConstructor(function_state_->kind())) {
bool is_this = peek() == Token::THIS;
Expression* expr;
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
if (is_this) {
expr = ParseStrongInitializationExpression(&classifier, CHECK_OK);
} else {
expr = ParseStrongSuperCallExpression(&classifier, CHECK_OK);
}
- expr = ParserTraits::RewriteNonPattern(expr, &classifier, CHECK_OK);
+ RewriteNonPattern(&classifier, CHECK_OK);
switch (peek()) {
case Token::SEMICOLON:
Consume(Token::SEMICOLON);
@@ -2728,23 +2738,22 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
if (IsSubclassConstructor(function_state_->kind())) {
// For subclass constructors we need to return this in case of undefined
- // and throw an exception in case of a non object.
+ // and return a Smi (transformed into an exception in the ConstructStub)
+ // in case of a non-object.
//
// return expr;
//
// Is rewritten as:
//
// return (temp = expr) === undefined ? this :
- // %_IsJSReceiver(temp) ? temp : throw new TypeError(...);
+ // %_IsJSReceiver(temp) ? temp : 1;
+
+ // temp = expr
Variable* temp = scope_->NewTemporary(
ast_value_factory()->empty_string());
Assignment* assign = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(temp), return_value, pos);
- Expression* throw_expression =
- NewThrowTypeError(MessageTemplate::kDerivedConstructorReturn,
- ast_value_factory()->empty_string(), pos);
-
// %_IsJSReceiver(temp)
ZoneList<Expression*>* is_spec_object_args =
new (zone()) ZoneList<Expression*>(1, zone());
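The runtime behavior this rewrite implements, with the TypeError for primitive
returns now raised by the ConstructStub rather than by parser-inserted code (a
sketch):

    class Base {}
    class Derived extends Base {
      constructor(r) { super(); if (r !== undefined) return r; }
    }
    new Derived();          // undefined falls through to `this`
    new Derived({ x: 1 });  // an object return wins
    // new Derived(42);     // TypeError for a non-object return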
@@ -2755,7 +2764,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// %_IsJSReceiver(temp) ? temp : throw_expression
Expression* is_object_conditional = factory()->NewConditional(
is_spec_object_call, factory()->NewVariableProxy(temp),
- throw_expression, pos);
+ factory()->NewSmiLiteral(1, pos), pos);
// temp === undefined
Expression* is_undefined = factory()->NewCompareOperation(
@@ -2768,7 +2777,10 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
is_object_conditional, pos);
}
- return_value->MarkTail();
+ // ES6 14.6.1 Static Semantics: IsInTailPosition
+ if (FLAG_harmony_tailcalls && !is_sloppy(language_mode())) {
+ function_state_->AddExpressionInTailPosition(return_value);
+ }
}
ExpectSemicolon(CHECK_OK);
@@ -2974,6 +2986,40 @@ Statement* Parser::ParseThrowStatement(bool* ok) {
factory()->NewThrow(exception, pos), pos);
}
+class Parser::DontCollectExpressionsInTailPositionScope {
+ public:
+ DontCollectExpressionsInTailPositionScope(
+ Parser::FunctionState* function_state)
+ : function_state_(function_state),
+ old_value_(function_state->collect_expressions_in_tail_position()) {
+ function_state->set_collect_expressions_in_tail_position(false);
+ }
+ ~DontCollectExpressionsInTailPositionScope() {
+ function_state_->set_collect_expressions_in_tail_position(old_value_);
+ }
+
+ private:
+ Parser::FunctionState* function_state_;
+ bool old_value_;
+};
+
+// Collects all return expressions at tail call position in this scope
+// into a separate list.
+class Parser::CollectExpressionsInTailPositionToListScope {
+ public:
+ CollectExpressionsInTailPositionToListScope(
+ Parser::FunctionState* function_state, List<Expression*>* list)
+ : function_state_(function_state), list_(list) {
+ function_state->expressions_in_tail_position().Swap(list_);
+ }
+ ~CollectExpressionsInTailPositionToListScope() {
+ function_state_->expressions_in_tail_position().Swap(list_);
+ }
+
+ private:
+ Parser::FunctionState* function_state_;
+ List<Expression*>* list_;
+};
TryStatement* Parser::ParseTryStatement(bool* ok) {
// TryStatement ::
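These two scopes implement ES6 14.6.1 for try statements: returns inside a try
block are not in tail position, while returns in a catch block without a
finally are. Illustrative strict-mode JavaScript:

    "use strict";
    function g() { return 1; }
    function h(x) {
      if (x) return g();         // tail position
      try { return g(); }        // not a tail position (inside try)
      catch (e) { return g(); }  // tail position (catch without finally)
    }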
@@ -2990,7 +3036,11 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Expect(Token::TRY, CHECK_OK);
int pos = position();
- Block* try_block = ParseBlock(NULL, CHECK_OK);
+ Block* try_block;
+ {
+ DontCollectExpressionsInTailPositionScope no_tail_calls(function_state_);
+ try_block = ParseBlock(NULL, CHECK_OK);
+ }
Token::Value tok = peek();
if (tok != Token::CATCH && tok != Token::FINALLY) {
@@ -3002,6 +3052,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Scope* catch_scope = NULL;
Variable* catch_variable = NULL;
Block* catch_block = NULL;
+ List<Expression*> expressions_in_tail_position_in_catch_block;
if (tok == Token::CATCH) {
Consume(Token::CATCH);
@@ -3009,7 +3060,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
catch_scope = NewScope(scope_, CATCH_SCOPE);
catch_scope->set_start_position(scanner()->location().beg_pos);
- ExpressionClassifier pattern_classifier;
+ ExpressionClassifier pattern_classifier(this);
Expression* pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
ValidateBindingPattern(&pattern_classifier, CHECK_OK);
@@ -3027,6 +3078,9 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
{
+ CollectExpressionsInTailPositionToListScope
+ collect_expressions_in_tail_position_scope(
+ function_state_, &expressions_in_tail_position_in_catch_block);
BlockState block_state(&scope_, catch_scope);
// TODO(adamk): Make a version of ParseBlock that takes a scope and
@@ -3047,7 +3101,6 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
descriptor.scope = scope_;
descriptor.hoist_scope = nullptr;
descriptor.mode = LET;
- descriptor.needs_init = true;
descriptor.declaration_pos = pattern->position();
descriptor.initialization_pos = pattern->position();
@@ -3102,6 +3155,11 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
TryStatement* result = NULL;
if (catch_block != NULL) {
+ // For a try-catch construct, append return expressions from the catch block
+ // to the list of return expressions.
+ function_state_->expressions_in_tail_position().AddAll(
+ expressions_in_tail_position_in_catch_block);
+
DCHECK(finally_block == NULL);
DCHECK(catch_scope != NULL && catch_variable != NULL);
result = factory()->NewTryCatchStatement(try_block, catch_scope,
@@ -3262,6 +3320,7 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
}
for_of->Initialize(each, subject, body,
+ iterator,
assign_iterator,
next_result,
result_done,
@@ -3288,9 +3347,8 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
}
}
-
Statement* Parser::DesugarLexicalBindingsInForStatement(
- Scope* inner_scope, bool is_const, ZoneList<const AstRawString*>* names,
+ Scope* inner_scope, VariableMode mode, ZoneList<const AstRawString*>* names,
ForStatement* loop, Statement* init, Expression* cond, Statement* next,
Statement* body, bool* ok) {
// ES6 13.7.4.8 specifies that on each loop iteration the let variables are
@@ -3331,7 +3389,6 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
// }
DCHECK(names->length() > 0);
- Scope* for_scope = scope_;
ZoneList<Variable*> temps(names->length(), zone());
Block* outer_block = factory()->NewBlock(NULL, names->length() + 4, false,
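The per-iteration copies this desugaring creates are observable through
closures. A minimal sketch:

    const fns = [];
    for (let i = 0; i < 3; i++) {
      fns.push(() => i);  // each iteration closes over its own `i`
    }
    console.log(fns.map(f => f()));  // [0, 1, 2], not [3, 3, 3]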
@@ -3384,150 +3441,155 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
ForStatement* outer_loop =
factory()->NewForStatement(NULL, RelocInfo::kNoPosition);
outer_block->statements()->Add(outer_loop, zone());
-
- outer_block->set_scope(for_scope);
- scope_ = inner_scope;
+ outer_block->set_scope(scope_);
Block* inner_block =
factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
- Block* ignore_completion_block = factory()->NewBlock(
- NULL, names->length() + 3, true, RelocInfo::kNoPosition);
- ZoneList<Variable*> inner_vars(names->length(), zone());
- // For each let variable x:
- // make statement: let/const x = temp_x.
- VariableMode mode = is_const ? CONST : LET;
- for (int i = 0; i < names->length(); i++) {
- VariableProxy* proxy = NewUnresolved(names->at(i), mode);
- Declaration* declaration = factory()->NewVariableDeclaration(
- proxy, mode, scope_, RelocInfo::kNoPosition);
- Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
- inner_vars.Add(declaration->proxy()->var(), zone());
- VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
- Assignment* assignment = factory()->NewAssignment(
- Token::INIT, proxy, temp_proxy, RelocInfo::kNoPosition);
- Statement* assignment_statement =
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
- DCHECK(init->position() != RelocInfo::kNoPosition);
- proxy->var()->set_initializer_position(init->position());
- ignore_completion_block->statements()->Add(assignment_statement, zone());
- }
+ {
+ BlockState block_state(&scope_, inner_scope);
- // Make statement: if (first == 1) { first = 0; } else { next; }
- if (next) {
- DCHECK(first);
- Expression* compare = NULL;
- // Make compare expression: first == 1.
- {
- Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
- VariableProxy* first_proxy = factory()->NewVariableProxy(first);
- compare = factory()->NewCompareOperation(Token::EQ, first_proxy, const1,
- RelocInfo::kNoPosition);
- }
- Statement* clear_first = NULL;
- // Make statement: first = 0.
- {
- VariableProxy* first_proxy = factory()->NewVariableProxy(first);
- Expression* const0 = factory()->NewSmiLiteral(0, RelocInfo::kNoPosition);
+ Block* ignore_completion_block = factory()->NewBlock(
+ NULL, names->length() + 3, true, RelocInfo::kNoPosition);
+ ZoneList<Variable*> inner_vars(names->length(), zone());
+ // For each let variable x:
+ // make statement: let/const x = temp_x.
+ for (int i = 0; i < names->length(); i++) {
+ VariableProxy* proxy = NewUnresolved(names->at(i), mode);
+ Declaration* declaration = factory()->NewVariableDeclaration(
+ proxy, mode, scope_, RelocInfo::kNoPosition);
+ Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
+ inner_vars.Add(declaration->proxy()->var(), zone());
+ VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
Assignment* assignment = factory()->NewAssignment(
- Token::ASSIGN, first_proxy, const0, RelocInfo::kNoPosition);
- clear_first =
+ Token::INIT, proxy, temp_proxy, RelocInfo::kNoPosition);
+ Statement* assignment_statement =
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
+ DCHECK(init->position() != RelocInfo::kNoPosition);
+ proxy->var()->set_initializer_position(init->position());
+ ignore_completion_block->statements()->Add(assignment_statement, zone());
}
- Statement* clear_first_or_next = factory()->NewIfStatement(
- compare, clear_first, next, RelocInfo::kNoPosition);
- ignore_completion_block->statements()->Add(clear_first_or_next, zone());
- }
- Variable* flag = scope_->NewTemporary(temp_name);
- // Make statement: flag = 1.
- {
- VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
- Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
- Assignment* assignment = factory()->NewAssignment(
- Token::ASSIGN, flag_proxy, const1, RelocInfo::kNoPosition);
- Statement* assignment_statement =
- factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
- ignore_completion_block->statements()->Add(assignment_statement, zone());
- }
+ // Make statement: if (first == 1) { first = 0; } else { next; }
+ if (next) {
+ DCHECK(first);
+ Expression* compare = NULL;
+ // Make compare expression: first == 1.
+ {
+ Expression* const1 =
+ factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
+ VariableProxy* first_proxy = factory()->NewVariableProxy(first);
+ compare = factory()->NewCompareOperation(Token::EQ, first_proxy, const1,
+ RelocInfo::kNoPosition);
+ }
+ Statement* clear_first = NULL;
+ // Make statement: first = 0.
+ {
+ VariableProxy* first_proxy = factory()->NewVariableProxy(first);
+ Expression* const0 =
+ factory()->NewSmiLiteral(0, RelocInfo::kNoPosition);
+ Assignment* assignment = factory()->NewAssignment(
+ Token::ASSIGN, first_proxy, const0, RelocInfo::kNoPosition);
+ clear_first = factory()->NewExpressionStatement(assignment,
+ RelocInfo::kNoPosition);
+ }
+ Statement* clear_first_or_next = factory()->NewIfStatement(
+ compare, clear_first, next, RelocInfo::kNoPosition);
+ ignore_completion_block->statements()->Add(clear_first_or_next, zone());
+ }
- // Make statement: if (!cond) break.
- if (cond) {
- Statement* stop =
- factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
- Statement* noop = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
- ignore_completion_block->statements()->Add(
- factory()->NewIfStatement(cond, noop, stop, cond->position()), zone());
- }
+ Variable* flag = scope_->NewTemporary(temp_name);
+ // Make statement: flag = 1.
+ {
+ VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+ Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
+ Assignment* assignment = factory()->NewAssignment(
+ Token::ASSIGN, flag_proxy, const1, RelocInfo::kNoPosition);
+ Statement* assignment_statement =
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
+ ignore_completion_block->statements()->Add(assignment_statement, zone());
+ }
- inner_block->statements()->Add(ignore_completion_block, zone());
- // Make cond expression for main loop: flag == 1.
- Expression* flag_cond = NULL;
- {
- Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
- VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
- flag_cond = factory()->NewCompareOperation(Token::EQ, flag_proxy, const1,
- RelocInfo::kNoPosition);
- }
+ // Make statement: if (!cond) break.
+ if (cond) {
+ Statement* stop =
+ factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
+ Statement* noop = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+ ignore_completion_block->statements()->Add(
+ factory()->NewIfStatement(cond, noop, stop, cond->position()),
+ zone());
+ }
- // Create chain of expressions "flag = 0, temp_x = x, ..."
- Statement* compound_next_statement = NULL;
- {
- Expression* compound_next = NULL;
- // Make expression: flag = 0.
+ inner_block->statements()->Add(ignore_completion_block, zone());
+ // Make cond expression for main loop: flag == 1.
+ Expression* flag_cond = NULL;
{
+ Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
- Expression* const0 = factory()->NewSmiLiteral(0, RelocInfo::kNoPosition);
- compound_next = factory()->NewAssignment(Token::ASSIGN, flag_proxy,
- const0, RelocInfo::kNoPosition);
+ flag_cond = factory()->NewCompareOperation(Token::EQ, flag_proxy, const1,
+ RelocInfo::kNoPosition);
}
- // Make the comma-separated list of temp_x = x assignments.
- int inner_var_proxy_pos = scanner()->location().beg_pos;
- for (int i = 0; i < names->length(); i++) {
- VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
- VariableProxy* proxy =
- factory()->NewVariableProxy(inner_vars.at(i), inner_var_proxy_pos);
- Assignment* assignment = factory()->NewAssignment(
- Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition);
- compound_next = factory()->NewBinaryOperation(
- Token::COMMA, compound_next, assignment, RelocInfo::kNoPosition);
- }
+ // Create chain of expressions "flag = 0, temp_x = x, ..."
+ Statement* compound_next_statement = NULL;
+ {
+ Expression* compound_next = NULL;
+ // Make expression: flag = 0.
+ {
+ VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+ Expression* const0 =
+ factory()->NewSmiLiteral(0, RelocInfo::kNoPosition);
+ compound_next = factory()->NewAssignment(
+ Token::ASSIGN, flag_proxy, const0, RelocInfo::kNoPosition);
+ }
- compound_next_statement = factory()->NewExpressionStatement(
- compound_next, RelocInfo::kNoPosition);
- }
+ // Make the comma-separated list of temp_x = x assignments.
+ int inner_var_proxy_pos = scanner()->location().beg_pos;
+ for (int i = 0; i < names->length(); i++) {
+ VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
+ VariableProxy* proxy =
+ factory()->NewVariableProxy(inner_vars.at(i), inner_var_proxy_pos);
+ Assignment* assignment = factory()->NewAssignment(
+ Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition);
+ compound_next = factory()->NewBinaryOperation(
+ Token::COMMA, compound_next, assignment, RelocInfo::kNoPosition);
+ }
- // Make statement: labels: for (; flag == 1; flag = 0, temp_x = x)
- // Note that we re-use the original loop node, which retains its labels
- // and ensures that any break or continue statements in body point to
- // the right place.
- loop->Initialize(NULL, flag_cond, compound_next_statement, body);
- inner_block->statements()->Add(loop, zone());
+ compound_next_statement = factory()->NewExpressionStatement(
+ compound_next, RelocInfo::kNoPosition);
+ }
- // Make statement: {{if (flag == 1) break;}}
- {
- Expression* compare = NULL;
- // Make compare expresion: flag == 1.
+ // Make statement: labels: for (; flag == 1; flag = 0, temp_x = x)
+ // Note that we re-use the original loop node, which retains its labels
+ // and ensures that any break or continue statements in body point to
+ // the right place.
+ loop->Initialize(NULL, flag_cond, compound_next_statement, body);
+ inner_block->statements()->Add(loop, zone());
+
+ // Make statement: {{if (flag == 1) break;}}
{
- Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
- VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
- compare = factory()->NewCompareOperation(Token::EQ, flag_proxy, const1,
- RelocInfo::kNoPosition);
- }
- Statement* stop =
- factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
- Statement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
- Statement* if_flag_break =
- factory()->NewIfStatement(compare, stop, empty, RelocInfo::kNoPosition);
- Block* ignore_completion_block =
- factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
- ignore_completion_block->statements()->Add(if_flag_break, zone());
- inner_block->statements()->Add(ignore_completion_block, zone());
- }
+ Expression* compare = NULL;
+ // Make compare expression: flag == 1.
+ {
+ Expression* const1 =
+ factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
+ VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+ compare = factory()->NewCompareOperation(Token::EQ, flag_proxy, const1,
+ RelocInfo::kNoPosition);
+ }
+ Statement* stop =
+ factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
+ Statement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+ Statement* if_flag_break = factory()->NewIfStatement(
+ compare, stop, empty, RelocInfo::kNoPosition);
+ Block* ignore_completion_block =
+ factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
+ ignore_completion_block->statements()->Add(if_flag_break, zone());
+ inner_block->statements()->Add(ignore_completion_block, zone());
+ }
- inner_scope->set_end_position(scanner()->location().end_pos);
- inner_block->set_scope(inner_scope);
- scope_ = for_scope;
+ inner_scope->set_end_position(scanner()->location().end_pos);
+ inner_block->set_scope(inner_scope);
+ }
outer_loop->Initialize(NULL, NULL, NULL, inner_block);
return outer_block;
@@ -3536,18 +3598,14 @@ Statement* Parser::DesugarLexicalBindingsInForStatement(
Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
bool* ok) {
- // ForStatement ::
- // 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
-
int stmt_pos = peek_position();
- bool is_const = false;
Statement* init = NULL;
ZoneList<const AstRawString*> lexical_bindings(1, zone());
// Create an in-between scope for let-bound iteration variables.
- Scope* saved_scope = scope_;
Scope* for_scope = NewScope(scope_, BLOCK_SCOPE);
- scope_ = for_scope;
+
+ BlockState block_state(&scope_, for_scope);
Expect(Token::FOR, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
for_scope->set_start_position(scanner()->location().beg_pos);
@@ -3556,23 +3614,20 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR || (peek() == Token::CONST && allow_const()) ||
(peek() == Token::LET && IsNextLetKeyword())) {
- ParseVariableDeclarations(kForStatement, &parsing_result, CHECK_OK);
- is_const = parsing_result.descriptor.mode == CONST;
+ ParseVariableDeclarations(kForStatement, &parsing_result, nullptr,
+ CHECK_OK);
- int num_decl = parsing_result.declarations.length();
- bool accept_IN = num_decl >= 1;
ForEachStatement::VisitMode mode;
int each_beg_pos = scanner()->location().beg_pos;
int each_end_pos = scanner()->location().end_pos;
- if (accept_IN && CheckInOrOf(&mode, ok)) {
+ if (CheckInOrOf(&mode, ok)) {
if (!*ok) return nullptr;
- if (num_decl != 1) {
- const char* loop_type =
- mode == ForEachStatement::ITERATE ? "for-of" : "for-in";
+ if (parsing_result.declarations.length() != 1) {
ParserTraits::ReportMessageAt(
parsing_result.bindings_loc,
- MessageTemplate::kForInOfLoopMultiBindings, loop_type);
+ MessageTemplate::kForInOfLoopMultiBindings,
+ ForEachStatement::VisitModeString(mode));
*ok = false;
return nullptr;
}
@@ -3582,14 +3637,10 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
(is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
IsLexicalVariableMode(parsing_result.descriptor.mode) ||
!decl.pattern->IsVariableProxy())) {
- if (mode == ForEachStatement::ITERATE) {
- ReportMessageAt(parsing_result.first_initializer_loc,
- MessageTemplate::kForOfLoopInitializer);
- } else {
- // TODO(caitp): This should be an error in sloppy mode too.
- ReportMessageAt(parsing_result.first_initializer_loc,
- MessageTemplate::kForInLoopInitializer);
- }
+ ParserTraits::ReportMessageAt(
+ parsing_result.first_initializer_loc,
+ MessageTemplate::kForInOfLoopInitializer,
+ ForEachStatement::VisitModeString(mode));
*ok = false;
return nullptr;
}
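The unified message templates cover the head restrictions below (messages
paraphrased); the legacy sloppy-mode for-in initializer stays parseable and is
use-counted just below:

    // for (let x, y of [1, 2]) {}  // SyntaxError: only one binding allowed
    // for (let x = 0 in {}) {}     // SyntaxError: no initializer in a for-in/of head
    for (var x = 0 in {}) {}        // sloppy-only legacy form (kForInInitializer)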
@@ -3599,6 +3650,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// special case for legacy for (var/const x =.... in)
if (!IsLexicalVariableMode(parsing_result.descriptor.mode) &&
decl.pattern->IsVariableProxy() && decl.initializer != nullptr) {
+ ++use_counts_[v8::Isolate::kForInInitializer];
const AstRawString* name =
decl.pattern->AsVariableProxy()->raw_name();
VariableProxy* single_var = scope_->NewUnresolved(
@@ -3630,52 +3682,59 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// let x; // for TDZ
// }
- Variable* temp = scope_->NewTemporary(
- ast_value_factory()->dot_for_string());
+ Variable* temp =
+ scope_->NewTemporary(ast_value_factory()->dot_for_string());
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, stmt_pos);
Target target(&this->target_stack_, loop);
- Expression* enumerable = ParseExpression(true, CHECK_OK);
+ Expression* enumerable;
+ if (mode == ForEachStatement::ITERATE) {
+ ExpressionClassifier classifier(this);
+ enumerable = ParseAssignmentExpression(true, &classifier, CHECK_OK);
+ RewriteNonPattern(&classifier, CHECK_OK);
+ } else {
+ enumerable = ParseExpression(true, CHECK_OK);
+ }
Expect(Token::RPAREN, CHECK_OK);
Scope* body_scope = NewScope(scope_, BLOCK_SCOPE);
body_scope->set_start_position(scanner()->location().beg_pos);
- scope_ = body_scope;
-
- Statement* body = ParseSubStatement(NULL, CHECK_OK);
Block* body_block =
factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
- auto each_initialization_block =
- factory()->NewBlock(nullptr, 1, true, RelocInfo::kNoPosition);
{
- auto descriptor = parsing_result.descriptor;
- descriptor.declaration_pos = RelocInfo::kNoPosition;
- descriptor.initialization_pos = RelocInfo::kNoPosition;
- decl.initializer = factory()->NewVariableProxy(temp);
+ BlockState block_state(&scope_, body_scope);
+
+ Statement* body = ParseSubStatement(NULL, CHECK_OK);
+
+ auto each_initialization_block =
+ factory()->NewBlock(nullptr, 1, true, RelocInfo::kNoPosition);
+ {
+ auto descriptor = parsing_result.descriptor;
+ descriptor.declaration_pos = RelocInfo::kNoPosition;
+ descriptor.initialization_pos = RelocInfo::kNoPosition;
+ decl.initializer = factory()->NewVariableProxy(temp);
+
+ PatternRewriter::DeclareAndInitializeVariables(
+ each_initialization_block, &descriptor, &decl,
+ IsLexicalVariableMode(descriptor.mode) ? &lexical_bindings
+ : nullptr,
+ CHECK_OK);
+ }
- PatternRewriter::DeclareAndInitializeVariables(
- each_initialization_block, &descriptor, &decl,
- IsLexicalVariableMode(descriptor.mode) ? &lexical_bindings
- : nullptr,
- CHECK_OK);
+ body_block->statements()->Add(each_initialization_block, zone());
+ body_block->statements()->Add(body, zone());
+ VariableProxy* temp_proxy =
+ factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
+ InitializeForEachStatement(loop, temp_proxy, enumerable, body_block,
+ false);
}
-
- body_block->statements()->Add(each_initialization_block, zone());
- body_block->statements()->Add(body, zone());
- VariableProxy* temp_proxy =
- factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
- InitializeForEachStatement(loop, temp_proxy, enumerable, body_block,
- false);
- scope_ = for_scope;
body_scope->set_end_position(scanner()->location().end_pos);
body_scope = body_scope->FinalizeBlockScope();
- if (body_scope != nullptr) {
- body_block->set_scope(body_scope);
- }
+ body_block->set_scope(body_scope);
// Create a TDZ for any lexically-bound names.
if (IsLexicalVariableMode(parsing_result.descriptor.mode)) {
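FinalizeForOfStatement wires up iterator finalization: an abrupt loop exit
closes the iterator per ES6 IteratorClose. A sketch of the observable
behavior, assuming the finalization path is active in this build:

    const iterable = {
      [Symbol.iterator]() {
        let i = 0;
        return {
          next() { return { value: i++, done: false }; },
          return() { console.log("closed"); return { done: true }; }
        };
      }
    };
    for (const v of iterable) {
      if (v === 2) break;  // break triggers iterator.return() -> "closed"
    }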
@@ -3688,28 +3747,31 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// TODO(adamk): This needs to be some sort of special
// INTERNAL variable that's invisible to the debugger
// but visible to everything else.
- VariableProxy* tdz_proxy = NewUnresolved(lexical_bindings[i], LET);
+ VariableProxy* tdz_proxy =
+ NewUnresolved(lexical_bindings[i], LET);
Declaration* tdz_decl = factory()->NewVariableDeclaration(
tdz_proxy, LET, scope_, RelocInfo::kNoPosition);
- Variable* tdz_var = Declare(tdz_decl, DeclarationDescriptor::NORMAL,
- true, CHECK_OK);
+ Variable* tdz_var = Declare(
+ tdz_decl, DeclarationDescriptor::NORMAL, true, CHECK_OK);
tdz_var->set_initializer_position(position());
}
}
- scope_ = saved_scope;
+ Statement* final_loop = loop->IsForOfStatement()
+ ? FinalizeForOfStatement(
+ loop->AsForOfStatement(), RelocInfo::kNoPosition)
+ : loop;
+
for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
// Parsed for-in loop w/ variable declarations.
if (init_block != nullptr) {
- init_block->statements()->Add(loop, zone());
- if (for_scope != nullptr) {
- init_block->set_scope(for_scope);
- }
+ init_block->statements()->Add(final_loop, zone());
+ init_block->set_scope(for_scope);
return init_block;
} else {
DCHECK_NULL(for_scope);
- return loop;
+ return final_loop;
}
} else {
init = parsing_result.BuildInitializationBlock(
@@ -3720,7 +3782,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
}
} else {
int lhs_beg_pos = peek_position();
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
Expression* expression = ParseExpression(false, &classifier, CHECK_OK);
int lhs_end_pos = scanner()->location().end_pos;
ForEachStatement::VisitMode mode;
@@ -3738,8 +3800,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
if (is_destructuring) {
ValidateAssignmentPattern(&classifier, CHECK_OK);
} else {
- expression =
- ParserTraits::RewriteNonPattern(expression, &classifier, CHECK_OK);
+ RewriteNonPattern(&classifier, CHECK_OK);
}
if (is_for_each) {
@@ -3753,7 +3814,15 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
factory()->NewForEachStatement(mode, labels, stmt_pos);
Target target(&this->target_stack_, loop);
- Expression* enumerable = ParseExpression(true, CHECK_OK);
+ Expression* enumerable;
+ if (mode == ForEachStatement::ITERATE) {
+ ExpressionClassifier classifier(this);
+ enumerable = ParseAssignmentExpression(true, &classifier, CHECK_OK);
+ RewriteNonPattern(&classifier, CHECK_OK);
+ } else {
+ enumerable = ParseExpression(true, CHECK_OK);
+ }
+
Expect(Token::RPAREN, CHECK_OK);
// Make a block around the statement in case a lexical binding
@@ -3763,24 +3832,28 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// expressions in head of the loop should actually have variables
// resolved in the outer scope.
Scope* body_scope = NewScope(for_scope, BLOCK_SCOPE);
- scope_ = body_scope;
- Block* block =
- factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
- Statement* body = ParseSubStatement(NULL, CHECK_OK);
- block->statements()->Add(body, zone());
- InitializeForEachStatement(loop, expression, enumerable, block,
- is_destructuring);
- scope_ = saved_scope;
- body_scope->set_end_position(scanner()->location().end_pos);
- body_scope = body_scope->FinalizeBlockScope();
- if (body_scope != nullptr) {
+ {
+ BlockState block_state(&scope_, body_scope);
+ Block* block =
+ factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
+ Statement* body = ParseSubStatement(NULL, CHECK_OK);
+ block->statements()->Add(body, zone());
+ InitializeForEachStatement(loop, expression, enumerable, block,
+ is_destructuring);
+ body_scope->set_end_position(scanner()->location().end_pos);
+ body_scope = body_scope->FinalizeBlockScope();
block->set_scope(body_scope);
}
+
+ Statement* final_loop = loop->IsForOfStatement()
+ ? FinalizeForOfStatement(
+ loop->AsForOfStatement(), RelocInfo::kNoPosition)
+ : loop;
+
for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
DCHECK(for_scope == nullptr);
- // Parsed for-in loop.
- return loop;
+ return final_loop;
} else {
init = factory()->NewExpressionStatement(expression, lhs_beg_pos);
@@ -3802,40 +3875,42 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
}
Expect(Token::SEMICOLON, CHECK_OK);
+ Expression* cond = NULL;
+ Statement* next = NULL;
+ Statement* body = NULL;
+
  // If there are let bindings, then the condition and the next statement of
  // the for loop must be parsed in a new scope.
- Scope* inner_scope = NULL;
+ Scope* inner_scope = scope_;
if (lexical_bindings.length() > 0) {
inner_scope = NewScope(for_scope, BLOCK_SCOPE);
inner_scope->set_start_position(scanner()->location().beg_pos);
- scope_ = inner_scope;
}
+ {
+ BlockState block_state(&scope_, inner_scope);
- Expression* cond = NULL;
- if (peek() != Token::SEMICOLON) {
- cond = ParseExpression(true, CHECK_OK);
- }
- Expect(Token::SEMICOLON, CHECK_OK);
+ if (peek() != Token::SEMICOLON) {
+ cond = ParseExpression(true, CHECK_OK);
+ }
+ Expect(Token::SEMICOLON, CHECK_OK);
- Statement* next = NULL;
- if (peek() != Token::RPAREN) {
- Expression* exp = ParseExpression(true, CHECK_OK);
- next = factory()->NewExpressionStatement(exp, exp->position());
- }
- Expect(Token::RPAREN, CHECK_OK);
+ if (peek() != Token::RPAREN) {
+ Expression* exp = ParseExpression(true, CHECK_OK);
+ next = factory()->NewExpressionStatement(exp, exp->position());
+ }
+ Expect(Token::RPAREN, CHECK_OK);
- Statement* body = ParseSubStatement(NULL, CHECK_OK);
+ body = ParseSubStatement(NULL, CHECK_OK);
+ }
Statement* result = NULL;
if (lexical_bindings.length() > 0) {
- scope_ = for_scope;
+ BlockState block_state(&scope_, for_scope);
result = DesugarLexicalBindingsInForStatement(
- inner_scope, is_const, &lexical_bindings, loop, init, cond,
- next, body, CHECK_OK);
- scope_ = saved_scope;
+ inner_scope, parsing_result.descriptor.mode, &lexical_bindings, loop,
+ init, cond, next, body, CHECK_OK);
for_scope->set_end_position(scanner()->location().end_pos);
} else {
- scope_ = saved_scope;
for_scope->set_end_position(scanner()->location().end_pos);
for_scope = for_scope->FinalizeBlockScope();
if (for_scope) {
@@ -4035,7 +4110,7 @@ void ParserTraits::ParseArrowFunctionFormalParameterList(
ParseArrowFunctionFormalParameters(parameters, expr, params_loc, ok);
if (!*ok) return;
- ExpressionClassifier classifier;
+ Type::ExpressionClassifier classifier(parser_);
if (!parameters->is_simple) {
classifier.RecordNonSimpleParameter();
}
@@ -4069,7 +4144,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
const AstRawString* function_name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_pos, FunctionLiteral::FunctionType function_type,
- FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
@@ -4137,17 +4211,18 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
int materialized_literal_count = -1;
int expected_property_count = -1;
DuplicateFinder duplicate_finder(scanner()->unicode_cache());
- ExpressionClassifier formals_classifier(&duplicate_finder);
FunctionLiteral::EagerCompileHint eager_compile_hint =
parenthesized_function_ ? FunctionLiteral::kShouldEagerCompile
: FunctionLiteral::kShouldLazyCompile;
bool should_be_used_once_hint = false;
+ bool has_duplicate_parameters;
// Parse function.
{
AstNodeFactory function_factory(ast_value_factory());
FunctionState function_state(&function_state_, &scope_, scope, kind,
&function_factory);
scope_->SetScopeName(function_name);
+ ExpressionClassifier formals_classifier(this, &duplicate_finder);
if (is_generator) {
// For generators, allocating variables in contexts is currently a win
@@ -4172,11 +4247,15 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
Expect(Token::RPAREN, CHECK_OK);
int formals_end_position = scanner()->location().end_pos;
- CheckArityRestrictions(arity, arity_restriction,
- formals.has_rest, start_position,
+ CheckArityRestrictions(arity, kind, formals.has_rest, start_position,
formals_end_position, CHECK_OK);
Expect(Token::LBRACE, CHECK_OK);
+    // Don't include the rest parameter in the function's formal parameter
+ // count (esp. the SharedFunctionInfo::internal_formal_parameter_count,
+ // which says whether we need to create an arguments adaptor frame).
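+    // E.g. (illustrative only): for `function f(a, b, ...r) {}` the
+    // recorded arity is 2, not 3.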
+ if (formals.has_rest) arity--;
+
// Determine if the function can be parsed lazily. Lazy parsing is different
// from lazy compilation; we need to parse more eagerly than we compile.
@@ -4321,10 +4400,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// If body can be inspected, rewrite queued destructuring assignments
ParserTraits::RewriteDestructuringAssignments();
}
+ has_duplicate_parameters =
+ !formals_classifier.is_valid_formal_parameter_list_without_duplicates();
}
- bool has_duplicate_parameters =
- !formals_classifier.is_valid_formal_parameter_list_without_duplicates();
FunctionLiteral::ParameterFlag duplicate_parameters =
has_duplicate_parameters ? FunctionLiteral::kHasDuplicateParameters
: FunctionLiteral::kNoDuplicateParameters;
@@ -4337,10 +4416,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (should_be_used_once_hint)
function_literal->set_should_be_used_once_hint();
- if (scope->has_rest_parameter()) {
- function_literal->set_dont_optimize_reason(kRestParameter);
- }
-
if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
return function_literal;
}
@@ -4462,8 +4537,7 @@ class InitializerRewriter : public AstExpressionVisitor {
private:
void VisitExpression(Expression* expr) {
- RewritableAssignmentExpression* to_rewrite =
- expr->AsRewritableAssignmentExpression();
+ RewritableExpression* to_rewrite = expr->AsRewritableExpression();
if (to_rewrite == nullptr || to_rewrite->is_rewritten()) return;
Parser::PatternRewriter::RewriteDestructuringAssignment(parser_, to_rewrite,
@@ -4497,7 +4571,6 @@ Block* Parser::BuildParameterInitializationBlock(
descriptor.scope = scope_;
descriptor.hoist_scope = nullptr;
descriptor.mode = LET;
- descriptor.needs_init = true;
descriptor.declaration_pos = parameter.pattern->position();
// The position that will be used by the AssignmentExpression
// which copies from the temp parameter to the pattern.
@@ -4597,35 +4670,72 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
{
BlockState block_state(&scope_, inner_scope);
- // For generators, allocate and yield an iterator on function entry.
if (IsGeneratorFunction(kind)) {
- ZoneList<Expression*>* arguments =
- new(zone()) ZoneList<Expression*>(0, zone());
- CallRuntime* allocation = factory()->NewCallRuntime(
- Runtime::kCreateJSGeneratorObject, arguments, pos);
- VariableProxy* init_proxy = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
- Assignment* assignment = factory()->NewAssignment(
- Token::INIT, init_proxy, allocation, RelocInfo::kNoPosition);
- VariableProxy* get_proxy = factory()->NewVariableProxy(
- function_state_->generator_object_variable());
- Yield* yield = factory()->NewYield(
- get_proxy, assignment, Yield::kInitial, RelocInfo::kNoPosition);
- body->Add(factory()->NewExpressionStatement(
- yield, RelocInfo::kNoPosition), zone());
- }
+ // We produce:
+ //
+ // try { InitialYield; ...body...; FinalYield }
+ // finally { %GeneratorClose(generator) }
+ //
+ // - InitialYield yields the actual generator object.
+ // - FinalYield yields {value: foo, done: true} where foo is the
+ // completion value of body. (This is needed here in case the body
+ // falls through without an explicit return.)
+ // - Any return statement inside the body will be converted into a similar
+ // FinalYield.
+ // - If the generator terminates for whatever reason, we must close it.
+ // Hence the finally clause.
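+      //
+      // For example (illustrative only), a generator such as
+      //   function* g() { yield 1; }
+      // behaves as if its body were
+      //   try { InitialYield; yield 1; FinalYield }
+      //   finally { %GeneratorClose(generator) }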
+
+ Block* try_block =
+ factory()->NewBlock(nullptr, 3, false, RelocInfo::kNoPosition);
- ParseStatementList(body, Token::RBRACE, CHECK_OK);
+ {
+ ZoneList<Expression*>* arguments =
+ new (zone()) ZoneList<Expression*>(0, zone());
+ CallRuntime* allocation = factory()->NewCallRuntime(
+ Runtime::kCreateJSGeneratorObject, arguments, pos);
+ VariableProxy* init_proxy = factory()->NewVariableProxy(
+ function_state_->generator_object_variable());
+ Assignment* assignment = factory()->NewAssignment(
+ Token::INIT, init_proxy, allocation, RelocInfo::kNoPosition);
+ VariableProxy* get_proxy = factory()->NewVariableProxy(
+ function_state_->generator_object_variable());
+ Yield* yield = factory()->NewYield(
+ get_proxy, assignment, Yield::kInitial, RelocInfo::kNoPosition);
+ try_block->statements()->Add(
+ factory()->NewExpressionStatement(yield, RelocInfo::kNoPosition),
+ zone());
+ }
+
+ ParseStatementList(try_block->statements(), Token::RBRACE, CHECK_OK);
- if (IsGeneratorFunction(kind)) {
VariableProxy* get_proxy = factory()->NewVariableProxy(
function_state_->generator_object_variable());
Expression* undefined =
factory()->NewUndefinedLiteral(RelocInfo::kNoPosition);
Yield* yield = factory()->NewYield(get_proxy, undefined, Yield::kFinal,
RelocInfo::kNoPosition);
- body->Add(factory()->NewExpressionStatement(
- yield, RelocInfo::kNoPosition), zone());
+ try_block->statements()->Add(
+ factory()->NewExpressionStatement(yield, RelocInfo::kNoPosition),
+ zone());
+
+ Block* finally_block =
+ factory()->NewBlock(nullptr, 1, false, RelocInfo::kNoPosition);
+ ZoneList<Expression*>* args =
+ new (zone()) ZoneList<Expression*>(1, zone());
+ VariableProxy* call_proxy = factory()->NewVariableProxy(
+ function_state_->generator_object_variable());
+ args->Add(call_proxy, zone());
+ Expression* call = factory()->NewCallRuntime(
+ Runtime::kGeneratorClose, args, RelocInfo::kNoPosition);
+ finally_block->statements()->Add(
+ factory()->NewExpressionStatement(call, RelocInfo::kNoPosition),
+ zone());
+
+ body->Add(factory()->NewTryFinallyStatement(try_block, finally_block,
+ RelocInfo::kNoPosition),
+ zone());
+ } else {
+ ParseStatementList(body, Token::RBRACE, CHECK_OK);
}
if (IsSubclassConstructor(kind)) {
@@ -4682,6 +4792,13 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
RelocInfo::kNoPosition));
}
+ // ES6 14.6.1 Static Semantics: IsInTailPosition
+ // Mark collected return expressions that are in tail call position.
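+  // For example (illustrative only), in `function f() { return g(); }` the
+  // call `g()` was collected as a return expression and is marked here.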
+ const List<Expression*>& expressions_in_tail_position =
+ function_state_->expressions_in_tail_position();
+ for (int i = 0; i < expressions_in_tail_position.length(); ++i) {
+ expressions_in_tail_position[i]->MarkTail();
+ }
return result;
}
@@ -4693,6 +4810,8 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
if (pre_parse_timer_ != NULL) {
pre_parse_timer_->Start();
}
+ TRACE_EVENT0("v8", "V8.PreParse");
+
DCHECK_EQ(Token::LBRACE, scanner()->current_token());
if (reusable_preparser_ == NULL) {
@@ -4709,6 +4828,7 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
SET_ALLOW(strong_mode);
SET_ALLOW(harmony_do_expressions);
SET_ALLOW(harmony_function_name);
+ SET_ALLOW(harmony_function_sent);
#undef SET_ALLOW
}
PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
@@ -4751,19 +4871,17 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
VariableProxy* proxy = NULL;
if (name != NULL) {
proxy = NewUnresolved(name, CONST);
- const bool is_class_declaration = true;
- Declaration* declaration = factory()->NewVariableDeclaration(
- proxy, CONST, block_scope, pos, is_class_declaration,
- scope_->class_declaration_group_start());
+ Declaration* declaration =
+ factory()->NewVariableDeclaration(proxy, CONST, block_scope, pos);
Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
}
Expression* extends = NULL;
if (Check(Token::EXTENDS)) {
block_scope->set_start_position(scanner()->location().end_pos);
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
extends = ParseLeftHandSideExpression(&classifier, CHECK_OK);
- extends = ParserTraits::RewriteNonPattern(extends, &classifier, CHECK_OK);
+ RewriteNonPattern(&classifier, CHECK_OK);
} else {
block_scope->set_start_position(scanner()->location().end_pos);
}
@@ -4784,25 +4902,27 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
const bool is_static = false;
bool is_computed_name = false; // Classes do not care about computed
// property names here.
- ExpressionClassifier classifier;
- const AstRawString* name = nullptr;
+ ExpressionClassifier classifier(this);
+ const AstRawString* property_name = nullptr;
ObjectLiteral::Property* property = ParsePropertyDefinition(
&checker, in_class, has_extends, is_static, &is_computed_name,
- &has_seen_constructor, &classifier, &name, CHECK_OK);
- property = ParserTraits::RewriteNonPatternObjectLiteralProperty(
- property, &classifier, CHECK_OK);
+ &has_seen_constructor, &classifier, &property_name, CHECK_OK);
+ RewriteNonPattern(&classifier, CHECK_OK);
if (has_seen_constructor && constructor == NULL) {
constructor = GetPropertyValue(property)->AsFunctionLiteral();
DCHECK_NOT_NULL(constructor);
+ constructor->set_raw_name(
+ name != nullptr ? name : ast_value_factory()->empty_string());
} else {
properties->Add(property, zone());
}
if (fni_ != NULL) fni_->Infer();
- if (allow_harmony_function_name()) {
- SetFunctionNameFromPropertyName(property, name);
+ if (allow_harmony_function_name() &&
+ property_name != ast_value_factory()->constructor_string()) {
+ SetFunctionNameFromPropertyName(property, property_name);
}
}
@@ -4810,8 +4930,8 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
int end_pos = scanner()->location().end_pos;
if (constructor == NULL) {
- constructor = DefaultConstructor(extends != NULL, block_scope, pos, end_pos,
- block_scope->language_mode());
+ constructor = DefaultConstructor(name, extends != NULL, block_scope, pos,
+ end_pos, block_scope->language_mode());
}
// Note that we do not finalize this block scope because strong
@@ -4823,8 +4943,8 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
proxy->var()->set_initializer_position(end_pos);
}
- return factory()->NewClassLiteral(name, block_scope, proxy, extends,
- constructor, properties, pos, end_pos);
+ return factory()->NewClassLiteral(block_scope, proxy, extends, constructor,
+ properties, pos, end_pos);
}
@@ -4838,10 +4958,9 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
const AstRawString* name = ParseIdentifier(kAllowRestrictedIdentifiers,
CHECK_OK);
Scanner::Location spread_pos;
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
ZoneList<Expression*>* args =
ParseArguments(&spread_pos, &classifier, CHECK_OK);
- args = RewriteNonPatternArguments(args, &classifier, CHECK_OK);
DCHECK(!spread_pos.IsValid());
@@ -5051,6 +5170,12 @@ void Parser::Internalize(Isolate* isolate, Handle<Script> script, bool error) {
isolate->CountUsage(v8::Isolate::UseCounterFeature(feature));
}
}
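+  // HTML-like comments (Annex B), e.g. a "<!--" sequence in script source,
+  // are counted here when the scanner has seen one.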
+ if (scanner_.FoundHtmlComment()) {
+ isolate->CountUsage(v8::Isolate::kHtmlComment);
+ if (script->line_offset() == 0 && script->column_offset() == 0) {
+ isolate->CountUsage(v8::Isolate::kHtmlCommentInExternalScript);
+ }
+ }
isolate->counters()->total_preparse_skipped()->Increment(
total_preparse_skipped_);
}
@@ -5404,143 +5529,1379 @@ void ParserTraits::RewriteDestructuringAssignments() {
}
-Expression* ParserTraits::RewriteNonPattern(
- Expression* expr, const ExpressionClassifier* classifier, bool* ok) {
- return parser_->RewriteNonPattern(expr, classifier, ok);
+void ParserTraits::RewriteNonPattern(Type::ExpressionClassifier* classifier,
+ bool* ok) {
+ parser_->RewriteNonPattern(classifier, ok);
}
-ZoneList<Expression*>* ParserTraits::RewriteNonPatternArguments(
- ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
- bool* ok) {
- return parser_->RewriteNonPatternArguments(args, classifier, ok);
+Zone* ParserTraits::zone() const {
+ return parser_->function_state_->scope()->zone();
}
-ObjectLiteralProperty* ParserTraits::RewriteNonPatternObjectLiteralProperty(
- ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
- bool* ok) {
- return parser_->RewriteNonPatternObjectLiteralProperty(property, classifier,
- ok);
+ZoneList<Expression*>* ParserTraits::GetNonPatternList() const {
+ return parser_->function_state_->non_patterns_to_rewrite();
}
-Expression* Parser::RewriteNonPattern(Expression* expr,
- const ExpressionClassifier* classifier,
- bool* ok) {
- // For the time being, this does no rewriting at all.
- ValidateExpression(classifier, ok);
- return expr;
-}
+class NonPatternRewriter : public AstExpressionRewriter {
+ public:
+ NonPatternRewriter(uintptr_t stack_limit, Parser* parser)
+ : AstExpressionRewriter(stack_limit), parser_(parser) {}
+ ~NonPatternRewriter() override {}
+ private:
+ bool RewriteExpression(Expression* expr) override {
+ if (expr->IsRewritableExpression()) return true;
+ // Rewrite only what could have been a pattern but is not.
+ if (expr->IsArrayLiteral()) {
+ // Spread rewriting in array literals.
+ ArrayLiteral* lit = expr->AsArrayLiteral();
+ VisitExpressions(lit->values());
+ replacement_ = parser_->RewriteSpreads(lit);
+ return false;
+ }
+ if (expr->IsObjectLiteral()) {
+ return true;
+ }
+ if (expr->IsBinaryOperation() &&
+ expr->AsBinaryOperation()->op() == Token::COMMA) {
+ return true;
+ }
+ // Everything else does not need rewriting.
+ return false;
+ }
-ZoneList<Expression*>* Parser::RewriteNonPatternArguments(
- ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
- bool* ok) {
- // For the time being, this does no rewriting at all.
- ValidateExpression(classifier, ok);
- return args;
-}
+ void VisitObjectLiteralProperty(ObjectLiteralProperty* property) override {
+ if (property == nullptr) return;
+ // Do not rewrite (computed) key expressions
+ AST_REWRITE_PROPERTY(Expression, property, value);
+ }
+ Parser* parser_;
+};
-ObjectLiteralProperty* Parser::RewriteNonPatternObjectLiteralProperty(
- ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
- bool* ok) {
- if (property != nullptr) {
- Expression* key = RewriteNonPattern(property->key(), classifier, ok);
- property->set_key(key);
- Expression* value = RewriteNonPattern(property->value(), classifier, ok);
- property->set_value(value);
+
+void Parser::RewriteNonPattern(ExpressionClassifier* classifier, bool* ok) {
+ ValidateExpression(classifier, ok);
+ if (!*ok) return;
+ auto non_patterns_to_rewrite = function_state_->non_patterns_to_rewrite();
+ int begin = classifier->GetNonPatternBegin();
+ int end = non_patterns_to_rewrite->length();
+ if (begin < end) {
+ NonPatternRewriter rewriter(stack_limit_, this);
+ for (int i = begin; i < end; i++) {
+ DCHECK(non_patterns_to_rewrite->at(i)->IsRewritableExpression());
+ rewriter.Rewrite(non_patterns_to_rewrite->at(i));
+ }
+ non_patterns_to_rewrite->Rewind(begin);
}
- return property;
}
void Parser::RewriteDestructuringAssignments() {
- FunctionState* func = function_state_;
if (!allow_harmony_destructuring_assignment()) return;
- const List<DestructuringAssignment>& assignments =
- func->destructuring_assignments_to_rewrite();
+ const auto& assignments =
+ function_state_->destructuring_assignments_to_rewrite();
for (int i = assignments.length() - 1; i >= 0; --i) {
// Rewrite list in reverse, so that nested assignment patterns are rewritten
// correctly.
- DestructuringAssignment pair = assignments.at(i);
- RewritableAssignmentExpression* to_rewrite =
- pair.assignment->AsRewritableAssignmentExpression();
- Scope* scope = pair.scope;
+ const DestructuringAssignment& pair = assignments.at(i);
+ RewritableExpression* to_rewrite =
+ pair.assignment->AsRewritableExpression();
DCHECK_NOT_NULL(to_rewrite);
if (!to_rewrite->is_rewritten()) {
- PatternRewriter::RewriteDestructuringAssignment(this, to_rewrite, scope);
+ PatternRewriter::RewriteDestructuringAssignment(this, to_rewrite,
+ pair.scope);
}
}
}
+Expression* Parser::RewriteSpreads(ArrayLiteral* lit) {
+ // Array literals containing spreads are rewritten using do expressions, e.g.
+ // [1, 2, 3, ...x, 4, ...y, 5]
+ // is roughly rewritten as:
+ // do {
+ // $R = [1, 2, 3];
+ // for ($i of x) %AppendElement($R, $i);
+ // %AppendElement($R, 4);
+ // for ($j of y) %AppendElement($R, $j);
+ // %AppendElement($R, 5);
+ // $R
+ // }
+ // where $R, $i and $j are fresh temporary variables.
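+  // For instance (illustrative only), with x = [10] and y = [20] the
+  // example above evaluates to [1, 2, 3, 10, 4, 20, 5].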
+ ZoneList<Expression*>::iterator s = lit->FirstSpread();
+ if (s == lit->EndValue()) return nullptr; // no spread, no rewriting...
+ Variable* result =
+ scope_->NewTemporary(ast_value_factory()->dot_result_string());
+ // NOTE: The value assigned to R is the whole original array literal,
+ // spreads included. This will be fixed before the rewritten AST is returned.
+ // $R = lit
+ Expression* init_result =
+ factory()->NewAssignment(Token::INIT, factory()->NewVariableProxy(result),
+ lit, RelocInfo::kNoPosition);
+ Block* do_block =
+ factory()->NewBlock(nullptr, 16, false, RelocInfo::kNoPosition);
+ do_block->statements()->Add(
+ factory()->NewExpressionStatement(init_result, RelocInfo::kNoPosition),
+ zone());
+ // Traverse the array literal starting from the first spread.
+ while (s != lit->EndValue()) {
+ Expression* value = *s++;
+ Spread* spread = value->AsSpread();
+ if (spread == nullptr) {
+ // If the element is not a spread, we're adding a single:
+ // %AppendElement($R, value)
+ ZoneList<Expression*>* append_element_args = NewExpressionList(2, zone());
+ append_element_args->Add(factory()->NewVariableProxy(result), zone());
+ append_element_args->Add(value, zone());
+ do_block->statements()->Add(
+ factory()->NewExpressionStatement(
+ factory()->NewCallRuntime(Runtime::kAppendElement,
+ append_element_args,
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ zone());
+ } else {
+ // If it's a spread, we're adding a for/of loop iterating through it.
+ Variable* each =
+ scope_->NewTemporary(ast_value_factory()->dot_for_string());
+ Expression* subject = spread->expression();
+ Variable* iterator =
+ scope_->NewTemporary(ast_value_factory()->dot_iterator_string());
+ Variable* element =
+ scope_->NewTemporary(ast_value_factory()->dot_result_string());
+ // iterator = subject[Symbol.iterator]()
+ Expression* assign_iterator = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(iterator),
+ GetIterator(subject, factory(), spread->expression_position()),
+ subject->position());
+ // !%_IsJSReceiver(element = iterator.next()) &&
+ // %ThrowIteratorResultNotAnObject(element)
+ Expression* next_element;
+ {
+ // element = iterator.next()
+ Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
+ next_element = BuildIteratorNextResult(iterator_proxy, element,
+ spread->expression_position());
+ }
+ // element.done
+ Expression* element_done;
+ {
+ Expression* done_literal = factory()->NewStringLiteral(
+ ast_value_factory()->done_string(), RelocInfo::kNoPosition);
+ Expression* element_proxy = factory()->NewVariableProxy(element);
+ element_done = factory()->NewProperty(element_proxy, done_literal,
+ RelocInfo::kNoPosition);
+ }
+ // each = element.value
+ Expression* assign_each;
+ {
+ Expression* value_literal = factory()->NewStringLiteral(
+ ast_value_factory()->value_string(), RelocInfo::kNoPosition);
+ Expression* element_proxy = factory()->NewVariableProxy(element);
+ Expression* element_value = factory()->NewProperty(
+ element_proxy, value_literal, RelocInfo::kNoPosition);
+ assign_each = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(each), element_value,
+ RelocInfo::kNoPosition);
+ }
+ // %AppendElement($R, each)
+ Statement* append_body;
+ {
+ ZoneList<Expression*>* append_element_args =
+ NewExpressionList(2, zone());
+ append_element_args->Add(factory()->NewVariableProxy(result), zone());
+ append_element_args->Add(factory()->NewVariableProxy(each), zone());
+ append_body = factory()->NewExpressionStatement(
+ factory()->NewCallRuntime(Runtime::kAppendElement,
+ append_element_args,
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+ }
+ // for (each of spread) %AppendElement($R, each)
+ ForEachStatement* loop = factory()->NewForEachStatement(
+ ForEachStatement::ITERATE, nullptr, RelocInfo::kNoPosition);
+ ForOfStatement* for_of = loop->AsForOfStatement();
+ for_of->Initialize(factory()->NewVariableProxy(each), subject,
+ append_body, iterator, assign_iterator, next_element,
+ element_done, assign_each);
+ do_block->statements()->Add(for_of, zone());
+ }
+ }
+  // Now, rewind the original array literal to truncate everything from the
+  // first spread (inclusive) to the end. This fixes $R's initialization.
+ lit->RewindSpreads();
+ return factory()->NewDoExpression(do_block, result, lit->position());
+}
+
+
void ParserTraits::QueueDestructuringAssignmentForRewriting(Expression* expr) {
- DCHECK(expr->IsRewritableAssignmentExpression());
+ DCHECK(expr->IsRewritableExpression());
parser_->function_state_->AddDestructuringAssignment(
Parser::DestructuringAssignment(expr, parser_->scope_));
}
+void ParserTraits::QueueNonPatternForRewriting(Expression* expr) {
+ DCHECK(expr->IsRewritableExpression());
+ parser_->function_state_->AddNonPatternForRewriting(expr);
+}
+
+
void ParserTraits::SetFunctionNameFromPropertyName(
ObjectLiteralProperty* property, const AstRawString* name) {
Expression* value = property->value();
- if (!value->IsFunctionLiteral() && !value->IsClassLiteral()) return;
- // TODO(adamk): Support computed names.
+ // Computed name setting must happen at runtime.
if (property->is_computed_name()) return;
+
+  // Getter and setter names are handled here because ES2015 assigns them
+  // names even though they are not anonymous.
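+  // E.g. (illustrative only): in `{ get foo() {} }` the getter's function
+  // literal is named "get foo".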
+ auto function = value->AsFunctionLiteral();
+ if (function != nullptr) {
+ bool is_getter = property->kind() == ObjectLiteralProperty::GETTER;
+ bool is_setter = property->kind() == ObjectLiteralProperty::SETTER;
+ if (is_getter || is_setter) {
+ DCHECK_NOT_NULL(name);
+ const AstRawString* prefix =
+ is_getter ? parser_->ast_value_factory()->get_space_string()
+ : parser_->ast_value_factory()->set_space_string();
+ function->set_raw_name(
+ parser_->ast_value_factory()->NewConsString(prefix, name));
+ return;
+ }
+ }
+
+ if (!value->IsAnonymousFunctionDefinition()) return;
DCHECK_NOT_NULL(name);
// Ignore "__proto__" as a name when it's being used to set the [[Prototype]]
// of an object literal.
if (property->kind() == ObjectLiteralProperty::PROTOTYPE) return;
- if (value->IsFunctionLiteral()) {
- auto function = value->AsFunctionLiteral();
- if (function->is_anonymous()) {
- if (property->kind() == ObjectLiteralProperty::GETTER) {
- function->set_raw_name(parser_->ast_value_factory()->NewConsString(
- parser_->ast_value_factory()->get_space_string(), name));
- } else if (property->kind() == ObjectLiteralProperty::SETTER) {
- function->set_raw_name(parser_->ast_value_factory()->NewConsString(
- parser_->ast_value_factory()->set_space_string(), name));
- } else {
- function->set_raw_name(name);
- DCHECK_EQ(ObjectLiteralProperty::COMPUTED, property->kind());
- }
- }
+ if (function != nullptr) {
+ function->set_raw_name(name);
+ DCHECK_EQ(ObjectLiteralProperty::COMPUTED, property->kind());
} else {
DCHECK(value->IsClassLiteral());
DCHECK_EQ(ObjectLiteralProperty::COMPUTED, property->kind());
- auto class_literal = value->AsClassLiteral();
- if (class_literal->raw_name() == nullptr) {
- class_literal->set_raw_name(name);
- }
+ value->AsClassLiteral()->constructor()->set_raw_name(name);
}
}
void ParserTraits::SetFunctionNameFromIdentifierRef(Expression* value,
Expression* identifier) {
- if (!value->IsFunctionLiteral() && !value->IsClassLiteral()) return;
+ if (!value->IsAnonymousFunctionDefinition()) return;
if (!identifier->IsVariableProxy()) return;
auto name = identifier->AsVariableProxy()->raw_name();
DCHECK_NOT_NULL(name);
- if (value->IsFunctionLiteral()) {
- auto function = value->AsFunctionLiteral();
- if (function->is_anonymous()) {
- function->set_raw_name(name);
- }
+ auto function = value->AsFunctionLiteral();
+ if (function != nullptr) {
+ function->set_raw_name(name);
} else {
DCHECK(value->IsClassLiteral());
- auto class_literal = value->AsClassLiteral();
- if (class_literal->raw_name() == nullptr) {
- class_literal->set_raw_name(name);
+ value->AsClassLiteral()->constructor()->set_raw_name(name);
+ }
+}
+
+
+// Desugaring of yield*
+// ====================
+//
+// With the help of do-expressions and function.sent, we desugar yield* into a
+// loop containing a "raw" yield (a yield that doesn't wrap an iterator result
+// object around its argument). Concretely, "yield* iterable" turns into
+// roughly the following code:
+//
+// do {
+// const kNext = 0;
+// const kReturn = 1;
+// const kThrow = 2;
+//
+// let input = function.sent;
+// let mode = kNext;
+// let output = undefined;
+//
+// let iterator = iterable[Symbol.iterator]();
+// if (!IS_RECEIVER(iterator)) throw MakeTypeError(kSymbolIteratorInvalid);
+//
+// while (true) {
+// // From the generator to the iterator:
+// // Forward input according to resume mode and obtain output.
+// switch (mode) {
+// case kNext:
+// output = iterator.next(input);
+//         if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
+// break;
+// case kReturn:
+// IteratorClose(iterator, input, output); // See below.
+// break;
+// case kThrow:
+// let iteratorThrow = iterator.throw;
+// if (IS_NULL_OR_UNDEFINED(iteratorThrow)) {
+// IteratorClose(iterator); // See below.
+// throw MakeTypeError(kThrowMethodMissing);
+// }
+// output = %_Call(iteratorThrow, iterator, input);
+//         if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
+// break;
+// }
+// if (output.done) break;
+//
+// // From the generator to its user:
+// // Forward output, receive new input, and determine resume mode.
+// mode = kReturn;
+// try {
+// try {
+// RawYield(output); // See explanation above.
+// mode = kNext;
+// } catch (error) {
+// mode = kThrow;
+// }
+// } finally {
+// input = function.sent;
+// continue;
+// }
+// }
+//
+// output.value;
+// }
+//
+// IteratorClose(iterator) expands to the following:
+//
+// let iteratorReturn = iterator.return;
+// if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return;
+// let output = %_Call(iteratorReturn, iterator);
+// if (!IS_RECEIVER(output)) %ThrowIterResultNotAnObject(output);
+//
+// IteratorClose(iterator, input, output) expands to the following:
+//
+// let iteratorReturn = iterator.return;
+// if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return input;
+// output = %_Call(iteratorReturn, iterator, input);
+//   if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
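+//
+// For example (illustrative only), given
+//
+//   function* inner() { yield 1; yield 2; }
+//   function* outer() { yield* inner(); }
+//
+// each next() sent to outer's generator is forwarded to inner's iterator
+// through the kNext case above, until inner reports done.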
+
+
+Expression* ParserTraits::RewriteYieldStar(
+ Expression* generator, Expression* iterable, int pos) {
+
+ const int nopos = RelocInfo::kNoPosition;
+
+ auto factory = parser_->factory();
+ auto avfactory = parser_->ast_value_factory();
+ auto scope = parser_->scope_;
+ auto zone = parser_->zone();
+
+
+ // Forward definition for break/continue statements.
+ WhileStatement* loop = factory->NewWhileStatement(nullptr, nopos);
+
+
+ // let input = undefined;
+ Variable* var_input = scope->NewTemporary(avfactory->empty_string());
+ Statement* initialize_input;
+ {
+ Expression* input_proxy = factory->NewVariableProxy(var_input);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, input_proxy, factory->NewUndefinedLiteral(nopos), nopos);
+ initialize_input = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+
+ // let mode = kNext;
+ Variable* var_mode = scope->NewTemporary(avfactory->empty_string());
+ Statement* initialize_mode;
+ {
+ Expression* mode_proxy = factory->NewVariableProxy(var_mode);
+ Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::NEXT, nopos);
+ Expression* assignment =
+ factory->NewAssignment(Token::ASSIGN, mode_proxy, knext, nopos);
+ initialize_mode = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+
+ // let output = undefined;
+ Variable* var_output = scope->NewTemporary(avfactory->empty_string());
+ Statement* initialize_output;
+ {
+ Expression* output_proxy = factory->NewVariableProxy(var_output);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, output_proxy, factory->NewUndefinedLiteral(nopos),
+ nopos);
+ initialize_output = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+
+  // let iterator = iterable[Symbol.iterator]();
+ Variable* var_iterator = scope->NewTemporary(avfactory->empty_string());
+ Statement* get_iterator;
+ {
+ Expression* iterator = GetIterator(iterable, factory, nopos);
+ Expression* iterator_proxy = factory->NewVariableProxy(var_iterator);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, iterator_proxy, iterator, nopos);
+ get_iterator = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+
+ // if (!IS_RECEIVER(iterator)) throw MakeTypeError(kSymbolIteratorInvalid);
+ Statement* validate_iterator;
+ {
+ Expression* is_receiver_call;
+ {
+ auto args = new (zone) ZoneList<Expression*>(1, zone);
+ args->Add(factory->NewVariableProxy(var_iterator), zone);
+ is_receiver_call =
+ factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+ }
+
+ Statement* throw_call;
+ {
+ Expression* call = NewThrowTypeError(
+ MessageTemplate::kSymbolIteratorInvalid, avfactory->empty_string(),
+ nopos);
+ throw_call = factory->NewExpressionStatement(call, nopos);
+ }
+
+ validate_iterator = factory->NewIfStatement(
+ is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+ }
+
+
+ // output = iterator.next(input);
+ Statement* call_next;
+ {
+ Expression* iterator_proxy = factory->NewVariableProxy(var_iterator);
+ Expression* literal =
+ factory->NewStringLiteral(avfactory->next_string(), nopos);
+ Expression* next_property =
+ factory->NewProperty(iterator_proxy, literal, nopos);
+ Expression* input_proxy = factory->NewVariableProxy(var_input);
+ auto args = new (zone) ZoneList<Expression*>(1, zone);
+ args->Add(input_proxy, zone);
+ Expression* call = factory->NewCall(next_property, args, nopos);
+ Expression* output_proxy = factory->NewVariableProxy(var_output);
+ Expression* assignment =
+ factory->NewAssignment(Token::ASSIGN, output_proxy, call, nopos);
+ call_next = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+
+  // if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
+ Statement* validate_next_output;
+ {
+ Expression* is_receiver_call;
+ {
+ auto args = new (zone) ZoneList<Expression*>(1, zone);
+ args->Add(factory->NewVariableProxy(var_output), zone);
+ is_receiver_call =
+ factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+ }
+
+ Statement* throw_call;
+ {
+ auto args = new (zone) ZoneList<Expression*>(1, zone);
+ args->Add(factory->NewVariableProxy(var_output), zone);
+ Expression* call = factory->NewCallRuntime(
+ Runtime::kThrowIteratorResultNotAnObject, args, nopos);
+ throw_call = factory->NewExpressionStatement(call, nopos);
+ }
+
+ validate_next_output = factory->NewIfStatement(
+ is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+ }
+
+
+ // let iteratorThrow = iterator.throw;
+ Variable* var_throw = scope->NewTemporary(avfactory->empty_string());
+ Statement* get_throw;
+ {
+ Expression* iterator_proxy = factory->NewVariableProxy(var_iterator);
+ Expression* literal =
+ factory->NewStringLiteral(avfactory->throw_string(), nopos);
+ Expression* property =
+ factory->NewProperty(iterator_proxy, literal, nopos);
+ Expression* throw_proxy = factory->NewVariableProxy(var_throw);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, throw_proxy, property, nopos);
+ get_throw = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+
+  // if (IS_NULL_OR_UNDEFINED(iteratorThrow)) {
+ // IteratorClose(iterator);
+ // throw MakeTypeError(kThrowMethodMissing);
+ // }
+ Statement* check_throw;
+ {
+ Expression* condition = factory->NewCompareOperation(
+ Token::EQ, factory->NewVariableProxy(var_throw),
+ factory->NewNullLiteral(nopos), nopos);
+
+ Expression* call = NewThrowTypeError(
+ MessageTemplate::kThrowMethodMissing,
+ avfactory->empty_string(), nopos);
+ Statement* throw_call = factory->NewExpressionStatement(call, nopos);
+
+    Block* then = factory->NewBlock(nullptr, 4 + 1, false, nopos);
+ Variable* var_tmp = scope->NewTemporary(avfactory->empty_string());
+ BuildIteratorClose(
+ then->statements(), var_iterator, factory->NewUndefinedLiteral(nopos),
+ var_tmp);
+ then->statements()->Add(throw_call, zone);
+ check_throw = factory->NewIfStatement(
+ condition, then, factory->NewEmptyStatement(nopos), nopos);
+ }
+
+
+ // output = %_Call(iteratorThrow, iterator, input);
+ Statement* call_throw;
+ {
+ auto args = new (zone) ZoneList<Expression*>(3, zone);
+ args->Add(factory->NewVariableProxy(var_throw), zone);
+ args->Add(factory->NewVariableProxy(var_iterator), zone);
+ args->Add(factory->NewVariableProxy(var_input), zone);
+ Expression* call =
+ factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, factory->NewVariableProxy(var_output), call, nopos);
+ call_throw = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+
+  // if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
+ Statement* validate_throw_output;
+ {
+ Expression* is_receiver_call;
+ {
+ auto args = new (zone) ZoneList<Expression*>(1, zone);
+ args->Add(factory->NewVariableProxy(var_output), zone);
+ is_receiver_call =
+ factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+ }
+
+ Statement* throw_call;
+ {
+ auto args = new (zone) ZoneList<Expression*>(1, zone);
+ args->Add(factory->NewVariableProxy(var_output), zone);
+ Expression* call = factory->NewCallRuntime(
+ Runtime::kThrowIteratorResultNotAnObject, args, nopos);
+ throw_call = factory->NewExpressionStatement(call, nopos);
}
+
+ validate_throw_output = factory->NewIfStatement(
+ is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+ }
+
+
+ // if (output.done) break;
+ Statement* if_done;
+ {
+ Expression* output_proxy = factory->NewVariableProxy(var_output);
+ Expression* literal =
+ factory->NewStringLiteral(avfactory->done_string(), nopos);
+ Expression* property = factory->NewProperty(output_proxy, literal, nopos);
+ BreakStatement* break_loop = factory->NewBreakStatement(loop, nopos);
+ if_done = factory->NewIfStatement(
+ property, break_loop, factory->NewEmptyStatement(nopos), nopos);
+ }
+
+
+ // mode = kReturn;
+ Statement* set_mode_return;
+ {
+ Expression* mode_proxy = factory->NewVariableProxy(var_mode);
+ Expression* kreturn =
+ factory->NewSmiLiteral(JSGeneratorObject::RETURN, nopos);
+ Expression* assignment =
+ factory->NewAssignment(Token::ASSIGN, mode_proxy, kreturn, nopos);
+ set_mode_return = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+
+ // RawYield(output);
+ Statement* yield_output;
+ {
+ Expression* output_proxy = factory->NewVariableProxy(var_output);
+ Yield* yield = factory->NewYield(
+ generator, output_proxy, Yield::kInitial, nopos);
+ yield_output = factory->NewExpressionStatement(yield, nopos);
+ }
+
+
+ // mode = kNext;
+ Statement* set_mode_next;
+ {
+ Expression* mode_proxy = factory->NewVariableProxy(var_mode);
+ Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::NEXT, nopos);
+ Expression* assignment =
+ factory->NewAssignment(Token::ASSIGN, mode_proxy, knext, nopos);
+ set_mode_next = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+
+ // mode = kThrow;
+ Statement* set_mode_throw;
+ {
+ Expression* mode_proxy = factory->NewVariableProxy(var_mode);
+ Expression* kthrow =
+ factory->NewSmiLiteral(JSGeneratorObject::THROW, nopos);
+ Expression* assignment =
+ factory->NewAssignment(Token::ASSIGN, mode_proxy, kthrow, nopos);
+ set_mode_throw = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+
+ // input = function.sent;
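+  // (function.sent evaluates to the value the generator was last resumed
+  // with; it is gated behind the harmony_function_sent flag.)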
+ Statement* get_input;
+ {
+ Expression* function_sent = FunctionSentExpression(scope, factory, nopos);
+ Expression* input_proxy = factory->NewVariableProxy(var_input);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, input_proxy, function_sent, nopos);
+ get_input = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+
+ // output.value;
+ Statement* get_value;
+ {
+ Expression* output_proxy = factory->NewVariableProxy(var_output);
+ Expression* literal =
+ factory->NewStringLiteral(avfactory->value_string(), nopos);
+ Expression* property = factory->NewProperty(output_proxy, literal, nopos);
+ get_value = factory->NewExpressionStatement(property, nopos);
+ }
+
+
+ // Now put things together.
+
+
+ // try { ... } catch(e) { ... }
+ Statement* try_catch;
+ {
+ Block* try_block = factory->NewBlock(nullptr, 2, false, nopos);
+ try_block->statements()->Add(yield_output, zone);
+ try_block->statements()->Add(set_mode_next, zone);
+
+ Block* catch_block = factory->NewBlock(nullptr, 1, false, nopos);
+ catch_block->statements()->Add(set_mode_throw, zone);
+
+ Scope* catch_scope = NewScope(scope, CATCH_SCOPE);
+ const AstRawString* name = avfactory->dot_catch_string();
+ Variable* catch_variable =
+ catch_scope->DeclareLocal(name, VAR, kCreatedInitialized,
+ Variable::NORMAL);
+
+ try_catch = factory->NewTryCatchStatement(
+ try_block, catch_scope, catch_variable, catch_block, nopos);
}
+
+
+ // try { ... } finally { ... }
+ Statement* try_finally;
+ {
+ Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
+ try_block->statements()->Add(try_catch, zone);
+
+ Block* finally = factory->NewBlock(nullptr, 2, false, nopos);
+ finally->statements()->Add(get_input, zone);
+ finally->statements()->Add(
+ factory->NewContinueStatement(loop, nopos), zone);
+
+ try_finally = factory->NewTryFinallyStatement(try_block, finally, nopos);
+ }
+
+
+ // switch (mode) { ... }
+ SwitchStatement* switch_mode = factory->NewSwitchStatement(nullptr, nopos);
+ {
+ auto case_next = new (zone) ZoneList<Statement*>(3, zone);
+ case_next->Add(call_next, zone);
+ case_next->Add(validate_next_output, zone);
+ case_next->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
+
+ auto case_return = new (zone) ZoneList<Statement*>(5, zone);
+ BuildIteratorClose(case_return, var_iterator,
+ factory->NewVariableProxy(var_input, nopos), var_output);
+ case_return->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
+
+ auto case_throw = new (zone) ZoneList<Statement*>(5, zone);
+ case_throw->Add(get_throw, zone);
+ case_throw->Add(check_throw, zone);
+ case_throw->Add(call_throw, zone);
+ case_throw->Add(validate_throw_output, zone);
+ case_throw->Add(factory->NewBreakStatement(switch_mode, nopos), zone);
+
+ auto cases = new (zone) ZoneList<CaseClause*>(3, zone);
+ Expression* knext = factory->NewSmiLiteral(JSGeneratorObject::NEXT, nopos);
+ Expression* kreturn =
+ factory->NewSmiLiteral(JSGeneratorObject::RETURN, nopos);
+ Expression* kthrow =
+ factory->NewSmiLiteral(JSGeneratorObject::THROW, nopos);
+ cases->Add(factory->NewCaseClause(knext, case_next, nopos), zone);
+ cases->Add(factory->NewCaseClause(kreturn, case_return, nopos), zone);
+ cases->Add(factory->NewCaseClause(kthrow, case_throw, nopos), zone);
+
+ switch_mode->Initialize(factory->NewVariableProxy(var_mode), cases);
+ }
+
+
+ // while (true) { ... }
+ // Already defined earlier: WhileStatement* loop = ...
+ {
+ Block* loop_body = factory->NewBlock(nullptr, 4, false, nopos);
+ loop_body->statements()->Add(switch_mode, zone);
+ loop_body->statements()->Add(if_done, zone);
+ loop_body->statements()->Add(set_mode_return, zone);
+ loop_body->statements()->Add(try_finally, zone);
+
+ loop->Initialize(factory->NewBooleanLiteral(true, nopos), loop_body);
+ }
+
+
+ // do { ... }
+ DoExpression* yield_star;
+ {
+ // The rewriter needs to process the get_value statement only, hence we
+ // put the preceding statements into an init block.
+
+ Block* do_block_ = factory->NewBlock(nullptr, 6, true, nopos);
+ do_block_->statements()->Add(initialize_input, zone);
+ do_block_->statements()->Add(initialize_mode, zone);
+ do_block_->statements()->Add(initialize_output, zone);
+ do_block_->statements()->Add(get_iterator, zone);
+ do_block_->statements()->Add(validate_iterator, zone);
+ do_block_->statements()->Add(loop, zone);
+
+ Block* do_block = factory->NewBlock(nullptr, 2, false, nopos);
+ do_block->statements()->Add(do_block_, zone);
+ do_block->statements()->Add(get_value, zone);
+
+ Variable* dot_result = scope->NewTemporary(avfactory->dot_result_string());
+ yield_star = factory->NewDoExpression(do_block, dot_result, nopos);
+ Rewriter::Rewrite(parser_, yield_star, avfactory);
+ }
+
+ return yield_star;
+}
+
+// Desugaring of (lhs) instanceof (rhs)
+// ====================================
+//
+// We desugar instanceof into a load of property @@hasInstance on the rhs.
+// We end up with roughly the following code (O and C are fresh temporaries):
+//
+// do {
+// let O = lhs;
+// let C = rhs;
+// if (!IS_RECEIVER(C)) throw MakeTypeError(kNonObjectInInstanceOfCheck);
+// let handler_result = C[Symbol.hasInstance];
+// if (handler_result === undefined) {
+// if (!IS_CALLABLE(C)) {
+// throw MakeTypeError(kCalledNonCallableInstanceOf);
+// }
+// handler_result = %ordinary_has_instance(C, O);
+// } else {
+// handler_result = !!(%_Call(handler_result, C, O));
+// }
+// handler_result;
+// }
+//
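+// For example (illustrative only), with
+//
+//   class Even { static [Symbol.hasInstance](x) { return x % 2 === 0; } }
+//
+// the expression `4 instanceof Even` finds the @@hasInstance handler and
+// evaluates to true via the %_Call branch above.
+//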
+Expression* ParserTraits::RewriteInstanceof(Expression* lhs, Expression* rhs,
+ int pos) {
+ const int nopos = RelocInfo::kNoPosition;
+
+ auto factory = parser_->factory();
+ auto avfactory = parser_->ast_value_factory();
+ auto scope = parser_->scope_;
+ auto zone = parser_->zone();
+
+ // let O = lhs;
+ Variable* var_O = scope->NewTemporary(avfactory->empty_string());
+ Statement* get_O;
+ {
+ Expression* O_proxy = factory->NewVariableProxy(var_O);
+ Expression* assignment =
+ factory->NewAssignment(Token::ASSIGN, O_proxy, lhs, nopos);
+ get_O = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+  // let C = rhs;
+ Variable* var_C = scope->NewTemporary(avfactory->empty_string());
+ Statement* get_C;
+ {
+ Expression* C_proxy = factory->NewVariableProxy(var_C);
+ Expression* assignment =
+ factory->NewAssignment(Token::ASSIGN, C_proxy, rhs, nopos);
+ get_C = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+ // if (!IS_RECEIVER(C)) throw MakeTypeError(kNonObjectInInstanceOfCheck);
+ Statement* validate_C;
+ {
+ auto args = new (zone) ZoneList<Expression*>(1, zone);
+ args->Add(factory->NewVariableProxy(var_C), zone);
+ Expression* is_receiver_call =
+ factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+ Expression* call =
+ NewThrowTypeError(MessageTemplate::kNonObjectInInstanceOfCheck,
+ avfactory->empty_string(), nopos);
+ Statement* throw_call = factory->NewExpressionStatement(call, nopos);
+
+ validate_C =
+ factory->NewIfStatement(is_receiver_call,
+ factory->NewEmptyStatement(nopos),
+ throw_call,
+ nopos);
+ }
+
+ // let handler_result = C[Symbol.hasInstance];
+ Variable* var_handler_result = scope->NewTemporary(avfactory->empty_string());
+ Statement* initialize_handler;
+ {
+ Expression* hasInstance_symbol_literal =
+ factory->NewSymbolLiteral("hasInstance_symbol", RelocInfo::kNoPosition);
+ Expression* prop = factory->NewProperty(factory->NewVariableProxy(var_C),
+ hasInstance_symbol_literal, pos);
+ Expression* handler_proxy = factory->NewVariableProxy(var_handler_result);
+ Expression* assignment =
+ factory->NewAssignment(Token::ASSIGN, handler_proxy, prop, nopos);
+ initialize_handler = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+ // if (handler_result === undefined) {
+ // if (!IS_CALLABLE(C)) {
+ // throw MakeTypeError(kCalledNonCallableInstanceOf);
+ // }
+  //   handler_result = %ordinary_has_instance(C, O);
+ // } else {
+ // handler_result = !!%_Call(handler_result, C, O);
+ // }
+ Statement* call_handler;
+ {
+ Expression* condition = factory->NewCompareOperation(
+ Token::EQ_STRICT, factory->NewVariableProxy(var_handler_result),
+ factory->NewUndefinedLiteral(nopos), nopos);
+
+ Block* then_side = factory->NewBlock(nullptr, 2, false, nopos);
+ {
+ Expression* throw_expr =
+ NewThrowTypeError(MessageTemplate::kCalledNonCallableInstanceOf,
+ avfactory->empty_string(), nopos);
+ Statement* validate_C = CheckCallable(var_C, throw_expr);
+ ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(2, zone);
+ args->Add(factory->NewVariableProxy(var_C), zone);
+ args->Add(factory->NewVariableProxy(var_O), zone);
+ CallRuntime* call = factory->NewCallRuntime(
+ Context::ORDINARY_HAS_INSTANCE_INDEX, args, pos);
+ Expression* result_proxy = factory->NewVariableProxy(var_handler_result);
+ Expression* assignment =
+ factory->NewAssignment(Token::ASSIGN, result_proxy, call, nopos);
+ Statement* assignment_return =
+ factory->NewExpressionStatement(assignment, nopos);
+
+ then_side->statements()->Add(validate_C, zone);
+ then_side->statements()->Add(assignment_return, zone);
+ }
+
+ Statement* else_side;
+ {
+ auto args = new (zone) ZoneList<Expression*>(3, zone);
+ args->Add(factory->NewVariableProxy(var_handler_result), zone);
+ args->Add(factory->NewVariableProxy(var_C), zone);
+ args->Add(factory->NewVariableProxy(var_O), zone);
+ Expression* call =
+ factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+ Expression* inner_not =
+ factory->NewUnaryOperation(Token::NOT, call, nopos);
+ Expression* outer_not =
+ factory->NewUnaryOperation(Token::NOT, inner_not, nopos);
+ Expression* result_proxy = factory->NewVariableProxy(var_handler_result);
+ Expression* assignment =
+ factory->NewAssignment(Token::ASSIGN, result_proxy, outer_not, nopos);
+
+ else_side = factory->NewExpressionStatement(assignment, nopos);
+ }
+ call_handler =
+ factory->NewIfStatement(condition, then_side, else_side, nopos);
+ }
+
+ // do { ... }
+ DoExpression* instanceof;
+ {
+ Block* block = factory->NewBlock(nullptr, 5, true, nopos);
+ block->statements()->Add(get_O, zone);
+ block->statements()->Add(get_C, zone);
+ block->statements()->Add(validate_C, zone);
+ block->statements()->Add(initialize_handler, zone);
+ block->statements()->Add(call_handler, zone);
+
+ // Here is the desugared instanceof.
+ instanceof = factory->NewDoExpression(block, var_handler_result, nopos);
+ Rewriter::Rewrite(parser_, instanceof, avfactory);
+ }
+
+ return instanceof;
+}
+
+Statement* ParserTraits::CheckCallable(Variable* var, Expression* error) {
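+  // Builds, roughly: if (typeof var !== "function") throw error;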
+ auto factory = parser_->factory();
+ auto avfactory = parser_->ast_value_factory();
+ const int nopos = RelocInfo::kNoPosition;
+ Statement* validate_var;
+ {
+ Expression* type_of = factory->NewUnaryOperation(
+ Token::TYPEOF, factory->NewVariableProxy(var), nopos);
+ Expression* function_literal =
+ factory->NewStringLiteral(avfactory->function_string(), nopos);
+ Expression* condition = factory->NewCompareOperation(
+ Token::EQ_STRICT, type_of, function_literal, nopos);
+
+ Statement* throw_call = factory->NewExpressionStatement(error, nopos);
+
+ validate_var = factory->NewIfStatement(
+ condition, factory->NewEmptyStatement(nopos), throw_call, nopos);
+ }
+ return validate_var;
+}
+
+void ParserTraits::BuildIteratorClose(ZoneList<Statement*>* statements,
+ Variable* iterator,
+ Expression* input,
+ Variable* var_output) {
+ //
+ // This function adds four statements to [statements], corresponding to the
+ // following code:
+ //
+ // let iteratorReturn = iterator.return;
+  //   if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return input;
+  //   output = %_Call(iteratorReturn, iterator);
+  //   if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
+ //
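+  // (This is the IteratorClose(iterator, input, output) expansion from the
+  // yield* notes above; it is used by the kReturn case of the mode switch
+  // and by the missing-throw-method check.)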
+
+ const int nopos = RelocInfo::kNoPosition;
+ auto factory = parser_->factory();
+ auto avfactory = parser_->ast_value_factory();
+ auto zone = parser_->zone();
+
+ // let iteratorReturn = iterator.return;
+ Variable* var_return = var_output; // Reusing the output variable.
+ Statement* get_return;
+ {
+ Expression* iterator_proxy = factory->NewVariableProxy(iterator);
+ Expression* literal =
+ factory->NewStringLiteral(avfactory->return_string(), nopos);
+ Expression* property =
+ factory->NewProperty(iterator_proxy, literal, nopos);
+ Expression* return_proxy = factory->NewVariableProxy(var_return);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, return_proxy, property, nopos);
+ get_return = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+  // if (IS_NULL_OR_UNDEFINED(iteratorReturn)) return input;
+ Statement* check_return;
+ {
+ Expression* condition = factory->NewCompareOperation(
+ Token::EQ, factory->NewVariableProxy(var_return),
+ factory->NewNullLiteral(nopos), nopos);
+
+ Statement* return_input = factory->NewReturnStatement(input, nopos);
+
+ check_return = factory->NewIfStatement(
+ condition, return_input, factory->NewEmptyStatement(nopos), nopos);
+ }
+
+ // output = %_Call(iteratorReturn, iterator);
+ Statement* call_return;
+ {
+ auto args = new (zone) ZoneList<Expression*>(3, zone);
+ args->Add(factory->NewVariableProxy(var_return), zone);
+ args->Add(factory->NewVariableProxy(iterator), zone);
+
+ Expression* call =
+ factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+ Expression* output_proxy = factory->NewVariableProxy(var_output);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, output_proxy, call, nopos);
+ call_return = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+ // if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
+ Statement* validate_output;
+ {
+ Expression* is_receiver_call;
+ {
+ auto args = new (zone) ZoneList<Expression*>(1, zone);
+ args->Add(factory->NewVariableProxy(var_output), zone);
+ is_receiver_call =
+ factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+ }
+
+ Statement* throw_call;
+ {
+ auto args = new (zone) ZoneList<Expression*>(1, zone);
+ args->Add(factory->NewVariableProxy(var_output), zone);
+ Expression* call = factory->NewCallRuntime(
+ Runtime::kThrowIteratorResultNotAnObject, args, nopos);
+ throw_call = factory->NewExpressionStatement(call, nopos);
+ }
+
+ validate_output = factory->NewIfStatement(
+ is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+ }
+
+ statements->Add(get_return, zone);
+ statements->Add(check_return, zone);
+ statements->Add(call_return, zone);
+ statements->Add(validate_output, zone);
+}
+
+
+// Runtime encoding of different completion modes.
+enum ForOfLoopBodyCompletion { BODY_COMPLETED, BODY_ABORTED, BODY_THREW };
+
+void ParserTraits::BuildIteratorCloseForCompletion(
+ ZoneList<Statement*>* statements, Variable* iterator,
+ Variable* completion) {
+ //
+ // This function adds two statements to [statements], corresponding to the
+ // following code:
+ //
+ // let iteratorReturn = iterator.return;
+ // if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) {
+ // let output;
+ // if (completion === BODY_THREW) {
+ // if (!IS_CALLABLE(iteratorReturn)) {
+ // throw MakeTypeError(kReturnMethodNotCallable);
+ // }
+ // try { output = %_Call(iteratorReturn, iterator) } catch (_) { }
+ // } else {
+ // output = %_Call(iteratorReturn, iterator);
+ // }
+  //   if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
+ // }
+ //
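+  // (`completion` holds one of the ForOfLoopBodyCompletion values defined
+  // above; only when the body threw is iteratorReturn checked for
+  // callability and its call wrapped in try/catch.)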
+
+ const int nopos = RelocInfo::kNoPosition;
+ auto factory = parser_->factory();
+ auto avfactory = parser_->ast_value_factory();
+ auto scope = parser_->scope_;
+ auto zone = parser_->zone();
+
+ // let output;
+ Variable* var_output = scope->NewTemporary(avfactory->empty_string());
+
+ // let iteratorReturn = iterator.return;
+ Variable* var_return = var_output; // Reusing the output variable.
+ Statement* get_return;
+ {
+ Expression* iterator_proxy = factory->NewVariableProxy(iterator);
+ Expression* literal =
+ factory->NewStringLiteral(avfactory->return_string(), nopos);
+ Expression* property =
+ factory->NewProperty(iterator_proxy, literal, nopos);
+ Expression* return_proxy = factory->NewVariableProxy(var_return);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, return_proxy, property, nopos);
+ get_return = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+ // if (!IS_CALLABLE(iteratorReturn)) {
+ // throw MakeTypeError(kReturnMethodNotCallable);
+ // }
+ Statement* check_return_callable;
+ {
+ Expression* throw_expr = NewThrowTypeError(
+ MessageTemplate::kReturnMethodNotCallable,
+ avfactory->empty_string(), nopos);
+ check_return_callable = CheckCallable(var_return, throw_expr);
+ }
+
+ // output = %_Call(iteratorReturn, iterator);
+ Statement* call_return;
+ {
+ auto args = new (zone) ZoneList<Expression*>(2, zone);
+ args->Add(factory->NewVariableProxy(var_return), zone);
+ args->Add(factory->NewVariableProxy(iterator), zone);
+ Expression* call =
+ factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+
+ Expression* output_proxy = factory->NewVariableProxy(var_output);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, output_proxy, call, nopos);
+ call_return = factory->NewExpressionStatement(assignment, nopos);
+ }
+
+ // try { output = %_Call(iteratorReturn, iterator) } catch (_) { }
+ Statement* try_call_return;
+ {
+ auto args = new (zone) ZoneList<Expression*>(2, zone);
+ args->Add(factory->NewVariableProxy(var_return), zone);
+ args->Add(factory->NewVariableProxy(iterator), zone);
+
+ Expression* call =
+ factory->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, factory->NewVariableProxy(var_output), call, nopos);
+
+ Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
+ try_block->statements()->Add(
+ factory->NewExpressionStatement(assignment, nopos), zone);
+
+ Block* catch_block = factory->NewBlock(nullptr, 0, false, nopos);
+
+ Scope* catch_scope = NewScope(scope, CATCH_SCOPE);
+ Variable* catch_variable = catch_scope->DeclareLocal(
+ avfactory->dot_catch_string(), VAR, kCreatedInitialized,
+ Variable::NORMAL);
+
+ try_call_return = factory->NewTryCatchStatement(
+ try_block, catch_scope, catch_variable, catch_block, nopos);
+ }
+
+ // if (completion === BODY_THREW) {
+ // #check_return_callable;
+ // #try_call_return;
+ // } else {
+ // #call_return;
+ // }
+ Statement* call_return_carefully;
+ {
+ Expression* condition = factory->NewCompareOperation(
+ Token::EQ_STRICT, factory->NewVariableProxy(completion),
+ factory->NewSmiLiteral(BODY_THREW, nopos), nopos);
+
+ Block* then_block = factory->NewBlock(nullptr, 2, false, nopos);
+ then_block->statements()->Add(check_return_callable, zone);
+ then_block->statements()->Add(try_call_return, zone);
+
+ call_return_carefully =
+ factory->NewIfStatement(condition, then_block, call_return, nopos);
+ }
+
+ // if (!IS_RECEIVER(output)) %ThrowIteratorResultNotAnObject(output);
+ Statement* validate_output;
+ {
+ Expression* is_receiver_call;
+ {
+ auto args = new (zone) ZoneList<Expression*>(1, zone);
+ args->Add(factory->NewVariableProxy(var_output), zone);
+ is_receiver_call =
+ factory->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
+ }
+
+ Statement* throw_call;
+ {
+ auto args = new (zone) ZoneList<Expression*>(1, zone);
+ args->Add(factory->NewVariableProxy(var_output), zone);
+ Expression* call = factory->NewCallRuntime(
+ Runtime::kThrowIteratorResultNotAnObject, args, nopos);
+ throw_call = factory->NewExpressionStatement(call, nopos);
+ }
+
+ validate_output = factory->NewIfStatement(
+ is_receiver_call, factory->NewEmptyStatement(nopos), throw_call, nopos);
+ }
+
+ // if (!IS_NULL_OR_UNDEFINED(iteratorReturn)) { ... }
+ Statement* maybe_call_return;
+ {
+ Expression* condition = factory->NewCompareOperation(
+ Token::EQ, factory->NewVariableProxy(var_return),
+ factory->NewNullLiteral(nopos), nopos);
+
+ Block* block = factory->NewBlock(nullptr, 2, false, nopos);
+ block->statements()->Add(call_return_carefully, zone);
+ block->statements()->Add(validate_output, zone);
+
+ maybe_call_return = factory->NewIfStatement(
+ condition, factory->NewEmptyStatement(nopos), block, nopos);
+ }
+
+ statements->Add(get_return, zone);
+ statements->Add(maybe_call_return, zone);
+}
+
+
+Statement* ParserTraits::FinalizeForOfStatement(ForOfStatement* loop, int pos) {
+ if (!FLAG_harmony_iterator_close) return loop;
+
+ //
+ // This function replaces the loop with the following wrapping:
+ //
+ // let completion = BODY_COMPLETED;
+ // try {
+ // #loop;
+ // } catch(e) {
+ // if (completion === BODY_ABORTED) completion = BODY_THREW;
+ // throw e;
+ // } finally {
+ // if (!(completion === BODY_COMPLETED || IS_UNDEFINED(#iterator))) {
+ // #BuildIteratorCloseForCompletion(#iterator, completion) // See above.
+ // }
+ // }
+ //
+ // where the loop's body is wrapped as follows:
+ //
+ // {
+ // {{completion = BODY_ABORTED;}}
+ // #loop-body
+ // {{completion = BODY_COMPLETED;}}
+ // }
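+ //
+ // The effect (illustrative only): a `break`, `return`, or `throw` inside
+ // the body of `for (x of it) { ... }` still closes the loop's iterator,
+ // via the statements built by BuildIteratorCloseForCompletion above.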
+
+ const int nopos = RelocInfo::kNoPosition;
+ auto factory = parser_->factory();
+ auto avfactory = parser_->ast_value_factory();
+ auto scope = parser_->scope_;
+ auto zone = parser_->zone();
+
+ // let completion = BODY_COMPLETED;
+ Variable* var_completion = scope->NewTemporary(avfactory->empty_string());
+ Statement* initialize_completion;
+ {
+ Expression* proxy = factory->NewVariableProxy(var_completion);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, proxy,
+ factory->NewSmiLiteral(BODY_COMPLETED, nopos), nopos);
+ initialize_completion =
+ factory->NewExpressionStatement(assignment, nopos);
+ }
+
+ // if (completion === BODY_ABORTED) completion = BODY_THREW;
+ Statement* set_completion_throw;
+ {
+ Expression* condition = factory->NewCompareOperation(
+ Token::EQ_STRICT, factory->NewVariableProxy(var_completion),
+ factory->NewSmiLiteral(BODY_ABORTED, nopos), nopos);
+
+ Expression* proxy = factory->NewVariableProxy(var_completion);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, proxy, factory->NewSmiLiteral(BODY_THREW, nopos),
+ nopos);
+ Statement* statement = factory->NewExpressionStatement(assignment, nopos);
+ set_completion_throw = factory->NewIfStatement(
+ condition, statement, factory->NewEmptyStatement(nopos), nopos);
+ }
+
+ // if (!(completion === BODY_COMPLETED || IS_UNDEFINED(#iterator))) {
+ // #BuildIteratorCloseForCompletion(#iterator, completion)
+ // }
+ Block* maybe_close;
+ {
+ Expression* condition1 = factory->NewCompareOperation(
+ Token::EQ_STRICT, factory->NewVariableProxy(var_completion),
+ factory->NewSmiLiteral(BODY_COMPLETED, nopos), nopos);
+ Expression* condition2 = factory->NewCompareOperation(
+ Token::EQ_STRICT, factory->NewVariableProxy(loop->iterator()),
+ factory->NewUndefinedLiteral(nopos), nopos);
+ Expression* condition = factory->NewBinaryOperation(
+ Token::OR, condition1, condition2, nopos);
+
+ Block* block = factory->NewBlock(nullptr, 2, false, nopos);
+ BuildIteratorCloseForCompletion(
+ block->statements(), loop->iterator(), var_completion);
+ DCHECK(block->statements()->length() == 2);
+
+ maybe_close = factory->NewBlock(nullptr, 1, false, nopos);
+ maybe_close->statements()->Add(factory->NewIfStatement(
+ condition, factory->NewEmptyStatement(nopos), block, nopos), zone);
+ }
+
+ // try { #try_block }
+ // catch(e) {
+ // #set_completion_throw;
+ // throw e;
+ // }
+ Statement* try_catch;
+ {
+ Scope* catch_scope = NewScope(scope, CATCH_SCOPE);
+ Variable* catch_variable = catch_scope->DeclareLocal(
+ avfactory->dot_catch_string(), VAR, kCreatedInitialized,
+ Variable::NORMAL);
+
+ Statement* rethrow;
+ {
+ Expression* proxy = factory->NewVariableProxy(catch_variable);
+ rethrow = factory->NewExpressionStatement(
+ factory->NewThrow(proxy, nopos), nopos);
+ }
+
+ Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
+ try_block->statements()->Add(loop, zone);
+
+ Block* catch_block = factory->NewBlock(nullptr, 2, false, nopos);
+ catch_block->statements()->Add(set_completion_throw, zone);
+ catch_block->statements()->Add(rethrow, zone);
+
+ try_catch = factory->NewTryCatchStatement(
+ try_block, catch_scope, catch_variable, catch_block, nopos);
+ }
+
+ // try { #try_catch } finally { #maybe_close }
+ Statement* try_finally;
+ {
+ Block* try_block = factory->NewBlock(nullptr, 1, false, nopos);
+ try_block->statements()->Add(try_catch, zone);
+
+ try_finally =
+ factory->NewTryFinallyStatement(try_block, maybe_close, nopos);
+ }
+
+ // #initialize_completion;
+ // #try_finally;
+ Statement* final_loop;
+ {
+ Block* block = factory->NewBlock(nullptr, 2, false, nopos);
+ block->statements()->Add(initialize_completion, zone);
+ block->statements()->Add(try_finally, zone);
+ final_loop = block;
+ }
+
+ // {{completion = BODY_ABORTED;}}
+ Statement* set_completion_break;
+ {
+ Expression* proxy = factory->NewVariableProxy(var_completion);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, proxy,
+ factory->NewSmiLiteral(BODY_ABORTED, nopos), nopos);
+
+ Block* block = factory->NewBlock(nullptr, 1, true, nopos);
+ block->statements()->Add(
+ factory->NewExpressionStatement(assignment, nopos), zone);
+ set_completion_break = block;
+ }
+
+ // {{completion = BODY_COMPLETED;}}
+ Statement* set_completion_normal;
+ {
+ Expression* proxy = factory->NewVariableProxy(var_completion);
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, proxy, factory->NewSmiLiteral(BODY_COMPLETED, nopos),
+ nopos);
+
+ Block* block = factory->NewBlock(nullptr, 1, true, nopos);
+ block->statements()->Add(
+ factory->NewExpressionStatement(assignment, nopos), zone);
+ set_completion_normal = block;
+ }
+
+ // { #set_completion_break; #loop-body; #set_completion_normal }
+ Block* new_body = factory->NewBlock(nullptr, 2, false, nopos);
+ new_body->statements()->Add(set_completion_break, zone);
+ new_body->statements()->Add(loop->body(), zone);
+ new_body->statements()->Add(set_completion_normal, zone);
+
+ loop->set_body(new_body);
+ return final_loop;
}
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 7d50221334..d4fb62f02c 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -335,6 +335,9 @@ class ParserTraits {
typedef v8::internal::AstProperties AstProperties;
+ typedef v8::internal::ExpressionClassifier<ParserTraits>
+ ExpressionClassifier;
+
// Return types for traversing functions.
typedef const AstRawString* Identifier;
typedef v8::internal::Expression* Expression;
@@ -461,6 +464,8 @@ class ParserTraits {
MessageTemplate::Template message,
const AstRawString* arg, int pos);
+ Statement* FinalizeForOfStatement(ForOfStatement* loop, int pos);
+
// Reporting errors.
void ReportMessageAt(Scanner::Location source_location,
MessageTemplate::Template message,
@@ -513,8 +518,8 @@ class ParserTraits {
int pos);
Expression* NewTargetExpression(Scope* scope, AstNodeFactory* factory,
int pos);
- Expression* DefaultConstructor(bool call_super, Scope* scope, int pos,
- int end_pos, LanguageMode language_mode);
+ Expression* FunctionSentExpression(Scope* scope, AstNodeFactory* factory,
+ int pos);
Literal* ExpressionFromLiteral(Token::Value token, int pos, Scanner* scanner,
AstNodeFactory* factory);
Expression* ExpressionFromIdentifier(const AstRawString* name,
@@ -547,7 +552,7 @@ class ParserTraits {
int initializer_end_position, bool is_rest);
V8_INLINE void DeclareFormalParameter(
Scope* scope, const ParserFormalParameters::Parameter& parameter,
- ExpressionClassifier* classifier);
+ Type::ExpressionClassifier* classifier);
void ParseArrowFunctionFormalParameters(ParserFormalParameters* parameters,
Expression* params,
const Scanner::Location& params_loc,
@@ -567,7 +572,6 @@ class ParserTraits {
const AstRawString* name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok);
V8_INLINE void SkipLazyFunctionBody(
int* materialized_literal_count, int* expected_property_count, bool* ok,
@@ -642,6 +646,7 @@ class ParserTraits {
V8_INLINE void QueueDestructuringAssignmentForRewriting(
Expression* assignment);
+ V8_INLINE void QueueNonPatternForRewriting(Expression* expr);
void SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
const AstRawString* name);
@@ -650,17 +655,28 @@ class ParserTraits {
Expression* identifier);
// Rewrite expressions that are not used as patterns
- V8_INLINE Expression* RewriteNonPattern(
- Expression* expr, const ExpressionClassifier* classifier, bool* ok);
- V8_INLINE ZoneList<Expression*>* RewriteNonPatternArguments(
- ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
- bool* ok);
- V8_INLINE ObjectLiteralProperty* RewriteNonPatternObjectLiteralProperty(
- ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
- bool* ok);
+ V8_INLINE void RewriteNonPattern(Type::ExpressionClassifier* classifier,
+ bool* ok);
+
+ V8_INLINE Zone* zone() const;
+
+ V8_INLINE ZoneList<Expression*>* GetNonPatternList() const;
+
+ Expression* RewriteYieldStar(
+ Expression* generator, Expression* expression, int pos);
+
+ Expression* RewriteInstanceof(Expression* lhs, Expression* rhs, int pos);
private:
Parser* parser_;
+
+ void BuildIteratorClose(
+ ZoneList<Statement*>* statements, Variable* iterator,
+ Expression* input, Variable* output);
+ void BuildIteratorCloseForCompletion(
+ ZoneList<Statement*>* statements, Variable* iterator,
+ Variable* body_threw);
+ Statement* CheckCallable(Variable* var, Expression* error);
};
@@ -744,6 +760,9 @@ class Parser : public ParserBase<ParserTraits> {
bool* ok);
Statement* ParseFunctionDeclaration(ZoneList<const AstRawString*>* names,
bool* ok);
+ Statement* ParseFunctionDeclaration(int pos, bool is_generator,
+ ZoneList<const AstRawString*>* names,
+ bool* ok);
Statement* ParseClassDeclaration(ZoneList<const AstRawString*>* names,
bool* ok);
Statement* ParseNativeDeclaration(bool* ok);
@@ -754,6 +773,7 @@ class Parser : public ParserBase<ParserTraits> {
ZoneList<const AstRawString*>* names,
bool* ok);
DoExpression* ParseDoExpression(bool* ok);
+ Expression* ParseYieldStarExpression(bool* ok);
struct DeclarationDescriptor {
enum Kind { NORMAL, PARAMETER };
@@ -761,7 +781,6 @@ class Parser : public ParserBase<ParserTraits> {
Scope* scope;
Scope* hoist_scope;
VariableMode mode;
- bool needs_init;
int declaration_pos;
int initialization_pos;
Kind declaration_kind;
@@ -801,8 +820,9 @@ class Parser : public ParserBase<ParserTraits> {
const DeclarationParsingResult::Declaration* declaration,
ZoneList<const AstRawString*>* names, bool* ok);
- static void RewriteDestructuringAssignment(
- Parser* parser, RewritableAssignmentExpression* expr, Scope* Scope);
+ static void RewriteDestructuringAssignment(Parser* parser,
+ RewritableExpression* expr,
+ Scope* Scope);
static Expression* RewriteDestructuringAssignment(Parser* parser,
Assignment* assignment,
@@ -872,10 +892,10 @@ class Parser : public ParserBase<ParserTraits> {
bool* ok_;
};
-
- void ParseVariableDeclarations(VariableDeclarationContext var_context,
- DeclarationParsingResult* parsing_result,
- bool* ok);
+ Block* ParseVariableDeclarations(VariableDeclarationContext var_context,
+ DeclarationParsingResult* parsing_result,
+ ZoneList<const AstRawString*>* names,
+ bool* ok);
Statement* ParseExpressionOrLabelledStatement(
ZoneList<const AstRawString*>* labels, bool* ok);
IfStatement* ParseIfStatement(ZoneList<const AstRawString*>* labels,
@@ -896,6 +916,8 @@ class Parser : public ParserBase<ParserTraits> {
Statement* ParseForStatement(ZoneList<const AstRawString*>* labels, bool* ok);
Statement* ParseThrowStatement(bool* ok);
Expression* MakeCatchContext(Handle<String> id, VariableProxy* value);
+ class DontCollectExpressionsInTailPositionScope;
+ class CollectExpressionsInTailPositionToListScope;
TryStatement* ParseTryStatement(bool* ok);
DebuggerStatement* ParseDebuggerStatement(bool* ok);
@@ -910,9 +932,9 @@ class Parser : public ParserBase<ParserTraits> {
Expression* subject, Statement* body,
bool is_destructuring);
Statement* DesugarLexicalBindingsInForStatement(
- Scope* inner_scope, bool is_const, ZoneList<const AstRawString*>* names,
- ForStatement* loop, Statement* init, Expression* cond, Statement* next,
- Statement* body, bool* ok);
+ Scope* inner_scope, VariableMode mode,
+ ZoneList<const AstRawString*>* names, ForStatement* loop, Statement* init,
+ Expression* cond, Statement* next, Statement* body, bool* ok);
void RewriteDoExpression(Expression* expr, bool* ok);
@@ -920,7 +942,6 @@ class Parser : public ParserBase<ParserTraits> {
const AstRawString* name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok);
@@ -966,8 +987,9 @@ class Parser : public ParserBase<ParserTraits> {
Statement* BuildAssertIsCoercible(Variable* var);
// Factory methods.
- FunctionLiteral* DefaultConstructor(bool call_super, Scope* scope, int pos,
- int end_pos, LanguageMode language_mode);
+ FunctionLiteral* DefaultConstructor(const AstRawString* name, bool call_super,
+ Scope* scope, int pos, int end_pos,
+ LanguageMode language_mode);
// Skip over a lazy function, either using cached data if we have it, or
// by parsing the function with PreParser. Consumes the ending }.
@@ -1013,14 +1035,10 @@ class Parser : public ParserBase<ParserTraits> {
V8_INLINE void RewriteDestructuringAssignments();
- V8_INLINE Expression* RewriteNonPattern(
- Expression* expr, const ExpressionClassifier* classifier, bool* ok);
- V8_INLINE ZoneList<Expression*>* RewriteNonPatternArguments(
- ZoneList<Expression*>* args, const ExpressionClassifier* classifier,
- bool* ok);
- V8_INLINE ObjectLiteralProperty* RewriteNonPatternObjectLiteralProperty(
- ObjectLiteralProperty* property, const ExpressionClassifier* classifier,
- bool* ok);
+ friend class NonPatternRewriter;
+ V8_INLINE Expression* RewriteSpreads(ArrayLiteral* lit);
+
+ V8_INLINE void RewriteNonPattern(ExpressionClassifier* classifier, bool* ok);
friend class InitializerRewriter;
void RewriteParameterInitializer(Expression* expr, Scope* scope);
@@ -1171,7 +1189,7 @@ void ParserTraits::AddFormalParameter(ParserFormalParameters* parameters,
void ParserTraits::DeclareFormalParameter(
Scope* scope, const ParserFormalParameters::Parameter& parameter,
- ExpressionClassifier* classifier) {
+ Type::ExpressionClassifier* classifier) {
bool is_duplicate = false;
bool is_simple = classifier->is_simple_parameter_list();
auto name = is_simple || parameter.is_rest
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index 6e20282785..04b517ebba 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -33,7 +33,7 @@ void Parser::PatternRewriter::DeclareAndInitializeVariables(
void Parser::PatternRewriter::RewriteDestructuringAssignment(
- Parser* parser, RewritableAssignmentExpression* to_rewrite, Scope* scope) {
+ Parser* parser, RewritableExpression* to_rewrite, Scope* scope) {
PatternRewriter rewriter;
DCHECK(!to_rewrite->is_rewritten());
@@ -58,8 +58,7 @@ Expression* Parser::PatternRewriter::RewriteDestructuringAssignment(
Parser* parser, Assignment* assignment, Scope* scope) {
DCHECK_NOT_NULL(assignment);
DCHECK_EQ(Token::ASSIGN, assignment->op());
- auto to_rewrite =
- parser->factory()->NewRewritableAssignmentExpression(assignment);
+ auto to_rewrite = parser->factory()->NewRewritableExpression(assignment);
RewriteDestructuringAssignment(parser, to_rewrite, scope);
return to_rewrite->expression();
}
@@ -91,8 +90,8 @@ Parser::PatternRewriter::SetInitializerContextIfNeeded(Expression* node) {
// AssignmentElement nodes
PatternContext old_context = context();
bool is_destructuring_assignment =
- node->IsRewritableAssignmentExpression() &&
- !node->AsRewritableAssignmentExpression()->is_rewritten();
+ node->IsRewritableExpression() &&
+ !node->AsRewritableExpression()->is_rewritten();
bool is_assignment =
node->IsAssignment() && node->AsAssignment()->op() == Token::ASSIGN;
if (is_destructuring_assignment || is_assignment) {
@@ -324,10 +323,11 @@ Variable* Parser::PatternRewriter::CreateTempVar(Expression* value) {
}
-void Parser::PatternRewriter::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* node) {
- if (!IsAssignmentContext()) {
- // Mark the assignment as rewritten to prevent redundant rewriting, and
+void Parser::PatternRewriter::VisitRewritableExpression(
+ RewritableExpression* node) {
+ // If this is not a destructuring assignment...
+ if (!IsAssignmentContext() || !node->expression()->IsAssignment()) {
+ // Mark the node as rewritten to prevent redundant rewriting, and
// perform BindingPattern rewriting
DCHECK(!node->is_rewritten());
node->Rewrite(node->expression());
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 64511acc39..d335c8bdcd 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -94,11 +94,10 @@ PreParserExpression PreParserTraits::ParseFunctionLiteral(
PreParserIdentifier name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok) {
return pre_parser_->ParseFunctionLiteral(
name, function_name_location, function_name_validity, kind,
- function_token_position, type, arity_restriction, language_mode, ok);
+ function_token_position, type, language_mode, ok);
}
@@ -451,8 +450,7 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
: kFunctionNameValidityUnknown,
is_generator ? FunctionKind::kGeneratorFunction
: FunctionKind::kNormalFunction,
- pos, FunctionLiteral::kDeclaration,
- FunctionLiteral::kNormalArity, language_mode(),
+ pos, FunctionLiteral::kDeclaration, language_mode(),
CHECK_OK);
return Statement::FunctionDeclaration();
}
@@ -575,7 +573,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
int decl_pos = peek_position();
PreParserExpression pattern = PreParserExpression::Default();
{
- ExpressionClassifier pattern_classifier;
+ ExpressionClassifier pattern_classifier(this);
Token::Value next = peek();
pattern = ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
@@ -591,17 +589,12 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
}
}
- is_pattern = (pattern.IsObjectLiteral() || pattern.IsArrayLiteral()) &&
- !pattern.is_parenthesized();
-
- bool is_for_iteration_variable =
- var_context == kForStatement &&
- (peek() == Token::IN || PeekContextualKeyword(CStrVector("of")));
+ is_pattern = pattern.IsObjectLiteral() || pattern.IsArrayLiteral();
Scanner::Location variable_loc = scanner()->location();
nvars++;
if (Check(Token::ASSIGN)) {
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
ParseAssignmentExpression(var_context != kForStatement, &classifier,
CHECK_OK);
ValidateExpression(&classifier, CHECK_OK);
@@ -611,7 +604,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
*first_initializer_loc = variable_loc;
}
} else if ((require_initializer || is_pattern) &&
- !is_for_iteration_variable) {
+ (var_context != kForStatement || !PeekInOrOf())) {
PreParserTraits::ReportMessageAt(
Scanner::Location(decl_pos, scanner()->location().end_pos),
MessageTemplate::kDeclarationMissingInitializer,
@@ -655,7 +648,7 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
IsClassConstructor(function_state_->kind())) {
bool is_this = peek() == Token::THIS;
Expression expr = Expression::Default();
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
if (is_this) {
expr = ParseStrongInitializationExpression(&classifier, CHECK_OK);
} else {
@@ -691,7 +684,7 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
}
bool starts_with_identifier = peek_any_identifier();
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
Expression expr = ParseExpression(true, &classifier, CHECK_OK);
ValidateExpression(&classifier, CHECK_OK);
@@ -924,40 +917,40 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
ParseVariableDeclarations(kForStatement, &decl_count, &is_lexical,
&is_binding_pattern, &first_initializer_loc,
&bindings_loc, CHECK_OK);
- bool accept_IN = decl_count >= 1;
- if (accept_IN && CheckInOrOf(&mode, ok)) {
+ if (CheckInOrOf(&mode, ok)) {
if (!*ok) return Statement::Default();
if (decl_count != 1) {
- const char* loop_type =
- mode == ForEachStatement::ITERATE ? "for-of" : "for-in";
PreParserTraits::ReportMessageAt(
bindings_loc, MessageTemplate::kForInOfLoopMultiBindings,
- loop_type);
+ ForEachStatement::VisitModeString(mode));
*ok = false;
return Statement::Default();
}
if (first_initializer_loc.IsValid() &&
(is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
is_lexical || is_binding_pattern)) {
- if (mode == ForEachStatement::ITERATE) {
- ReportMessageAt(first_initializer_loc,
- MessageTemplate::kForOfLoopInitializer);
- } else {
- // TODO(caitp): This should be an error in sloppy mode, too.
- ReportMessageAt(first_initializer_loc,
- MessageTemplate::kForInLoopInitializer);
- }
+ PreParserTraits::ReportMessageAt(
+ first_initializer_loc, MessageTemplate::kForInOfLoopInitializer,
+ ForEachStatement::VisitModeString(mode));
*ok = false;
return Statement::Default();
}
- ParseExpression(true, CHECK_OK);
+
+ if (mode == ForEachStatement::ITERATE) {
+ ExpressionClassifier classifier(this);
+ ParseAssignmentExpression(true, &classifier, CHECK_OK);
+ RewriteNonPattern(&classifier, CHECK_OK);
+ } else {
+ ParseExpression(true, CHECK_OK);
+ }
+
Expect(Token::RPAREN, CHECK_OK);
ParseSubStatement(CHECK_OK);
return Statement::Default();
}
} else {
int lhs_beg_pos = peek_position();
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
Expression lhs = ParseExpression(false, &classifier, CHECK_OK);
int lhs_end_pos = scanner()->location().end_pos;
is_let_identifier_expression =
@@ -980,7 +973,15 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
kSyntaxError, CHECK_OK);
}
- ParseExpression(true, CHECK_OK);
+
+ if (mode == ForEachStatement::ITERATE) {
+ ExpressionClassifier classifier(this);
+ ParseAssignmentExpression(true, &classifier, CHECK_OK);
+ RewriteNonPattern(&classifier, CHECK_OK);
+ } else {
+ ParseExpression(true, CHECK_OK);
+ }
+
Expect(Token::RPAREN, CHECK_OK);
ParseSubStatement(CHECK_OK);
return Statement::Default();
@@ -1054,7 +1055,7 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) {
if (tok == Token::CATCH) {
Consume(Token::CATCH);
Expect(Token::LPAREN, CHECK_OK);
- ExpressionClassifier pattern_classifier;
+ ExpressionClassifier pattern_classifier(this);
ParsePrimaryExpression(&pattern_classifier, CHECK_OK);
ValidateBindingPattern(&pattern_classifier, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
@@ -1099,7 +1100,6 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
Identifier function_name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_pos, FunctionLiteral::FunctionType function_type,
- FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
@@ -1112,7 +1112,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
FunctionState function_state(&function_state_, &scope_, function_scope, kind,
&factory);
DuplicateFinder duplicate_finder(scanner()->unicode_cache());
- ExpressionClassifier formals_classifier(&duplicate_finder);
+ ExpressionClassifier formals_classifier(this, &duplicate_finder);
Expect(Token::LPAREN, CHECK_OK);
int start_position = scanner()->location().beg_pos;
@@ -1122,8 +1122,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
Expect(Token::RPAREN, CHECK_OK);
int formals_end_position = scanner()->location().end_pos;
- CheckArityRestrictions(formals.arity, arity_restriction,
- formals.has_rest, start_position,
+ CheckArityRestrictions(formals.arity, kind, formals.has_rest, start_position,
formals_end_position, CHECK_OK);
// See Parser::ParseFunctionLiteral for more information about lazy parsing
@@ -1219,7 +1218,7 @@ PreParserExpression PreParser::ParseClassLiteral(
bool has_extends = Check(Token::EXTENDS);
if (has_extends) {
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
ParseLeftHandSideExpression(&classifier, CHECK_OK);
ValidateExpression(&classifier, CHECK_OK);
}
@@ -1235,7 +1234,7 @@ PreParserExpression PreParser::ParseClassLiteral(
bool is_computed_name = false; // Classes do not care about computed
// property names here.
Identifier name;
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
ParsePropertyDefinition(&checker, in_class, has_extends, is_static,
&is_computed_name, &has_seen_constructor,
&classifier, &name, CHECK_OK);
@@ -1259,7 +1258,7 @@ PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
// Allow "eval" or "arguments" for backward compatibility.
ParseIdentifier(kAllowRestrictedIdentifiers, CHECK_OK);
Scanner::Location spread_pos;
- ExpressionClassifier classifier;
+ ExpressionClassifier classifier(this);
ParseArguments(&spread_pos, &classifier, ok);
ValidateExpression(&classifier, CHECK_OK);
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 59100f1ae9..253251c073 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -279,12 +279,6 @@ class PreParserExpression {
int position() const { return RelocInfo::kNoPosition; }
void set_function_token_position(int position) {}
- // Parenthesized expressions in the form `( Expression )`.
- void set_is_parenthesized() {
- code_ = ParenthesizedField::update(code_, true);
- }
- bool is_parenthesized() const { return ParenthesizedField::decode(code_); }
-
private:
enum Type {
kExpression,
@@ -491,8 +485,7 @@ class PreParserFactory {
PreParserExpression right, int pos) {
return PreParserExpression::Default();
}
- PreParserExpression NewRewritableAssignmentExpression(
- PreParserExpression expression) {
+ PreParserExpression NewRewritableExpression(PreParserExpression expression) {
return expression;
}
PreParserExpression NewAssignment(Token::Value op,
@@ -550,7 +543,8 @@ class PreParserFactory {
return PreParserExpression::Default();
}
- PreParserExpression NewSpread(PreParserExpression expression, int pos) {
+ PreParserExpression NewSpread(PreParserExpression expression, int pos,
+ int expr_pos) {
return PreParserExpression::Spread(expression);
}
@@ -592,6 +586,9 @@ class PreParserTraits {
typedef int AstProperties;
+ typedef v8::internal::ExpressionClassifier<PreParserTraits>
+ ExpressionClassifier;
+
// Return types for traversing functions.
typedef PreParserIdentifier Identifier;
typedef PreParserExpression Expression;
@@ -797,8 +794,9 @@ class PreParserTraits {
return PreParserExpression::Default();
}
- static PreParserExpression DefaultConstructor(bool call_super, Scope* scope,
- int pos, int end_pos) {
+ static PreParserExpression FunctionSentExpression(Scope* scope,
+ PreParserFactory* factory,
+ int pos) {
return PreParserExpression::Default();
}
@@ -887,7 +885,7 @@ class PreParserTraits {
++parameters->arity;
}
void DeclareFormalParameter(Scope* scope, PreParserIdentifier parameter,
- ExpressionClassifier* classifier) {
+ Type::ExpressionClassifier* classifier) {
if (!classifier->is_simple_parameter_list()) {
scope->SetHasNonSimpleParameters();
}
@@ -902,7 +900,6 @@ class PreParserTraits {
PreParserIdentifier name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_position, FunctionLiteral::FunctionType type,
- FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok);
PreParserExpression ParseClassLiteral(PreParserIdentifier name,
@@ -926,21 +923,24 @@ class PreParserTraits {
inline void RewriteDestructuringAssignments() {}
inline void QueueDestructuringAssignmentForRewriting(PreParserExpression) {}
+ inline void QueueNonPatternForRewriting(PreParserExpression) {}
void SetFunctionNameFromPropertyName(PreParserExpression,
PreParserIdentifier) {}
void SetFunctionNameFromIdentifierRef(PreParserExpression,
PreParserExpression) {}
- inline PreParserExpression RewriteNonPattern(
- PreParserExpression expr, const ExpressionClassifier* classifier,
- bool* ok);
- inline PreParserExpression RewriteNonPatternArguments(
- PreParserExpression args, const ExpressionClassifier* classifier,
- bool* ok);
- inline PreParserExpression RewriteNonPatternObjectLiteralProperty(
- PreParserExpression property, const ExpressionClassifier* classifier,
- bool* ok);
+ inline void RewriteNonPattern(Type::ExpressionClassifier* classifier,
+ bool* ok);
+
+ V8_INLINE Zone* zone() const;
+ V8_INLINE ZoneList<PreParserExpression>* GetNonPatternList() const;
+
+ inline PreParserExpression RewriteYieldStar(
+ PreParserExpression generator, PreParserExpression expr, int pos);
+ inline PreParserExpression RewriteInstanceof(PreParserExpression lhs,
+ PreParserExpression rhs,
+ int pos);
private:
PreParser* pre_parser_;
@@ -1071,7 +1071,6 @@ class PreParser : public ParserBase<PreParserTraits> {
Identifier name, Scanner::Location function_name_location,
FunctionNameValidity function_name_validity, FunctionKind kind,
int function_token_pos, FunctionLiteral::FunctionType function_type,
- FunctionLiteral::ArityRestriction arity_restriction,
LanguageMode language_mode, bool* ok);
void ParseLazyFunctionLiteralBody(bool* ok,
Scanner::BookmarkScope* bookmark = nullptr);
@@ -1123,30 +1122,34 @@ PreParserExpression PreParserTraits::ParseDoExpression(bool* ok) {
}
-PreParserExpression PreParserTraits::RewriteNonPattern(
- PreParserExpression expr, const ExpressionClassifier* classifier,
- bool* ok) {
+void PreParserTraits::RewriteNonPattern(Type::ExpressionClassifier* classifier,
+ bool* ok) {
pre_parser_->ValidateExpression(classifier, ok);
- return expr;
}
-PreParserExpression PreParserTraits::RewriteNonPatternArguments(
- PreParserExpression args, const ExpressionClassifier* classifier,
- bool* ok) {
- pre_parser_->ValidateExpression(classifier, ok);
- return args;
+Zone* PreParserTraits::zone() const {
+ return pre_parser_->function_state_->scope()->zone();
}
-PreParserExpression PreParserTraits::RewriteNonPatternObjectLiteralProperty(
- PreParserExpression property, const ExpressionClassifier* classifier,
- bool* ok) {
- pre_parser_->ValidateExpression(classifier, ok);
- return property;
+ZoneList<PreParserExpression>* PreParserTraits::GetNonPatternList() const {
+ return pre_parser_->function_state_->non_patterns_to_rewrite();
}
+PreParserExpression PreParserTraits::RewriteYieldStar(
+ PreParserExpression generator, PreParserExpression expression, int pos) {
+ return pre_parser_->factory()->NewYield(
+ generator, expression, Yield::kDelegating, pos);
+}
+
+PreParserExpression PreParserTraits::RewriteInstanceof(PreParserExpression lhs,
+ PreParserExpression rhs,
+ int pos) {
+ return PreParserExpression::Default();
+}
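+// Note: these preparser hooks do no actual rewriting: yield* is recorded
+// as a plain delegating Yield and instanceof is left as-is; the concrete
+// rewrites live in the parser (see RewriteYieldStar and RewriteInstanceof
+// in parser.h above).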
+
PreParserStatementList PreParser::ParseEagerFunctionBody(
PreParserIdentifier function_name, int pos,
const PreParserFormalParameters& parameters, FunctionKind kind,
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 4da60aca18..c8e8fedc23 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -31,6 +31,7 @@ class Processor: public AstVisitor {
result_assigned_(false),
replacement_(nullptr),
is_set_(false),
+ zone_(ast_value_factory->zone()),
scope_(scope),
factory_(ast_value_factory) {
InitializeAstVisitor(parser->stack_limit());
@@ -148,7 +149,7 @@ void Processor::VisitIfStatement(IfStatement* node) {
is_set_ = is_set_ && set_in_then;
replacement_ = node;
- if (FLAG_harmony_completion && !is_set_) {
+ if (!is_set_) {
is_set_ = true;
replacement_ = AssignUndefinedBefore(node);
}
@@ -164,7 +165,7 @@ void Processor::VisitIterationStatement(IterationStatement* node) {
is_set_ = is_set_ && set_after;
replacement_ = node;
- if (FLAG_harmony_completion && !is_set_) {
+ if (!is_set_) {
is_set_ = true;
replacement_ = AssignUndefinedBefore(node);
}
@@ -208,7 +209,7 @@ void Processor::VisitTryCatchStatement(TryCatchStatement* node) {
is_set_ = is_set_ && set_in_try;
replacement_ = node;
- if (FLAG_harmony_completion && !is_set_) {
+ if (!is_set_) {
is_set_ = true;
replacement_ = AssignUndefinedBefore(node);
}
@@ -225,6 +226,7 @@ void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
// at the end again: ".backup = .result; ...; .result = .backup"
// This is necessary because the finally block does not normally contribute
// to the completion value.
+ CHECK(scope() != nullptr);
Variable* backup = scope()->NewTemporary(
factory()->ast_value_factory()->dot_result_string());
Expression* backup_proxy = factory()->NewVariableProxy(backup);
@@ -245,7 +247,7 @@ void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
node->set_try_block(replacement_->AsBlock());
replacement_ = node;
- if (FLAG_harmony_completion && !is_set_) {
+ if (!is_set_) {
is_set_ = true;
replacement_ = AssignUndefinedBefore(node);
}
@@ -263,7 +265,7 @@ void Processor::VisitSwitchStatement(SwitchStatement* node) {
is_set_ = is_set_ && set_after;
replacement_ = node;
- if (FLAG_harmony_completion && !is_set_) {
+ if (!is_set_) {
is_set_ = true;
replacement_ = AssignUndefinedBefore(node);
}
@@ -287,7 +289,7 @@ void Processor::VisitWithStatement(WithStatement* node) {
node->set_statement(replacement_);
replacement_ = node;
- if (FLAG_harmony_completion && !is_set_) {
+ if (!is_set_) {
is_set_ = true;
replacement_ = AssignUndefinedBefore(node);
}
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 73175934b5..2d5a579583 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -39,7 +39,8 @@ void Utf16CharacterStream::ResetToBookmark() { UNREACHABLE(); }
Scanner::Scanner(UnicodeCache* unicode_cache)
: unicode_cache_(unicode_cache),
bookmark_c0_(kNoBookmark),
- octal_pos_(Location::invalid()) {
+ octal_pos_(Location::invalid()),
+ found_html_comment_(false) {
bookmark_current_.literal_chars = &bookmark_current_literal_;
bookmark_current_.raw_literal_chars = &bookmark_current_raw_literal_;
bookmark_next_.literal_chars = &bookmark_next_literal_;
@@ -438,7 +439,10 @@ Token::Value Scanner::ScanHtmlComment() {
Advance();
if (c0_ == '-') {
Advance();
- if (c0_ == '-') return SkipSingleLineComment();
+ if (c0_ == '-') {
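+ // Remember that an HTML comment ("<!--") was scanned; clients can
+ // query this via Scanner::FoundHtmlComment() (added in scanner.h).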
+ found_html_comment_ = true;
+ return SkipSingleLineComment();
+ }
PushBack('-'); // undo Advance()
}
PushBack('!'); // undo Advance()
@@ -1206,7 +1210,9 @@ static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
(keyword_length <= 8 || input[8] == keyword[8]) && \
(keyword_length <= 9 || input[9] == keyword[9])) { \
if (escaped) { \
- return token == Token::FUTURE_STRICT_RESERVED_WORD \
+ /* TODO(adamk): YIELD should be handled specially. */ \
+ return (token == Token::FUTURE_STRICT_RESERVED_WORD || \
+ token == Token::LET || token == Token::STATIC) \
? Token::ESCAPED_STRICT_RESERVED_WORD \
: Token::ESCAPED_KEYWORD; \
} \
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 1d0aba0611..3f9bbb54a4 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -448,6 +448,8 @@ class Scanner {
bool IdentifierIsFutureStrictReserved(const AstRawString* string) const;
+ bool FoundHtmlComment() const { return found_html_comment_; }
+
private:
// The current and look-ahead token.
struct TokenDesc {
@@ -473,6 +475,7 @@ class Scanner {
current_.literal_chars = NULL;
current_.raw_literal_chars = NULL;
next_next_.token = Token::UNINITIALIZED;
+ found_html_comment_ = false;
}
// Support BookmarkScope functionality.
@@ -752,6 +755,9 @@ class Scanner {
// Whether there is a multi-line comment that contains a
// line-terminator after the current token, and before the next.
bool has_multiline_comment_before_next_;
+
+ // Whether this scanner encountered an HTML comment.
+ bool found_html_comment_;
};
} // namespace internal
diff --git a/deps/v8/src/parsing/token.h b/deps/v8/src/parsing/token.h
index fee1f7e85a..7a62b4d915 100644
--- a/deps/v8/src/parsing/token.h
+++ b/deps/v8/src/parsing/token.h
@@ -280,6 +280,22 @@ class Token {
}
}
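+ // Evaluates `op1 <op> op2` for an arithmetic comparison token, e.g.
+ // EvalComparison(Token::LT, 1.0, 2.0) is true (illustrative example).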
+ static bool EvalComparison(Value op, double op1, double op2) {
+ DCHECK(IsArithmeticCompareOp(op));
+ switch (op) {
+ case Token::EQ:
+ case Token::EQ_STRICT: return (op1 == op2);
+ case Token::NE: return (op1 != op2);
+ case Token::LT: return (op1 < op2);
+ case Token::GT: return (op1 > op2);
+ case Token::LTE: return (op1 <= op2);
+ case Token::GTE: return (op1 >= op2);
+ default:
+ UNREACHABLE();
+ return false;
+ }
+ }
+
static bool IsBitOp(Value op) {
return (BIT_OR <= op && op <= SHR) || op == BIT_NOT;
}
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index b384d3f4f9..42e220809f 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -202,8 +202,8 @@ void RelocInfo::set_target_object(Object* target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
target->IsHeapObject()) {
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+ host(), this, HeapObject::cast(target));
}
}
@@ -248,9 +248,8 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode write_barrier_mode,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(host(), NULL, cell);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ cell);
}
}
@@ -329,39 +328,6 @@ void RelocInfo::WipeOut() {
}
-bool RelocInfo::IsPatchedReturnSequence() {
- //
- // The patched return sequence is defined by
- // BreakLocation::SetDebugBreakAtReturn()
- // FIXED_SEQUENCE
-
- Instr instr0 = Assembler::instr_at(pc_);
- Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
-#if V8_TARGET_ARCH_PPC64
- Instr instr3 = Assembler::instr_at(pc_ + (3 * Assembler::kInstrSize));
- Instr instr4 = Assembler::instr_at(pc_ + (4 * Assembler::kInstrSize));
- Instr binstr = Assembler::instr_at(pc_ + (7 * Assembler::kInstrSize));
-#else
- Instr binstr = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize);
-#endif
- bool patched_return =
- ((instr0 & kOpcodeMask) == ADDIS && (instr1 & kOpcodeMask) == ORI &&
-#if V8_TARGET_ARCH_PPC64
- (instr3 & kOpcodeMask) == ORIS && (instr4 & kOpcodeMask) == ORI &&
-#endif
- (binstr == 0x7d821008)); // twge r2, r2
-
- // printf("IsPatchedReturnSequence: %d\n", patched_return);
- return patched_return;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- Instr current_instr = Assembler::instr_at(pc_);
- return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
-}
-
-
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 147fb59aae..aed149bcab 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -55,7 +55,7 @@ static unsigned CpuFeaturesImpliedByCompiler() {
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
- cache_line_size_ = 128;
+ icache_line_size_ = 128;
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
@@ -85,6 +85,9 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Assume support
supported_ |= (1u << FPU);
}
+ if (cpu.icache_line_size() != base::CPU::UNKNOWN_CACHE_LINE_SIZE) {
+ icache_line_size_ = cpu.icache_line_size();
+ }
#elif V8_OS_AIX
// Assume FP support and a default cache line size
supported_ |= (1u << FPU);
@@ -1504,14 +1507,14 @@ void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
// Code address skips the function descriptor "header".
// TOC and static chain are ignored and set to 0.
void Assembler::function_descriptor() {
-#if ABI_USES_FUNCTION_DESCRIPTORS
- Label instructions;
- DCHECK(pc_offset() == 0);
- emit_label_addr(&instructions);
- dp(0);
- dp(0);
- bind(&instructions);
-#endif
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ Label instructions;
+ DCHECK(pc_offset() == 0);
+ emit_label_addr(&instructions);
+ dp(0);
+ dp(0);
+ bind(&instructions);
+ }
}
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index e84d695251..58c6c94dc6 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -46,15 +46,24 @@
#include "src/assembler.h"
#include "src/ppc/constants-ppc.h"
-#define ABI_USES_FUNCTION_DESCRIPTORS \
- (V8_HOST_ARCH_PPC && (V8_OS_AIX || \
- (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN)))
+#if V8_HOST_ARCH_PPC && \
+ (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))
+#define ABI_USES_FUNCTION_DESCRIPTORS 1
+#else
+#define ABI_USES_FUNCTION_DESCRIPTORS 0
+#endif
-#define ABI_PASSES_HANDLES_IN_REGS \
- (!V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64)
+#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
+#define ABI_PASSES_HANDLES_IN_REGS 1
+#else
+#define ABI_PASSES_HANDLES_IN_REGS 0
+#endif
-#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \
- (!V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN)
+#if !V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN
+#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
+#else
+#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
+#endif
#if !V8_HOST_ARCH_PPC || (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
#define ABI_CALL_VIA_IP 1
@@ -63,9 +72,9 @@
#endif
#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
-#define ABI_TOC_REGISTER Register::kCode_r2
+#define ABI_TOC_REGISTER 2
#else
-#define ABI_TOC_REGISTER Register::kCode_r13
+#define ABI_TOC_REGISTER 13
#endif
#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
@@ -247,7 +256,7 @@ Register ToRegister(int num);
// Coprocessor register
struct CRegister {
- bool is_valid() const { return 0 <= reg_code && reg_code < 16; }
+ bool is_valid() const { return 0 <= reg_code && reg_code < 8; }
bool is(CRegister creg) const { return reg_code == creg.reg_code; }
int code() const {
DCHECK(is_valid());
@@ -273,14 +282,9 @@ const CRegister cr4 = {4};
const CRegister cr5 = {5};
const CRegister cr6 = {6};
const CRegister cr7 = {7};
-const CRegister cr8 = {8};
-const CRegister cr9 = {9};
-const CRegister cr10 = {10};
-const CRegister cr11 = {11};
-const CRegister cr12 = {12};
-const CRegister cr13 = {13};
-const CRegister cr14 = {14};
-const CRegister cr15 = {15};
+
+// TODO(ppc) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
// -----------------------------------------------------------------------------
// Machine instruction Operands
@@ -1203,7 +1207,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const SourcePosition position);
+ void RecordDeoptReason(const int reason, int raw_position);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/ppc/builtins-ppc.cc
index 0476cd27e1..f0b76ccc39 100644
--- a/deps/v8/src/ppc/builtins-ppc.cc
+++ b/deps/v8/src/ppc/builtins-ppc.cc
@@ -136,6 +136,107 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- lr : return address
+ // -- sp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- sp[(argc + 1) * 8] : receiver
+ // -----------------------------------
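+ // Overview: iterate over the arguments, keeping the running result
+ // tagged in r4 and as a double in d1. Non-number arguments are first
+ // converted with the ToNumberStub, any NaN forces a NaN result, and
+ // equal values are disambiguated for -0 vs. +0 via TestDoubleIsMinusZero.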
+ Condition const cond_done = (kind == MathMaxMinKind::kMin) ? lt : gt;
+ Heap::RootListIndex const root_index =
+ (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+ : Heap::kMinusInfinityValueRootIndex;
+ DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
+
+ // Load the accumulator with the default return value (either -Infinity or
+ // +Infinity), with the tagged value in r4 and the double value in d1.
+ __ LoadRoot(r4, root_index);
+ __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+
+ // Set up state for the loop:
+ // r5: address of arg[0] + kPointerSize
+ // r6: number of slots to drop at exit (arguments + receiver)
+ __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+ __ add(r5, sp, r5);
+ __ addi(r6, r3, Operand(1));
+
+ Label done_loop, loop;
+ __ bind(&loop);
+ {
+ // Check if all parameters are done.
+ __ cmpl(r5, sp);
+ __ ble(&done_loop);
+
+ // Load the next parameter tagged value into r3.
+ __ LoadPU(r3, MemOperand(r5, -kPointerSize));
+
+ // Load the double value of the parameter into d2, first converting the
+ // parameter to a number with the ToNumberStub if necessary.
+ Label convert, convert_smi, convert_number, done_convert;
+ __ bind(&convert);
+ __ JumpIfSmi(r3, &convert_smi);
+ __ LoadP(r7, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ JumpIfRoot(r7, Heap::kHeapNumberMapRootIndex, &convert_number);
+ {
+ // Parameter is not a Number; use the ToNumberStub to convert it.
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r6);
+ __ Push(r4, r5, r6);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Pop(r4, r5, r6);
+ __ SmiUntag(r6);
+ {
+ // Restore the double accumulator value (d1).
+ Label done_restore;
+ __ SmiToDouble(d1, r4);
+ __ JumpIfSmi(r4, &done_restore);
+ __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+ __ bind(&done_restore);
+ }
+ }
+ __ b(&convert);
+ __ bind(&convert_number);
+ __ lfd(d2, FieldMemOperand(r3, HeapNumber::kValueOffset));
+ __ b(&done_convert);
+ __ bind(&convert_smi);
+ __ SmiToDouble(d2, r3);
+ __ bind(&done_convert);
+
+ // Perform the actual comparison with the accumulator value on the left hand
+ // side (d1) and the next parameter value on the right hand side (d2).
+ Label compare_nan, compare_swap;
+ __ fcmpu(d1, d2);
+ __ bunordered(&compare_nan);
+ __ b(cond_done, &loop);
+ __ b(CommuteCondition(cond_done), &compare_swap);
+
+ // Left and right hand sides are equal; check for -0 vs. +0.
+ __ TestDoubleIsMinusZero(reg, r7, r8);
+ __ bne(&loop);
+
+ // Update accumulator. Result is on the right hand side.
+ __ bind(&compare_swap);
+ __ fmr(d1, d2);
+ __ mr(r4, r3);
+ __ b(&loop);
+
+ // At least one side is NaN, which means that the result will be NaN too.
+ // We still need to visit the rest of the arguments.
+ __ bind(&compare_nan);
+ __ LoadRoot(r4, Heap::kNanValueRootIndex);
+ __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset));
+ __ b(&loop);
+ }
+
+ __ bind(&done_loop);
+ __ mr(r3, r4);
+ __ Drop(r6);
+ __ Ret();
+}
+
+// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
@@ -230,8 +331,9 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r5, r4, r6); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(r5); // first argument
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(r5);
}
__ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
@@ -359,8 +461,9 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r5, r4, r6); // first argument, constructor, new target
- __ CallRuntime(Runtime::kNewObject);
+ __ Push(r5); // first argument
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(r5);
}
__ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
@@ -368,24 +471,6 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
}
-static void CallRuntimePassFunction(MacroAssembler* masm,
- Runtime::FunctionId function_id) {
- // ----------- S t a t e -------------
- // -- r4 : target function (preserved for callee)
- // -- r6 : new target (preserved for callee)
- // -----------------------------------
-
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the target function and the new target.
- // Push function as parameter to the runtime call.
- __ Push(r4, r6, r4);
-
- __ CallRuntime(function_id, 1);
- // Restore target function and new target.
- __ Pop(r4, r6);
-}
-
-
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
@@ -393,9 +478,29 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ JumpToJSEntry(ip);
}
-
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
- __ addi(ip, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- r3 : argument count (preserved for callee)
+ // -- r4 : target function (preserved for callee)
+ // -- r6 : new target (preserved for callee)
+ // -----------------------------------
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ // Push the number of arguments to the callee.
+ // Push a copy of the target function and the new target.
+ // Push function as parameter to the runtime call.
+ __ SmiTag(r3);
+ __ Push(r3, r4, r6, r4);
+
+ __ CallRuntime(function_id, 1);
+ __ mr(r5, r3);
+
+ // Restore target function and new target.
+ __ Pop(r3, r4, r6);
+ __ SmiUntag(r3);
+ }
+ __ addi(ip, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(ip);
}
@@ -411,8 +516,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ cmpl(sp, ip);
__ bge(&ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
@@ -421,7 +525,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool create_implicit_receiver) {
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
@@ -448,142 +553,18 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ SmiTag(r3);
__ Push(r5, r3);
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- // Verify that the new target is a JSFunction.
- __ CompareObjectType(r6, r8, r7, JS_FUNCTION_TYPE);
- __ bne(&rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- // r6: new target
- __ LoadP(r5,
- FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r5, &rt_call);
- __ CompareObjectType(r5, r8, r7, MAP_TYPE);
- __ bne(&rt_call);
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ LoadP(r8, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
- __ cmp(r4, r8);
- __ bne(&rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // r4: constructor function
- // r5: initial map
- // r6: new target
- __ CompareInstanceType(r5, r8, JS_FUNCTION_TYPE);
- __ beq(&rt_call);
-
- // Now allocate the JSObject on the heap.
- // r4: constructor function
- // r5: initial map
- // r6: new target
- __ lbz(r10, FieldMemOperand(r5, Map::kInstanceSizeOffset));
-
- __ Allocate(r10, r7, r10, r9, &rt_call, SIZE_IN_WORDS);
-
- // Allocated the JSObject, now initialize the fields. Map is set to
- // initial map and properties and elements are set to empty fixed array.
- // r4: constructor function
- // r5: initial map
- // r6: new target
- // r7: JSObject (not HeapObject tagged - the actual address).
- // r10: start of next object
- __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r5, MemOperand(r7, JSObject::kMapOffset));
- __ StoreP(r9, MemOperand(r7, JSObject::kPropertiesOffset));
- __ StoreP(r9, MemOperand(r7, JSObject::kElementsOffset));
- __ addi(r8, r7, Operand(JSObject::kElementsOffset + kPointerSize));
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ addi(r7, r7, Operand(kHeapObjectTag));
-
- // Fill all the in-object properties with the appropriate filler.
- // r7: JSObject (tagged)
- // r8: First in-object property of JSObject (not tagged)
- __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
- // Check if slack tracking is enabled.
- __ lwz(r3, bit_field3);
- __ DecodeField<Map::ConstructionCounter>(r11, r3);
- // r11: slack tracking counter
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
- __ blt(&no_inobject_slack_tracking);
- // Decrease generous allocation count.
- __ Add(r3, r3, -(1 << Map::ConstructionCounter::kShift), r0);
- __ stw(r3, bit_field3);
-
- // Allocate object with a slack.
- __ lbz(r3, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
- __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
- __ sub(r3, r10, r3);
- // r3: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(r8, r3);
- __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
- }
- __ InitializeFieldsWithFiller(r8, r3, r9);
-
- // To allow truncation fill the remaining fields with one pointer
- // filler map.
- __ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(r8, r10, r9);
-
- // r11: slack tracking counter value before decreasing.
- __ cmpi(r11, Operand(Map::kSlackTrackingCounterEnd));
- __ bne(&allocated);
-
- // Push the constructor, new_target and the object to the stack,
- // and then the initial map as an argument to the runtime call.
- __ Push(r4, r6, r7, r5);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(r4, r6, r7);
-
- // Continue with JSObject being successfully allocated
- // r4: constructor function
- // r6: new target
- // r7: JSObject
- __ b(&allocated);
-
- __ bind(&no_inobject_slack_tracking);
- }
-
- __ InitializeFieldsWithFiller(r8, r10, r9);
-
- // Continue with JSObject being successfully allocated
- // r4: constructor function
- // r6: new target
- // r7: JSObject
- __ b(&allocated);
- }
-
- // Allocate the new receiver object using the runtime call.
- // r4: constructor function
- // r6: new target
- __ bind(&rt_call);
-
- // Push the constructor and new_target twice, second pair as arguments
- // to the runtime call.
- __ Push(r4, r6, r4, r6);
- __ CallRuntime(Runtime::kNewObject);
+ // Allocate the new receiver object.
+ __ Push(r4, r6);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ mr(r7, r3);
__ Pop(r4, r6);
- // Receiver for constructor call allocated.
- // r4: constructor function
- // r6: new target
- // r7: JSObject
- __ bind(&allocated);
+ // ----------- S t a t e -------------
+ // -- r4: constructor function
+ // -- r6: new target
+ // -- r7: newly allocated object
+ // -----------------------------------
// Retrieve smi-tagged arguments count from the stack.
__ LoadP(r3, MemOperand(sp));
@@ -680,6 +661,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Leave construct frame.
}
+ // ES6 9.2.2. Step 13+
+ // Check that the result is not a Smi, indicating that the constructor result
+ // from a derived class is neither undefined nor an Object.
+ if (check_derived_construct) {
+ Label dont_throw;
+ __ JumpIfNotSmi(r3, &dont_throw);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+ }
+ __ bind(&dont_throw);
+ }
+
__ SmiToPtrArrayOffset(r4, r4);
__ add(sp, sp, r4);
__ addi(sp, sp, Operand(kPointerSize));
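
The check above relies on pointer tagging: heap objects carry a set low bit, so a Smi result can be rejected with a single bit test, and the undefined/object cases were already resolved inside the construct frame. A hedged stand-alone model of that test (illustrative types, not the V8 API):

#include <cstdint>
#include <stdexcept>

// Illustrative tagging model: heap object pointers carry a set low bit,
// Smis a clear one, so one bit test separates "returned an object" from
// "returned a primitive number".
bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }

uintptr_t CheckDerivedConstructResult(uintptr_t tagged_result) {
  if (IsSmi(tagged_result)) {
    // Mirrors the Runtime::kThrowDerivedConstructorReturnedNonObject call.
    throw std::runtime_error("derived constructor returned non-object");
  }
  return tagged_result;  // the &dont_throw fall-through
}

int main() {
  return CheckDerivedConstructResult(0x1001) == 0x1001 ? 0 : 1;  // tagged ptr
}
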
@@ -691,17 +685,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, true);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, true);
}
@@ -846,10 +846,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// o sp: stack pointer
// o lr: return address
//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-ppc.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -857,17 +855,23 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushFixedFrame(r4);
__ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- __ push(r6);
-
- // Push zero for bytecode array offset.
- __ li(r3, Operand::Zero());
- __ push(r3);
  // Get the bytecode array from the function object and load it into
  // kInterpreterBytecodeArrayRegister.
__ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ Label array_done;
+ Register debug_info = r5;
+ DCHECK(!debug_info.is(r3));
+ __ LoadP(debug_info,
+ FieldMemOperand(r3, SharedFunctionInfo::kDebugInfoOffset));
+ // Load original bytecode array or the debug copy.
__ LoadP(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpSmiLiteral(debug_info, DebugInfo::uninitialized(), r0);
+ __ beq(&array_done);
+ __ LoadP(kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ __ bind(&array_done);
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
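
The selection above runs the debugger's copy of the bytecode (which carries breakpoints) whenever the SharedFunctionInfo has initialized debug info, and the original array otherwise. A rough model of the branch, with illustrative field names standing in for the real heap layout and nullptr playing the role of the DebugInfo::uninitialized() sentinel:

// Rough model of the bytecode-array selection; not the real V8 classes.
struct SharedFunctionInfoModel {
  const void* function_data;   // the original BytecodeArray
  const void* debug_info;      // set once the debugger instruments the code
  const void* debug_bytecode;  // the DebugInfo::kAbstractCodeIndex slot
};

const void* SelectBytecodeArray(const SharedFunctionInfoModel& sfi) {
  if (sfi.debug_info == nullptr) return sfi.function_data;  // &array_done path
  return sfi.debug_bytecode;  // debug copy carrying breakpoints
}

int main() {
  int original = 0;
  SharedFunctionInfoModel sfi{&original, nullptr, nullptr};
  return SelectBytecodeArray(sfi) == &original ? 0 : 1;
}
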
@@ -878,6 +882,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Push new.target, bytecode array and zero for bytecode array offset.
+ __ li(r3, Operand::Zero());
+ __ Push(r6, kInterpreterBytecodeArrayRegister, r3);
+
// Allocate the local and temporary register file on the stack.
{
// Load frame size (word) from the BytecodeArray object.
@@ -908,23 +916,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
- // - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Allow simulator stop operations if FLAG_stop_at is set.
// - Code aging of the BytecodeArray object.
- // Perform stack guard check.
- {
- Label ok;
- __ LoadRoot(r0, Heap::kStackLimitRootIndex);
- __ cmp(sp, r0);
- __ bge(&ok);
- __ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard);
- __ pop(kInterpreterBytecodeArrayRegister);
- __ bind(&ok);
- }
-
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
@@ -932,10 +926,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ addi(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ mov(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Dispatch to the first bytecode handler for the function.
__ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
@@ -946,7 +939,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// and header removal.
__ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
- __ bkpt(0); // Does not return here.
+
+ // Even though the first bytecode handler was called, we will never return.
+ __ Abort(kUnexpectedReturnFromBytecodeHandler);
}
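
Dispatch itself is a byte load followed by an indexed load from the per-isolate table, which now lives at a fixed external address instead of a heap FixedArray (so there is no header-size adjustment anymore). A stand-alone sketch of that two-step dispatch; the opcode value and handler below are made up for illustration:

#include <cstddef>
#include <cstdint>
#include <cstdio>

using BytecodeHandler = void (*)();

void HandleMadeUpOpcode() { std::puts("handler for opcode 0x07"); }

int main() {
  BytecodeHandler dispatch_table[256] = {};  // per-isolate table stand-in
  dispatch_table[0x07] = &HandleMadeUpOpcode;

  const uint8_t bytecode_stream[] = {0x07};
  std::size_t bytecode_offset = 0;                    // offset past the header
  uint8_t opcode = bytecode_stream[bytecode_offset];  // __ lbzx(r4, ...)
  dispatch_table[opcode]();                           // __ Call(ip)
}
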
@@ -983,7 +978,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
// static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r5 : the address of the first argument to be pushed. Subsequent
@@ -999,7 +995,9 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
Generate_InterpreterPushArgs(masm, r5, r6, r7);
// Call the target.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
}
@@ -1028,45 +1026,24 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
}
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- // Save accumulator register and pass the deoptimization type to
- // the runtime system.
- __ LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(type)));
- __ Push(kInterpreterAccumulatorRegister, r4);
- __ CallRuntime(Runtime::kNotifyDeoptimized);
- __ pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts).
- __ Drop(1);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Initialize register file register and dispatch table register.
__ addi(kInterpreterRegisterFileRegister, fp,
Operand(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ addi(kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ mov(kInterpreterDispatchTableRegister,
+ Operand(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Get the context from the frame.
- // TODO(rmcilroy): Update interpreter frame to expect current context at the
- // context slot instead of the function context.
__ LoadP(kContextRegister,
MemOperand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
- __ LoadP(r4,
- MemOperand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
- __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(kInterpreterBytecodeArrayRegister,
- FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
+ __ LoadP(
+ kInterpreterBytecodeArrayRegister,
+ MemOperand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1094,6 +1071,29 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
}
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+ // Pass the deoptimization type to the runtime system.
+ __ LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(type)));
+ __ Push(r4);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ // Tear down internal frame.
+ }
+
+  // Drop state (we don't use these for interpreter deopts) and pop the
+ // accumulator value into the accumulator register.
+ __ Drop(1);
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ // Enter the bytecode dispatch.
+ Generate_EnterBytecodeDispatch(masm);
+}
+
+
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
@@ -1108,22 +1108,32 @@ void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the address of the interpreter entry trampoline as a return address.
+  // This simulates the initial call to bytecode handlers in the interpreter
+  // entry trampoline. The return will never actually be taken, but our stack
+  // walker uses this address to determine whether a frame is interpreted.
+ __ mov(r0,
+ Operand(masm->isolate()->builtins()->InterpreterEntryTrampoline()));
+ __ mtlr(r0);
+
+ Generate_EnterBytecodeDispatch(masm);
+}
+
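
A sketch of the idea behind planting the trampoline address in the link register: a frame walker can classify a frame as interpreted purely from its return address. This is an illustrative model, not V8's actual frame iterator:

#include <cassert>
#include <cstdint>

struct FrameModel {
  uintptr_t return_address;
};

// A frame counts as interpreted when its return address matches the
// interpreter entry trampoline placed in lr above.
bool IsInterpretedFrame(const FrameModel& frame, uintptr_t trampoline) {
  return frame.return_address == trampoline;
}

int main() {
  const uintptr_t trampoline = 0x1000;  // made-up code address
  assert(IsInterpretedFrame(FrameModel{trampoline}, trampoline));
  assert(!IsInterpretedFrame(FrameModel{0x2000}, trampoline));
}
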
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
@@ -1346,13 +1356,12 @@ void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
// Load the next prototype.
__ bind(&next_prototype);
- __ LoadP(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
- // End if the prototype is null or not hidden.
- __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, receiver_check_failed);
- __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lwz(scratch, FieldMemOperand(map, Map::kBitField3Offset));
- __ DecodeField<Map::IsHiddenPrototype>(scratch, SetRC);
+ __ DecodeField<Map::HasHiddenPrototype>(scratch, SetRC);
__ beq(receiver_check_failed, cr0);
+
+ __ LoadP(receiver, FieldMemOperand(map, Map::kPrototypeOffset));
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Iterate.
__ b(&prototype_loop_start);
@@ -1868,9 +1877,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Try to create the list from an arguments object.
__ bind(&create_arguments);
- __ LoadP(r5, FieldMemOperand(
- r3, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ LoadP(r5, FieldMemOperand(r3, JSArgumentsObject::kLengthOffset));
__ LoadP(r7, FieldMemOperand(r3, JSObject::kElementsOffset));
__ LoadP(ip, FieldMemOperand(r7, FixedArray::kLengthOffset));
__ cmp(r5, ip);
@@ -1946,10 +1953,138 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// | ...
+// | g()'s arg M
+// | ...
+// | g()'s arg 1
+// | g()'s receiver arg
+// | g()'s caller pc
+// ------- g()'s frame: -------
+// | g()'s caller fp <- fp
+// | g()'s context
+// | function pointer: g
+// | -------------------------
+// | ...
+// | ...
+// | f()'s arg N
+// | ...
+// | f()'s arg 1
+// | f()'s receiver arg <- sp (f()'s caller pc is not on the stack yet!)
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Comment cmnt(masm, "[ PrepareForTailCall");
+
+ // Prepare for tail call only if the debugger is not active.
+ Label done;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(masm->isolate());
+ __ mov(scratch1, Operand(debug_is_active));
+ __ lbz(scratch1, MemOperand(scratch1));
+ __ cmpi(scratch1, Operand::Zero());
+ __ bne(&done);
+
+ // Drop possible interpreter handler/stub frame.
+ {
+ Label no_interpreter_frame;
+ __ LoadP(scratch3, MemOperand(fp, StandardFrameConstants::kMarkerOffset));
+ __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
+ __ bne(&no_interpreter_frame);
+ __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&no_interpreter_frame);
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(scratch3,
+ MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&no_arguments_adaptor);
+
+ // Drop arguments adaptor frame and load arguments count.
+ __ mr(fp, scratch2);
+ __ LoadP(scratch1,
+ MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(scratch1);
+ __ b(&formal_parameter_count_loaded);
+
+ __ bind(&no_arguments_adaptor);
+ // Load caller's formal parameter count
+ __ LoadP(scratch1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(scratch1,
+ FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadWordArith(
+ scratch1, FieldMemOperand(
+ scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_PPC64
+ __ SmiUntag(scratch1);
+#endif
+
+ __ bind(&formal_parameter_count_loaded);
+
+ // Calculate the end of destination area where we will put the arguments
+ // after we drop current frame. We add kPointerSize to count the receiver
+ // argument which is not included into formal parameters count.
+ Register dst_reg = scratch2;
+ __ ShiftLeftImm(dst_reg, scratch1, Operand(kPointerSizeLog2));
+ __ add(dst_reg, fp, dst_reg);
+ __ addi(dst_reg, dst_reg,
+ Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
+
+ Register src_reg = scratch1;
+ __ ShiftLeftImm(src_reg, args_reg, Operand(kPointerSizeLog2));
+ __ add(src_reg, sp, src_reg);
+ // Count receiver argument as well (not included in args_reg).
+ __ addi(src_reg, src_reg, Operand(kPointerSize));
+
+ if (FLAG_debug_code) {
+ __ cmpl(src_reg, dst_reg);
+ __ Check(lt, kStackAccessBelowStackPointer);
+ }
+
+ // Restore caller's frame pointer and return address now as they will be
+ // overwritten by the copying loop.
+ __ RestoreFrameStateForTailCall();
+
+  // Now copy the callee arguments to the caller frame going backwards to
+  // avoid corrupting them (source and destination areas could overlap).
+
+ // Both src_reg and dst_reg are pointing to the word after the one to copy,
+ // so they must be pre-decremented in the loop.
+ Register tmp_reg = scratch3;
+ Label loop;
+ __ addi(tmp_reg, args_reg, Operand(1)); // +1 for receiver
+ __ mtctr(tmp_reg);
+ __ bind(&loop);
+ __ LoadPU(tmp_reg, MemOperand(src_reg, -kPointerSize));
+ __ StorePU(tmp_reg, MemOperand(dst_reg, -kPointerSize));
+ __ bdnz(&loop);
+
+ // Leave current frame.
+ __ mr(sp, dst_reg);
+
+ __ bind(&done);
+}
+} // namespace
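
The copy loop at the end of PrepareForTailCall moves the prepared arguments upward over the dropped frame; since the destination sits at higher addresses and may overlap the source, both cursors start one word past the last element and walk downward, matching the pre-decrementing LoadPU/StorePU addressing. A stand-alone C++ model of that loop, with a made-up six-word stack:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Both cursors point one word past the last element to move; walking
// downward is safe for an upward (higher-address) destination even when
// the areas overlap.
void CopyArgsBackwards(intptr_t* dst_end, const intptr_t* src_end,
                       std::size_t count) {
  for (std::size_t i = 0; i < count; ++i) {
    *--dst_end = *--src_end;  // LoadPU/StorePU with -kPointerSize
  }
}

int main() {
  intptr_t stack[6] = {10, 11, 12, 13, 14, 15};  // made-up stack contents
  // Move three words (receiver + two args) from stack[0..2] up to stack[2..4].
  CopyArgsBackwards(stack + 5, stack + 3, 3);
  assert(stack[2] == 10 && stack[3] == 11 && stack[4] == 12);
}
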
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode) {
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the function to call (checked to be a JSFunction)
@@ -2034,6 +2169,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, r3, r6, r7, r8);
+ }
+
__ LoadWordArith(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
#if !V8_TARGET_ARCH_PPC64
@@ -2094,7 +2233,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowStackOverflow, 0);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
}
__ bind(&done);
}
@@ -2138,13 +2277,18 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r4);
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, r3, r6, r7, r8);
+ }
+
// Patch the receiver to [[BoundThis]].
__ LoadP(ip, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset));
__ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
@@ -2165,7 +2309,8 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- r3 : the number of arguments (not including the receiver)
// -- r4 : the target to call (can be any Object).
@@ -2175,14 +2320,25 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ JumpIfSmi(r4, &non_callable);
__ bind(&non_smi);
__ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
- __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
RelocInfo::CODE_TARGET, eq);
__ cmpi(r8, Operand(JS_BOUND_FUNCTION_TYPE));
- __ Jump(masm->isolate()->builtins()->CallBoundFunction(),
+ __ Jump(masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
RelocInfo::CODE_TARGET, eq);
+
+ // Check if target has a [[Call]] internal method.
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsCallable, r0);
+ __ beq(&non_callable, cr0);
+
__ cmpi(r8, Operand(JS_PROXY_TYPE));
__ bne(&non_function);
+ // 0. Prepare for tail call if necessary.
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, r3, r6, r7, r8);
+ }
+
// 1. Runtime fallback for Proxy [[Call]].
__ Push(r4);
// Increase the arguments size to include the pushed function and the
@@ -2195,17 +2351,13 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
__ bind(&non_function);
- // Check if target has a [[Call]] internal method.
- __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
- __ TestBit(r7, Map::kIsCallable, r0);
- __ beq(&non_callable, cr0);
  // Overwrite the original receiver with the (original) target.
__ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
__ StorePX(r4, MemOperand(sp, r8));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 26fbe98cf9..f6befb269a 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -91,9 +91,8 @@ void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
#define __ ACCESS_MASM(masm)
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cond, Strength strength);
+ Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
Register rhs, Label* lhs_not_nan,
Label* slow, bool strict);
@@ -248,7 +247,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
- Condition cond, Strength strength) {
+ Condition cond) {
Label not_identical;
Label heap_number, return_equal;
__ cmp(r3, r4);
@@ -268,14 +267,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical SIMD values since we must throw a TypeError.
__ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
__ beq(slow);
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics, since
- // we need to throw a TypeError. Smis have already been ruled out.
- __ cmpi(r7, Operand(HEAP_NUMBER_TYPE));
- __ beq(&return_equal);
- __ andi(r0, r7, Operand(kIsNotStringMask));
- __ bne(slow, cr0);
- }
} else {
__ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE);
__ beq(&heap_number);
@@ -289,13 +280,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical SIMD values since we must throw a TypeError.
__ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
__ beq(slow);
- if (is_strong(strength)) {
- // Call the runtime on anything that is converted in the semantics,
- // since we need to throw a TypeError. Smis and heap numbers have
- // already been ruled out.
- __ andi(r0, r7, Operand(kIsNotStringMask));
- __ bne(slow, cr0);
- }
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@@ -515,40 +499,49 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
Register lhs, Register rhs,
Label* possible_strings,
- Label* not_both_strings) {
+ Label* runtime_call) {
DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
// r5 is object type of rhs.
- Label object_test;
+ Label object_test, return_unequal, undetectable;
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ andi(r0, r5, Operand(kIsNotStringMask));
__ bne(&object_test, cr0);
__ andi(r0, r5, Operand(kIsNotInternalizedMask));
__ bne(possible_strings, cr0);
__ CompareObjectType(lhs, r6, r6, FIRST_NONSTRING_TYPE);
- __ bge(not_both_strings);
+ __ bge(runtime_call);
__ andi(r0, r6, Operand(kIsNotInternalizedMask));
__ bne(possible_strings, cr0);
- // Both are internalized. We already checked they weren't the same pointer
- // so they are not equal.
- __ li(r3, Operand(NOT_EQUAL));
+ // Both are internalized. We already checked they weren't the same pointer so
+ // they are not equal. Return non-equal by returning the non-zero object
+ // pointer in r3.
__ Ret();
__ bind(&object_test);
- __ cmpi(r5, Operand(FIRST_JS_RECEIVER_TYPE));
- __ blt(not_both_strings);
- __ CompareObjectType(lhs, r5, r6, FIRST_JS_RECEIVER_TYPE);
- __ blt(not_both_strings);
- // If both objects are undetectable, they are equal. Otherwise, they
- // are not equal, since they are different objects and an object is not
- // equal to undefined.
+ __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset));
__ LoadP(r6, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ lbz(r5, FieldMemOperand(r5, Map::kBitFieldOffset));
- __ lbz(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
- __ and_(r3, r5, r6);
- __ andi(r3, r3, Operand(1 << Map::kIsUndetectable));
- __ xori(r3, r3, Operand(1 << Map::kIsUndetectable));
+ __ lbz(r7, FieldMemOperand(r5, Map::kBitFieldOffset));
+ __ lbz(r8, FieldMemOperand(r6, Map::kBitFieldOffset));
+ __ andi(r0, r7, Operand(1 << Map::kIsUndetectable));
+ __ bne(&undetectable, cr0);
+ __ andi(r0, r8, Operand(1 << Map::kIsUndetectable));
+ __ bne(&return_unequal, cr0);
+
+ __ CompareInstanceType(r5, r5, FIRST_JS_RECEIVER_TYPE);
+ __ blt(runtime_call);
+ __ CompareInstanceType(r6, r6, FIRST_JS_RECEIVER_TYPE);
+ __ blt(runtime_call);
+
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in r3.
+ __ Ret();
+
+ __ bind(&undetectable);
+ __ andi(r0, r8, Operand(1 << Map::kIsUndetectable));
+ __ beq(&return_unequal, cr0);
+ __ li(r3, Operand(EQUAL));
__ Ret();
}
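
The rewritten object_test path encodes the loose-equality rules for distinct heap objects: two undetectable objects (such as document.all) compare equal, an undetectable object against anything else compares unequal, and only plain JS receivers can be decided without the runtime. A hedged stand-alone model of those branches (the structs are illustrative, not V8 map bit fields):

#include <cassert>

struct HeapObjectModel {
  bool undetectable;  // Map::kIsUndetectable
  bool js_receiver;   // instance type >= FIRST_JS_RECEIVER_TYPE
};

enum class Cmp { kEqual, kUnequal, kRuntimeCall };

// Loose equality for two *distinct* heap objects (identity was ruled out
// earlier by EmitIdenticalObjectComparison).
Cmp LooseEqualDistinct(const HeapObjectModel& lhs, const HeapObjectModel& rhs) {
  if (lhs.undetectable) {                      // the &undetectable label
    return rhs.undetectable ? Cmp::kEqual : Cmp::kUnequal;
  }
  if (rhs.undetectable) return Cmp::kUnequal;  // the &return_unequal label
  if (!lhs.js_receiver || !rhs.js_receiver) return Cmp::kRuntimeCall;
  return Cmp::kUnequal;  // distinct receivers are never == each other
}

int main() {
  assert(LooseEqualDistinct({true, true}, {true, true}) == Cmp::kEqual);
  assert(LooseEqualDistinct({false, true}, {false, true}) == Cmp::kUnequal);
}
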
@@ -600,7 +593,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc, strength());
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@@ -722,8 +715,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
- : Runtime::kCompare);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -942,7 +934,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ConvertIntToDouble(exponent, double_exponent);
// Returning or bailing out.
- Counters* counters = isolate()->counters();
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
@@ -956,7 +947,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ stfd(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
DCHECK(heapnumber.is(r3));
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret(2);
} else {
__ mflr(r0);
@@ -973,7 +963,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ MovFromFloatResult(double_result);
__ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
__ Ret();
}
}
@@ -1055,14 +1044,13 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Need at least one extra slot for return address location.
int arg_stack_space = 1;
-// PPC LINUX ABI:
-#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
// Pass buffer for return value on stack if necessary
- if (result_size() > 1) {
- DCHECK_EQ(2, result_size());
- arg_stack_space += 2;
+ bool needs_return_buffer =
+ result_size() > 2 ||
+ (result_size() == 2 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS);
+ if (needs_return_buffer) {
+ arg_stack_space += result_size();
}
-#endif
__ EnterExitFrame(save_doubles(), arg_stack_space);
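
needs_return_buffer captures the usual C ABI rule for wide results: when a value cannot come back in the return registers, the caller reserves stack space and passes its address as a hidden first argument, which is why arg_stack_space grows by result_size() slots. A plain C++ illustration of the same convention (the struct and function below are made up):

#include <cstdint>
#include <cstdio>

struct ObjectTriple {  // e.g. a result_size() == 3 runtime result
  intptr_t x, y, z;
};

// The callee writes through the caller-supplied address instead of using
// the return registers.
void RuntimeCallModel(ObjectTriple* return_buffer) {
  *return_buffer = ObjectTriple{1, 2, 3};
}

int main() {
  ObjectTriple result;        // the arg_stack_space += result_size() slots
  RuntimeCallModel(&result);  // buffer address as the implicit first argument
  std::printf("%ld %ld %ld\n", static_cast<long>(result.x),
              static_cast<long>(result.y), static_cast<long>(result.z));
}
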
@@ -1076,9 +1064,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Result returned in registers or stack, depending on result size and ABI.
Register isolate_reg = r5;
-#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
- if (result_size() > 1) {
- // The return value is 16-byte non-scalar value.
+ if (needs_return_buffer) {
+ // The return value is a non-scalar value.
// Use frame storage reserved by calling function to pass return
// buffer as implicit first argument.
__ mr(r5, r4);
@@ -1086,21 +1073,20 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
isolate_reg = r6;
}
-#endif
// Call C built-in.
__ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));
Register target = r15;
-#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
- // Native AIX/PPC64 Linux use a function descriptor.
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
- __ LoadP(ip, MemOperand(r15, 0)); // Instruction address
- target = ip;
-#elif ABI_CALL_VIA_IP
- __ Move(ip, r15);
- target = ip;
-#endif
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ // AIX/PPC64BE Linux use a function descriptor.
+ __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
+ __ LoadP(ip, MemOperand(r15, 0)); // Instruction address
+ target = ip;
+ } else if (ABI_CALL_VIA_IP) {
+ __ Move(ip, r15);
+ target = ip;
+ }
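
On AIX and big-endian PPC64 Linux a function "pointer" actually names a descriptor, so an indirect call must first load the real entry address and the callee's TOC pointer, which is exactly what the two LoadP instructions guarded above do. A hedged sketch of such a descriptor, following the classic three-word convention (field names are illustrative, not from the V8 tree):

#include <cstdint>

struct FunctionDescriptorModel {
  uintptr_t entry_address;  // loaded into ip from MemOperand(target, 0)
  uintptr_t toc_pointer;    // loaded into ABI_TOC_REGISTER from
                            // MemOperand(target, kPointerSize)
  uintptr_t environment;    // part of the ABI, unused by this call path
};

int main() {
  FunctionDescriptorModel fd{0x4000, 0x5000, 0};  // made-up addresses
  // An indirect call branches to the entry word, not to the descriptor.
  return fd.entry_address == 0x4000 ? 0 : 1;
}
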
// To let the GC traverse the return address of the exit frames, we need to
// know where the return address is. The CEntryStub is unmovable, so
@@ -1112,13 +1098,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Call(target);
__ bind(&after_call);
-#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
// If return value is on the stack, pop it to registers.
- if (result_size() > 1) {
+ if (needs_return_buffer) {
+ if (result_size() > 2) __ LoadP(r5, MemOperand(r3, 2 * kPointerSize));
__ LoadP(r4, MemOperand(r3, kPointerSize));
__ LoadP(r3, MemOperand(r3));
}
-#endif
// Check result for exception sentinel.
Label exception_returned;
@@ -1132,9 +1117,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, isolate());
- __ mov(r5, Operand(pending_exception_address));
- __ LoadP(r5, MemOperand(r5));
- __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ mov(r6, Operand(pending_exception_address));
+ __ LoadP(r6, MemOperand(r6));
+ __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
// Cannot use check here as it attempts to generate call into runtime.
__ beq(&okay);
__ stop("Unexpected pending exception");
@@ -1538,332 +1523,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The displacement is the offset of the last parameter (if any)
- // relative to the frame pointer.
- const int kDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
- DCHECK(r4.is(ArgumentsAccessReadDescriptor::index()));
- DCHECK(r3.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(r4, &slow);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
- STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
- __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ beq(&adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register r3. Use unsigned comparison to get negative
- // check for free.
- __ cmpl(r4, r3);
- __ bge(&slow);
-
- // Read the argument from the stack and return it.
- __ sub(r6, r3, r4);
- __ SmiToPtrArrayOffset(r6, r6);
- __ add(r6, fp, r6);
- __ LoadP(r3, MemOperand(r6, kDisplacement));
- __ blr();
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpl(r4, r3);
- __ bge(&slow);
-
- // Read the argument from the adaptor frame and return it.
- __ sub(r6, r3, r4);
- __ SmiToPtrArrayOffset(r6, r6);
- __ add(r6, r5, r6);
- __ LoadP(r3, MemOperand(r6, kDisplacement));
- __ blr();
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ push(r4);
- __ TailCallRuntime(Runtime::kArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // r4 : function
- // r5 : number of parameters (tagged)
- // r6 : parameters pointer
-
- DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ bne(&runtime);
-
- // Patch the arguments.length and the parameters pointer in the current frame.
- __ LoadP(r5, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToPtrArrayOffset(r6, r5);
- __ add(r6, r6, r7);
- __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
- __ bind(&runtime);
- __ Push(r4, r6, r5);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // r4 : function
- // r5 : number of parameters (tagged)
- // r6 : parameters pointer
- // Registers used over whole function:
- // r8 : arguments count (tagged)
- // r9 : mapped parameter count (tagged)
-
- DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ beq(&adaptor_frame);
-
- // No adaptor, parameter count = argument count.
- __ mr(r8, r5);
- __ mr(r9, r5);
- __ b(&try_allocate);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToPtrArrayOffset(r6, r8);
- __ add(r6, r6, r7);
- __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // r8 = argument count (tagged)
- // r9 = parameter count (tagged)
- // Compute the mapped parameter count = min(r5, r8) in r9.
- __ cmp(r5, r8);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(lt, r9, r5, r8);
- } else {
- Label skip;
- __ mr(r9, r5);
- __ blt(&skip);
- __ mr(r9, r8);
- __ bind(&skip);
- }
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- // If there are no mapped parameters, we do not need the parameter_map.
- __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ SmiToPtrArrayOffset(r11, r9);
- __ addi(r11, r11, Operand(kParameterMapHeaderSize));
- __ isel(eq, r11, r0, r11);
- } else {
- Label skip2, skip3;
- __ bne(&skip2);
- __ li(r11, Operand::Zero());
- __ b(&skip3);
- __ bind(&skip2);
- __ SmiToPtrArrayOffset(r11, r9);
- __ addi(r11, r11, Operand(kParameterMapHeaderSize));
- __ bind(&skip3);
- }
-
- // 2. Backing store.
- __ SmiToPtrArrayOffset(r7, r8);
- __ add(r11, r11, r7);
- __ addi(r11, r11, Operand(FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ addi(r11, r11, Operand(Heap::kSloppyArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(r11, r3, r11, r7, &runtime, TAG_OBJECT);
-
- // r3 = address of new object(s) (tagged)
- // r5 = argument count (smi-tagged)
- // Get the arguments boilerplate from the current native context into r4.
- const int kNormalOffset =
- Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
- const int kAliasedOffset =
- Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
- __ LoadP(r7, NativeContextMemOperand());
- __ cmpi(r9, Operand::Zero());
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ LoadP(r11, MemOperand(r7, kNormalOffset));
- __ LoadP(r7, MemOperand(r7, kAliasedOffset));
- __ isel(eq, r7, r11, r7);
- } else {
- Label skip4, skip5;
- __ bne(&skip4);
- __ LoadP(r7, MemOperand(r7, kNormalOffset));
- __ b(&skip5);
- __ bind(&skip4);
- __ LoadP(r7, MemOperand(r7, kAliasedOffset));
- __ bind(&skip5);
- }
-
- // r3 = address of new object (tagged)
- // r5 = argument count (smi-tagged)
- // r7 = address of arguments map (tagged)
- // r9 = mapped parameter count (tagged)
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
- __ LoadRoot(r11, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r11, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r11, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ AssertNotSmi(r4);
- const int kCalleeOffset =
- JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
- __ StoreP(r4, FieldMemOperand(r3, kCalleeOffset), r0);
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(r8);
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- const int kLengthOffset =
- JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
- __ StoreP(r8, FieldMemOperand(r3, kLengthOffset), r0);
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, r7 will point there, otherwise
- // it will point to the backing store.
- __ addi(r7, r3, Operand(Heap::kSloppyArgumentsObjectSize));
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-
- // r3 = address of new object (tagged)
- // r5 = argument count (tagged)
- // r7 = address of parameter map or backing store (tagged)
- // r9 = mapped parameter count (tagged)
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(eq, r4, r7, r4);
- __ beq(&skip_parameter_map);
- } else {
- Label skip6;
- __ bne(&skip6);
- // Move backing store address to r4, because it is
- // expected there when filling in the unmapped arguments.
- __ mr(r4, r7);
- __ b(&skip_parameter_map);
- __ bind(&skip6);
- }
-
- __ LoadRoot(r8, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
- __ AddSmiLiteral(r8, r9, Smi::FromInt(2), r0);
- __ StoreP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
- __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
- r0);
- __ SmiToPtrArrayOffset(r8, r9);
- __ add(r8, r8, r7);
- __ addi(r8, r8, Operand(kParameterMapHeaderSize));
- __ StoreP(r8, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
- r0);
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameter thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop;
- __ mr(r8, r9);
- __ AddSmiLiteral(r11, r5, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
- __ sub(r11, r11, r9);
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ SmiToPtrArrayOffset(r4, r8);
- __ add(r4, r4, r7);
- __ addi(r4, r4, Operand(kParameterMapHeaderSize));
-
- // r4 = address of backing store (tagged)
- // r7 = address of parameter map (tagged)
- // r8 = temporary scratch (a.o., for address calculation)
- // r10 = temporary scratch (a.o., for address calculation)
- // ip = the hole value
- __ SmiUntag(r8);
- __ mtctr(r8);
- __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
- __ add(r10, r4, r8);
- __ add(r8, r7, r8);
- __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-
- __ bind(&parameters_loop);
- __ StorePU(r11, MemOperand(r8, -kPointerSize));
- __ StorePU(ip, MemOperand(r10, -kPointerSize));
- __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
- __ bdnz(&parameters_loop);
-
- // Restore r8 = argument count (tagged).
- __ LoadP(r8, FieldMemOperand(r3, kLengthOffset));
-
- __ bind(&skip_parameter_map);
- // r3 = address of new object (tagged)
- // r4 = address of backing store (tagged)
- // r8 = argument count (tagged)
- // r9 = mapped parameter count (tagged)
- // r11 = scratch
- // Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(r11, Heap::kFixedArrayMapRootIndex);
- __ StoreP(r11, FieldMemOperand(r4, FixedArray::kMapOffset), r0);
- __ StoreP(r8, FieldMemOperand(r4, FixedArray::kLengthOffset), r0);
- __ sub(r11, r8, r9, LeaveOE, SetRC);
- __ Ret(eq, cr0);
-
- Label arguments_loop;
- __ SmiUntag(r11);
- __ mtctr(r11);
-
- __ SmiToPtrArrayOffset(r0, r9);
- __ sub(r6, r6, r0);
- __ add(r11, r4, r0);
- __ addi(r11, r11,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-
- __ bind(&arguments_loop);
- __ LoadPU(r7, MemOperand(r6, -kPointerSize));
- __ StorePU(r7, MemOperand(r11, kPointerSize));
- __ bdnz(&arguments_loop);
-
- // Return.
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- // r8 = argument count (tagged)
- __ bind(&runtime);
- __ Push(r4, r6, r8);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Return address is in lr.
Label slow;
@@ -1887,117 +1546,6 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // r4 : function
- // r5 : number of parameters (tagged)
- // r6 : parameters pointer
-
- DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label try_allocate, runtime;
- __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ bne(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ LoadP(r5, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToPtrArrayOffset(r6, r5);
- __ add(r6, r6, r7);
- __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
- // Try the new space allocation. Start out with computing the size
- // of the arguments object and the elements array in words.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ SmiUntag(r11, r5, SetRC);
- __ beq(&add_arguments_object, cr0);
- __ addi(r11, r11, Operand(FixedArray::kHeaderSize / kPointerSize));
- __ bind(&add_arguments_object);
- __ addi(r11, r11, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
-
- // Do the allocation of both objects in one go.
- __ Allocate(r11, r3, r7, r8, &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
-
- // Get the arguments boilerplate from the current native context.
- __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r7);
-
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
- __ LoadRoot(r8, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r8, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r8, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ AssertSmi(r5);
- __ StoreP(r5,
- FieldMemOperand(r3, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- r0);
-
- // If there are no actual arguments, we're done.
- __ SmiUntag(r9, r5, SetRC);
- __ Ret(eq, cr0);
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ addi(r7, r3, Operand(Heap::kStrictArgumentsObjectSize));
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
- __ LoadRoot(r8, Heap::kFixedArrayMapRootIndex);
- __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
- __ StoreP(r5, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
-
- // Copy the fixed array slots.
- Label loop;
- // Set up r7 to point just prior to the first array slot.
- __ addi(r7, r7,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- __ mtctr(r9);
- __ bind(&loop);
- // Pre-decrement r6 with kPointerSize on each iteration.
- // Pre-decrement in order to skip receiver.
- __ LoadPU(r8, MemOperand(r6, -kPointerSize));
- // Pre-increment r7 with kPointerSize on each iteration.
- __ StorePU(r8, MemOperand(r7, kPointerSize));
- __ bdnz(&loop);
-
- // Return.
- __ Ret();
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ Push(r4, r6, r5);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // r5 : number of parameters (tagged)
- // r6 : parameters pointer
- // r7 : rest parameter index (tagged)
-
- Label runtime;
- __ LoadP(r8, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r3, MemOperand(r8, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ bne(&runtime);
-
- // Patch the arguments.length and the parameters pointer.
- __ LoadP(r5, MemOperand(r8, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToPtrArrayOffset(r0, r5);
- __ add(r6, r8, r0);
- __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
- __ bind(&runtime);
- __ Push(r5, r6, r7);
- __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2255,16 +1803,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Locate the code entry and call it.
__ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-#if ABI_USES_FUNCTION_DESCRIPTORS && defined(USE_SIMULATOR)
- // Even Simulated AIX/PPC64 Linux uses a function descriptor for the
- // RegExp routine. Extract the instruction address here since
- // DirectCEntryStub::GenerateCall will not do it for calls out to
- // what it thinks is C code compiled for the simulator/host
- // platform.
- __ LoadP(code, MemOperand(code, 0)); // Instruction address
-#endif
-
DirectCEntryStub stub(isolate());
stub.GenerateCall(masm, code);
@@ -2658,7 +2196,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&call_function);
__ mov(r3, Operand(argc));
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+ tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
@@ -2696,7 +2235,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&call);
__ mov(r3, Operand(argc));
- __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -3246,6 +2785,37 @@ void ToStringStub::Generate(MacroAssembler* masm) {
}
+void ToNameStub::Generate(MacroAssembler* masm) {
+ // The ToName stub takes one argument in r3.
+ Label is_number;
+ __ JumpIfSmi(r3, &is_number);
+
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ CompareObjectType(r3, r4, r4, LAST_NAME_TYPE);
+ // r3: receiver
+ // r4: receiver instance type
+ __ Ret(le);
+
+ Label not_heap_number;
+ __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
+ __ bne(&not_heap_number);
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ cmpi(r4, Operand(ODDBALL_TYPE));
+ __ bne(&not_oddball);
+ __ LoadP(r3, FieldMemOperand(r3, Oddball::kToStringOffset));
+ __ Ret();
+ __ bind(&not_oddball);
+
+ __ push(r3); // Push argument.
+ __ TailCallRuntime(Runtime::kToName);
+}
+
+
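
ToNameStub's dispatch order is: names return unchanged, Smis and heap numbers tail-call NumberToStringStub, oddballs (true, null, undefined, and friends) return their cached string, and anything else falls through to Runtime::kToName. A compact stand-alone model of that order (the enum and returned strings are illustrative only):

#include <string>

enum class InputKind { kSmi, kName, kHeapNumber, kOddball, kOther };

std::string ToNameModel(InputKind kind) {
  switch (kind) {
    case InputKind::kName:       return "<unchanged>";          // __ Ret(le)
    case InputKind::kSmi:                                       // &is_number
    case InputKind::kHeapNumber: return "<NumberToStringStub>";
    case InputKind::kOddball:    return "<Oddball::kToStringOffset>";
    default:                     return "<Runtime::kToName>";
  }
}

int main() {
  return ToNameModel(InputKind::kOddball) == "<Oddball::kToStringOffset>" ? 0
                                                                          : 1;
}
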
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -3438,18 +3008,14 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(r4, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(r3, r6, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
- if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
- } else {
- if (!Token::IsEqualityOp(op())) {
- __ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
- __ AssertSmi(r4);
- __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
- __ AssertSmi(r3);
- }
- __ sub(r3, r4, r3);
- __ Ret();
+ if (!Token::IsEqualityOp(op())) {
+ __ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
+ __ AssertSmi(r4);
+ __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
+ __ AssertSmi(r3);
}
+ __ sub(r3, r4, r3);
+ __ Ret();
__ bind(&miss);
GenerateMiss(masm);
@@ -3547,7 +3113,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -3770,8 +3336,6 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
if (Token::IsEqualityOp(op())) {
__ sub(r3, r3, r4);
__ Ret();
- } else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
if (op() == Token::LT || op() == Token::LTE) {
__ LoadSmiLiteral(r5, Smi::FromInt(GREATER));
@@ -3820,15 +3384,15 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
-#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
- // Native AIX/PPC64 Linux use a function descriptor.
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
- __ LoadP(ip, MemOperand(target, 0)); // Instruction address
-#else
- // ip needs to be set for DirectCEentryStub::Generate, and also
- // for ABI_CALL_VIA_IP.
- __ Move(ip, target);
-#endif
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ // AIX/PPC64BE Linux use a function descriptor.
+ __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
+ __ LoadP(ip, MemOperand(target, 0)); // Instruction address
+ } else {
+    // ip needs to be set for DirectCEntryStub::Generate, and also
+ // for ABI_CALL_VIA_IP.
+ __ Move(ip, target);
+ }
intptr_t code = reinterpret_cast<intptr_t>(GetCode().location());
__ mov(r0, Operand(code, RelocInfo::CODE_TARGET));
@@ -4142,9 +3706,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
__ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
regs_.scratch0(), &dont_need_remembered_set);
- __ CheckPageFlag(regs_.object(), regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE, ne,
- &dont_need_remembered_set);
+ __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+ &dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
@@ -4791,34 +4354,32 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
#if !defined(USE_SIMULATOR)
uintptr_t entry_hook =
reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
- __ mov(ip, Operand(entry_hook));
+#else
+ // Under the simulator we need to indirect the entry hook through a
+ // trampoline function at a known address.
+ ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+ ExternalReference entry_hook = ExternalReference(
+ &dispatcher, ExternalReference::BUILTIN_CALL, isolate());
-#if ABI_USES_FUNCTION_DESCRIPTORS
- // Function descriptor
- __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
- __ LoadP(ip, MemOperand(ip, 0));
-#elif ABI_CALL_VIA_IP
-// ip set above, so nothing to do.
+ // It additionally takes an isolate as a third parameter
+ __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
#endif
+ __ mov(ip, Operand(entry_hook));
+
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
+ __ LoadP(ip, MemOperand(ip, 0));
+ }
+ // ip set above, so nothing more to do for ABI_CALL_VIA_IP.
+
// PPC LINUX ABI:
__ li(r0, Operand::Zero());
__ StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
-#else
- // Under the simulator we need to indirect the entry hook through a
- // trampoline function at a known address.
- // It additionally takes an isolate as a third parameter
- __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
- ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
- __ mov(ip, Operand(ExternalReference(
- &dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
-#endif
__ Call(ip);
-#if !defined(USE_SIMULATOR)
__ addi(sp, sp, Operand(kNumRequiredStackFrameSlots * kPointerSize));
-#endif
// Restore the stack pointer if needed.
if (frame_alignment > kPointerSize) {
@@ -5143,6 +4704,633 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r4 : target
+ // -- r6 : new target
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r4);
+ __ AssertReceiver(r6);
+
+ // Verify that the new target is a JSFunction.
+ Label new_object;
+ __ CompareObjectType(r6, r5, r5, JS_FUNCTION_TYPE);
+ __ bne(&new_object);
+
+ // Load the initial map and verify that it's in fact a map.
+ __ LoadP(r5, FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(r5, &new_object);
+ __ CompareObjectType(r5, r3, r3, MAP_TYPE);
+ __ bne(&new_object);
+
+ // Fall back to runtime if the target differs from the new target's
+ // initial map constructor.
+ __ LoadP(r3, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
+ __ cmp(r3, r4);
+ __ bne(&new_object);
+
+ // Allocate the JSObject on the heap.
+ Label allocate, done_allocate;
+ __ lbz(r7, FieldMemOperand(r5, Map::kInstanceSizeOffset));
+ __ Allocate(r7, r3, r8, r9, &allocate, SIZE_IN_WORDS);
+ __ bind(&done_allocate);
+
+ // Initialize the JSObject fields.
+ __ StoreP(r5, MemOperand(r3, JSObject::kMapOffset));
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r6, MemOperand(r3, JSObject::kPropertiesOffset));
+ __ StoreP(r6, MemOperand(r3, JSObject::kElementsOffset));
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ addi(r4, r3, Operand(JSObject::kHeaderSize));
+
+ // ----------- S t a t e -------------
+ // -- r3 : result (untagged)
+ // -- r4 : result fields (untagged)
+ // -- r8 : result end (untagged)
+ // -- r5 : initial map
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+
+ // Perform in-object slack tracking if requested.
+ Label slack_tracking;
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ __ lwz(r6, FieldMemOperand(r5, Map::kBitField3Offset));
+ __ DecodeField<Map::ConstructionCounter>(r10, r6, SetRC);
+ __ bne(&slack_tracking, cr0);
+ {
+ // Initialize all in-object fields with undefined.
+ __ InitializeFieldsWithFiller(r4, r8, r9);
+
+ // Add the object tag to make the JSObject real.
+ __ addi(r3, r3, Operand(kHeapObjectTag));
+ __ Ret();
+ }
+ __ bind(&slack_tracking);
+ {
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ __ Add(r6, r6, -(1 << Map::ConstructionCounter::kShift), r0);
+ __ stw(r6, FieldMemOperand(r5, Map::kBitField3Offset));
+
+ // Initialize the in-object fields with undefined.
+ __ lbz(r7, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
+ __ ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2));
+ __ sub(r7, r8, r7);
+ __ InitializeFieldsWithFiller(r4, r7, r9);
+
+ // Initialize the remaining (reserved) fields with one pointer filler map.
+ __ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(r4, r8, r9);
+
+ // Add the object tag to make the JSObject real.
+ __ addi(r3, r3, Operand(kHeapObjectTag));
+
+ // Check if we can finalize the instance size.
+ __ cmpi(r10, Operand(Map::kSlackTrackingCounterEnd));
+ __ Ret(ne);
+
+ // Finalize the instance size.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r3, r5);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(r3);
+ }
+ __ Ret();
+ }
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ ShiftLeftImm(r7, r7,
+ Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
+ __ Push(r5, r7);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(r5);
+ }
+ __ subi(r3, r3, Operand(kHeapObjectTag));
+ __ lbz(r8, FieldMemOperand(r5, Map::kInstanceSizeOffset));
+ __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
+ __ add(r8, r3, r8);
+ __ b(&done_allocate);
+
+ // Fall back to %NewObject.
+ __ bind(&new_object);
+ __ Push(r4, r6);
+ __ TailCallRuntime(Runtime::kNewObject);
+}
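
The slack-tracking arm above decrements a construction counter kept in the map's bit field and keeps padding unused in-object fields with filler; once the counter reaches its end value, a runtime call shrinks the map to the observed instance size. A toy scalar model of that bookkeeping (field names and the counter's start value are assumptions, not V8's):

    #include <cstdio>

    // Toy in-object slack tracking: allocate generously, count constructions,
    // then shrink the instance size to what constructors actually used.
    struct ToyMap {
      int construction_counter = 7;  // assumed start value
      int instance_slots = 8;        // generous initial size
      int used_slots = 3;            // high-water mark of initialized fields
    };

    void Construct(ToyMap& map) {
      if (map.construction_counter == 0) return;  // tracking already finished
      if (--map.construction_counter == 0) {
        map.instance_slots = map.used_slots;      // finalize instance size
        printf("finalized at %d slots\n", map.instance_slots);
      }
    }

    int main() {
      ToyMap m;
      for (int i = 0; i < 10; i++) Construct(m);
    }
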
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r4 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r4);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make r5 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ mr(r5, fp);
+ __ b(&loop_entry);
+ __ bind(&loop);
+ __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
+ __ cmp(ip, r4);
+ __ bne(&loop);
+ }
+
+ // Check if we have rest parameters (only possible if we have an
+ // arguments adaptor frame below the function frame).
+ Label no_rest_parameters;
+ __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&no_rest_parameters);
+
+ // Check if the arguments adaptor frame contains more arguments than
+ // specified by the function's internal formal parameter count.
+ Label rest_parameters;
+ __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadWordArith(
+ r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_PPC64
+ __ SmiTag(r4);
+#endif
+ __ sub(r3, r3, r4, LeaveOE, SetRC);
+ __ bgt(&rest_parameters, cr0);
+
+ // Return an empty rest parameter array.
+ __ bind(&no_rest_parameters);
+ {
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate an empty rest parameter array.
+ Label allocate, done_allocate;
+ __ Allocate(JSArray::kSize, r3, r4, r5, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Setup the rest parameter array in r3.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4);
+ __ StoreP(r4, FieldMemOperand(r3, JSArray::kMapOffset), r0);
+ __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r4, FieldMemOperand(r3, JSArray::kPropertiesOffset), r0);
+ __ StoreP(r4, FieldMemOperand(r3, JSArray::kElementsOffset), r0);
+ __ li(r4, Operand::Zero());
+ __ StoreP(r4, FieldMemOperand(r3, JSArray::kLengthOffset), r0);
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(Smi::FromInt(JSArray::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ }
+ __ b(&done_allocate);
+ }
+
+ __ bind(&rest_parameters);
+ {
+ // Compute the pointer to the first rest parameter (skipping the receiver).
+ __ SmiToPtrArrayOffset(r9, r3);
+ __ add(r5, r5, r9);
+ __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- r3 : number of rest parameters (tagged)
+ // -- r5 : pointer just past first rest parameters
+ // -- r9 : size of rest parameters
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate space for the rest parameter array plus the backing store.
+ Label allocate, done_allocate;
+ __ mov(r4, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+ __ add(r4, r4, r9);
+ __ Allocate(r4, r6, r7, r8, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Setup the elements array in r6.
+ __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
+ __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
+ __ addi(r7, r6,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ {
+ Label loop;
+ __ SmiUntag(r0, r3);
+ __ mtctr(r0);
+ __ bind(&loop);
+ __ LoadPU(ip, MemOperand(r5, -kPointerSize));
+ __ StorePU(ip, MemOperand(r7, kPointerSize));
+ __ bdnz(&loop);
+ __ addi(r7, r7, Operand(kPointerSize));
+ }
+
+ // Setup the rest parameter array in r7.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4);
+ __ StoreP(r4, MemOperand(r7, JSArray::kMapOffset));
+ __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r4, MemOperand(r7, JSArray::kPropertiesOffset));
+ __ StoreP(r6, MemOperand(r7, JSArray::kElementsOffset));
+ __ StoreP(r3, MemOperand(r7, JSArray::kLengthOffset));
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ addi(r3, r7, Operand(kHeapObjectTag));
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r4);
+ __ Push(r3, r5, r4);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ mr(r6, r3);
+ __ Pop(r3, r5);
+ }
+ __ b(&done_allocate);
+ }
+}
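
The rest count is simply the surplus of actual arguments over declared formals; the stub takes the empty-array path whenever the subtraction is not positive. The same computation in plain arithmetic (a sketch, not V8 API):

    #include <algorithm>
    #include <cstdio>

    // Mirrors the "sub(r3, r3, r4)" above: actual arguments minus formal
    // parameters, clamped to zero when nothing is left over.
    int RestCount(int actual_args, int formal_params) {
      return std::max(0, actual_args - formal_params);
    }

    int main() {
      printf("%d\n", RestCount(5, 2));  // f(a, b, ...rest) called with 5 -> 3
      printf("%d\n", RestCount(1, 2));  // fewer args than formals -> empty rest
    }
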
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r4 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r4);
+
+ // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadWordArith(
+ r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_PPC64
+ __ SmiTag(r5);
+#endif
+ __ SmiToPtrArrayOffset(r6, r5);
+ __ add(r6, fp, r6);
+ __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // r4 : function
+ // r5 : number of parameters (tagged)
+ // r6 : parameters pointer
+ // Registers used over whole function:
+ // r8 : arguments count (tagged)
+ // r9 : mapped parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ beq(&adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+ __ mr(r8, r5);
+ __ mr(r9, r5);
+ __ b(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r6, r8);
+ __ add(r6, r6, r7);
+ __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // r8 = argument count (tagged)
+ // r9 = parameter count (tagged)
+ // Compute the mapped parameter count = min(r5, r8) in r9.
+ __ cmp(r5, r8);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(lt, r9, r5, r8);
+ } else {
+ Label skip;
+ __ mr(r9, r5);
+ __ blt(&skip);
+ __ mr(r9, r8);
+ __ bind(&skip);
+ }
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ SmiToPtrArrayOffset(r11, r9);
+ __ addi(r11, r11, Operand(kParameterMapHeaderSize));
+ __ isel(eq, r11, r0, r11);
+ } else {
+ Label skip2, skip3;
+ __ bne(&skip2);
+ __ li(r11, Operand::Zero());
+ __ b(&skip3);
+ __ bind(&skip2);
+ __ SmiToPtrArrayOffset(r11, r9);
+ __ addi(r11, r11, Operand(kParameterMapHeaderSize));
+ __ bind(&skip3);
+ }
+
+ // 2. Backing store.
+ __ SmiToPtrArrayOffset(r7, r8);
+ __ add(r11, r11, r7);
+ __ addi(r11, r11, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ addi(r11, r11, Operand(JSSloppyArgumentsObject::kSize));
+
+ // Do the allocation of all three objects in one go.
+ __ Allocate(r11, r3, r11, r7, &runtime, TAG_OBJECT);
+
+ // r3 = address of new object(s) (tagged)
+ // r5 = argument count (smi-tagged)
+ // Get the arguments boilerplate from the current native context into r4.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+
+ __ LoadP(r7, NativeContextMemOperand());
+ __ cmpi(r9, Operand::Zero());
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ LoadP(r11, MemOperand(r7, kNormalOffset));
+ __ LoadP(r7, MemOperand(r7, kAliasedOffset));
+ __ isel(eq, r7, r11, r7);
+ } else {
+ Label skip4, skip5;
+ __ bne(&skip4);
+ __ LoadP(r7, MemOperand(r7, kNormalOffset));
+ __ b(&skip5);
+ __ bind(&skip4);
+ __ LoadP(r7, MemOperand(r7, kAliasedOffset));
+ __ bind(&skip5);
+ }
+
+ // r3 = address of new object (tagged)
+ // r5 = argument count (smi-tagged)
+ // r7 = address of arguments map (tagged)
+ // r9 = mapped parameter count (tagged)
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
+ __ LoadRoot(r11, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r11, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r11, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+ // Set up the callee in-object property.
+ __ AssertNotSmi(r4);
+ __ StoreP(r4, FieldMemOperand(r3, JSSloppyArgumentsObject::kCalleeOffset),
+ r0);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(r8);
+ __ StoreP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset),
+ r0);
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, r7 will point there, otherwise
+ // it will point to the backing store.
+ __ addi(r7, r3, Operand(JSSloppyArgumentsObject::kSize));
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+ // r3 = address of new object (tagged)
+ // r5 = argument count (tagged)
+ // r7 = address of parameter map or backing store (tagged)
+ // r9 = mapped parameter count (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ isel(eq, r4, r7, r4);
+ __ beq(&skip_parameter_map);
+ } else {
+ Label skip6;
+ __ bne(&skip6);
+ // Move backing store address to r4, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mr(r4, r7);
+ __ b(&skip_parameter_map);
+ __ bind(&skip6);
+ }
+
+ __ LoadRoot(r8, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
+ __ AddSmiLiteral(r8, r9, Smi::FromInt(2), r0);
+ __ StoreP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
+ __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
+ r0);
+ __ SmiToPtrArrayOffset(r8, r9);
+ __ add(r8, r8, r7);
+ __ addi(r8, r8, Operand(kParameterMapHeaderSize));
+ __ StoreP(r8, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
+ r0);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop;
+ __ mr(r8, r9);
+ __ AddSmiLiteral(r11, r5, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
+ __ sub(r11, r11, r9);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ SmiToPtrArrayOffset(r4, r8);
+ __ add(r4, r4, r7);
+ __ addi(r4, r4, Operand(kParameterMapHeaderSize));
+
+ // r4 = address of backing store (tagged)
+ // r7 = address of parameter map (tagged)
+ // r8 = temporary scratch (among others, for address calculation)
+ // r10 = temporary scratch (among others, for address calculation)
+ // ip = the hole value
+ __ SmiUntag(r8);
+ __ mtctr(r8);
+ __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
+ __ add(r10, r4, r8);
+ __ add(r8, r7, r8);
+ __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+
+ __ bind(&parameters_loop);
+ __ StorePU(r11, MemOperand(r8, -kPointerSize));
+ __ StorePU(ip, MemOperand(r10, -kPointerSize));
+ __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
+ __ bdnz(&parameters_loop);
+
+ // Restore r8 = argument count (tagged).
+ __ LoadP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset));
+
+ __ bind(&skip_parameter_map);
+ // r3 = address of new object (tagged)
+ // r4 = address of backing store (tagged)
+ // r8 = argument count (tagged)
+ // r9 = mapped parameter count (tagged)
+ // r11 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(r11, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r11, FieldMemOperand(r4, FixedArray::kMapOffset), r0);
+ __ StoreP(r8, FieldMemOperand(r4, FixedArray::kLengthOffset), r0);
+ __ sub(r11, r8, r9, LeaveOE, SetRC);
+ __ Ret(eq, cr0);
+
+ Label arguments_loop;
+ __ SmiUntag(r11);
+ __ mtctr(r11);
+
+ __ SmiToPtrArrayOffset(r0, r9);
+ __ sub(r6, r6, r0);
+ __ add(r11, r4, r0);
+ __ addi(r11, r11,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+
+ __ bind(&arguments_loop);
+ __ LoadPU(r7, MemOperand(r6, -kPointerSize));
+ __ StorePU(r7, MemOperand(r11, kPointerSize));
+ __ bdnz(&arguments_loop);
+
+ // Return.
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+ // r8 = argument count (tagged)
+ __ bind(&runtime);
+ __ Push(r4, r6, r8);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
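
All three objects come out of a single allocation: an optional parameter map (a FixedArray header plus two extra slots for the context and the backing store, plus one slot per mapped parameter), the backing store, and the JSSloppyArgumentsObject itself. A back-of-the-envelope sizing sketch (the word size and header/object sizes below are stand-ins, not V8's constants):

    #include <cstdio>

    // Mirrors the size computation feeding the single Allocate() call above.
    const int kPointerSize = 8;                           // assumed word size
    const int kFixedArrayHeader = 2 * kPointerSize;       // map + length
    const int kParameterMapHeader = kFixedArrayHeader + 2 * kPointerSize;
    const int kSloppyArgumentsObject = 5 * kPointerSize;  // assumed

    int AllocationSize(int argc, int mapped) {
      int size = 0;
      if (mapped > 0) size += kParameterMapHeader + mapped * kPointerSize;
      size += kFixedArrayHeader + argc * kPointerSize;    // backing store
      size += kSloppyArgumentsObject;                     // arguments object
      return size;
    }

    int main() { printf("%d bytes\n", AllocationSize(3, 2)); }
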
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r4 : function
+ // -- cp : context
+ // -- fp : frame pointer
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertFunction(r4);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make r5 point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ mr(r5, fp);
+ __ b(&loop_entry);
+ __ bind(&loop);
+ __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
+ __ cmp(ip, r4);
+ __ bne(&loop);
+ }
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(ip, MemOperand(r6, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ beq(&arguments_adaptor);
+ {
+ __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadWordArith(
+ r3,
+ FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
+#if V8_TARGET_ARCH_PPC64
+ __ SmiTag(r3);
+#endif
+ __ SmiToPtrArrayOffset(r9, r3);
+ __ add(r5, r5, r9);
+ }
+ __ b(&arguments_done);
+ __ bind(&arguments_adaptor);
+ {
+ __ LoadP(r3, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r9, r3);
+ __ add(r5, r6, r9);
+ }
+ __ bind(&arguments_done);
+ __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- r3 : number of rest parameters (tagged)
+ // -- r5 : pointer just past first rest parameters
+ // -- r9 : size of rest parameters
+ // -- lr : return address
+ // -----------------------------------
+
+ // Allocate space for the strict arguments object plus the backing store.
+ Label allocate, done_allocate;
+ __ mov(r4, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ add(r4, r4, r9);
+ __ Allocate(r4, r6, r7, r8, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Setup the elements array in r6.
+ __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
+ __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
+ __ addi(r7, r6,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+ {
+ Label loop, done_loop;
+ __ SmiUntag(r0, r3, SetRC);
+ __ beq(&done_loop, cr0);
+ __ mtctr(r0);
+ __ bind(&loop);
+ __ LoadPU(ip, MemOperand(r5, -kPointerSize));
+ __ StorePU(ip, MemOperand(r7, kPointerSize));
+ __ bdnz(&loop);
+ __ bind(&done_loop);
+ __ addi(r7, r7, Operand(kPointerSize));
+ }
+
+ // Setup the strict arguments object in r7.
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r4);
+ __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kMapOffset));
+ __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kPropertiesOffset));
+ __ StoreP(r6, MemOperand(r7, JSStrictArgumentsObject::kElementsOffset));
+ __ StoreP(r3, MemOperand(r7, JSStrictArgumentsObject::kLengthOffset));
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ __ addi(r3, r7, Operand(kHeapObjectTag));
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r4);
+ __ Push(r3, r5, r4);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ mr(r6, r3);
+ __ Pop(r3, r5);
+ }
+ __ b(&done_allocate);
+}
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context = cp;
@@ -5446,11 +5634,10 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ b(&leave_exit_frame);
}
-
static void CallApiFunctionStubHelper(MacroAssembler* masm,
const ParameterCount& argc,
bool return_first_arg,
- bool call_data_undefined) {
+ bool call_data_undefined, bool is_lazy) {
// ----------- S t a t e -------------
// -- r3 : callee
// -- r7 : call_data
@@ -5482,12 +5669,14 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
STATIC_ASSERT(FCA::kHolderIndex == 0);
STATIC_ASSERT(FCA::kArgsLength == 7);
- DCHECK(argc.is_immediate() || r3.is(argc.reg()));
+ DCHECK(argc.is_immediate() || r6.is(argc.reg()));
// context save
__ push(context);
- // load context from callee
- __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ if (!is_lazy) {
+ // load context from callee
+ __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ }
// callee
__ push(callee);
@@ -5586,7 +5775,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
bool call_data_undefined = this->call_data_undefined();
CallApiFunctionStubHelper(masm, ParameterCount(r6), false,
- call_data_undefined);
+ call_data_undefined, false);
}
@@ -5594,24 +5783,32 @@ void CallApiAccessorStub::Generate(MacroAssembler* masm) {
bool is_store = this->is_store();
int argc = this->argc();
bool call_data_undefined = this->call_data_undefined();
+ bool is_lazy = this->is_lazy();
CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined);
+ call_data_undefined, is_lazy);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- sp[0] : name
- // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- sp[0] : name
+ // -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
// -- ...
- // -- r5 : api_function_address
+ // -- r5 : api_function_address
// -----------------------------------
Register api_function_address = ApiGetterDescriptor::function_address();
+ int arg0Slot = 0;
+ int accessorInfoSlot = 0;
+ int apiStackSpace = 0;
DCHECK(api_function_address.is(r5));
- __ mr(r3, sp); // r0 = Handle<Name>
- __ addi(r4, r3, Operand(1 * kPointerSize)); // r4 = PCA
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
+ __ mr(r3, sp); // r3 = Handle<Name>
+ __ addi(r4, r3, Operand(1 * kPointerSize)); // r4 = v8::PCI::args_
// If ABI passes Handles (pointer-sized struct) in a register:
//
@@ -5625,37 +5822,38 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// [0] space for DirectCEntryStub's LR save
// [1] copy of Handle (first arg)
// [2] AccessorInfo&
-#if ABI_PASSES_HANDLES_IN_REGS
- const int kAccessorInfoSlot = kStackFrameExtraParamSlot + 1;
- const int kApiStackSpace = 2;
-#else
- const int kArg0Slot = kStackFrameExtraParamSlot + 1;
- const int kAccessorInfoSlot = kArg0Slot + 1;
- const int kApiStackSpace = 3;
-#endif
+ if (ABI_PASSES_HANDLES_IN_REGS) {
+ accessorInfoSlot = kStackFrameExtraParamSlot + 1;
+ apiStackSpace = 2;
+ } else {
+ arg0Slot = kStackFrameExtraParamSlot + 1;
+ accessorInfoSlot = arg0Slot + 1;
+ apiStackSpace = 3;
+ }
FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
+ __ EnterExitFrame(false, apiStackSpace);
-#if !ABI_PASSES_HANDLES_IN_REGS
- // pass 1st arg by reference
- __ StoreP(r3, MemOperand(sp, kArg0Slot * kPointerSize));
- __ addi(r3, sp, Operand(kArg0Slot * kPointerSize));
-#endif
+ if (!ABI_PASSES_HANDLES_IN_REGS) {
+ // pass 1st arg by reference
+ __ StoreP(r3, MemOperand(sp, arg0Slot * kPointerSize));
+ __ addi(r3, sp, Operand(arg0Slot * kPointerSize));
+ }
- // Create PropertyAccessorInfo instance on the stack above the exit frame with
- // r4 (internal::Object** args_) as the data.
- __ StoreP(r4, MemOperand(sp, kAccessorInfoSlot * kPointerSize));
- // r4 = AccessorInfo&
- __ addi(r4, sp, Operand(kAccessorInfoSlot * kPointerSize));
-
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+ // Create v8::PropertyCallbackInfo object on the stack and initialize
+ // its args_ field.
+ __ StoreP(r4, MemOperand(sp, accessorInfoSlot * kPointerSize));
+ __ addi(r4, sp, Operand(accessorInfoSlot * kPointerSize));
+ // r4 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+
+ // +3 is to skip prolog, return address and name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, NULL,
- MemOperand(fp, 6 * kPointerSize), NULL);
+ kStackUnwindSpace, NULL, return_value_operand, NULL);
}
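
return_value_operand addresses the callback's return slot relative to fp: the "+ 3" skips the prolog, the return address, and the name handle, after which kReturnValueOffset indexes into the PropertyCallbackInfo::args_ array. The arithmetic, worked with an assumed offset value:

    #include <cstdio>

    // Illustration only: kReturnValueOffset below is an assumption, not
    // V8's actual PropertyCallbackArguments constant.
    const int kPointerSize = 8;
    const int kReturnValueOffset = 3;

    int main() {
      // (kReturnValueOffset + 3) * kPointerSize, as in return_value_operand.
      int fp_offset = (kReturnValueOffset + 3) * kPointerSize;
      printf("return value lives at fp + %d\n", fp_offset);  // fp + 48
    }
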
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 2bf8b4ee83..d6d86b0fcc 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -58,9 +58,7 @@ UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(&desc);
-#if !ABI_USES_FUNCTION_DESCRIPTORS
- DCHECK(!RelocInfo::RequiresRelocation(desc));
-#endif
+ DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
@@ -96,9 +94,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(&desc);
-#if !ABI_USES_FUNCTION_DESCRIPTORS
- DCHECK(!RelocInfo::RequiresRelocation(desc));
-#endif
+ DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
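
Both hunks fold an #if !ABI_USES_FUNCTION_DESCRIPTORS guard into the assertion itself: DCHECK(A || B) is vacuously true when A holds and reduces to B otherwise, so the checked condition is unchanged while the line now compiles on every target. A minimal sketch of the equivalence (names are stand-ins):

    #include <cassert>

    const bool kUsesDescriptors = false;         // stand-in for the ABI macro
    bool RequiresRelocation() { return false; }  // stand-in for the real test

    int main() {
      // Old shape: if (!kUsesDescriptors) assert(!RequiresRelocation());
      // New shape, same truth table:
      assert(kUsesDescriptors || !RequiresRelocation());
    }
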
diff --git a/deps/v8/src/ppc/cpu-ppc.cc b/deps/v8/src/ppc/cpu-ppc.cc
index a42fa53960..91ea4000e1 100644
--- a/deps/v8/src/ppc/cpu-ppc.cc
+++ b/deps/v8/src/ppc/cpu-ppc.cc
@@ -25,7 +25,7 @@ void CpuFeatures::FlushICache(void* buffer, size_t size) {
return;
}
- const int kCacheLineSize = CpuFeatures::cache_line_size();
+ const int kCacheLineSize = CpuFeatures::icache_line_size();
intptr_t mask = kCacheLineSize - 1;
byte *start =
reinterpret_cast<byte *>(reinterpret_cast<intptr_t>(buffer) & ~mask);
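
FlushICache rounds the buffer start down to a cache-line boundary by masking with (line size - 1) before walking the range a line at a time. The rounding in isolation (line size assumed):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t kCacheLineSize = 128;  // assumed; V8 queries it at runtime
      intptr_t mask = kCacheLineSize - 1;
      intptr_t buffer = 0x1000 + 37;        // arbitrary unaligned address
      intptr_t start = buffer & ~mask;      // round down to the line boundary
      printf("%#lx -> %#lx\n", (long)buffer, (long)start);  // 0x1025 -> 0x1000
    }
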
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index 4232342b93..9ec5cdd11a 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -88,31 +88,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers fp and sp are set to the correct values though.
- // We ensure the values are Smis to avoid confusing the garbage
- // collector in the event that any values are retreived and stored
- // elsewhere.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, reinterpret_cast<intptr_t>(Smi::FromInt(i)));
- }
- input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(
- i, reinterpret_cast<intptr_t>(Memory::Address_at(tos + i)));
- }
-}
-
-
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
@@ -131,8 +106,7 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
// There is no dynamic alignment padding on PPC in the input frame.
return false;
}
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index d9450f8a42..e72658fba7 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -1073,14 +1073,12 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
instr->InstructionBits());
-#if ABI_USES_FUNCTION_DESCRIPTORS
- // The first field will be identified as a jump table entry. We emit the rest
- // of the structure as zero, so just skip past them.
- if (instr->InstructionBits() == 0) {
+ if (ABI_USES_FUNCTION_DESCRIPTORS && instr->InstructionBits() == 0) {
+ // The first field will be identified as a jump table entry. We
+ // emit the rest of the structure as zero, so just skip past them.
Format(instr, "constant");
return Instruction::kInstrSize;
}
-#endif
switch (instr->OpcodeValue() << 26) {
case TWI: {
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index b649f71ea3..3db7bd5c17 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -54,20 +54,6 @@ const Register StringCompareDescriptor::LeftRegister() { return r4; }
const Register StringCompareDescriptor::RightRegister() { return r3; }
-const Register ArgumentsAccessReadDescriptor::index() { return r4; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return r3; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return r4; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return r5; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return r6; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return r5; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return r6; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return r7; }
-
-
const Register ApiGetterDescriptor::function_address() { return r5; }
@@ -96,6 +82,29 @@ void FastNewContextDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4, r6};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -113,6 +122,10 @@ const Register ToStringDescriptor::ReceiverRegister() { return r3; }
// static
+const Register ToNameDescriptor::ReceiverRegister() { return r3; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return r3; }
@@ -165,13 +178,6 @@ void CreateWeakCellDescriptor::InitializePlatformSpecific(
}
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r6, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r4};
@@ -406,6 +412,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+ kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+ kInterpreterDispatchTableRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -417,7 +431,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -429,7 +442,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 9cd35ab01c..14759de0da 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -183,6 +183,10 @@ void MacroAssembler::Drop(int count) {
}
}
+void MacroAssembler::Drop(Register count, Register scratch) {
+ ShiftLeftImm(scratch, count, Operand(kPointerSizeLog2));
+ add(sp, sp, scratch);
+}
void MacroAssembler::Call(Label* target) { b(target, SetLK); }
@@ -298,13 +302,10 @@ void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
void MacroAssembler::InNewSpace(Register object, Register scratch,
Condition cond, Label* branch) {
- // N.B. scratch may be same register as object
DCHECK(cond == eq || cond == ne);
- mov(r0, Operand(ExternalReference::new_space_mask(isolate())));
- and_(scratch, object, r0);
- mov(r0, Operand(ExternalReference::new_space_start(isolate())));
- cmp(scratch, r0);
- b(cond, branch);
+ const int mask =
+ (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+ CheckPageFlag(object, scratch, mask, cond, branch);
}
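
The new InNewSpace derives the page header from the object pointer by masking off the address's low bits (pages are aligned) and then tests the IN_FROM_SPACE/IN_TO_SPACE flag bits, instead of comparing against a global new-space start and mask. A toy version of that test (page size and bit positions are assumptions, not V8's layout):

    #include <cstdint>
    #include <cstdio>

    const uintptr_t kPageSize = 1 << 19;                   // assumed alignment
    const int kInFromSpace = 1 << 3, kInToSpace = 1 << 4;  // assumed flag bits

    int main() {
      uintptr_t object = 7 * kPageSize + 0x458;      // somewhere on page 7
      uintptr_t page = object & ~(kPageSize - 1);    // page header address
      int flags = kInToSpace;                        // pretend header contents
      bool in_new_space = (flags & (kInFromSpace | kInToSpace)) != 0;
      printf("page=%#lx in_new_space=%d\n", (unsigned long)page, in_new_space);
    }
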
@@ -483,6 +484,68 @@ void MacroAssembler::RecordWrite(
}
}
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+ Register code_entry,
+ Register scratch) {
+ const int offset = JSFunction::kCodeEntryOffset;
+
+ // Since a code entry (value) is always in old space, we don't need to update
+ // remembered set. If incremental marking is off, there is nothing for us to
+ // do.
+ if (!FLAG_incremental_marking) return;
+
+ DCHECK(js_function.is(r4));
+ DCHECK(code_entry.is(r7));
+ DCHECK(scratch.is(r8));
+ AssertNotSmi(js_function);
+
+ if (emit_debug_code()) {
+ addi(scratch, js_function, Operand(offset - kHeapObjectTag));
+ LoadP(ip, MemOperand(scratch));
+ cmp(ip, code_entry);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ CheckPageFlag(code_entry, scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+ CheckPageFlag(js_function, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+ const Register dst = scratch;
+ addi(dst, js_function, Operand(offset - kHeapObjectTag));
+
+ // Save caller-saved registers. js_function and code_entry are in the
+ // caller-saved register list.
+ DCHECK(kJSCallerSaved & js_function.bit());
+ DCHECK(kJSCallerSaved & code_entry.bit());
+ mflr(r0);
+ MultiPush(kJSCallerSaved | r0.bit());
+
+ int argument_count = 3;
+ PrepareCallCFunction(argument_count, code_entry);
+
+ mr(r3, js_function);
+ mr(r4, dst);
+ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(this);
+ CallCFunction(
+ ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate()),
+ argument_count);
+ }
+
+ // Restore caller-saved registers (including js_function and code_entry).
+ MultiPop(kJSCallerSaved | r0.bit());
+ mtlr(r0);
+
+ bind(&done);
+}
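
RecordWriteCodeEntryField filters before doing any real work: if the code entry's page is not interesting to the collector, or the function's page cannot hold interesting outgoing pointers, the barrier is a no-op; only when both tests fail does it spill the caller-saved registers and call into C++. A toy model of that filter (flag names are descriptive stand-ins):

    #include <cstdio>

    struct Page {
      bool pointers_to_here_interesting;
      bool pointers_from_here_interesting;
    };

    void RecordWriteSlowPath() { printf("slow path: notify the marker\n"); }

    // Mirrors the two CheckPageFlag tests guarding the C call above.
    void WriteBarrier(const Page& value_page, const Page& holder_page) {
      if (!value_page.pointers_to_here_interesting) return;
      if (!holder_page.pointers_from_here_interesting) return;
      RecordWriteSlowPath();
    }

    int main() {
      WriteBarrier({false, false}, {true, true});  // value page filtered out
      WriteBarrier({true, true}, {true, false});   // holder page filtered out
      WriteBarrier({true, true}, {true, true});    // both fail: slow path
    }
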
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address, Register scratch,
@@ -564,6 +627,16 @@ void MacroAssembler::PopFixedFrame(Register marker_reg) {
mtlr(r0);
}
+void MacroAssembler::RestoreFrameStateForTailCall() {
+ if (FLAG_enable_embedded_constant_pool) {
+ LoadP(kConstantPoolRegister,
+ MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+ set_constant_pool_available(false);
+ }
+ LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ mtlr(r0);
+}
const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
const int MacroAssembler::kNumSafepointSavedRegisters =
@@ -640,28 +713,27 @@ void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
fsub(dst, src, kDoubleRegZero);
}
-
-void MacroAssembler::ConvertIntToDouble(Register src,
- DoubleRegister double_dst) {
- MovIntToDouble(double_dst, src, r0);
- fcfid(double_dst, double_dst);
+void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
+ MovIntToDouble(dst, src, r0);
+ fcfid(dst, dst);
}
-
void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
- DoubleRegister double_dst) {
- MovUnsignedIntToDouble(double_dst, src, r0);
- fcfid(double_dst, double_dst);
+ DoubleRegister dst) {
+ MovUnsignedIntToDouble(dst, src, r0);
+ fcfid(dst, dst);
}
-
-void MacroAssembler::ConvertIntToFloat(const DoubleRegister dst,
- const Register src,
- const Register int_scratch) {
- MovIntToDouble(dst, src, int_scratch);
+void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
+ MovIntToDouble(dst, src, r0);
fcfids(dst, dst);
}
+void MacroAssembler::ConvertUnsignedIntToFloat(Register src,
+ DoubleRegister dst) {
+ MovUnsignedIntToDouble(dst, src, r0);
+ fcfids(dst, dst);
+}
#if V8_TARGET_ARCH_PPC64
void MacroAssembler::ConvertInt64ToDouble(Register src,
@@ -1116,7 +1188,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
Push(new_target);
}
Push(fun, fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -2114,6 +2186,41 @@ void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
}
+void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input,
+ Register scratch1,
+ Register scratch2) {
+#if V8_TARGET_ARCH_PPC64
+ MovDoubleToInt64(scratch1, input);
+ rotldi(scratch1, scratch1, 1);
+ cmpi(scratch1, Operand(1));
+#else
+ MovDoubleToInt64(scratch1, scratch2, input);
+ Label done;
+ cmpi(scratch2, Operand::Zero());
+ bne(&done);
+ lis(scratch2, Operand(SIGN_EXT_IMM16(0x8000)));
+ cmp(scratch1, scratch2);
+ bind(&done);
+#endif
+}
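
TestDoubleIsMinusZero exploits the fact that -0.0 is the one IEEE-754 double whose bit pattern is 0x8000000000000000: rotating that pattern left by one yields exactly 1, so a rotldi plus a compare against 1 suffices on 64-bit. A quick check of the identity:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double minus_zero = -0.0;
      uint64_t bits;
      std::memcpy(&bits, &minus_zero, sizeof bits);   // 0x8000000000000000
      uint64_t rotated = (bits << 1) | (bits >> 63);  // rotldi by 1
      printf("bits=%#llx rotated=%llu\n", (unsigned long long)bits,
             (unsigned long long)rotated);            // rotated == 1
    }
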
+
+void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) {
+#if V8_TARGET_ARCH_PPC64
+ MovDoubleToInt64(scratch, input);
+#else
+ MovDoubleHighToInt(scratch, input);
+#endif
+ cmpi(scratch, Operand::Zero());
+}
+
+void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) {
+#if V8_TARGET_ARCH_PPC64
+ LoadP(scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
+#else
+ lwz(scratch, FieldMemOperand(input, HeapNumber::kExponentOffset));
+#endif
+ cmpi(scratch, Operand::Zero());
+}
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DoubleRegister double_input,
@@ -2335,18 +2442,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- // Fake a parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- LoadNativeContextSlot(native_context_index, r4);
- InvokeFunctionCode(r4, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -2441,9 +2536,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
}
// will not return here
}
@@ -2656,6 +2751,18 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
+void MacroAssembler::AssertReceiver(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(ne, kOperandIsASmiAndNotAReceiver, cr0);
+ push(object);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE);
+ pop(object);
+ Check(ge, kOperandIsNotAReceiver);
+ }
+}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
@@ -3087,20 +3194,21 @@ void MacroAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
DCHECK(has_frame());
-// Just call directly. The function called cannot cause a GC, or
-// allow preemption, so the return address in the link register
-// stays correct.
+
+ // Just call directly. The function called cannot cause a GC, or
+ // allow preemption, so the return address in the link register
+ // stays correct.
Register dest = function;
-#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
- // AIX uses a function descriptor. When calling C code be aware
- // of this descriptor and pick up values from it
- LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
- LoadP(ip, MemOperand(function, 0));
- dest = ip;
-#elif ABI_CALL_VIA_IP
- Move(ip, function);
- dest = ip;
-#endif
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ // AIX/PPC64BE Linux uses a function descriptor. When calling C code be
+ // aware of this descriptor and pick up values from it
+ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
+ LoadP(ip, MemOperand(function, 0));
+ dest = ip;
+ } else if (ABI_CALL_VIA_IP) {
+ Move(ip, function);
+ dest = ip;
+ }
Call(dest);
@@ -3116,41 +3224,6 @@ void MacroAssembler::CallCFunctionHelper(Register function,
}
-void MacroAssembler::FlushICache(Register address, size_t size,
- Register scratch) {
- if (CpuFeatures::IsSupported(INSTR_AND_DATA_CACHE_COHERENCY)) {
- sync();
- icbi(r0, address);
- isync();
- return;
- }
-
- Label done;
-
- dcbf(r0, address);
- sync();
- icbi(r0, address);
- isync();
-
- // This code handles ranges which cross a single cacheline boundary.
- // scratch is last cacheline which intersects range.
- const int kCacheLineSizeLog2 = WhichPowerOf2(CpuFeatures::cache_line_size());
-
- DCHECK(size > 0 && size <= (size_t)(1 << kCacheLineSizeLog2));
- addi(scratch, address, Operand(size - 1));
- ClearRightImm(scratch, scratch, Operand(kCacheLineSizeLog2));
- cmpl(scratch, address);
- ble(&done);
-
- dcbf(r0, scratch);
- sync();
- icbi(r0, scratch);
- isync();
-
- bind(&done);
-}
-
-
void MacroAssembler::DecodeConstantPoolOffset(Register result,
Register location) {
Label overflow_access, done;
@@ -3386,7 +3459,8 @@ void MacroAssembler::LoadAccessor(Register dst, Register holder,
}
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
+ Register null_value = r8;
Register empty_fixed_array_value = r9;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Label next, start;
@@ -3400,6 +3474,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
beq(call_runtime);
+ LoadRoot(null_value, Heap::kNullValueRootIndex);
b(&start);
bind(&next);
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index 78de89aa5c..d9dbd56827 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -16,6 +16,7 @@ namespace internal {
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_r3};
const Register kReturnRegister1 = {Register::kCode_r4};
+const Register kReturnRegister2 = {Register::kCode_r5};
const Register kJSFunctionRegister = {Register::kCode_r4};
const Register kContextRegister = {Register::kCode_r30};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r3};
@@ -146,6 +147,7 @@ class MacroAssembler : public Assembler {
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
void Drop(int count);
+ void Drop(Register count, Register scratch = r0);
void Ret(int drop) {
Drop(drop);
@@ -161,6 +163,7 @@ class MacroAssembler : public Assembler {
}
// Register move. May do nothing if the registers are identical.
+ void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
@@ -200,13 +203,13 @@ class MacroAssembler : public Assembler {
// Check if object is in new space. Jumps if the object is not in new space.
// The register scratch can be object itself, but scratch will be clobbered.
void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
- InNewSpace(object, scratch, ne, branch);
+ InNewSpace(object, scratch, eq, branch);
}
// Check if object is in new space. Jumps if the object is in new space.
// The register scratch can be object itself, but it will be clobbered.
void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
- InNewSpace(object, scratch, eq, branch);
+ InNewSpace(object, scratch, ne, branch);
}
// Check if an object has a given incremental marking color.
@@ -248,6 +251,11 @@ class MacroAssembler : public Assembler {
pointers_to_here_check_for_value);
}
+ // Notify the garbage collector that we wrote a code entry into a
+ // JSFunction. Only scratch is clobbered by the operation.
+ void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+ Register scratch);
+
void RecordWriteForMap(Register object, Register map, Register dst,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
@@ -341,6 +349,10 @@ class MacroAssembler : public Assembler {
void PushFixedFrame(Register marker_reg = no_reg);
void PopFixedFrame(Register marker_reg = no_reg);
+ // Restore caller's frame pointer and return address prior to being
+ // overwritten by tail call stack preparation.
+ void RestoreFrameStateForTailCall();
+
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
@@ -364,18 +376,20 @@ class MacroAssembler : public Assembler {
}
// Converts the integer (untagged smi) in |src| to a double, storing
- // the result to |double_dst|
- void ConvertIntToDouble(Register src, DoubleRegister double_dst);
+ // the result to |dst|
+ void ConvertIntToDouble(Register src, DoubleRegister dst);
// Converts the unsigned integer (untagged smi) in |src| to
- // a double, storing the result to |double_dst|
- void ConvertUnsignedIntToDouble(Register src, DoubleRegister double_dst);
+ // a double, storing the result to |dst|
+ void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);
// Converts the integer (untagged smi) in |src| to
// a float, storing the result in |dst|
- // Warning: The value in |int_scrach| will be changed in the process!
- void ConvertIntToFloat(const DoubleRegister dst, const Register src,
- const Register int_scratch);
+ void ConvertIntToFloat(Register src, DoubleRegister dst);
+
+ // Converts the unsigned integer (untagged smi) in |src| to
+ // a float, storing the result in |dst|
+ void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
#if V8_TARGET_ARCH_PPC64
void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
@@ -858,6 +872,16 @@ class MacroAssembler : public Assembler {
void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
Register scratch2, DoubleRegister double_scratch);
+ // Check if a double is equal to -0.0.
+ // CR_EQ in cr7 holds the result.
+ void TestDoubleIsMinusZero(DoubleRegister input, Register scratch1,
+ Register scratch2);
+
+ // Check the sign of a double.
+ // CR_LT in cr7 holds the result.
+ void TestDoubleSign(DoubleRegister input, Register scratch);
+ void TestHeapNumberSign(Register input, Register scratch);
+
// Try to convert a double to a signed 32-bit integer.
// CR_EQ in cr7 is set and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
@@ -1004,10 +1028,6 @@ class MacroAssembler : public Assembler {
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
- // Invoke specified builtin JavaScript function.
- void InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
@@ -1332,6 +1352,9 @@ class MacroAssembler : public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+ void AssertReceiver(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1446,9 +1469,9 @@ class MacroAssembler : public Assembler {
// Returns the pc offset at which the frame ends.
int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
- // Expects object in r0 and returns map with validated enum cache
- // in r0. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Register null_value, Label* call_runtime);
+ // Expects object in r3 and returns map with validated enum cache
+ // in r3. Assumes that any other register can be used as a scratch.
+ void CheckEnumCache(Label* call_runtime);
// AllocationMemento support. Arrays may have an associated
// AllocationMemento object that can be checked for in order to pretransition
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 0efa6605d5..9a1f9e035f 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -15,6 +15,7 @@
#include "src/ppc/constants-ppc.h"
#include "src/ppc/frames-ppc.h"
#include "src/ppc/simulator-ppc.h"
+#include "src/runtime/runtime-utils.h"
#if defined(USE_SIMULATOR)
@@ -446,7 +447,8 @@ void PPCDebugger::Debug() {
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
intptr_t value = *cur;
Heap* current_heap = sim_->isolate_->heap();
- if (((value & 1) == 0) || current_heap->Contains(obj)) {
+ if (((value & 1) == 0) ||
+ current_heap->ContainsSlow(obj->address())) {
PrintF(" (");
if ((value & 1) == 0) {
PrintF("smi %d", PlatformSmiTagging::SmiToInt(obj));
@@ -855,10 +857,19 @@ class Redirection {
isolate->simulator_i_cache(),
reinterpret_cast<void*>(&swi_instruction_), Instruction::kInstrSize);
isolate->set_simulator_redirection(this);
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ function_descriptor_[0] = reinterpret_cast<intptr_t>(&swi_instruction_);
+ function_descriptor_[1] = 0;
+ function_descriptor_[2] = 0;
+ }
}
- void* address_of_swi_instruction() {
- return reinterpret_cast<void*>(&swi_instruction_);
+ void* address() {
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ return reinterpret_cast<void*>(function_descriptor_);
+ } else {
+ return reinterpret_cast<void*>(&swi_instruction_);
+ }
}
void* external_function() { return external_function_; }
@@ -883,9 +894,16 @@ class Redirection {
return reinterpret_cast<Redirection*>(addr_of_redirection);
}
+ static Redirection* FromAddress(void* address) {
+ int delta = ABI_USES_FUNCTION_DESCRIPTORS
+ ? offsetof(Redirection, function_descriptor_)
+ : offsetof(Redirection, swi_instruction_);
+ char* addr_of_redirection = reinterpret_cast<char*>(address) - delta;
+ return reinterpret_cast<Redirection*>(addr_of_redirection);
+ }
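
Redirection::FromAddress recovers the enclosing object from a pointer to one of its members by subtracting that member's offset, the classic container_of idiom; which member applies depends on whether the ABI hands out descriptor addresses or raw instruction addresses. A generic sketch of the idiom:

    #include <cstddef>
    #include <cstdio>

    // container_of: recover the enclosing struct from a member pointer.
    struct Redir {
      int external;
      unsigned swi_instruction;
      long descriptor[3];
    };

    Redir* FromMember(void* member, size_t member_offset) {
      return reinterpret_cast<Redir*>(static_cast<char*>(member) - member_offset);
    }

    int main() {
      Redir r = {42, 0, {0, 0, 0}};
      Redir* back = FromMember(&r.swi_instruction, offsetof(Redir, swi_instruction));
      printf("%d\n", back->external);  // 42: round-tripped via the member
    }
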
+
static void* ReverseRedirection(intptr_t reg) {
- Redirection* redirection = FromSwiInstruction(
- reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+ Redirection* redirection = FromAddress(reinterpret_cast<void*>(reg));
return redirection->external_function();
}
@@ -902,6 +920,7 @@ class Redirection {
uint32_t swi_instruction_;
ExternalReference::Type type_;
Redirection* next_;
+ intptr_t function_descriptor_[3];
};
@@ -922,7 +941,7 @@ void* Simulator::RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(isolate, external_function, type);
- return redirection->address_of_swi_instruction();
+ return redirection->address();
}
@@ -1171,20 +1190,11 @@ bool Simulator::OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
#if V8_TARGET_ARCH_PPC64
-struct ObjectPair {
- intptr_t x;
- intptr_t y;
-};
-
-
static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
- *x = pair->x;
- *y = pair->y;
+ *x = reinterpret_cast<intptr_t>(pair->x);
+ *y = reinterpret_cast<intptr_t>(pair->y);
}
#else
-typedef uint64_t ObjectPair;
-
-
static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
#if V8_TARGET_BIG_ENDIAN
*x = static_cast<int32_t>(*pair >> 32);
@@ -1196,16 +1206,17 @@ static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
}
#endif
-// Calls into the V8 runtime are based on this very simple interface.
-// Note: To be able to return two values from some calls the code in
-// runtime.cc uses the ObjectPair which is essentially two pointer
-// values stuffed into a structure. With the code below we assume that
-// all runtime calls return this pair. If they don't, the r4 result
-// register contains a bogus value, which is fine because it is
-// caller-saved.
-typedef ObjectPair (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
- intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5);
+// Calls into the V8 runtime.
+typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4, intptr_t arg5);
+typedef ObjectPair (*SimulatorRuntimePairCall)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4, intptr_t arg5);
+typedef ObjectTriple (*SimulatorRuntimeTripleCall)(intptr_t arg0, intptr_t arg1,
+ intptr_t arg2, intptr_t arg3,
+ intptr_t arg4,
+ intptr_t arg5);
// These prototypes handle the four types of FP calls.
typedef int (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
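
On 32-bit targets ObjectPair packs two tagged values into one uint64_t, so decoding depends on byte order, as the decodeObjectPair variants above show. A standalone sketch of that split; FakeObjectPair and Decode are illustrative names, not V8 API:

    #include <cstdint>
    #include <cstdio>

    typedef uint64_t FakeObjectPair;  // stand-in for the 32-bit ObjectPair

    // Mirrors the 32-bit decodeObjectPair above: x is the first return value.
    static void Decode(const FakeObjectPair* pair, uint32_t* x, uint32_t* y,
                       bool big_endian) {
      if (big_endian) {
        *x = static_cast<uint32_t>(*pair >> 32);
        *y = static_cast<uint32_t>(*pair & 0xffffffff);
      } else {
        *x = static_cast<uint32_t>(*pair & 0xffffffff);
        *y = static_cast<uint32_t>(*pair >> 32);
      }
    }

    int main() {
      FakeObjectPair p = (uint64_t{0x22222222} << 32) | 0x11111111;
      uint32_t x, y;
      Decode(&p, &x, &y, /*big_endian=*/false);
      std::printf("%x %x\n", x, y);  // prints: 11111111 22222222
    }
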
@@ -1237,13 +1248,15 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
Redirection* redirection = Redirection::FromSwiInstruction(instr);
const int kArgCount = 6;
int arg0_regnum = 3;
-#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
intptr_t result_buffer = 0;
- if (redirection->type() == ExternalReference::BUILTIN_OBJECTPAIR_CALL) {
+ bool uses_result_buffer =
+ redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE ||
+ (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR &&
+ !ABI_RETURNS_OBJECT_PAIRS_IN_REGS);
+ if (uses_result_buffer) {
result_buffer = get_register(r3);
arg0_regnum++;
}
-#endif
intptr_t arg[kArgCount];
for (int i = 0; i < kArgCount; i++) {
arg[i] = get_register(arg0_regnum + i);
@@ -1389,9 +1402,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
CHECK(stack_aligned);
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
-#if !ABI_PASSES_HANDLES_IN_REGS
- arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
-#endif
+ if (!ABI_PASSES_HANDLES_IN_REGS) {
+ arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+ }
target(arg[0], arg[1]);
} else if (redirection->type() ==
ExternalReference::PROFILING_GETTER_CALL) {
@@ -1408,9 +1421,9 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
CHECK(stack_aligned);
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
-#if !ABI_PASSES_HANDLES_IN_REGS
- arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
-#endif
+ if (!ABI_PASSES_HANDLES_IN_REGS) {
+ arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+ }
target(arg[0], arg[1], Redirection::ReverseRedirection(arg[2]));
} else {
// builtin call.
@@ -1430,19 +1443,53 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- ObjectPair result =
- target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
- intptr_t x;
- intptr_t y;
- decodeObjectPair(&result, &x, &y);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR "}\n", x, y);
- }
- set_register(r3, x);
- set_register(r4, y);
+ if (redirection->type() == ExternalReference::BUILTIN_CALL_TRIPLE) {
+ SimulatorRuntimeTripleCall target =
+ reinterpret_cast<SimulatorRuntimeTripleCall>(external);
+ ObjectTriple result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ "}\n",
+ reinterpret_cast<intptr_t>(result.x),
+ reinterpret_cast<intptr_t>(result.y),
+ reinterpret_cast<intptr_t>(result.z));
+ }
+ memcpy(reinterpret_cast<void*>(result_buffer), &result,
+ sizeof(ObjectTriple));
+ set_register(r3, result_buffer);
+ } else {
+ if (redirection->type() == ExternalReference::BUILTIN_CALL_PAIR) {
+ SimulatorRuntimePairCall target =
+ reinterpret_cast<SimulatorRuntimePairCall>(external);
+ ObjectPair result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ intptr_t x;
+ intptr_t y;
+ decodeObjectPair(&result, &x, &y);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned {%08" V8PRIxPTR ", %08" V8PRIxPTR "}\n", x, y);
+ }
+ if (ABI_RETURNS_OBJECT_PAIRS_IN_REGS) {
+ set_register(r3, x);
+ set_register(r4, y);
+ } else {
+ memcpy(reinterpret_cast<void*>(result_buffer), &result,
+ sizeof(ObjectPair));
+ set_register(r3, result_buffer);
+ }
+ } else {
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ intptr_t result =
+ target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08" V8PRIxPTR "\n", result);
+ }
+ set_register(r3, result);
+ }
+ }
}
set_pc(saved_lr);
break;
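
The result_buffer handling above is the usual struct-return ("sret") convention: an aggregate too large for registers is returned through a hidden pointer passed as an implicit first argument, which is why arg0_regnum shifts by one when a buffer is in use. A hedged C++ reduction of the idea; CallWithResultBuffer and Triple are illustrative, not simulator code:

    #include <cstdint>
    #include <cstring>

    struct Triple { intptr_t x, y, z; };  // like ObjectTriple: too big for regs

    // Reduction: the callee's aggregate result is copied into caller-provided
    // storage, and the storage address travels back in the return register.
    intptr_t CallWithResultBuffer(Triple (*target)(intptr_t), intptr_t arg,
                                  void* result_buffer) {
      Triple result = target(arg);
      std::memcpy(result_buffer, &result, sizeof(Triple));
      return reinterpret_cast<intptr_t>(result_buffer);  // lands in r3 above
    }
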
@@ -3852,17 +3899,19 @@ void Simulator::CallInternal(byte* entry) {
// Adjust JS-based stack limit to C-based stack limit.
isolate_->stack_guard()->AdjustStackLimitForSimulator();
-// Prepare to execute the code at entry
-#if ABI_USES_FUNCTION_DESCRIPTORS
- // entry is the function descriptor
- set_pc(*(reinterpret_cast<intptr_t*>(entry)));
-#else
- // entry is the instruction address
- set_pc(reinterpret_cast<intptr_t>(entry));
-#endif
+ // Prepare to execute the code at entry
+ if (ABI_USES_FUNCTION_DESCRIPTORS) {
+ // entry is the function descriptor
+ set_pc(*(reinterpret_cast<intptr_t*>(entry)));
+ } else {
+ // entry is the instruction address
+ set_pc(reinterpret_cast<intptr_t>(entry));
+ }
- // Put target address in ip (for JS prologue).
- set_register(r12, get_pc());
+ if (ABI_CALL_VIA_IP) {
+ // Put target address in ip (for JS prologue).
+ set_register(r12, get_pc());
+ }
// Put down marker for end of simulation. The simulator will stop simulation
// when the PC reaches this value. By saving the "end simulation" value into
@@ -3919,8 +3968,12 @@ void Simulator::CallInternal(byte* entry) {
Execute();
// Check that the non-volatile registers have been preserved.
- CHECK_EQ(callee_saved_value, get_register(r2));
- CHECK_EQ(callee_saved_value, get_register(r13));
+ if (ABI_TOC_REGISTER != 2) {
+ CHECK_EQ(callee_saved_value, get_register(r2));
+ }
+ if (ABI_TOC_REGISTER != 13) {
+ CHECK_EQ(callee_saved_value, get_register(r13));
+ }
CHECK_EQ(callee_saved_value, get_register(r14));
CHECK_EQ(callee_saved_value, get_register(r15));
CHECK_EQ(callee_saved_value, get_register(r16));
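
Two ABI details drive the CallInternal changes: under ELFv1 the entry pointer is a function descriptor whose first word holds the code address, and the TOC register (r2 or r13 depending on the ABI) is legitimately rewritten across calls, so it is exempted from the callee-saved sentinel check. A sketch of the entry resolution, assuming an ELFv1-style descriptor layout; ResolveEntry is an illustrative name:

    #include <cstdint>

    // Resolve the real code address from `entry`, which is either a function
    // descriptor (ELFv1) or already the instruction address.
    intptr_t ResolveEntry(void* entry, bool uses_function_descriptors) {
      return uses_function_descriptors
                 ? *reinterpret_cast<intptr_t*>(entry)  // first descriptor word
                 : reinterpret_cast<intptr_t>(entry);
    }
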
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index bbddc873b1..b6c7945797 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -49,12 +49,12 @@ void ProfilerEventsProcessor::AddDeoptStack(Isolate* isolate, Address from,
regs.sp = fp - fp_to_sp_delta;
regs.fp = fp;
regs.pc = from;
- record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame);
+ record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, false);
ticks_from_vm_buffer_.Enqueue(record);
}
-
-void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
+void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate,
+ bool update_stats) {
TickSampleEventRecord record(last_code_event_id_.Value());
RegisterState regs;
StackFrameIterator it(isolate);
@@ -64,7 +64,7 @@ void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) {
regs.fp = frame->fp();
regs.pc = frame->pc();
}
- record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame);
+ record.sample.Init(isolate, regs, TickSample::kSkipCEntryFrame, update_stats);
ticks_from_vm_buffer_.Enqueue(record);
}
@@ -429,6 +429,11 @@ void CpuProfiler::ResetProfiles() {
profiles_ = new CpuProfilesCollection(isolate()->heap());
}
+void CpuProfiler::CollectSample() {
+ if (processor_ != NULL) {
+ processor_->AddCurrentStack(isolate_);
+ }
+}
void CpuProfiler::StartProfiling(const char* title, bool record_samples) {
if (profiles_->StartProfiling(title, record_samples)) {
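
CollectSample enqueues one on-demand tick; because AddCurrentStack defaults update_stats to false, the sample lands in the recorded sample list without inflating self/line tick counts. The same upgrade exposes this through the public v8-profiler.h additions; a hedged embedder sketch (ForceSample is an illustrative name, and an initialized isolate with a running profile is assumed):

    #include "v8-profiler.h"

    void ForceSample(v8::Isolate* isolate) {
      v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
      // Records the current stack into all running profiles without bumping
      // aggregated tick counts (the internal path passes update_stats == false).
      profiler->CollectSample();
    }
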
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index e5ef0ac7c4..1a1249c8b2 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -139,7 +139,7 @@ class ProfilerEventsProcessor : public base::Thread {
void Enqueue(const CodeEventsContainer& event);
// Puts current stack into tick sample events buffer.
- void AddCurrentStack(Isolate* isolate);
+ void AddCurrentStack(Isolate* isolate, bool update_stats = false);
void AddDeoptStack(Isolate* isolate, Address from, int fp_to_sp_delta);
// Tick sample events are filled directly in the buffer of the circular
@@ -168,8 +168,7 @@ class ProfilerEventsProcessor : public base::Thread {
ProfileGenerator* generator_;
Sampler* sampler_;
base::Atomic32 running_;
- // Sampling period in microseconds.
- const base::TimeDelta period_;
+ const base::TimeDelta period_; // Samples & code events processing period.
LockedQueue<CodeEventsContainer> events_buffer_;
static const size_t kTickSampleBufferSize = 1 * MB;
static const size_t kTickSampleQueueLength =
@@ -205,6 +204,7 @@ class CpuProfiler : public CodeEventListener {
virtual ~CpuProfiler();
void set_sampling_interval(base::TimeDelta value);
+ void CollectSample();
void StartProfiling(const char* title, bool record_samples = false);
void StartProfiling(String* title, bool record_samples);
CpuProfile* StopProfiling(const char* title);
diff --git a/deps/v8/src/profiler/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 4403e5d6c9..1305cae66e 100644
--- a/deps/v8/src/profiler/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -8,6 +8,7 @@
#include "src/debug/debug.h"
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
+#include "src/profiler/sampling-heap-profiler.h"
namespace v8 {
namespace internal {
@@ -84,6 +85,31 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
}
+bool HeapProfiler::StartSamplingHeapProfiler(uint64_t sample_interval,
+ int stack_depth) {
+ if (sampling_heap_profiler_.get()) {
+ return false;
+ }
+ sampling_heap_profiler_.Reset(new SamplingHeapProfiler(
+ heap(), names_.get(), sample_interval, stack_depth));
+ return true;
+}
+
+
+void HeapProfiler::StopSamplingHeapProfiler() {
+ sampling_heap_profiler_.Reset(nullptr);
+}
+
+
+v8::AllocationProfile* HeapProfiler::GetAllocationProfile() {
+ if (sampling_heap_profiler_.get()) {
+ return sampling_heap_profiler_->GetAllocationProfile();
+ } else {
+ return nullptr;
+ }
+}
+
+
void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
ids_->UpdateHeapObjectsMap();
is_tracking_object_moves_ = true;
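
These three methods back the new public sampling heap profiler API added to include/v8-profiler.h in this same upgrade. A hedged embedder sketch; SampleHeap is an illustrative name, the interval and depth are examples, and the caller owns the returned profile:

    #include "v8-profiler.h"

    void SampleHeap(v8::Isolate* isolate) {
      v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
      // Sample roughly every 512 KiB of allocation, capturing 16 frames.
      profiler->StartSamplingHeapProfiler(512 * 1024, 16);
      // ... run the workload to be profiled ...
      v8::AllocationProfile* profile = profiler->GetAllocationProfile();
      // Walk profile->GetRootNode() here; the caller owns `profile`.
      delete profile;
      profiler->StopSamplingHeapProfiler();
    }
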
diff --git a/deps/v8/src/profiler/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index 9a04e83af4..74539ae142 100644
--- a/deps/v8/src/profiler/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -16,6 +16,7 @@ namespace internal {
class AllocationTracker;
class HeapObjectsMap;
class HeapSnapshot;
+class SamplingHeapProfiler;
class StringsStorage;
class HeapProfiler {
@@ -29,6 +30,10 @@ class HeapProfiler {
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
+ bool StartSamplingHeapProfiler(uint64_t sample_interval, int stack_depth);
+ void StopSamplingHeapProfiler();
+ AllocationProfile* GetAllocationProfile();
+
void StartHeapObjectsTracking(bool track_allocations);
void StopHeapObjectsTracking();
AllocationTracker* allocation_tracker() const {
@@ -79,6 +84,7 @@ class HeapProfiler {
base::SmartPointer<AllocationTracker> allocation_tracker_;
bool is_tracking_object_moves_;
base::Mutex profiler_mutex_;
+ base::SmartPointer<SamplingHeapProfiler> sampling_heap_profiler_;
};
} // namespace internal
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 69ed5e6f29..fc43f9f471 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -11,7 +11,6 @@
#include "src/profiler/allocation-tracker.h"
#include "src/profiler/heap-profiler.h"
#include "src/profiler/heap-snapshot-generator-inl.h"
-#include "src/types.h"
namespace v8 {
namespace internal {
@@ -1109,10 +1108,6 @@ void V8HeapExplorer::ExtractJSObjectReferences(
TagObject(js_fun->bound_arguments(), "(bound arguments)");
SetInternalReference(js_fun, entry, "bindings", js_fun->bound_arguments(),
JSBoundFunction::kBoundArgumentsOffset);
- TagObject(js_fun->creation_context(), "(creation context)");
- SetInternalReference(js_fun, entry, "creation_context",
- js_fun->creation_context(),
- JSBoundFunction::kCreationContextOffset);
SetNativeBindReference(js_obj, entry, "bound_this", js_fun->bound_this());
SetNativeBindReference(js_obj, entry, "bound_function",
js_fun->bound_target_function());
@@ -1425,18 +1420,17 @@ void V8HeapExplorer::ExtractAccessorInfoReferences(
SetInternalReference(accessor_info, entry, "expected_receiver_type",
accessor_info->expected_receiver_type(),
AccessorInfo::kExpectedReceiverTypeOffset);
- if (accessor_info->IsExecutableAccessorInfo()) {
- ExecutableAccessorInfo* executable_accessor_info =
- ExecutableAccessorInfo::cast(accessor_info);
+ if (accessor_info->IsAccessorInfo()) {
+ AccessorInfo* executable_accessor_info = AccessorInfo::cast(accessor_info);
SetInternalReference(executable_accessor_info, entry, "getter",
executable_accessor_info->getter(),
- ExecutableAccessorInfo::kGetterOffset);
+ AccessorInfo::kGetterOffset);
SetInternalReference(executable_accessor_info, entry, "setter",
executable_accessor_info->setter(),
- ExecutableAccessorInfo::kSetterOffset);
+ AccessorInfo::kSetterOffset);
SetInternalReference(executable_accessor_info, entry, "data",
executable_accessor_info->data(),
- ExecutableAccessorInfo::kDataOffset);
+ AccessorInfo::kDataOffset);
}
}
@@ -1538,7 +1532,7 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry,
// Do not visit weak_next as it is not visited by the StaticVisitor,
// and we're not very interested in weak_next field here.
STATIC_ASSERT(AllocationSite::kWeakNextOffset >=
- AllocationSite::BodyDescriptor::kEndOffset);
+ AllocationSite::kPointerFieldsEndOffset);
}
@@ -1604,7 +1598,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
int field_offset =
field_index.is_inobject() ? field_index.offset() : -1;
- if (k != heap_->hidden_string()) {
+ if (k != heap_->hidden_properties_symbol()) {
SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, k,
value, NULL, field_offset);
} else {
@@ -1631,7 +1625,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
DCHECK(dictionary->ValueAt(i)->IsPropertyCell());
PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(i));
Object* value = cell->value();
- if (k == heap_->hidden_string()) {
+ if (k == heap_->hidden_properties_symbol()) {
TagObject(value, "(hidden properties)");
SetInternalReference(js_obj, entry, "hidden_properties", value);
continue;
@@ -1648,7 +1642,7 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
Object* k = dictionary->KeyAt(i);
if (dictionary->IsKey(k)) {
Object* value = dictionary->ValueAt(i);
- if (k == heap_->hidden_string()) {
+ if (k == heap_->hidden_properties_symbol()) {
TagObject(value, "(hidden properties)");
SetInternalReference(js_obj, entry, "hidden_properties", value);
continue;
@@ -1873,7 +1867,6 @@ bool V8HeapExplorer::IterateAndExtractSinglePass() {
bool V8HeapExplorer::IsEssentialObject(Object* object) {
return object->IsHeapObject() && !object->IsOddball() &&
object != heap_->empty_byte_array() &&
- object != heap_->empty_bytecode_array() &&
object != heap_->empty_fixed_array() &&
object != heap_->empty_descriptor_array() &&
object != heap_->fixed_array_map() && object != heap_->cell_map() &&
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 890f341e89..58d06c9db2 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -274,9 +274,8 @@ unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
return static_cast<unsigned>(reinterpret_cast<uintptr_t>(entry->value));
}
-
ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path,
- int src_line) {
+ int src_line, bool update_stats) {
ProfileNode* node = root_;
CodeEntry* last_entry = NULL;
for (CodeEntry** entry = path.start() + path.length() - 1;
@@ -290,9 +289,11 @@ ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path,
if (last_entry && last_entry->has_deopt_info()) {
node->CollectDeoptInfo(last_entry);
}
- node->IncrementSelfTicks();
- if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
- node->IncrementLineTicks(src_line);
+ if (update_stats) {
+ node->IncrementSelfTicks();
+ if (src_line != v8::CpuProfileNode::kNoLineNumberInfo) {
+ node->IncrementLineTicks(src_line);
+ }
}
return node;
}
@@ -354,11 +355,12 @@ CpuProfile::CpuProfile(Isolate* isolate, const char* title, bool record_samples)
start_time_(base::TimeTicks::HighResolutionNow()),
top_down_(isolate) {}
-
void CpuProfile::AddPath(base::TimeTicks timestamp,
- const Vector<CodeEntry*>& path, int src_line) {
- ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path, src_line);
- if (record_samples_) {
+ const Vector<CodeEntry*>& path, int src_line,
+ bool update_stats) {
+ ProfileNode* top_frame_node =
+ top_down_.AddPathFromEnd(path, src_line, update_stats);
+ if (record_samples_ && !timestamp.IsNull()) {
timestamps_.Add(timestamp);
samples_.Add(top_frame_node);
}
@@ -522,15 +524,15 @@ void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
UNREACHABLE();
}
-
void CpuProfilesCollection::AddPathToCurrentProfiles(
- base::TimeTicks timestamp, const Vector<CodeEntry*>& path, int src_line) {
+ base::TimeTicks timestamp, const Vector<CodeEntry*>& path, int src_line,
+ bool update_stats) {
// As starting / stopping profiles is rare relatively to this
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
current_profiles_semaphore_.Wait();
for (int i = 0; i < current_profiles_.length(); ++i) {
- current_profiles_[i]->AddPath(timestamp, path, src_line);
+ current_profiles_[i]->AddPath(timestamp, path, src_line, update_stats);
}
current_profiles_semaphore_.Signal();
}
@@ -595,7 +597,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// Don't use PC when in external callback code, as it can point
// inside callback's code, and we will erroneously report
// that a callback calls itself.
- *entry++ = code_map_.FindEntry(sample.external_callback);
+ *entry++ = code_map_.FindEntry(sample.external_callback_entry);
} else {
CodeEntry* pc_entry = code_map_.FindEntry(sample.pc);
// If there is no pc_entry we're likely in native code.
@@ -634,10 +636,9 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
}
}
- for (const Address* stack_pos = sample.stack,
- *stack_end = stack_pos + sample.frames_count;
- stack_pos != stack_end;
- ++stack_pos) {
+ for (const Address *stack_pos = sample.stack,
+ *stack_end = stack_pos + sample.frames_count;
+ stack_pos != stack_end; ++stack_pos) {
*entry = code_map_.FindEntry(*stack_pos);
// Skip unresolved frames (e.g. internal frame) and get source line of
@@ -670,7 +671,8 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
}
}
- profiles_->AddPathToCurrentProfiles(sample.timestamp, entries, src_line);
+ profiles_->AddPathToCurrentProfiles(sample.timestamp, entries, src_line,
+ sample.update_stats);
}
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 47a73f191a..3c976d6292 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -192,7 +192,8 @@ class ProfileTree {
ProfileNode* AddPathFromEnd(
const Vector<CodeEntry*>& path,
- int src_line = v8::CpuProfileNode::kNoLineNumberInfo);
+ int src_line = v8::CpuProfileNode::kNoLineNumberInfo,
+ bool update_stats = true);
ProfileNode* root() const { return root_; }
unsigned next_node_id() { return next_node_id_++; }
unsigned GetFunctionId(const ProfileNode* node);
@@ -225,7 +226,7 @@ class CpuProfile {
// Add pc -> ... -> main() call path to the profile.
void AddPath(base::TimeTicks timestamp, const Vector<CodeEntry*>& path,
- int src_line);
+ int src_line, bool update_stats);
void CalculateTotalTicksAndSamplingRate();
const char* title() const { return title_; }
@@ -333,7 +334,8 @@ class CpuProfilesCollection {
// Called from profile generator thread.
void AddPathToCurrentProfiles(base::TimeTicks timestamp,
- const Vector<CodeEntry*>& path, int src_line);
+ const Vector<CodeEntry*>& path, int src_line,
+ bool update_stats);
// Limits the number of profiles that can be simultaneously collected.
static const int kMaxSimultaneousProfiles = 100;
diff --git a/deps/v8/src/profiler/sampler.cc b/deps/v8/src/profiler/sampler.cc
index dc4c4c4f06..e331db9dcf 100644
--- a/deps/v8/src/profiler/sampler.cc
+++ b/deps/v8/src/profiler/sampler.cc
@@ -657,10 +657,12 @@ SamplerThread* SamplerThread::instance_ = NULL;
//
DISABLE_ASAN void TickSample::Init(Isolate* isolate,
const v8::RegisterState& regs,
- RecordCEntryFrame record_c_entry_frame) {
+ RecordCEntryFrame record_c_entry_frame,
+ bool update_stats) {
timestamp = base::TimeTicks::HighResolutionNow();
pc = reinterpret_cast<Address>(regs.pc);
state = isolate->current_vm_state();
+ this->update_stats = update_stats;
// Avoid collecting traces while doing GC.
if (state == GC) return;
@@ -669,6 +671,8 @@ DISABLE_ASAN void TickSample::Init(Isolate* isolate,
if (js_entry_sp == 0) return; // Not executing JS now.
if (pc && IsNoFrameRegion(pc)) {
+ // Can't collect stack. Mark the sample as spoiled.
+ timestamp = base::TimeTicks();
pc = 0;
return;
}
@@ -679,7 +683,7 @@ DISABLE_ASAN void TickSample::Init(Isolate* isolate,
// we have already entered JavaScript again and the external callback
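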
// is not the top function.
if (scope && scope->scope_address() < handler) {
- external_callback = scope->callback();
+ external_callback_entry = *scope->callback_entrypoint_address();
has_external_callback = true;
} else {
// sp register may point at an arbitrary place in memory, make
@@ -699,6 +703,12 @@ DISABLE_ASAN void TickSample::Init(Isolate* isolate,
GetStackSample(isolate, regs, record_c_entry_frame,
reinterpret_cast<void**>(&stack[0]), kMaxFramesCount, &info);
frames_count = static_cast<unsigned>(info.frames_count);
+ if (!frames_count) {
+ // It is executing JS but failed to collect a stack trace.
+ // Mark the sample as spoiled.
+ timestamp = base::TimeTicks();
+ pc = 0;
+ }
}
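
A "spoiled" sample is signaled entirely through its timestamp: resetting it to a default-constructed base::TimeTicks makes IsNull() report true, which the counting code further below uses to drop the tick. A minimal illustration of the sentinel pattern; FakeTimeTicks is a stand-in type:

    #include <cstdio>

    struct FakeTimeTicks {                       // stand-in for base::TimeTicks
      long long us = 0;                          // default-constructed == null
      bool IsNull() const { return us == 0; }
    };

    int main() {
      FakeTimeTicks spoiled;                     // stack collection failed
      std::printf("%s\n", spoiled.IsNull() ? "skip sample" : "record sample");
    }
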
@@ -743,7 +753,6 @@ void Sampler::TearDown() {
#endif
}
-
Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
@@ -751,17 +760,16 @@ Sampler::Sampler(Isolate* isolate, int interval)
has_processing_thread_(false),
active_(false),
is_counting_samples_(false),
- js_and_external_sample_count_(0) {
+ js_sample_count_(0),
+ external_sample_count_(0) {
data_ = new PlatformData;
}
-
Sampler::~Sampler() {
DCHECK(!IsActive());
delete data_;
}
-
void Sampler::Start() {
DCHECK(!IsActive());
SetActive(true);
@@ -796,11 +804,10 @@ void Sampler::SampleStack(const v8::RegisterState& state) {
TickSample* sample = isolate_->cpu_profiler()->StartTickSample();
TickSample sample_obj;
if (sample == NULL) sample = &sample_obj;
- sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame);
- if (is_counting_samples_) {
- if (sample->state == JS || sample->state == EXTERNAL) {
- ++js_and_external_sample_count_;
- }
+ sample->Init(isolate_, state, TickSample::kIncludeCEntryFrame, true);
+ if (is_counting_samples_ && !sample->timestamp.IsNull()) {
+ if (sample->state == JS) ++js_sample_count_;
+ if (sample->state == EXTERNAL) ++external_sample_count_;
}
Tick(sample);
if (sample != &sample_obj) {
diff --git a/deps/v8/src/profiler/sampler.h b/deps/v8/src/profiler/sampler.h
index 354e935e31..8e8ef1cfc3 100644
--- a/deps/v8/src/profiler/sampler.h
+++ b/deps/v8/src/profiler/sampler.h
@@ -34,12 +34,13 @@ struct TickSample {
TickSample()
: state(OTHER),
pc(NULL),
- external_callback(NULL),
+ external_callback_entry(NULL),
frames_count(0),
has_external_callback(false),
+ update_stats(true),
top_frame_type(StackFrame::NONE) {}
void Init(Isolate* isolate, const v8::RegisterState& state,
- RecordCEntryFrame record_c_entry_frame);
+ RecordCEntryFrame record_c_entry_frame, bool update_stats);
static void GetStackSample(Isolate* isolate, const v8::RegisterState& state,
RecordCEntryFrame record_c_entry_frame,
void** frames, size_t frames_limit,
@@ -48,7 +49,7 @@ struct TickSample {
Address pc; // Instruction pointer.
union {
Address tos; // Top stack value (*sp).
- Address external_callback;
+ Address external_callback_entry;
};
static const unsigned kMaxFramesCountLog2 = 8;
static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
@@ -56,6 +57,7 @@ struct TickSample {
base::TimeTicks timestamp;
unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames.
bool has_external_callback : 1;
+ bool update_stats : 1; // Whether the sample should update aggregated stats.
StackFrame::Type top_frame_type : 4;
};
@@ -98,12 +100,12 @@ class Sampler {
}
// Used in tests to make sure that stack sampling is performed.
- unsigned js_and_external_sample_count() const {
- return js_and_external_sample_count_;
- }
+ unsigned js_sample_count() const { return js_sample_count_; }
+ unsigned external_sample_count() const { return external_sample_count_; }
void StartCountingSamples() {
- is_counting_samples_ = true;
- js_and_external_sample_count_ = 0;
+ js_sample_count_ = 0;
+ external_sample_count_ = 0;
+ is_counting_samples_ = true;
}
class PlatformData;
@@ -123,9 +125,10 @@ class Sampler {
base::Atomic32 has_processing_thread_;
base::Atomic32 active_;
PlatformData* data_; // Platform specific data.
+ // Counts stack samples taken in various VM states.
bool is_counting_samples_;
- // Counts stack samples taken in JS VM state.
- unsigned js_and_external_sample_count_;
+ unsigned js_sample_count_;
+ unsigned external_sample_count_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
};
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.cc b/deps/v8/src/profiler/sampling-heap-profiler.cc
new file mode 100644
index 0000000000..c13538c356
--- /dev/null
+++ b/deps/v8/src/profiler/sampling-heap-profiler.cc
@@ -0,0 +1,260 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/profiler/sampling-heap-profiler.h"
+
+#include <stdint.h>
+#include <memory>
+#include "src/api.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/frames-inl.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/profiler/strings-storage.h"
+
+namespace v8 {
+namespace internal {
+
+// We sample with a Poisson process, with constant average sampling interval.
+// This follows the exponential probability distribution with parameter
+// Ī» = 1/rate where rate is the average number of bytes between samples.
+//
+// Let u be a uniformly distributed random number between 0 and 1, then
+// next_sample = (- ln u) / Ī»
+intptr_t SamplingAllocationObserver::GetNextSampleInterval(uint64_t rate) {
+ if (FLAG_sampling_heap_profiler_suppress_randomness) {
+ return static_cast<intptr_t>(rate);
+ }
+ double u = random_->NextDouble();
+ double next = (-std::log(u)) * rate;
+ return next < kPointerSize
+ ? kPointerSize
+ : (next > INT_MAX ? INT_MAX : static_cast<intptr_t>(next));
+}
+
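
GetNextSampleInterval is inverse-transform sampling: for u uniform on (0, 1), -ln(u) Ā· rate is exponentially distributed with mean rate, which makes the sample points a Poisson process over the allocation byte stream. A standalone sketch with an illustrative rate; the real code draws u from the isolate's RNG and clamps the result as shown above:

    #include <cmath>
    #include <cstdio>
    #include <random>

    int main() {
      const double rate = 512.0 * 1024;   // mean bytes between samples (example)
      std::mt19937 rng(42);
      std::uniform_real_distribution<double> uniform(0.0, 1.0);
      double u = uniform(rng);
      if (u == 0) u = 1e-12;              // guard log(0) in this sketch
      double next = -std::log(u) * rate;  // exponential with mean `rate`
      // For u = 0.5 this gives next = 0.693 * 512 KiB, roughly 355 KiB.
      std::printf("next sample after %.0f bytes\n", next);
    }
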
+// Samples were collected according to a Poisson process. Since we have not
+// recorded all allocations, we must approximate the shape of the underlying
+// space of allocations based on the samples we have collected. Given that
+// we sample at rate R, the probability that an allocation of size S will be
+// sampled is 1-exp(-S/R). This function uses the above probability to
+// approximate the true number of allocations with size *size* given that
+// *count* samples were observed.
+v8::AllocationProfile::Allocation SamplingHeapProfiler::ScaleSample(
+ size_t size, unsigned int count) {
+ double scale = 1.0 / (1.0 - std::exp(-static_cast<double>(size) / rate_));
+ // Round count instead of truncating.
+ return {size, static_cast<unsigned int>(count * scale + 0.5)};
+}
+
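
Worked numbers for ScaleSample: at rate R = 512 KiB, a 1 KiB allocation is sampled with probability 1 - exp(-1024/524288), about 0.00195, so each observed sample stands for roughly 512 allocations of that size. A quick standalone check of the arithmetic:

    #include <cmath>
    #include <cstdio>

    int main() {
      const double rate = 512.0 * 1024;          // sampling rate in bytes
      const double size = 1024.0;                // allocation size
      double p = 1.0 - std::exp(-size / rate);   // about 0.00195
      double scale = 1.0 / p;                    // about 512.5
      unsigned count = 3;                        // samples actually observed
      std::printf("about %u allocations\n",
                  static_cast<unsigned>(count * scale + 0.5));  // 1538
    }
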
+SamplingHeapProfiler::SamplingHeapProfiler(Heap* heap, StringsStorage* names,
+ uint64_t rate, int stack_depth)
+ : isolate_(heap->isolate()),
+ heap_(heap),
+ new_space_observer_(new SamplingAllocationObserver(
+ heap_, static_cast<intptr_t>(rate), rate, this,
+ heap->isolate()->random_number_generator())),
+ other_spaces_observer_(new SamplingAllocationObserver(
+ heap_, static_cast<intptr_t>(rate), rate, this,
+ heap->isolate()->random_number_generator())),
+ names_(names),
+ profile_root_("(root)", v8::UnboundScript::kNoScriptId, 0),
+ samples_(),
+ stack_depth_(stack_depth),
+ rate_(rate) {
+ CHECK_GT(rate_, 0);
+ heap->new_space()->AddAllocationObserver(new_space_observer_.get());
+ AllSpaces spaces(heap);
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ if (space != heap->new_space()) {
+ space->AddAllocationObserver(other_spaces_observer_.get());
+ }
+ }
+}
+
+
+SamplingHeapProfiler::~SamplingHeapProfiler() {
+ heap_->new_space()->RemoveAllocationObserver(new_space_observer_.get());
+ AllSpaces spaces(heap_);
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+ if (space != heap_->new_space()) {
+ space->RemoveAllocationObserver(other_spaces_observer_.get());
+ }
+ }
+
+ for (auto sample : samples_) {
+ delete sample;
+ }
+ std::set<Sample*> empty;
+ samples_.swap(empty);
+}
+
+
+void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
+ DisallowHeapAllocation no_allocation;
+
+ HandleScope scope(isolate_);
+ HeapObject* heap_object = HeapObject::FromAddress(soon_object);
+ Handle<Object> obj(heap_object, isolate_);
+
+ // Mark the new block as FreeSpace to make sure the heap is iterable while we
+ // are taking the sample.
+ heap()->CreateFillerObjectAt(soon_object, static_cast<int>(size));
+
+ Local<v8::Value> loc = v8::Utils::ToLocal(obj);
+
+ AllocationNode* node = AddStack();
+ node->allocations_[size]++;
+ Sample* sample = new Sample(size, node, loc, this);
+ samples_.insert(sample);
+ sample->global.SetWeak(sample, OnWeakCallback, WeakCallbackType::kParameter);
+}
+
+void SamplingHeapProfiler::OnWeakCallback(
+ const WeakCallbackInfo<Sample>& data) {
+ Sample* sample = data.GetParameter();
+ AllocationNode* node = sample->owner;
+ DCHECK(node->allocations_[sample->size] > 0);
+ node->allocations_[sample->size]--;
+ sample->profiler->samples_.erase(sample);
+ delete sample;
+}
+
+SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::FindOrAddChildNode(
+ AllocationNode* parent, const char* name, int script_id,
+ int start_position) {
+ for (AllocationNode* child : parent->children_) {
+ if (child->script_id_ == script_id &&
+ child->script_position_ == start_position &&
+ strcmp(child->name_, name) == 0) {
+ return child;
+ }
+ }
+ AllocationNode* child = new AllocationNode(name, script_id, start_position);
+ parent->children_.push_back(child);
+ return child;
+}
+
+SamplingHeapProfiler::AllocationNode* SamplingHeapProfiler::AddStack() {
+ AllocationNode* node = &profile_root_;
+
+ std::vector<SharedFunctionInfo*> stack;
+ StackTraceFrameIterator it(isolate_);
+ int frames_captured = 0;
+ while (!it.done() && frames_captured < stack_depth_) {
+ JavaScriptFrame* frame = it.frame();
+ SharedFunctionInfo* shared = frame->function()->shared();
+ stack.push_back(shared);
+
+ frames_captured++;
+ it.Advance();
+ }
+
+ if (frames_captured == 0) {
+ const char* name = nullptr;
+ switch (isolate_->current_vm_state()) {
+ case GC:
+ name = "(GC)";
+ break;
+ case COMPILER:
+ name = "(COMPILER)";
+ break;
+ case OTHER:
+ name = "(V8 API)";
+ break;
+ case EXTERNAL:
+ name = "(EXTERNAL)";
+ break;
+ case IDLE:
+ name = "(IDLE)";
+ break;
+ case JS:
+ name = "(JS)";
+ break;
+ }
+ return FindOrAddChildNode(node, name, v8::UnboundScript::kNoScriptId, 0);
+ }
+
+ // We need to process the stack in reverse order as the top of the stack is
+ // the first element in the list.
+ for (auto it = stack.rbegin(); it != stack.rend(); ++it) {
+ SharedFunctionInfo* shared = *it;
+ const char* name = this->names()->GetFunctionName(shared->DebugName());
+ int script_id = v8::UnboundScript::kNoScriptId;
+ if (shared->script()->IsScript()) {
+ Script* script = Script::cast(shared->script());
+ script_id = script->id();
+ }
+ node = FindOrAddChildNode(node, name, script_id, shared->start_position());
+ }
+ return node;
+}
+
+v8::AllocationProfile::Node* SamplingHeapProfiler::TranslateAllocationNode(
+ AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
+ const std::map<int, Script*>& scripts) {
+ Local<v8::String> script_name =
+ ToApiHandle<v8::String>(isolate_->factory()->InternalizeUtf8String(""));
+ int line = v8::AllocationProfile::kNoLineNumberInfo;
+ int column = v8::AllocationProfile::kNoColumnNumberInfo;
+ std::vector<v8::AllocationProfile::Allocation> allocations;
+ allocations.reserve(node->allocations_.size());
+ if (node->script_id_ != v8::UnboundScript::kNoScriptId) {
+ // Cannot use std::map<T>::at because it is not available on Android.
+ auto non_const_scripts = const_cast<std::map<int, Script*>&>(scripts);
+ Script* script = non_const_scripts[node->script_id_];
+ if (script->name()->IsName()) {
+ Name* name = Name::cast(script->name());
+ script_name = ToApiHandle<v8::String>(
+ isolate_->factory()->InternalizeUtf8String(names_->GetName(name)));
+ }
+ Handle<Script> script_handle(script);
+
+ line = 1 + Script::GetLineNumber(script_handle, node->script_position_);
+ column = 1 + Script::GetColumnNumber(script_handle, node->script_position_);
+ for (auto alloc : node->allocations_) {
+ allocations.push_back(ScaleSample(alloc.first, alloc.second));
+ }
+ }
+
+ profile->nodes().push_back(v8::AllocationProfile::Node(
+ {ToApiHandle<v8::String>(
+ isolate_->factory()->InternalizeUtf8String(node->name_)),
+ script_name, node->script_id_, node->script_position_, line, column,
+ std::vector<v8::AllocationProfile::Node*>(), allocations}));
+ v8::AllocationProfile::Node* current = &profile->nodes().back();
+ size_t child_len = node->children_.size();
+ // The children vector may have nodes appended to it during translation
+ // because the translation may allocate strings on the JS heap that have
+ // the potential to be sampled. We cache the length of the vector before
+ // iteration so that nodes appended to the vector during iteration are
+ // not processed.
+ for (size_t i = 0; i < child_len; i++) {
+ current->children.push_back(
+ TranslateAllocationNode(profile, node->children_[i], scripts));
+ }
+ return current;
+}
+
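
The child_len caching above deserves emphasis: translating a node can allocate JS strings, which may themselves be sampled and append children mid-iteration, and snapshotting the length keeps those new nodes out of this pass. Relatedly, the profile stores nodes in a std::deque because push_back there never invalidates references to existing elements, so the `current` pointer stays valid as the profile grows. A reduced illustration of the pattern:

    #include <cstddef>
    #include <cstdio>
    #include <deque>

    int main() {
      std::deque<int> nodes = {1, 2, 3};
      size_t len = nodes.size();           // snapshot before iterating
      for (size_t i = 0; i < len; i++) {
        nodes.push_back(nodes[i] * 10);    // growth during iteration is safe
      }                                    // and new elements are not visited
      for (int n : nodes) std::printf("%d ", n);  // 1 2 3 10 20 30
    }
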
+v8::AllocationProfile* SamplingHeapProfiler::GetAllocationProfile() {
+ // To resolve positions to line/column numbers, we will need to look up
+ // scripts. Build a map to allow fast mapping from script id to script.
+ std::map<int, Script*> scripts;
+ {
+ Script::Iterator iterator(isolate_);
+ Script* script;
+ while ((script = iterator.Next())) {
+ scripts[script->id()] = script;
+ }
+ }
+
+ auto profile = new v8::internal::AllocationProfile();
+
+ TranslateAllocationNode(profile, &profile_root_, scripts);
+
+ return profile;
+}
+
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/profiler/sampling-heap-profiler.h b/deps/v8/src/profiler/sampling-heap-profiler.h
new file mode 100644
index 0000000000..0b538b070c
--- /dev/null
+++ b/deps/v8/src/profiler/sampling-heap-profiler.h
@@ -0,0 +1,166 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROFILER_SAMPLING_HEAP_PROFILER_H_
+#define V8_PROFILER_SAMPLING_HEAP_PROFILER_H_
+
+#include <deque>
+#include <map>
+#include <set>
+#include "include/v8-profiler.h"
+#include "src/heap/heap.h"
+#include "src/profiler/strings-storage.h"
+
+namespace v8 {
+
+namespace base {
+class RandomNumberGenerator;
+}
+
+namespace internal {
+
+class SamplingAllocationObserver;
+
+class AllocationProfile : public v8::AllocationProfile {
+ public:
+ AllocationProfile() : nodes_() {}
+
+ v8::AllocationProfile::Node* GetRootNode() override {
+ return nodes_.size() == 0 ? nullptr : &nodes_.front();
+ }
+
+ std::deque<v8::AllocationProfile::Node>& nodes() { return nodes_; }
+
+ private:
+ std::deque<v8::AllocationProfile::Node> nodes_;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationProfile);
+};
+
+class SamplingHeapProfiler {
+ public:
+ SamplingHeapProfiler(Heap* heap, StringsStorage* names, uint64_t rate,
+ int stack_depth);
+ ~SamplingHeapProfiler();
+
+ v8::AllocationProfile* GetAllocationProfile();
+
+ StringsStorage* names() const { return names_; }
+
+ class AllocationNode;
+
+ struct Sample {
+ public:
+ Sample(size_t size_, AllocationNode* owner_, Local<Value> local_,
+ SamplingHeapProfiler* profiler_)
+ : size(size_),
+ owner(owner_),
+ global(Global<Value>(
+ reinterpret_cast<v8::Isolate*>(profiler_->isolate_), local_)),
+ profiler(profiler_) {}
+ ~Sample() { global.Reset(); }
+ const size_t size;
+ AllocationNode* const owner;
+ Global<Value> global;
+ SamplingHeapProfiler* const profiler;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Sample);
+ };
+
+ class AllocationNode {
+ public:
+ AllocationNode(const char* const name, int script_id,
+ const int start_position)
+ : script_id_(script_id),
+ script_position_(start_position),
+ name_(name) {}
+ ~AllocationNode() {
+ for (auto child : children_) {
+ delete child;
+ }
+ }
+
+ private:
+ std::map<size_t, unsigned int> allocations_;
+ std::vector<AllocationNode*> children_;
+ const int script_id_;
+ const int script_position_;
+ const char* const name_;
+
+ friend class SamplingHeapProfiler;
+
+ DISALLOW_COPY_AND_ASSIGN(AllocationNode);
+ };
+
+ private:
+ Heap* heap() const { return heap_; }
+
+ void SampleObject(Address soon_object, size_t size);
+
+ static void OnWeakCallback(const WeakCallbackInfo<Sample>& data);
+
+ // Methods that construct v8::AllocationProfile.
+
+ // Translates the provided AllocationNode *node* returning an equivalent
+ // AllocationProfile::Node. The newly created AllocationProfile::Node is added
+ // to the provided AllocationProfile *profile*. Line numbers, column numbers,
+// and script names are resolved using *scripts*, which maps script ids to
+// all currently loaded scripts.
+ v8::AllocationProfile::Node* TranslateAllocationNode(
+ AllocationProfile* profile, SamplingHeapProfiler::AllocationNode* node,
+ const std::map<int, Script*>& scripts);
+ v8::AllocationProfile::Allocation ScaleSample(size_t size,
+ unsigned int count);
+ AllocationNode* AddStack();
+ AllocationNode* FindOrAddChildNode(AllocationNode* parent, const char* name,
+ int script_id, int start_position);
+
+ Isolate* const isolate_;
+ Heap* const heap_;
+ base::SmartPointer<SamplingAllocationObserver> new_space_observer_;
+ base::SmartPointer<SamplingAllocationObserver> other_spaces_observer_;
+ StringsStorage* const names_;
+ AllocationNode profile_root_;
+ std::set<Sample*> samples_;
+ const int stack_depth_;
+ const uint64_t rate_;
+
+ friend class SamplingAllocationObserver;
+};
+
+class SamplingAllocationObserver : public AllocationObserver {
+ public:
+ SamplingAllocationObserver(Heap* heap, intptr_t step_size, uint64_t rate,
+ SamplingHeapProfiler* profiler,
+ base::RandomNumberGenerator* random)
+ : AllocationObserver(step_size),
+ profiler_(profiler),
+ heap_(heap),
+ random_(random),
+ rate_(rate) {}
+ virtual ~SamplingAllocationObserver() {}
+
+ protected:
+ void Step(int bytes_allocated, Address soon_object, size_t size) override {
+ USE(heap_);
+ DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
+ DCHECK(soon_object);
+ profiler_->SampleObject(soon_object, size);
+ }
+
+ intptr_t GetNextStepSize() override { return GetNextSampleInterval(rate_); }
+
+ private:
+ intptr_t GetNextSampleInterval(uint64_t rate);
+ SamplingHeapProfiler* const profiler_;
+ Heap* const heap_;
+ base::RandomNumberGenerator* const random_;
+ uint64_t const rate_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PROFILER_SAMPLING_HEAP_PROFILER_H_
diff --git a/deps/v8/src/property-descriptor.cc b/deps/v8/src/property-descriptor.cc
index 243a9faac3..750f948adb 100644
--- a/deps/v8/src/property-descriptor.cc
+++ b/deps/v8/src/property-descriptor.cc
@@ -13,6 +13,8 @@
namespace v8 {
namespace internal {
+namespace {
+
// Helper function for ToPropertyDescriptor. Comments describe steps for
// "enumerable", other properties are handled the same way.
// Returns false if an exception was thrown.
@@ -101,19 +103,51 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<Object> obj,
}
-static void CreateDataProperty(Isolate* isolate, Handle<JSObject> object,
- Handle<String> name, Handle<Object> value) {
+void CreateDataProperty(Isolate* isolate, Handle<JSObject> object,
+ Handle<String> name, Handle<Object> value) {
LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<bool> result = JSObject::CreateDataProperty(&it, value);
CHECK(result.IsJust() && result.FromJust());
}
+} // namespace
+
// ES6 6.2.4.4 "FromPropertyDescriptor"
Handle<Object> PropertyDescriptor::ToObject(Isolate* isolate) {
DCHECK(!(PropertyDescriptor::IsAccessorDescriptor(this) &&
PropertyDescriptor::IsDataDescriptor(this)));
Factory* factory = isolate->factory();
+ if (IsRegularAccessorProperty()) {
+ // Fast case for regular accessor properties.
+ Handle<JSObject> result = factory->NewJSObjectFromMap(
+ isolate->accessor_property_descriptor_map());
+ result->InObjectPropertyAtPut(JSAccessorPropertyDescriptor::kGetIndex,
+ *get());
+ result->InObjectPropertyAtPut(JSAccessorPropertyDescriptor::kSetIndex,
+ *set());
+ result->InObjectPropertyAtPut(
+ JSAccessorPropertyDescriptor::kEnumerableIndex,
+ isolate->heap()->ToBoolean(enumerable()));
+ result->InObjectPropertyAtPut(
+ JSAccessorPropertyDescriptor::kConfigurableIndex,
+ isolate->heap()->ToBoolean(configurable()));
+ return result;
+ }
+ if (IsRegularDataProperty()) {
+ // Fast case for regular data properties.
+ Handle<JSObject> result =
+ factory->NewJSObjectFromMap(isolate->data_property_descriptor_map());
+ result->InObjectPropertyAtPut(JSDataPropertyDescriptor::kValueIndex,
+ *value());
+ result->InObjectPropertyAtPut(JSDataPropertyDescriptor::kWritableIndex,
+ isolate->heap()->ToBoolean(writable()));
+ result->InObjectPropertyAtPut(JSDataPropertyDescriptor::kEnumerableIndex,
+ isolate->heap()->ToBoolean(enumerable()));
+ result->InObjectPropertyAtPut(JSDataPropertyDescriptor::kConfigurableIndex,
+ isolate->heap()->ToBoolean(configurable()));
+ return result;
+ }
Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
if (has_value()) {
CreateDataProperty(isolate, result, factory->value_string(), value());
diff --git a/deps/v8/src/property-descriptor.h b/deps/v8/src/property-descriptor.h
index 5fbbfa36ec..cba43ed334 100644
--- a/deps/v8/src/property-descriptor.h
+++ b/deps/v8/src/property-descriptor.h
@@ -57,6 +57,16 @@ class PropertyDescriptor {
!has_value() && !has_get() && !has_set();
}
+ bool IsRegularAccessorProperty() const {
+ return has_configurable() && has_enumerable() && !has_value() &&
+ !has_writable() && has_get() && has_set();
+ }
+
+ bool IsRegularDataProperty() const {
+ return has_configurable() && has_enumerable() && has_value() &&
+ has_writable() && !has_get() && !has_set();
+ }
+
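
These two predicates gate the ToObject fast paths in property-descriptor.cc above: only descriptors that are "complete" for their kind, carrying exactly the fields an ordinary Object.getOwnPropertyDescriptor result has, can be materialized from the precomputed maps. A hypothetical standalone mirror of the checks; DescFields and the function names are illustrative:

    struct DescFields {
      bool has_value, has_writable, has_get, has_set;
      bool has_enumerable, has_configurable;
    };

    bool IsRegularData(const DescFields& d) {
      return d.has_configurable && d.has_enumerable && d.has_value &&
             d.has_writable && !d.has_get && !d.has_set;
    }

    bool IsRegularAccessor(const DescFields& d) {
      return d.has_configurable && d.has_enumerable && !d.has_value &&
             !d.has_writable && d.has_get && d.has_set;
    }
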
bool enumerable() const { return enumerable_; }
void set_enumerable(bool enumerable) {
enumerable_ = enumerable;
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 44f32cbc93..fdf2c6c4ab 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -56,9 +56,7 @@ STATIC_ASSERT(((SKIP_STRINGS | SKIP_SYMBOLS | ONLY_ALL_CAN_READ) &
class Smi;
-template<class> class TypeImpl;
-struct ZoneTypeConfig;
-typedef TypeImpl<ZoneTypeConfig> Type;
+class Type;
class TypeInfo;
// Type of properties.
@@ -332,6 +330,7 @@ class PropertyDetails BASE_EMBEDDED {
bool IsReadOnly() const { return (attributes() & READ_ONLY) != 0; }
bool IsConfigurable() const { return (attributes() & DONT_DELETE) == 0; }
bool IsDontEnum() const { return (attributes() & DONT_ENUM) != 0; }
+ bool IsEnumerable() const { return !IsDontEnum(); }
PropertyCellType cell_type() const {
return PropertyCellTypeField::decode(value_);
}
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index 93ba43cb92..a4e0d67102 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -4,6 +4,7 @@
#include "src/property.h"
+#include "src/field-type.h"
#include "src/handles-inl.h"
#include "src/ostreams.h"
@@ -20,6 +21,11 @@ std::ostream& operator<<(std::ostream& os,
return os;
}
+DataDescriptor::DataDescriptor(Handle<Name> key, int field_index,
+ PropertyAttributes attributes,
+ Representation representation)
+ : Descriptor(key, FieldType::Any(key->GetIsolate()), attributes, DATA,
+ representation, field_index) {}
struct FastPropertyDetails {
explicit FastPropertyDetails(const PropertyDetails& v) : details(v) {}
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index b58c9c6acb..add9e4d11b 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -8,9 +8,7 @@
#include <iosfwd>
#include "src/factory.h"
-#include "src/field-index.h"
#include "src/isolate.h"
-#include "src/types.h"
namespace v8 {
namespace internal {
@@ -22,13 +20,6 @@ namespace internal {
// optionally a piece of data.
class Descriptor BASE_EMBEDDED {
public:
- void KeyToUniqueName() {
- if (!key_->IsUniqueName()) {
- key_ = key_->GetIsolate()->factory()->InternalizeString(
- Handle<String>::cast(key_));
- }
- }
-
Handle<Name> GetKey() const { return key_; }
Handle<Object> GetValue() const { return value_; }
PropertyDetails GetDetails() const { return details_; }
@@ -44,25 +35,25 @@ class Descriptor BASE_EMBEDDED {
Descriptor() : details_(Smi::FromInt(0)) {}
void Init(Handle<Name> key, Handle<Object> value, PropertyDetails details) {
+ DCHECK(key->IsUniqueName());
key_ = key;
value_ = value;
details_ = details;
}
Descriptor(Handle<Name> key, Handle<Object> value, PropertyDetails details)
+ : key_(key), value_(value), details_(details) {
+ DCHECK(key->IsUniqueName());
+ }
+
+ Descriptor(Handle<Name> key, Handle<Object> value,
+ PropertyAttributes attributes, PropertyType type,
+ Representation representation, int field_index = 0)
: key_(key),
value_(value),
- details_(details) { }
-
- Descriptor(Handle<Name> key,
- Handle<Object> value,
- PropertyAttributes attributes,
- PropertyType type,
- Representation representation,
- int field_index = 0)
- : key_(key),
- value_(value),
- details_(attributes, type, representation, field_index) { }
+ details_(attributes, type, representation, field_index) {
+ DCHECK(key->IsUniqueName());
+ }
friend class DescriptorArray;
friend class Map;
@@ -75,9 +66,7 @@ std::ostream& operator<<(std::ostream& os, const Descriptor& d);
class DataDescriptor final : public Descriptor {
public:
DataDescriptor(Handle<Name> key, int field_index,
- PropertyAttributes attributes, Representation representation)
- : Descriptor(key, HeapType::Any(key->GetIsolate()), attributes, DATA,
- representation, field_index) {}
+ PropertyAttributes attributes, Representation representation);
// The field type is either a simple type or a map wrapped in a weak cell.
DataDescriptor(Handle<Name> key, int field_index,
Handle<Object> wrapped_field_type,
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/prototype.h
index 3253791f90..c5e954554c 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/prototype.h
@@ -22,6 +22,7 @@ namespace internal {
* The PrototypeIterator can either run to the null_value(), the first
* non-hidden prototype, or a given object.
*/
+
class PrototypeIterator {
public:
enum WhereToStart { START_AT_RECEIVER, START_AT_PROTOTYPE };
@@ -30,40 +31,42 @@ class PrototypeIterator {
const int kProxyPrototypeLimit = 100 * 1000;
- PrototypeIterator(Isolate* isolate, Handle<Object> receiver,
- WhereToStart where_to_start = START_AT_PROTOTYPE)
- : did_jump_to_prototype_chain_(false),
- object_(NULL),
+ PrototypeIterator(Isolate* isolate, Handle<JSReceiver> receiver,
+ WhereToStart where_to_start = START_AT_PROTOTYPE,
+ WhereToEnd where_to_end = END_AT_NULL)
+ : object_(NULL),
handle_(receiver),
isolate_(isolate),
+ where_to_end_(where_to_end),
+ is_at_end_(false),
seen_proxies_(0) {
CHECK(!handle_.is_null());
- if (where_to_start == START_AT_PROTOTYPE) {
- Advance();
- }
+ if (where_to_start == START_AT_PROTOTYPE) Advance();
}
- PrototypeIterator(Isolate* isolate, Object* receiver,
- WhereToStart where_to_start = START_AT_PROTOTYPE)
- : did_jump_to_prototype_chain_(false),
- object_(receiver),
+ PrototypeIterator(Isolate* isolate, JSReceiver* receiver,
+ WhereToStart where_to_start = START_AT_PROTOTYPE,
+ WhereToEnd where_to_end = END_AT_NULL)
+ : object_(receiver),
isolate_(isolate),
+ where_to_end_(where_to_end),
+ is_at_end_(false),
seen_proxies_(0) {
- if (where_to_start == START_AT_PROTOTYPE) {
- Advance();
- }
+ if (where_to_start == START_AT_PROTOTYPE) Advance();
}
explicit PrototypeIterator(Map* receiver_map)
- : did_jump_to_prototype_chain_(true),
- object_(receiver_map->prototype()),
- isolate_(receiver_map->GetIsolate()) {}
+ : object_(receiver_map->prototype()),
+ isolate_(receiver_map->GetIsolate()),
+ where_to_end_(END_AT_NULL),
+ is_at_end_(object_->IsNull()) {}
explicit PrototypeIterator(Handle<Map> receiver_map)
- : did_jump_to_prototype_chain_(true),
- object_(NULL),
+ : object_(NULL),
handle_(handle(receiver_map->prototype(), receiver_map->GetIsolate())),
- isolate_(receiver_map->GetIsolate()) {}
+ isolate_(receiver_map->GetIsolate()),
+ where_to_end_(END_AT_NULL),
+ is_at_end_(handle_->IsNull()) {}
~PrototypeIterator() {}
@@ -93,32 +96,30 @@ class PrototypeIterator {
void Advance() {
if (handle_.is_null() && object_->IsJSProxy()) {
- did_jump_to_prototype_chain_ = true;
+ is_at_end_ = true;
object_ = isolate_->heap()->null_value();
return;
} else if (!handle_.is_null() && handle_->IsJSProxy()) {
- did_jump_to_prototype_chain_ = true;
- handle_ = handle(isolate_->heap()->null_value(), isolate_);
+ is_at_end_ = true;
+ handle_ = isolate_->factory()->null_value();
return;
}
AdvanceIgnoringProxies();
}
void AdvanceIgnoringProxies() {
- if (!did_jump_to_prototype_chain_) {
- did_jump_to_prototype_chain_ = true;
- if (handle_.is_null()) {
- object_ = object_->GetRootMap(isolate_)->prototype();
- } else {
- handle_ = handle(handle_->GetRootMap(isolate_)->prototype(), isolate_);
- }
+ Object* object = handle_.is_null() ? object_ : *handle_;
+ Map* map = HeapObject::cast(object)->map();
+
+ Object* prototype = map->prototype();
+ is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN
+ ? !map->has_hidden_prototype()
+ : prototype->IsNull();
+
+ if (handle_.is_null()) {
+ object_ = prototype;
} else {
- if (handle_.is_null()) {
- object_ = HeapObject::cast(object_)->map()->prototype();
- } else {
- handle_ =
- handle(HeapObject::cast(*handle_)->map()->prototype(), isolate_);
- }
+ handle_ = handle(prototype, isolate_);
}
}
@@ -129,6 +130,7 @@ class PrototypeIterator {
if (!HasAccess()) {
// Abort the lookup if we do not have access to the current object.
handle_ = isolate_->factory()->null_value();
+ is_at_end_ = true;
return true;
}
if (handle_.is_null() || !handle_->IsJSProxy()) {
@@ -143,41 +145,21 @@ class PrototypeIterator {
*isolate_->factory()->NewRangeError(MessageTemplate::kStackOverflow));
return false;
}
- did_jump_to_prototype_chain_ = true;
MaybeHandle<Object> proto =
JSProxy::GetPrototype(Handle<JSProxy>::cast(handle_));
- return proto.ToHandle(&handle_);
- }
-
- bool IsAtEnd(WhereToEnd where_to_end = END_AT_NULL) const {
- if (handle_.is_null()) {
- return object_->IsNull() ||
- (did_jump_to_prototype_chain_ &&
- where_to_end == END_AT_NON_HIDDEN &&
- !HeapObject::cast(object_)->map()->is_hidden_prototype());
- } else {
- return handle_->IsNull() ||
- (did_jump_to_prototype_chain_ &&
- where_to_end == END_AT_NON_HIDDEN &&
- !Handle<HeapObject>::cast(handle_)->map()->is_hidden_prototype());
- }
- }
-
- bool IsAtEnd(Object* final_object) {
- DCHECK(handle_.is_null());
- return object_->IsNull() || object_ == final_object;
+ if (!proto.ToHandle(&handle_)) return false;
+ is_at_end_ = where_to_end_ == END_AT_NON_HIDDEN || handle_->IsNull();
+ return true;
}
- bool IsAtEnd(Handle<Object> final_object) {
- DCHECK(!handle_.is_null());
- return handle_->IsNull() || *handle_ == *final_object;
- }
+ bool IsAtEnd() const { return is_at_end_; }
private:
- bool did_jump_to_prototype_chain_;
Object* object_;
Handle<Object> handle_;
Isolate* isolate_;
+ WhereToEnd where_to_end_;
+ bool is_at_end_;
int seen_proxies_;
DISALLOW_COPY_AND_ASSIGN(PrototypeIterator);
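
With the end condition now fixed at construction, IsAtEnd() is parameterless and a chain walk becomes a simple loop. A hedged sketch against the internal API shown above; `isolate` and `receiver` (a Handle<JSReceiver>) are assumed to be in scope:

    PrototypeIterator iter(isolate, receiver,
                           PrototypeIterator::START_AT_RECEIVER);
    while (!iter.IsAtEnd()) {
      Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
      // ... inspect proto ...
      iter.Advance();
    }
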
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 6fafdfb4ad..ce72188ae1 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -210,7 +210,7 @@ void RegExpMacroAssemblerARM::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
__ ldr(r0, register_location(start_reg)); // Index of start of capture
__ ldr(r1, register_location(start_reg + 1)); // Index of end of capture
@@ -302,7 +302,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// r0: Address byte_offset1 - Address captured substring's start.
// r1: Address byte_offset2 - Address of current character position.
// r2: size_t byte_length - length of capture in bytes(!)
- // r3: Isolate* isolate
+ // r3: Isolate* isolate, or 0 if the unicode flag is set.
// Address of start of capture.
__ add(r0, r0, Operand(end_of_input_address()));
@@ -316,7 +316,14 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ sub(r1, r1, r4);
}
// Isolate.
- __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+#ifdef V8_I18N_SUPPORT
+ if (unicode) {
+ __ mov(r3, Operand(0));
+ } else // NOLINT
+#endif // V8_I18N_SUPPORT
+ {
+ __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+ }
{
AllowExternalCallThatCantCauseGC scope(masm_);
@@ -798,9 +805,12 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ cmp(current_input_offset(), Operand::Zero());
__ b(eq, &exit_label_);
// Advance current position after a zero-length match.
+ Label advance;
+ __ bind(&advance);
__ add(current_input_offset(),
current_input_offset(),
Operand((mode_ == UC16) ? 2 : 1));
+ if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
}
__ b(&load_char_start_regexp);
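
The new advance label matters for /u regexps over two-byte subjects: after a zero-length match the position is bumped by one code unit, and CheckNotInSurrogatePair branches back to advance if that bump landed between a lead and a trail surrogate, so matching always resumes on a code point boundary. A standalone illustration of the boundary test; IsLead and IsTrail use the standard UTF-16 masks:

    #include <cstdint>
    #include <cstdio>

    static bool IsLead(uint16_t u) { return (u & 0xFC00) == 0xD800; }
    static bool IsTrail(uint16_t u) { return (u & 0xFC00) == 0xDC00; }

    int main() {
      // 'X' followed by one supplementary-plane character (lead + trail).
      const uint16_t s[] = {0x0058, 0xD835, 0xDE18};
      for (int i = 1; i < 3; i++) {
        bool mid_pair = IsLead(s[i - 1]) && IsTrail(s[i]);
        std::printf("offset %d: %s\n", i,
                    mid_pair ? "inside surrogate pair, advance again"
                             : "code point boundary");
      }
    }
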
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index 233a98f761..f808538a44 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -38,7 +38,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 9948597ca0..941cceaa59 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -274,7 +274,7 @@ void RegExpMacroAssemblerARM64::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
Register capture_start_offset = w10;
@@ -388,7 +388,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
// x0: Address byte_offset1 - Address captured substring's start.
// x1: Address byte_offset2 - Address of current character position.
// w2: size_t byte_length - length of capture in bytes(!)
- // x3: Isolate* isolate
+ // x3: Isolate* isolate or 0 if unicode flag.
// Address of start of capture.
__ Add(x0, input_end(), Operand(capture_start_offset, SXTW));
@@ -400,7 +400,14 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Sub(x1, x1, Operand(capture_length, SXTW));
}
// Isolate.
- __ Mov(x3, ExternalReference::isolate_address(isolate()));
+#ifdef V8_I18N_SUPPORT
+ if (unicode) {
+ __ Mov(x3, Operand(0));
+ } else // NOLINT
+#endif // V8_I18N_SUPPORT
+ {
+ __ Mov(x3, ExternalReference::isolate_address(isolate()));
+ }
{
AllowExternalCallThatCantCauseGC scope(masm_);
@@ -991,9 +998,12 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Offset from the end is zero if we already reached the end.
__ Cbz(current_input_offset(), &return_w0);
// Advance current position after a zero-length match.
+ Label advance;
+ __ bind(&advance);
__ Add(current_input_offset(),
current_input_offset(),
Operand((mode_ == UC16) ? 2 : 1));
+ if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
}
__ B(&load_char_start_regexp);
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index d71f063d00..69624f606e 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -43,7 +43,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
diff --git a/deps/v8/src/regexp/bytecodes-irregexp.h b/deps/v8/src/regexp/bytecodes-irregexp.h
index 2dbfbc0b82..3848f15dc7 100644
--- a/deps/v8/src/regexp/bytecodes-irregexp.h
+++ b/deps/v8/src/regexp/bytecodes-irregexp.h
@@ -6,6 +6,8 @@
#ifndef V8_REGEXP_BYTECODES_IRREGEXP_H_
#define V8_REGEXP_BYTECODES_IRREGEXP_H_
+#ifdef V8_INTERPRETED_REGEXP
+
namespace v8 {
namespace internal {
@@ -18,56 +20,58 @@ const unsigned int MAX_FIRST_ARG = 0x7fffffu;
const int BYTECODE_SHIFT = 8;
#define BYTECODE_ITERATOR(V) \
-V(BREAK, 0, 4) /* bc8 */ \
-V(PUSH_CP, 1, 4) /* bc8 pad24 */ \
-V(PUSH_BT, 2, 8) /* bc8 pad24 offset32 */ \
-V(PUSH_REGISTER, 3, 4) /* bc8 reg_idx24 */ \
-V(SET_REGISTER_TO_CP, 4, 8) /* bc8 reg_idx24 offset32 */ \
-V(SET_CP_TO_REGISTER, 5, 4) /* bc8 reg_idx24 */ \
-V(SET_REGISTER_TO_SP, 6, 4) /* bc8 reg_idx24 */ \
-V(SET_SP_TO_REGISTER, 7, 4) /* bc8 reg_idx24 */ \
-V(SET_REGISTER, 8, 8) /* bc8 reg_idx24 value32 */ \
-V(ADVANCE_REGISTER, 9, 8) /* bc8 reg_idx24 value32 */ \
-V(POP_CP, 10, 4) /* bc8 pad24 */ \
-V(POP_BT, 11, 4) /* bc8 pad24 */ \
-V(POP_REGISTER, 12, 4) /* bc8 reg_idx24 */ \
-V(FAIL, 13, 4) /* bc8 pad24 */ \
-V(SUCCEED, 14, 4) /* bc8 pad24 */ \
-V(ADVANCE_CP, 15, 4) /* bc8 offset24 */ \
-V(GOTO, 16, 8) /* bc8 pad24 addr32 */ \
-V(LOAD_CURRENT_CHAR, 17, 8) /* bc8 offset24 addr32 */ \
-V(LOAD_CURRENT_CHAR_UNCHECKED, 18, 4) /* bc8 offset24 */ \
-V(LOAD_2_CURRENT_CHARS, 19, 8) /* bc8 offset24 addr32 */ \
-V(LOAD_2_CURRENT_CHARS_UNCHECKED, 20, 4) /* bc8 offset24 */ \
-V(LOAD_4_CURRENT_CHARS, 21, 8) /* bc8 offset24 addr32 */ \
-V(LOAD_4_CURRENT_CHARS_UNCHECKED, 22, 4) /* bc8 offset24 */ \
-V(CHECK_4_CHARS, 23, 12) /* bc8 pad24 uint32 addr32 */ \
-V(CHECK_CHAR, 24, 8) /* bc8 pad8 uint16 addr32 */ \
-V(CHECK_NOT_4_CHARS, 25, 12) /* bc8 pad24 uint32 addr32 */ \
-V(CHECK_NOT_CHAR, 26, 8) /* bc8 pad8 uint16 addr32 */ \
-V(AND_CHECK_4_CHARS, 27, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
-V(AND_CHECK_CHAR, 28, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
-V(AND_CHECK_NOT_4_CHARS, 29, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
-V(AND_CHECK_NOT_CHAR, 30, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
-V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 uc16 addr32 */ \
-V(CHECK_CHAR_IN_RANGE, 32, 12) /* bc8 pad24 uc16 uc16 addr32 */ \
-V(CHECK_CHAR_NOT_IN_RANGE, 33, 12) /* bc8 pad24 uc16 uc16 addr32 */ \
-V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128 */ \
-V(CHECK_LT, 35, 8) /* bc8 pad8 uc16 addr32 */ \
-V(CHECK_GT, 36, 8) /* bc8 pad8 uc16 addr32 */ \
-V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_NOT_BACK_REF_BACKWARD, 39, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD, 40, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_NOT_REGS_EQUAL, 41, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
-V(CHECK_REGISTER_LT, 42, 12) /* bc8 reg_idx24 value32 addr32 */ \
-V(CHECK_REGISTER_GE, 43, 12) /* bc8 reg_idx24 value32 addr32 */ \
-V(CHECK_REGISTER_EQ_POS, 44, 8) /* bc8 reg_idx24 addr32 */ \
-V(CHECK_AT_START, 45, 8) /* bc8 pad24 addr32 */ \
-V(CHECK_NOT_AT_START, 46, 8) /* bc8 offset24 addr32 */ \
-V(CHECK_GREEDY, 47, 8) /* bc8 pad24 addr32 */ \
-V(ADVANCE_CP_AND_GOTO, 48, 8) /* bc8 offset24 addr32 */ \
-V(SET_CURRENT_POSITION_FROM_END, 49, 4) /* bc8 idx24 */
+ V(BREAK, 0, 4) /* bc8 */ \
+ V(PUSH_CP, 1, 4) /* bc8 pad24 */ \
+ V(PUSH_BT, 2, 8) /* bc8 pad24 offset32 */ \
+ V(PUSH_REGISTER, 3, 4) /* bc8 reg_idx24 */ \
+ V(SET_REGISTER_TO_CP, 4, 8) /* bc8 reg_idx24 offset32 */ \
+ V(SET_CP_TO_REGISTER, 5, 4) /* bc8 reg_idx24 */ \
+ V(SET_REGISTER_TO_SP, 6, 4) /* bc8 reg_idx24 */ \
+ V(SET_SP_TO_REGISTER, 7, 4) /* bc8 reg_idx24 */ \
+ V(SET_REGISTER, 8, 8) /* bc8 reg_idx24 value32 */ \
+ V(ADVANCE_REGISTER, 9, 8) /* bc8 reg_idx24 value32 */ \
+ V(POP_CP, 10, 4) /* bc8 pad24 */ \
+ V(POP_BT, 11, 4) /* bc8 pad24 */ \
+ V(POP_REGISTER, 12, 4) /* bc8 reg_idx24 */ \
+ V(FAIL, 13, 4) /* bc8 pad24 */ \
+ V(SUCCEED, 14, 4) /* bc8 pad24 */ \
+ V(ADVANCE_CP, 15, 4) /* bc8 offset24 */ \
+ V(GOTO, 16, 8) /* bc8 pad24 addr32 */ \
+ V(LOAD_CURRENT_CHAR, 17, 8) /* bc8 offset24 addr32 */ \
+ V(LOAD_CURRENT_CHAR_UNCHECKED, 18, 4) /* bc8 offset24 */ \
+ V(LOAD_2_CURRENT_CHARS, 19, 8) /* bc8 offset24 addr32 */ \
+ V(LOAD_2_CURRENT_CHARS_UNCHECKED, 20, 4) /* bc8 offset24 */ \
+ V(LOAD_4_CURRENT_CHARS, 21, 8) /* bc8 offset24 addr32 */ \
+ V(LOAD_4_CURRENT_CHARS_UNCHECKED, 22, 4) /* bc8 offset24 */ \
+ V(CHECK_4_CHARS, 23, 12) /* bc8 pad24 uint32 addr32 */ \
+ V(CHECK_CHAR, 24, 8) /* bc8 pad8 uint16 addr32 */ \
+ V(CHECK_NOT_4_CHARS, 25, 12) /* bc8 pad24 uint32 addr32 */ \
+ V(CHECK_NOT_CHAR, 26, 8) /* bc8 pad8 uint16 addr32 */ \
+ V(AND_CHECK_4_CHARS, 27, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
+ V(AND_CHECK_CHAR, 28, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
+ V(AND_CHECK_NOT_4_CHARS, 29, 16) /* bc8 pad24 uint32 uint32 addr32 */ \
+ V(AND_CHECK_NOT_CHAR, 30, 12) /* bc8 pad8 uint16 uint32 addr32 */ \
+ V(MINUS_AND_CHECK_NOT_CHAR, 31, 12) /* bc8 pad8 uc16 uc16 uc16 addr32 */ \
+ V(CHECK_CHAR_IN_RANGE, 32, 12) /* bc8 pad24 uc16 uc16 addr32 */ \
+ V(CHECK_CHAR_NOT_IN_RANGE, 33, 12) /* bc8 pad24 uc16 uc16 addr32 */ \
+ V(CHECK_BIT_IN_TABLE, 34, 24) /* bc8 pad24 addr32 bits128 */ \
+ V(CHECK_LT, 35, 8) /* bc8 pad8 uc16 addr32 */ \
+ V(CHECK_GT, 36, 8) /* bc8 pad8 uc16 addr32 */ \
+ V(CHECK_NOT_BACK_REF, 37, 8) /* bc8 reg_idx24 addr32 */ \
+ V(CHECK_NOT_BACK_REF_NO_CASE, 38, 8) /* bc8 reg_idx24 addr32 */ \
+ V(CHECK_NOT_BACK_REF_NO_CASE_UNICODE, 39, 8) /* bc8 reg_idx24 addr32 */ \
+ V(CHECK_NOT_BACK_REF_BACKWARD, 40, 8) /* bc8 reg_idx24 addr32 */ \
+ V(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD, 41, 8) /* bc8 reg_idx24 addr32 */ \
+ V(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD, 42, 8) /* bc8 reg_idx24 addr32 */ \
+ V(CHECK_NOT_REGS_EQUAL, 43, 12) /* bc8 regidx24 reg_idx32 addr32 */ \
+ V(CHECK_REGISTER_LT, 44, 12) /* bc8 reg_idx24 value32 addr32 */ \
+ V(CHECK_REGISTER_GE, 45, 12) /* bc8 reg_idx24 value32 addr32 */ \
+ V(CHECK_REGISTER_EQ_POS, 46, 8) /* bc8 reg_idx24 addr32 */ \
+ V(CHECK_AT_START, 47, 8) /* bc8 pad24 addr32 */ \
+ V(CHECK_NOT_AT_START, 48, 8) /* bc8 offset24 addr32 */ \
+ V(CHECK_GREEDY, 49, 8) /* bc8 pad24 addr32 */ \
+ V(ADVANCE_CP_AND_GOTO, 50, 8) /* bc8 offset24 addr32 */ \
+ V(SET_CURRENT_POSITION_FROM_END, 51, 4) /* bc8 idx24 */
#define DECLARE_BYTECODES(name, code, length) \
static const int BC_##name = code;
@@ -82,4 +86,6 @@ BYTECODE_ITERATOR(DECLARE_BYTECODE_LENGTH)
} // namespace internal
} // namespace v8
+#endif // V8_INTERPRETED_REGEXP
+
#endif // V8_REGEXP_BYTECODES_IRREGEXP_H_
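Each table entry above is V(name, opcode, total length in bytes); the renumbering makes room for the two new unicode back-reference bytecodes at 39 and 42. A sketch of how the interpreter decodes the packed 32-bit instruction word, using the BYTECODE_MASK and BYTECODE_SHIFT constants from this header (dispatch loop simplified for illustration):

    // First word: low 8 bits are the opcode, upper 24 bits the first argument.
    uint32_t insn = Load32Aligned(pc);
    switch (insn & BYTECODE_MASK) {
      case BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE: {
        int start_reg = insn >> BYTECODE_SHIFT;       // reg_idx24
        int32_t on_no_match = Load32Aligned(pc + 4);  // addr32
        // ... compare case-insensitively under unicode rules ...
        break;
      }
    }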
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 6ef0f5fff6..4c22b43f77 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -189,7 +189,7 @@ void RegExpMacroAssemblerIA32::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
__ mov(edx, register_location(start_reg)); // Index of start of capture
__ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
@@ -296,11 +296,18 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Address byte_offset1 - Address captured substring's start.
// Address byte_offset2 - Address of current character position.
// size_t byte_length - length of capture in bytes(!)
- // Isolate* isolate
+ // Isolate* isolate or 0 if unicode flag.
// Set isolate.
- __ mov(Operand(esp, 3 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
+#ifdef V8_I18N_SUPPORT
+ if (unicode) {
+ __ mov(Operand(esp, 3 * kPointerSize), Immediate(0));
+ } else // NOLINT
+#endif // V8_I18N_SUPPORT
+ {
+ __ mov(Operand(esp, 3 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ }
// Set byte_length.
__ mov(Operand(esp, 2 * kPointerSize), ebx);
// Set byte_offset2.
@@ -822,13 +829,15 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ test(edi, edi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
+ Label advance;
+ __ bind(&advance);
if (mode_ == UC16) {
__ add(edi, Immediate(2));
} else {
__ inc(edi);
}
+ if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
}
-
__ jmp(&load_char_start_regexp);
} else {
__ mov(eax, Immediate(SUCCESS));
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
index 1ef87eef38..fa174137a4 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.h
@@ -37,7 +37,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
diff --git a/deps/v8/src/regexp/interpreter-irregexp.cc b/deps/v8/src/regexp/interpreter-irregexp.cc
index ea748e4e55..14834d512a 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.cc
+++ b/deps/v8/src/regexp/interpreter-irregexp.cc
@@ -4,6 +4,8 @@
// A simple interpreter for the Irregexp byte code.
+#ifdef V8_INTERPRETED_REGEXP
+
#include "src/regexp/interpreter-irregexp.h"
#include "src/ast/ast.h"
@@ -13,38 +15,32 @@
#include "src/unicode.h"
#include "src/utils.h"
+#ifdef V8_I18N_SUPPORT
+#include "unicode/uchar.h"
+#endif // V8_I18N_SUPPORT
+
namespace v8 {
namespace internal {
-
typedef unibrow::Mapping<unibrow::Ecma262Canonicalize> Canonicalize;
-static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
- int from,
- int current,
- int len,
- Vector<const uc16> subject) {
- for (int i = 0; i < len; i++) {
- unibrow::uchar old_char = subject[from++];
- unibrow::uchar new_char = subject[current++];
- if (old_char == new_char) continue;
- unibrow::uchar old_string[1] = { old_char };
- unibrow::uchar new_string[1] = { new_char };
- interp_canonicalize->get(old_char, '\0', old_string);
- interp_canonicalize->get(new_char, '\0', new_string);
- if (old_string[0] != new_string[0]) {
- return false;
- }
- }
- return true;
+static bool BackRefMatchesNoCase(Isolate* isolate, int from, int current,
+ int len, Vector<const uc16> subject,
+ bool unicode) {
+ Address offset_a =
+ reinterpret_cast<Address>(const_cast<uc16*>(&subject.at(from)));
+ Address offset_b =
+ reinterpret_cast<Address>(const_cast<uc16*>(&subject.at(current)));
+ size_t length = len * kUC16Size;
+ return RegExpMacroAssembler::CaseInsensitiveCompareUC16(
+ offset_a, offset_b, length, unicode ? nullptr : isolate) == 1;
}
-static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
- int from,
- int current,
- int len,
- Vector<const uint8_t> subject) {
+static bool BackRefMatchesNoCase(Isolate* isolate, int from, int current,
+ int len, Vector<const uint8_t> subject,
+ bool unicode) {
+ // For Latin1 characters, the unicode flag makes no difference.
for (int i = 0; i < len; i++) {
unsigned int old_char = subject[from++];
unsigned int new_char = subject[current++];
@@ -522,13 +518,16 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
pc += BC_CHECK_NOT_BACK_REF_BACKWARD_LENGTH;
break;
}
+ BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE)
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE) {
+ bool unicode =
+ (insn & BYTECODE_MASK) == BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE;
int from = registers[insn >> BYTECODE_SHIFT];
int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
if (from >= 0 && len > 0) {
if (current + len > subject.length() ||
- !BackRefMatchesNoCase(isolate->interp_canonicalize_mapping(),
- from, current, len, subject)) {
+ !BackRefMatchesNoCase(isolate, from, current, len, subject,
+ unicode)) {
pc = code_base + Load32Aligned(pc + 4);
break;
}
@@ -537,13 +536,16 @@ static RegExpImpl::IrregexpResult RawMatch(Isolate* isolate,
pc += BC_CHECK_NOT_BACK_REF_NO_CASE_LENGTH;
break;
}
+ BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD)
BYTECODE(CHECK_NOT_BACK_REF_NO_CASE_BACKWARD) {
+ bool unicode = (insn & BYTECODE_MASK) ==
+ BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD;
int from = registers[insn >> BYTECODE_SHIFT];
int len = registers[(insn >> BYTECODE_SHIFT) + 1] - from;
if (from >= 0 && len > 0) {
if (current - len < 0 ||
- !BackRefMatchesNoCase(isolate->interp_canonicalize_mapping(),
- from, current - len, len, subject)) {
+ !BackRefMatchesNoCase(isolate, from, current - len, len, subject,
+ unicode)) {
pc = code_base + Load32Aligned(pc + 4);
break;
}
@@ -619,3 +621,5 @@ RegExpImpl::IrregexpResult IrregexpInterpreter::Match(
} // namespace internal
} // namespace v8
+
+#endif // V8_INTERPRETED_REGEXP
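The rewritten two-byte BackRefMatchesNoCase now funnels into the same helper the generated code calls, with a null Isolate* standing in for "use unicode case folding" (mirroring the r3/x3/stack-slot = 0 convention in the assembler ports above). A hedged sketch of that dispatch contract; the folding helper is hypothetical since its body lives in regexp-macro-assembler.cc, outside this diff:

    // Hypothetical helper standing in for the real folding logic.
    static bool FoldedEqual(uc16 a, uc16 b, bool unicode);

    // Sketch of the contract: nullptr isolate selects unicode case folding,
    // a real isolate selects the legacy Ecma262Canonicalize mapping.
    int CaseInsensitiveCompareUC16(Address byte_offset1, Address byte_offset2,
                                   size_t byte_length, Isolate* isolate) {
      const uc16* a = reinterpret_cast<const uc16*>(byte_offset1);
      const uc16* b = reinterpret_cast<const uc16*>(byte_offset2);
      for (size_t i = 0; i < byte_length / kUC16Size; i++) {
        if (!FoldedEqual(a[i], b[i], isolate == nullptr)) return 0;
      }
      return 1;  // 1 == match, as BackRefMatchesNoCase above expects
    }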
diff --git a/deps/v8/src/regexp/interpreter-irregexp.h b/deps/v8/src/regexp/interpreter-irregexp.h
index 244af99091..887fab6d0e 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.h
+++ b/deps/v8/src/regexp/interpreter-irregexp.h
@@ -7,12 +7,13 @@
#ifndef V8_REGEXP_INTERPRETER_IRREGEXP_H_
#define V8_REGEXP_INTERPRETER_IRREGEXP_H_
+#ifdef V8_INTERPRETED_REGEXP
+
#include "src/regexp/jsregexp.h"
namespace v8 {
namespace internal {
-
class IrregexpInterpreter {
public:
static RegExpImpl::IrregexpResult Match(Isolate* isolate,
@@ -26,4 +27,6 @@ class IrregexpInterpreter {
} // namespace internal
} // namespace v8
+#endif // V8_INTERPRETED_REGEXP
+
#endif // V8_REGEXP_INTERPRETER_IRREGEXP_H_
diff --git a/deps/v8/src/regexp/jsregexp-inl.h b/deps/v8/src/regexp/jsregexp-inl.h
index 3eb7c3c170..ca7a9fe991 100644
--- a/deps/v8/src/regexp/jsregexp-inl.h
+++ b/deps/v8/src/regexp/jsregexp-inl.h
@@ -47,7 +47,10 @@ int32_t* RegExpImpl::GlobalCache::FetchNext() {
register_array_size_);
} else {
int last_start_index = last_match[0];
- if (last_start_index == last_end_index) last_end_index++;
+ if (last_start_index == last_end_index) {
+ // Zero-length match. Advance by one code point.
+ last_end_index = AdvanceZeroLength(last_end_index);
+ }
if (last_end_index > subject_->length()) {
num_matches_ = 0; // Signal failed match.
return NULL;
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 34d20fe781..80f48ca1a9 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -25,6 +25,11 @@
#include "src/string-search.h"
#include "src/unicode-decoder.h"
+#ifdef V8_I18N_SUPPORT
+#include "unicode/uset.h"
+#include "unicode/utypes.h"
+#endif // V8_I18N_SUPPORT
+
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
@@ -72,7 +77,7 @@ ContainedInLattice AddRange(ContainedInLattice containment,
int ranges_length,
Interval new_range) {
DCHECK((ranges_length & 1) == 1);
- DCHECK(ranges[ranges_length - 1] == String::kMaxUtf16CodeUnit + 1);
+ DCHECK(ranges[ranges_length - 1] == String::kMaxCodePoint + 1);
if (containment == kLatticeUnknown) return containment;
bool inside = false;
int last = 0;
@@ -145,9 +150,8 @@ MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
PostponeInterruptsScope postpone(isolate);
RegExpCompileData parse_result;
FlatStringReader reader(isolate, pattern);
- if (!RegExpParser::ParseRegExp(re->GetIsolate(), &zone, &reader,
- flags & JSRegExp::kMultiline,
- flags & JSRegExp::kUnicode, &parse_result)) {
+ if (!RegExpParser::ParseRegExp(re->GetIsolate(), &zone, &reader, flags,
+ &parse_result)) {
// Throw an exception if we fail to parse the pattern.
return ThrowRegExpException(re, pattern, parse_result.error);
}
@@ -371,18 +375,16 @@ bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
pattern = String::Flatten(pattern);
RegExpCompileData compile_data;
FlatStringReader reader(isolate, pattern);
- if (!RegExpParser::ParseRegExp(isolate, &zone, &reader,
- flags & JSRegExp::kMultiline,
- flags & JSRegExp::kUnicode, &compile_data)) {
+ if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
+ &compile_data)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
USE(ThrowRegExpException(re, pattern, compile_data.error));
return false;
}
- RegExpEngine::CompilationResult result = RegExpEngine::Compile(
- isolate, &zone, &compile_data, flags & JSRegExp::kIgnoreCase,
- flags & JSRegExp::kGlobal, flags & JSRegExp::kMultiline,
- flags & JSRegExp::kSticky, pattern, sample_subject, is_one_byte);
+ RegExpEngine::CompilationResult result =
+ RegExpEngine::Compile(isolate, &zone, &compile_data, flags, pattern,
+ sample_subject, is_one_byte);
if (result.error_message != NULL) {
// Unable to compile regexp.
Handle<String> error_message = isolate->factory()->NewStringFromUtf8(
@@ -636,7 +638,6 @@ Handle<JSArray> RegExpImpl::SetLastMatchInfo(Handle<JSArray> last_match_info,
RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
Handle<String> subject,
- bool is_global,
Isolate* isolate)
: register_array_(NULL),
register_array_size_(0),
@@ -661,7 +662,8 @@ RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
}
}
- if (is_global && !interpreted) {
+ DCHECK_NE(0, regexp->GetFlags() & JSRegExp::kGlobal);
+ if (!interpreted) {
register_array_size_ =
Max(registers_per_match_, Isolate::kJSRegexpStaticOffsetsVectorSize);
max_matches_ = register_array_size_ / registers_per_match_;
@@ -690,6 +692,16 @@ RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
last_match[1] = 0;
}
+int RegExpImpl::GlobalCache::AdvanceZeroLength(int last_index) {
+ if ((regexp_->GetFlags() & JSRegExp::kUnicode) != 0 &&
+ last_index + 1 < subject_->length() &&
+ unibrow::Utf16::IsLeadSurrogate(subject_->Get(last_index)) &&
+ unibrow::Utf16::IsTrailSurrogate(subject_->Get(last_index + 1))) {
+ // Advance over the surrogate pair.
+ return last_index + 2;
+ }
+ return last_index + 1;
+}
// -------------------------------------------------------------------
// Implementation of the Irregexp regular expression engine.
@@ -945,7 +957,7 @@ class FrequencyCollator {
class RegExpCompiler {
public:
RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- bool ignore_case, bool is_one_byte);
+ JSRegExp::Flags flags, bool is_one_byte);
int AllocateRegister() {
if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
@@ -955,6 +967,22 @@ class RegExpCompiler {
return next_register_++;
}
+ // Lookarounds to match lone surrogates for unicode character class matches
+ // are never nested. We can therefore reuse registers.
+ int UnicodeLookaroundStackRegister() {
+ if (unicode_lookaround_stack_register_ == kNoRegister) {
+ unicode_lookaround_stack_register_ = AllocateRegister();
+ }
+ return unicode_lookaround_stack_register_;
+ }
+
+ int UnicodeLookaroundPositionRegister() {
+ if (unicode_lookaround_position_register_ == kNoRegister) {
+ unicode_lookaround_position_register_ = AllocateRegister();
+ }
+ return unicode_lookaround_position_register_;
+ }
+
RegExpEngine::CompilationResult Assemble(RegExpMacroAssembler* assembler,
RegExpNode* start,
int capture_count,
@@ -981,7 +1009,8 @@ class RegExpCompiler {
void SetRegExpTooBig() { reg_exp_too_big_ = true; }
- inline bool ignore_case() { return ignore_case_; }
+ inline bool ignore_case() { return (flags_ & JSRegExp::kIgnoreCase) != 0; }
+ inline bool unicode() { return (flags_ & JSRegExp::kUnicode) != 0; }
inline bool one_byte() { return one_byte_; }
inline bool optimize() { return optimize_; }
inline void set_optimize(bool value) { optimize_ = value; }
@@ -1006,10 +1035,12 @@ class RegExpCompiler {
private:
EndNode* accept_;
int next_register_;
+ int unicode_lookaround_stack_register_;
+ int unicode_lookaround_position_register_;
List<RegExpNode*>* work_list_;
int recursion_depth_;
RegExpMacroAssembler* macro_assembler_;
- bool ignore_case_;
+ JSRegExp::Flags flags_;
bool one_byte_;
bool reg_exp_too_big_;
bool limiting_recursion_;
@@ -1041,11 +1072,13 @@ static RegExpEngine::CompilationResult IrregexpRegExpTooBig(Isolate* isolate) {
// Attempts to compile the regexp using an Irregexp code generator. Returns
// a fixed array or a null handle depending on whether it succeeded.
RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- bool ignore_case, bool one_byte)
+ JSRegExp::Flags flags, bool one_byte)
: next_register_(2 * (capture_count + 1)),
+ unicode_lookaround_stack_register_(kNoRegister),
+ unicode_lookaround_position_register_(kNoRegister),
work_list_(NULL),
recursion_depth_(0),
- ignore_case_(ignore_case),
+ flags_(flags),
one_byte_(one_byte),
reg_exp_too_big_(false),
limiting_recursion_(false),
@@ -1941,15 +1974,13 @@ static void SplitSearchSpace(ZoneList<int>* ranges,
// know that the character is in the range of min_char to max_char inclusive.
// Either label can be NULL indicating backtracking. Either label can also be
// equal to the fall_through label.
-static void GenerateBranches(RegExpMacroAssembler* masm,
- ZoneList<int>* ranges,
- int start_index,
- int end_index,
- uc16 min_char,
- uc16 max_char,
- Label* fall_through,
- Label* even_label,
- Label* odd_label) {
+static void GenerateBranches(RegExpMacroAssembler* masm, ZoneList<int>* ranges,
+ int start_index, int end_index, uc32 min_char,
+ uc32 max_char, Label* fall_through,
+ Label* even_label, Label* odd_label) {
+ DCHECK_LE(min_char, String::kMaxUtf16CodeUnit);
+ DCHECK_LE(max_char, String::kMaxUtf16CodeUnit);
+
int first = ranges->at(start_index);
int last = ranges->at(end_index) - 1;
@@ -2098,9 +2129,7 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
Label* on_failure, int cp_offset, bool check_offset,
bool preloaded, Zone* zone) {
ZoneList<CharacterRange>* ranges = cc->ranges(zone);
- if (!CharacterRange::IsCanonical(ranges)) {
- CharacterRange::Canonicalize(ranges);
- }
+ CharacterRange::Canonicalize(ranges);
int max_char;
if (one_byte) {
@@ -2142,23 +2171,14 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
}
return;
}
- if (last_valid_range == 0 &&
- !cc->is_negated() &&
- ranges->at(0).IsEverything(max_char)) {
- // This is a common case hit by non-anchored expressions.
- if (check_offset) {
- macro_assembler->CheckPosition(cp_offset, on_failure);
- }
- return;
- }
if (!preloaded) {
macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset);
}
if (cc->is_standard(zone) &&
- macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
- on_failure)) {
+ macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
+ on_failure)) {
return;
}
@@ -2470,12 +2490,14 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
} else {
// For 2-character preloads in one-byte mode or 1-character preloads in
// two-byte mode we also use a 16 bit load with zero extend.
+ static const uint32_t kTwoByteMask = 0xffff;
+ static const uint32_t kFourByteMask = 0xffffffff;
if (details->characters() == 2 && compiler->one_byte()) {
- if ((mask & 0xffff) == 0xffff) need_mask = false;
+ if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false;
} else if (details->characters() == 1 && !compiler->one_byte()) {
- if ((mask & 0xffff) == 0xffff) need_mask = false;
+ if ((mask & kTwoByteMask) == kTwoByteMask) need_mask = false;
} else {
- if (mask == 0xffffffff) need_mask = false;
+ if (mask == kFourByteMask) need_mask = false;
}
}
@@ -2798,9 +2820,7 @@ RegExpNode* TextNode::FilterOneByte(int depth, bool ignore_case) {
DCHECK(elm.text_type() == TextElement::CHAR_CLASS);
RegExpCharacterClass* cc = elm.char_class();
ZoneList<CharacterRange>* ranges = cc->ranges(zone());
- if (!CharacterRange::IsCanonical(ranges)) {
- CharacterRange::Canonicalize(ranges);
- }
+ CharacterRange::Canonicalize(ranges);
// Now they are in order so we only need to look at the first.
int range_count = ranges->length();
if (cc->is_negated()) {
@@ -3289,6 +3309,36 @@ bool TextNode::SkipPass(int int_pass, bool ignore_case) {
}
+TextNode* TextNode::CreateForCharacterRanges(Zone* zone,
+ ZoneList<CharacterRange>* ranges,
+ bool read_backward,
+ RegExpNode* on_success) {
+ DCHECK_NOT_NULL(ranges);
+ ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(1, zone);
+ elms->Add(
+ TextElement::CharClass(new (zone) RegExpCharacterClass(ranges, false)),
+ zone);
+ return new (zone) TextNode(elms, read_backward, on_success);
+}
+
+
+TextNode* TextNode::CreateForSurrogatePair(Zone* zone, CharacterRange lead,
+ CharacterRange trail,
+ bool read_backward,
+ RegExpNode* on_success) {
+ ZoneList<CharacterRange>* lead_ranges = CharacterRange::List(zone, lead);
+ ZoneList<CharacterRange>* trail_ranges = CharacterRange::List(zone, trail);
+ ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(2, zone);
+ elms->Add(TextElement::CharClass(
+ new (zone) RegExpCharacterClass(lead_ranges, false)),
+ zone);
+ elms->Add(TextElement::CharClass(
+ new (zone) RegExpCharacterClass(trail_ranges, false)),
+ zone);
+ return new (zone) TextNode(elms, read_backward, on_success);
+}
+
+
// This generates the code to match a text node. A text node can contain
// straight character sequences (possibly to be matched in a case-independent
// way) and character classes. For efficiency we do not do this in a single
@@ -3385,10 +3435,7 @@ void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte) {
// independent case and it slows us down if we don't know that.
if (cc->is_standard(zone())) continue;
ZoneList<CharacterRange>* ranges = cc->ranges(zone());
- int range_count = ranges->length();
- for (int j = 0; j < range_count; j++) {
- ranges->at(j).AddCaseEquivalents(isolate, zone(), ranges, is_one_byte);
- }
+ CharacterRange::AddCaseEquivalents(isolate, zone(), ranges, is_one_byte);
}
}
}
@@ -3405,9 +3452,7 @@ RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
if (elm.text_type() != TextElement::CHAR_CLASS) return NULL;
RegExpCharacterClass* node = elm.char_class();
ZoneList<CharacterRange>* ranges = node->ranges(zone());
- if (!CharacterRange::IsCanonical(ranges)) {
- CharacterRange::Canonicalize(ranges);
- }
+ CharacterRange::Canonicalize(ranges);
if (node->is_negated()) {
return ranges->length() == 0 ? on_success() : NULL;
}
@@ -3554,27 +3599,29 @@ class AlternativeGenerationList {
};
+static const uc32 kRangeEndMarker = 0x110000;
+
// The '2' variant has an inclusive from and an exclusive to.
// This covers \s as defined in ECMA-262 5.1, 15.10.2.12,
// which includes WhiteSpace (7.2) and LineTerminator (7.3) values.
-static const int kSpaceRanges[] = { '\t', '\r' + 1, ' ', ' ' + 1,
- 0x00A0, 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B,
- 0x2028, 0x202A, 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001,
- 0xFEFF, 0xFF00, 0x10000 };
+static const int kSpaceRanges[] = {
+ '\t', '\r' + 1, ' ', ' ' + 1, 0x00A0, 0x00A1, 0x1680, 0x1681,
+ 0x180E, 0x180F, 0x2000, 0x200B, 0x2028, 0x202A, 0x202F, 0x2030,
+ 0x205F, 0x2060, 0x3000, 0x3001, 0xFEFF, 0xFF00, kRangeEndMarker};
static const int kSpaceRangeCount = arraysize(kSpaceRanges);
static const int kWordRanges[] = {
- '0', '9' + 1, 'A', 'Z' + 1, '_', '_' + 1, 'a', 'z' + 1, 0x10000 };
+ '0', '9' + 1, 'A', 'Z' + 1, '_', '_' + 1, 'a', 'z' + 1, kRangeEndMarker};
static const int kWordRangeCount = arraysize(kWordRanges);
-static const int kDigitRanges[] = { '0', '9' + 1, 0x10000 };
+static const int kDigitRanges[] = {'0', '9' + 1, kRangeEndMarker};
static const int kDigitRangeCount = arraysize(kDigitRanges);
-static const int kSurrogateRanges[] = { 0xd800, 0xe000, 0x10000 };
+static const int kSurrogateRanges[] = {
+ kLeadSurrogateStart, kLeadSurrogateStart + 1, kRangeEndMarker};
static const int kSurrogateRangeCount = arraysize(kSurrogateRanges);
-static const int kLineTerminatorRanges[] = { 0x000A, 0x000B, 0x000D, 0x000E,
- 0x2028, 0x202A, 0x10000 };
+static const int kLineTerminatorRanges[] = {
+ 0x000A, 0x000B, 0x000D, 0x000E, 0x2028, 0x202A, kRangeEndMarker};
static const int kLineTerminatorRangeCount = arraysize(kLineTerminatorRanges);
-
void BoyerMoorePositionInfo::Set(int character) {
SetInterval(Interval(character, character));
}
@@ -3916,6 +3963,11 @@ void ChoiceNode::SetUpPreLoad(RegExpCompiler* compiler,
void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
int choice_count = alternatives_->length();
+ if (choice_count == 1 && alternatives_->at(0).guards() == NULL) {
+ alternatives_->at(0).node()->Emit(compiler, trace);
+ return;
+ }
+
AssertGuardsMentionRegisters(trace);
LimitResult limit_result = LimitVersions(compiler, trace);
@@ -4349,14 +4401,19 @@ void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
DCHECK_EQ(start_reg_ + 1, end_reg_);
if (compiler->ignore_case()) {
- assembler->CheckNotBackReferenceIgnoreCase(start_reg_, read_backward(),
- trace->backtrack());
+ assembler->CheckNotBackReferenceIgnoreCase(
+ start_reg_, read_backward(), compiler->unicode(), trace->backtrack());
} else {
assembler->CheckNotBackReference(start_reg_, read_backward(),
trace->backtrack());
}
// We are going to advance backward, so we may end up at the start.
if (read_backward()) trace->set_at_start(Trace::UNKNOWN);
+
+ // Check that the back reference does not end inside a surrogate pair.
+ if (compiler->unicode() && !compiler->one_byte()) {
+ assembler->CheckNotInSurrogatePair(trace->cp_offset(), trace->backtrack());
+ }
on_success()->Emit(compiler, trace);
}
@@ -4732,8 +4789,8 @@ RegExpNode* RegExpText::ToNode(RegExpCompiler* compiler,
static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
const int* special_class,
int length) {
- length--; // Remove final 0x10000.
- DCHECK(special_class[length] == 0x10000);
+ length--; // Remove final marker.
+ DCHECK(special_class[length] == kRangeEndMarker);
DCHECK(ranges->length() != 0);
DCHECK(length != 0);
DCHECK(special_class[0] != 0);
@@ -4753,7 +4810,7 @@ static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
return false;
}
}
- if (range.to() != 0xffff) {
+ if (range.to() != String::kMaxCodePoint) {
return false;
}
return true;
@@ -4763,8 +4820,8 @@ static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
static bool CompareRanges(ZoneList<CharacterRange>* ranges,
const int* special_class,
int length) {
- length--; // Remove final 0x10000.
- DCHECK(special_class[length] == 0x10000);
+ length--; // Remove final marker.
+ DCHECK(special_class[length] == kRangeEndMarker);
if (ranges->length() * 2 != length) {
return false;
}
@@ -4820,10 +4877,303 @@ bool RegExpCharacterClass::is_standard(Zone* zone) {
}
+UnicodeRangeSplitter::UnicodeRangeSplitter(Zone* zone,
+ ZoneList<CharacterRange>* base)
+ : zone_(zone),
+ table_(zone),
+ bmp_(nullptr),
+ lead_surrogates_(nullptr),
+ trail_surrogates_(nullptr),
+ non_bmp_(nullptr) {
+ // The unicode range splitter categorizes given character ranges into:
+ // - Code points from the BMP representable by one code unit.
+ // - Code points outside the BMP that need to be split into surrogate pairs.
+ // - Lone lead surrogates.
+ // - Lone trail surrogates.
+ // Lone surrogates are valid code points, even though they are not actual
+ // characters. They require special matching to make sure we do not split
+ // surrogate pairs. We use the dispatch table to accomplish this: the base
+ // ranges are split up by the overlay ranges, and the Call callback filters
+ // and collects the ranges for each category.
+ for (int i = 0; i < base->length(); i++) {
+ table_.AddRange(base->at(i), kBase, zone_);
+ }
+ // Add overlay ranges.
+ table_.AddRange(CharacterRange::Range(0, kLeadSurrogateStart - 1),
+ kBmpCodePoints, zone_);
+ table_.AddRange(CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd),
+ kLeadSurrogates, zone_);
+ table_.AddRange(
+ CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd),
+ kTrailSurrogates, zone_);
+ table_.AddRange(
+ CharacterRange::Range(kTrailSurrogateEnd + 1, kNonBmpStart - 1),
+ kBmpCodePoints, zone_);
+ table_.AddRange(CharacterRange::Range(kNonBmpStart, kNonBmpEnd),
+ kNonBmpCodePoints, zone_);
+ table_.ForEach(this);
+}
+
+
+void UnicodeRangeSplitter::Call(uc32 from, DispatchTable::Entry entry) {
+ OutSet* outset = entry.out_set();
+ if (!outset->Get(kBase)) return;
+ ZoneList<CharacterRange>** target = NULL;
+ if (outset->Get(kBmpCodePoints)) {
+ target = &bmp_;
+ } else if (outset->Get(kLeadSurrogates)) {
+ target = &lead_surrogates_;
+ } else if (outset->Get(kTrailSurrogates)) {
+ target = &trail_surrogates_;
+ } else {
+ DCHECK(outset->Get(kNonBmpCodePoints));
+ target = &non_bmp_;
+ }
+ if (*target == NULL) *target = new (zone_) ZoneList<CharacterRange>(2, zone_);
+ (*target)->Add(CharacterRange::Range(entry.from(), entry.to()), zone_);
+}
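For a concrete instance of the categorization, a single base range straddling all four classes splits as follows (values worked out from the overlay ranges added in the constructor above):

    // Input base range [0xD7FF, 0x10000] ends up distributed as:
    //   bmp()              -> {[0xD7FF, 0xD7FF], [0xE000, 0xFFFF]}
    //   lead_surrogates()  -> {[0xD800, 0xDBFF]}
    //   trail_surrogates() -> {[0xDC00, 0xDFFF]}
    //   non_bmp()          -> {[0x10000, 0x10000]}
    UnicodeRangeSplitter splitter(zone, ranges);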
+
+
+void AddBmpCharacters(RegExpCompiler* compiler, ChoiceNode* result,
+ RegExpNode* on_success, UnicodeRangeSplitter* splitter) {
+ ZoneList<CharacterRange>* bmp = splitter->bmp();
+ if (bmp == nullptr) return;
+ result->AddAlternative(GuardedAlternative(TextNode::CreateForCharacterRanges(
+ compiler->zone(), bmp, compiler->read_backward(), on_success)));
+}
+
+
+void AddNonBmpSurrogatePairs(RegExpCompiler* compiler, ChoiceNode* result,
+ RegExpNode* on_success,
+ UnicodeRangeSplitter* splitter) {
+ ZoneList<CharacterRange>* non_bmp = splitter->non_bmp();
+ if (non_bmp == nullptr) return;
+ DCHECK(compiler->unicode());
+ DCHECK(!compiler->one_byte());
+ Zone* zone = compiler->zone();
+ CharacterRange::Canonicalize(non_bmp);
+ for (int i = 0; i < non_bmp->length(); i++) {
+ // Match surrogate pair.
+ // E.g. [\u10005-\u11005] becomes
+ // \ud800[\udc05-\udfff]|
+ // [\ud801-\ud803][\udc00-\udfff]|
+ // \ud804[\udc00-\udc05]
+ uc32 from = non_bmp->at(i).from();
+ uc32 to = non_bmp->at(i).to();
+ uc16 from_l = unibrow::Utf16::LeadSurrogate(from);
+ uc16 from_t = unibrow::Utf16::TrailSurrogate(from);
+ uc16 to_l = unibrow::Utf16::LeadSurrogate(to);
+ uc16 to_t = unibrow::Utf16::TrailSurrogate(to);
+ if (from_l == to_l) {
+ // The lead surrogate is the same.
+ result->AddAlternative(
+ GuardedAlternative(TextNode::CreateForSurrogatePair(
+ zone, CharacterRange::Singleton(from_l),
+ CharacterRange::Range(from_t, to_t), compiler->read_backward(),
+ on_success)));
+ } else {
+ if (from_t != kTrailSurrogateStart) {
+ // Add [from_l][from_t-\udfff]
+ result->AddAlternative(
+ GuardedAlternative(TextNode::CreateForSurrogatePair(
+ zone, CharacterRange::Singleton(from_l),
+ CharacterRange::Range(from_t, kTrailSurrogateEnd),
+ compiler->read_backward(), on_success)));
+ from_l++;
+ }
+ if (to_t != kTrailSurrogateEnd) {
+ // Add [to_l][\udc00-to_t]
+ result->AddAlternative(
+ GuardedAlternative(TextNode::CreateForSurrogatePair(
+ zone, CharacterRange::Singleton(to_l),
+ CharacterRange::Range(kTrailSurrogateStart, to_t),
+ compiler->read_backward(), on_success)));
+ to_l--;
+ }
+ if (from_l <= to_l) {
+ // Add [from_l-to_l][\udc00-\udfff]
+ result->AddAlternative(
+ GuardedAlternative(TextNode::CreateForSurrogatePair(
+ zone, CharacterRange::Range(from_l, to_l),
+ CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd),
+ compiler->read_backward(), on_success)));
+ }
+ }
+ }
+}
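The lead/trail decomposition in the comment above ([\u10005-\u11005] and its three alternatives) follows directly from UTF-16 encoding arithmetic; a self-contained sketch of the two conversions (standard formulas, not copied from unibrow):

    #include <cstdint>

    uint16_t LeadSurrogate(uint32_t cp) {
      return 0xD800 + ((cp - 0x10000) >> 10);
    }
    uint16_t TrailSurrogate(uint32_t cp) {
      return 0xDC00 + ((cp - 0x10000) & 0x3FF);
    }

    // For the example above: 0x10005 -> D800/DC05 and 0x11005 -> D804/DC05,
    // which yields exactly the alternatives \ud800[\udc05-\udfff],
    // [\ud801-\ud803][\udc00-\udfff] and \ud804[\udc00-\udc05].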
+
+
+RegExpNode* NegativeLookaroundAgainstReadDirectionAndMatch(
+ RegExpCompiler* compiler, ZoneList<CharacterRange>* lookbehind,
+ ZoneList<CharacterRange>* match, RegExpNode* on_success,
+ bool read_backward) {
+ Zone* zone = compiler->zone();
+ RegExpNode* match_node = TextNode::CreateForCharacterRanges(
+ zone, match, read_backward, on_success);
+ int stack_register = compiler->UnicodeLookaroundStackRegister();
+ int position_register = compiler->UnicodeLookaroundPositionRegister();
+ RegExpLookaround::Builder lookaround(false, match_node, stack_register,
+ position_register);
+ RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
+ zone, lookbehind, !read_backward, lookaround.on_match_success());
+ return lookaround.ForMatch(negative_match);
+}
+
+
+RegExpNode* MatchAndNegativeLookaroundInReadDirection(
+ RegExpCompiler* compiler, ZoneList<CharacterRange>* match,
+ ZoneList<CharacterRange>* lookahead, RegExpNode* on_success,
+ bool read_backward) {
+ Zone* zone = compiler->zone();
+ int stack_register = compiler->UnicodeLookaroundStackRegister();
+ int position_register = compiler->UnicodeLookaroundPositionRegister();
+ RegExpLookaround::Builder lookaround(false, on_success, stack_register,
+ position_register);
+ RegExpNode* negative_match = TextNode::CreateForCharacterRanges(
+ zone, lookahead, read_backward, lookaround.on_match_success());
+ return TextNode::CreateForCharacterRanges(
+ zone, match, read_backward, lookaround.ForMatch(negative_match));
+}
+
+
+void AddLoneLeadSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
+ RegExpNode* on_success,
+ UnicodeRangeSplitter* splitter) {
+ ZoneList<CharacterRange>* lead_surrogates = splitter->lead_surrogates();
+ if (lead_surrogates == nullptr) return;
+ Zone* zone = compiler->zone();
+ // E.g. \ud801 becomes \ud801(?![\udc00-\udfff]).
+ ZoneList<CharacterRange>* trail_surrogates = CharacterRange::List(
+ zone, CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd));
+
+ RegExpNode* match;
+ if (compiler->read_backward()) {
+ // Reading backward. Assert that reading forward, there is no trail
+ // surrogate, and then backward match the lead surrogate.
+ match = NegativeLookaroundAgainstReadDirectionAndMatch(
+ compiler, trail_surrogates, lead_surrogates, on_success, true);
+ } else {
+ // Reading forward. Forward match the lead surrogate and assert that
+ // no trail surrogate follows.
+ match = MatchAndNegativeLookaroundInReadDirection(
+ compiler, lead_surrogates, trail_surrogates, on_success, false);
+ }
+ result->AddAlternative(GuardedAlternative(match));
+}
+
+
+void AddLoneTrailSurrogates(RegExpCompiler* compiler, ChoiceNode* result,
+ RegExpNode* on_success,
+ UnicodeRangeSplitter* splitter) {
+ ZoneList<CharacterRange>* trail_surrogates = splitter->trail_surrogates();
+ if (trail_surrogates == nullptr) return;
+ Zone* zone = compiler->zone();
+ // E.g. \udc01 becomes (?<![\ud800-\udbff])\udc01.
+ ZoneList<CharacterRange>* lead_surrogates = CharacterRange::List(
+ zone, CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
+
+ RegExpNode* match;
+ if (compiler->read_backward()) {
+ // Reading backward. Backward match the trail surrogate and assert that no
+ // lead surrogate precedes it.
+ match = MatchAndNegativeLookaroundInReadDirection(
+ compiler, trail_surrogates, lead_surrogates, on_success, true);
+ } else {
+ // Reading forward. Assert that reading backward, there is no lead
+ // surrogate, and then forward match the trail surrogate.
+ match = NegativeLookaroundAgainstReadDirectionAndMatch(
+ compiler, lead_surrogates, trail_surrogates, on_success, false);
+ }
+ result->AddAlternative(GuardedAlternative(match));
+}
+
+RegExpNode* UnanchoredAdvance(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ // This implements ES2015 21.2.5.2.3, AdvanceStringIndex.
+ DCHECK(!compiler->read_backward());
+ Zone* zone = compiler->zone();
+ // Advance any character. If the character happens to be a lead surrogate and
+ // we advanced into the middle of a surrogate pair, it will work out, as
+ // nothing will match from there. We will have to advance again, consuming
+ // the associated trail surrogate.
+ ZoneList<CharacterRange>* range = CharacterRange::List(
+ zone, CharacterRange::Range(0, String::kMaxUtf16CodeUnit));
+ return TextNode::CreateForCharacterRanges(zone, range, false, on_success);
+}
+
+
+void AddUnicodeCaseEquivalents(RegExpCompiler* compiler,
+ ZoneList<CharacterRange>* ranges) {
+#ifdef V8_I18N_SUPPORT
+ // Use ICU to compute the case fold closure over the ranges.
+ DCHECK(compiler->unicode());
+ DCHECK(compiler->ignore_case());
+ USet* set = uset_openEmpty();
+ for (int i = 0; i < ranges->length(); i++) {
+ uset_addRange(set, ranges->at(i).from(), ranges->at(i).to());
+ }
+ ranges->Clear();
+ uset_closeOver(set, USET_CASE_INSENSITIVE);
+ // Full case mappings map single characters to multiple characters.
+ // Those are represented as strings in the set. Remove them so that
+ // we end up with only simple and common case mappings.
+ uset_removeAllStrings(set);
+ int item_count = uset_getItemCount(set);
+ int item_result = 0;
+ UErrorCode ec = U_ZERO_ERROR;
+ Zone* zone = compiler->zone();
+ for (int i = 0; i < item_count; i++) {
+ uc32 start = 0;
+ uc32 end = 0;
+ item_result += uset_getItem(set, i, &start, &end, nullptr, 0, &ec);
+ ranges->Add(CharacterRange::Range(start, end), zone);
+ }
+ // No errors, and everything we collected was a range.
+ DCHECK_EQ(U_ZERO_ERROR, ec);
+ DCHECK_EQ(0, item_result);
+ uset_close(set);
+#else
+ // Fallback if ICU is not included.
+ CharacterRange::AddCaseEquivalents(compiler->isolate(), compiler->zone(),
+ ranges, compiler->one_byte());
+#endif // V8_I18N_SUPPORT
+ CharacterRange::Canonicalize(ranges);
+}
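To see what uset_closeOver contributes here, a minimal standalone use of the same ICU C API calls as above (the Kelvin-sign closure of 'k' is a classic case the /iu closure adds and the legacy mapping misses):

    #include "unicode/uset.h"

    USet* set = uset_openEmpty();
    uset_addRange(set, 'k', 'k');
    uset_closeOver(set, USET_CASE_INSENSITIVE);
    // The set now also contains 'K' and U+212A (KELVIN SIGN).
    uset_removeAllStrings(set);  // drop multi-character full case foldings
    uset_close(set);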
+
+
RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
- return new (compiler->zone())
- TextNode(this, compiler->read_backward(), on_success);
+ set_.Canonicalize();
+ Zone* zone = compiler->zone();
+ ZoneList<CharacterRange>* ranges = this->ranges(zone);
+ if (compiler->unicode() && compiler->ignore_case()) {
+ AddUnicodeCaseEquivalents(compiler, ranges);
+ }
+ if (compiler->unicode() && !compiler->one_byte()) {
+ if (is_negated()) {
+ ZoneList<CharacterRange>* negated =
+ new (zone) ZoneList<CharacterRange>(2, zone);
+ CharacterRange::Negate(ranges, negated, zone);
+ ranges = negated;
+ }
+ if (ranges->length() == 0) {
+ // No matches possible.
+ return new (zone) EndNode(EndNode::BACKTRACK, zone);
+ }
+ if (standard_type() == '*') {
+ return UnanchoredAdvance(compiler, on_success);
+ } else {
+ ChoiceNode* result = new (zone) ChoiceNode(2, zone);
+ UnicodeRangeSplitter splitter(zone, ranges);
+ AddBmpCharacters(compiler, result, on_success, &splitter);
+ AddNonBmpSurrogatePairs(compiler, result, on_success, &splitter);
+ AddLoneLeadSurrogates(compiler, result, on_success, &splitter);
+ AddLoneTrailSurrogates(compiler, result, on_success, &splitter);
+ return result;
+ }
+ } else {
+ return new (zone) TextNode(this, compiler->read_backward(), on_success);
+ }
}
@@ -5338,6 +5688,47 @@ RegExpNode* RegExpEmpty::ToNode(RegExpCompiler* compiler,
}
+RegExpLookaround::Builder::Builder(bool is_positive, RegExpNode* on_success,
+ int stack_pointer_register,
+ int position_register,
+ int capture_register_count,
+ int capture_register_start)
+ : is_positive_(is_positive),
+ on_success_(on_success),
+ stack_pointer_register_(stack_pointer_register),
+ position_register_(position_register) {
+ if (is_positive_) {
+ on_match_success_ = ActionNode::PositiveSubmatchSuccess(
+ stack_pointer_register, position_register, capture_register_count,
+ capture_register_start, on_success_);
+ } else {
+ Zone* zone = on_success_->zone();
+ on_match_success_ = new (zone) NegativeSubmatchSuccess(
+ stack_pointer_register, position_register, capture_register_count,
+ capture_register_start, zone);
+ }
+}
+
+
+RegExpNode* RegExpLookaround::Builder::ForMatch(RegExpNode* match) {
+ if (is_positive_) {
+ return ActionNode::BeginSubmatch(stack_pointer_register_,
+ position_register_, match);
+ } else {
+ Zone* zone = on_success_->zone();
+ // We use a ChoiceNode to represent the negative lookaround. The first
+ // alternative is the negative match. On success, the end node backtracks.
+ // On failure, the second alternative is tried and leads to success.
+ // NegativeLookaheadChoiceNode is a special ChoiceNode that ignores the
+ // first exit when calculating quick checks.
+ ChoiceNode* choice_node = new (zone) NegativeLookaroundChoiceNode(
+ GuardedAlternative(match), GuardedAlternative(on_success_), zone);
+ return ActionNode::BeginSubmatch(stack_pointer_register_,
+ position_register_, choice_node);
+ }
+}
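In regexp terms, the negative branch of ForMatch wires up the equivalence sketched here (informal notation for orientation, not code from this commit):

    // (?!body)rest   ~   Choice(
    //     body -> NegativeSubmatchSuccess   // body matches => backtrack
    //   , rest (on_success_) )              // tried when body fails => proceed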
+
+
RegExpNode* RegExpLookaround::ToNode(RegExpCompiler* compiler,
RegExpNode* on_success) {
int stack_pointer_register = compiler->AllocateRegister();
@@ -5352,35 +5743,10 @@ RegExpNode* RegExpLookaround::ToNode(RegExpCompiler* compiler,
RegExpNode* result;
bool was_reading_backward = compiler->read_backward();
compiler->set_read_backward(type() == LOOKBEHIND);
- if (is_positive()) {
- result = ActionNode::BeginSubmatch(
- stack_pointer_register, position_register,
- body()->ToNode(compiler,
- ActionNode::PositiveSubmatchSuccess(
- stack_pointer_register, position_register,
- register_count, register_start, on_success)));
- } else {
- // We use a ChoiceNode for a negative lookahead because it has most of
- // the characteristics we need. It has the body of the lookahead as its
- // first alternative and the expression after the lookahead of the second
- // alternative. If the first alternative succeeds then the
- // NegativeSubmatchSuccess will unwind the stack including everything the
- // choice node set up and backtrack. If the first alternative fails then
- // the second alternative is tried, which is exactly the desired result
- // for a negative lookahead. The NegativeLookaheadChoiceNode is a special
- // ChoiceNode that knows to ignore the first exit when calculating quick
- // checks.
- Zone* zone = compiler->zone();
-
- GuardedAlternative body_alt(
- body()->ToNode(compiler, new (zone) NegativeSubmatchSuccess(
- stack_pointer_register, position_register,
- register_count, register_start, zone)));
- ChoiceNode* choice_node = new (zone) NegativeLookaroundChoiceNode(
- body_alt, GuardedAlternative(on_success), zone);
- result = ActionNode::BeginSubmatch(stack_pointer_register,
- position_register, choice_node);
- }
+ Builder builder(is_positive(), on_success, stack_pointer_register,
+ position_register, register_count, register_start);
+ RegExpNode* match = body_->ToNode(compiler, builder.on_match_success());
+ result = builder.ForMatch(match);
compiler->set_read_backward(was_reading_backward);
return result;
}
@@ -5428,10 +5794,10 @@ static void AddClass(const int* elmv,
ZoneList<CharacterRange>* ranges,
Zone* zone) {
elmc--;
- DCHECK(elmv[elmc] == 0x10000);
+ DCHECK(elmv[elmc] == kRangeEndMarker);
for (int i = 0; i < elmc; i += 2) {
DCHECK(elmv[i] < elmv[i + 1]);
- ranges->Add(CharacterRange(elmv[i], elmv[i + 1] - 1), zone);
+ ranges->Add(CharacterRange::Range(elmv[i], elmv[i + 1] - 1), zone);
}
}
@@ -5441,17 +5807,17 @@ static void AddClassNegated(const int *elmv,
ZoneList<CharacterRange>* ranges,
Zone* zone) {
elmc--;
- DCHECK(elmv[elmc] == 0x10000);
+ DCHECK(elmv[elmc] == kRangeEndMarker);
DCHECK(elmv[0] != 0x0000);
- DCHECK(elmv[elmc-1] != String::kMaxUtf16CodeUnit);
+ DCHECK(elmv[elmc - 1] != String::kMaxCodePoint);
uc16 last = 0x0000;
for (int i = 0; i < elmc; i += 2) {
DCHECK(last <= elmv[i] - 1);
DCHECK(elmv[i] < elmv[i + 1]);
- ranges->Add(CharacterRange(last, elmv[i] - 1), zone);
+ ranges->Add(CharacterRange::Range(last, elmv[i] - 1), zone);
last = elmv[i + 1];
}
- ranges->Add(CharacterRange(last, String::kMaxUtf16CodeUnit), zone);
+ ranges->Add(CharacterRange::Range(last, String::kMaxCodePoint), zone);
}
@@ -5508,115 +5874,73 @@ Vector<const int> CharacterRange::GetWordBounds() {
}
-class CharacterRangeSplitter {
- public:
- CharacterRangeSplitter(ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded,
- Zone* zone)
- : included_(included),
- excluded_(excluded),
- zone_(zone) { }
- void Call(uc16 from, DispatchTable::Entry entry);
-
- static const int kInBase = 0;
- static const int kInOverlay = 1;
-
- private:
- ZoneList<CharacterRange>** included_;
- ZoneList<CharacterRange>** excluded_;
- Zone* zone_;
-};
-
-
-void CharacterRangeSplitter::Call(uc16 from, DispatchTable::Entry entry) {
- if (!entry.out_set()->Get(kInBase)) return;
- ZoneList<CharacterRange>** target = entry.out_set()->Get(kInOverlay)
- ? included_
- : excluded_;
- if (*target == NULL) *target = new(zone_) ZoneList<CharacterRange>(2, zone_);
- (*target)->Add(CharacterRange(entry.from(), entry.to()), zone_);
-}
-
-
-void CharacterRange::Split(ZoneList<CharacterRange>* base,
- Vector<const int> overlay,
- ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded,
- Zone* zone) {
- DCHECK_NULL(*included);
- DCHECK_NULL(*excluded);
- DispatchTable table(zone);
- for (int i = 0; i < base->length(); i++)
- table.AddRange(base->at(i), CharacterRangeSplitter::kInBase, zone);
- for (int i = 0; i < overlay.length(); i += 2) {
- table.AddRange(CharacterRange(overlay[i], overlay[i + 1] - 1),
- CharacterRangeSplitter::kInOverlay, zone);
- }
- CharacterRangeSplitter callback(included, excluded, zone);
- table.ForEach(&callback);
-}
-
-
void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
ZoneList<CharacterRange>* ranges,
bool is_one_byte) {
- uc16 bottom = from();
- uc16 top = to();
- if (is_one_byte && !RangeContainsLatin1Equivalents(*this)) {
- if (bottom > String::kMaxOneByteCharCode) return;
- if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
- }
- unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- if (top == bottom) {
- // If this is a singleton we just expand the one character.
- int length = isolate->jsregexp_uncanonicalize()->get(bottom, '\0', chars);
- for (int i = 0; i < length; i++) {
- uc32 chr = chars[i];
- if (chr != bottom) {
- ranges->Add(CharacterRange::Singleton(chars[i]), zone);
- }
- }
- } else {
- // If this is a range we expand the characters block by block,
- // expanding contiguous subranges (blocks) one at a time.
- // The approach is as follows. For a given start character we
- // look up the remainder of the block that contains it (represented
- // by the end point), for instance we find 'z' if the character
- // is 'c'. A block is characterized by the property
- // that all characters uncanonicalize in the same way, except that
- // each entry in the result is incremented by the distance from the first
- // element. So a-z is a block because 'a' uncanonicalizes to ['a', 'A'] and
- // the k'th letter uncanonicalizes to ['a' + k, 'A' + k].
- // Once we've found the end point we look up its uncanonicalization
- // and produce a range for each element. For instance for [c-f]
- // we look up ['z', 'Z'] and produce [c-f] and [C-F]. We then only
- // add a range if it is not already contained in the input, so [c-f]
- // will be skipped but [C-F] will be added. If this range is not
- // completely contained in a block we do this for all the blocks
- // covered by the range (handling characters that is not in a block
- // as a "singleton block").
- unibrow::uchar range[unibrow::Ecma262UnCanonicalize::kMaxWidth];
- int pos = bottom;
- while (pos <= top) {
- int length = isolate->jsregexp_canonrange()->get(pos, '\0', range);
- uc16 block_end;
- if (length == 0) {
- block_end = pos;
- } else {
- DCHECK_EQ(1, length);
- block_end = range[0];
- }
- int end = (block_end > top) ? top : block_end;
- length = isolate->jsregexp_uncanonicalize()->get(block_end, '\0', range);
+ int range_count = ranges->length();
+ for (int i = 0; i < range_count; i++) {
+ CharacterRange range = ranges->at(i);
+ uc32 bottom = range.from();
+ if (bottom > String::kMaxUtf16CodeUnit) return;
+ uc32 top = Min(range.to(), String::kMaxUtf16CodeUnit);
+ // Nothing to be done for surrogates.
+ if (bottom >= kLeadSurrogateStart && top <= kTrailSurrogateEnd) return;
+ if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
+ if (bottom > String::kMaxOneByteCharCode) return;
+ if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
+ }
+ unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+ if (top == bottom) {
+ // If this is a singleton we just expand the one character.
+ int length = isolate->jsregexp_uncanonicalize()->get(bottom, '\0', chars);
for (int i = 0; i < length; i++) {
- uc32 c = range[i];
- uc16 range_from = c - (block_end - pos);
- uc16 range_to = c - (block_end - end);
- if (!(bottom <= range_from && range_to <= top)) {
- ranges->Add(CharacterRange(range_from, range_to), zone);
+ uc32 chr = chars[i];
+ if (chr != bottom) {
+ ranges->Add(CharacterRange::Singleton(chars[i]), zone);
}
}
- pos = end + 1;
+ } else {
+ // If this is a range we expand the characters block by block, expanding
+ // contiguous subranges (blocks) one at a time. The approach is as
+ // follows. For a given start character we look up the remainder of the
+ // block that contains it (represented by the end point), for instance we
+ // find 'z' if the character is 'c'. A block is characterized by the
+ // property that all characters uncanonicalize in the same way, except
+ // that each entry in the result is incremented by the distance from the
+ // first element. So a-z is a block because 'a' uncanonicalizes to ['a',
+ // 'A'] and the k'th letter uncanonicalizes to ['a' + k, 'A' + k]. Once
+ // we've found the end point we look up its uncanonicalization and
+ // produce a range for each element. For instance for [c-f] we look up
+ // ['z', 'Z'] and produce [c-f] and [C-F]. We then only add a range if
+ // it is not already contained in the input, so [c-f] will be skipped but
+ // [C-F] will be added. If this range is not completely contained in a
+ // block we do this for all the blocks covered by the range (handling
+ // characters that are not in a block as a "singleton block").
+ unibrow::uchar equivalents[unibrow::Ecma262UnCanonicalize::kMaxWidth];
+ int pos = bottom;
+ while (pos <= top) {
+ int length =
+ isolate->jsregexp_canonrange()->get(pos, '\0', equivalents);
+ uc32 block_end;
+ if (length == 0) {
+ block_end = pos;
+ } else {
+ DCHECK_EQ(1, length);
+ block_end = equivalents[0];
+ }
+ int end = (block_end > top) ? top : block_end;
+ length = isolate->jsregexp_uncanonicalize()->get(block_end, '\0',
+ equivalents);
+ for (int i = 0; i < length; i++) {
+ uc32 c = equivalents[i];
+ uc32 range_from = c - (block_end - pos);
+ uc32 range_to = c - (block_end - end);
+ if (!(bottom <= range_from && range_to <= top)) {
+ ranges->Add(CharacterRange::Range(range_from, range_to), zone);
+ }
+ }
+ pos = end + 1;
+ }
}
}
}
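A worked trace of the block expansion described in the comment above, for the single range [c-f] (the mapping results are the standard ASCII ones, assumed here for illustration):

    // pos = 'c', top = 'f'
    // jsregexp_canonrange()->get('c')     => block_end = 'z'
    // end = Min(block_end, top)           => 'f'
    // jsregexp_uncanonicalize()->get('z') => {'z', 'Z'}
    //   c = 'z': range_from = 'z' - ('z'-'c') = 'c',
    //            range_to   = 'z' - ('z'-'f') = 'f'
    //            [c-f] lies inside [bottom, top]  -> skipped
    //   c = 'Z': range_from = 'C', range_to = 'F' -> [C-F] added
    // pos = 'f' + 1, loop ends.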
@@ -5672,8 +5996,8 @@ static int InsertRangeInCanonicalList(ZoneList<CharacterRange>* list,
// list[0..count] for the result. Returns the number of resulting
// canonicalized ranges. Inserting a range may collapse existing ranges into
// fewer ranges, so the return value can be anything in the range 1..count+1.
- uc16 from = insert.from();
- uc16 to = insert.to();
+ uc32 from = insert.from();
+ uc32 to = insert.to();
int start_pos = 0;
int end_pos = count;
for (int i = count - 1; i >= 0; i--) {
@@ -5706,7 +6030,7 @@ static int InsertRangeInCanonicalList(ZoneList<CharacterRange>* list,
CharacterRange to_replace = list->at(start_pos);
int new_from = Min(to_replace.from(), from);
int new_to = Max(to_replace.to(), to);
- list->at(start_pos) = CharacterRange(new_from, new_to);
+ list->at(start_pos) = CharacterRange::Range(new_from, new_to);
return count;
}
// Replace a number of existing ranges from start_pos to end_pos - 1.
@@ -5717,7 +6041,7 @@ static int InsertRangeInCanonicalList(ZoneList<CharacterRange>* list,
if (end_pos < count) {
MoveRanges(list, end_pos, start_pos + 1, count - end_pos);
}
- list->at(start_pos) = CharacterRange(new_from, new_to);
+ list->at(start_pos) = CharacterRange::Range(new_from, new_to);
return count - (end_pos - start_pos) + 1;
}
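
As a worked instance of the collapse described above: inserting d-g into the canonical list {a-c, f-m} merges all three into the single range a-m. A simplified sort-and-merge sketch of the same invariant (our own code; the real routine works in place and reports the new count):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    using Range = std::pair<uint32_t, uint32_t>;

    // Insert `insert` into a canonical list, merging overlapping or
    // adjacent ranges.
    void InsertCanonical(std::vector<Range>* list, Range insert) {
      list->push_back(insert);
      std::sort(list->begin(), list->end());
      std::vector<Range> merged;
      for (Range r : *list) {
        if (!merged.empty() && r.first <= merged.back().second + 1) {
          merged.back().second = std::max(merged.back().second, r.second);
        } else {
          merged.push_back(r);
        }
      }
      *list = merged;
    }

    int main() {
      std::vector<Range> list = {{'a', 'c'}, {'f', 'm'}};
      InsertCanonical(&list, {'d', 'g'});
      for (Range r : list)
        std::printf("%c-%c\n", (char)r.first, (char)r.second);  // a-m
    }
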
@@ -5773,20 +6097,20 @@ void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
DCHECK(CharacterRange::IsCanonical(ranges));
DCHECK_EQ(0, negated_ranges->length());
int range_count = ranges->length();
- uc16 from = 0;
+ uc32 from = 0;
int i = 0;
if (range_count > 0 && ranges->at(0).from() == 0) {
- from = ranges->at(0).to();
+ from = ranges->at(0).to() + 1;
i = 1;
}
while (i < range_count) {
CharacterRange range = ranges->at(i);
- negated_ranges->Add(CharacterRange(from + 1, range.from() - 1), zone);
- from = range.to();
+ negated_ranges->Add(CharacterRange::Range(from, range.from() - 1), zone);
+ from = range.to() + 1;
i++;
}
- if (from < String::kMaxUtf16CodeUnit) {
- negated_ranges->Add(CharacterRange(from + 1, String::kMaxUtf16CodeUnit),
+ if (from < String::kMaxCodePoint) {
+ negated_ranges->Add(CharacterRange::Range(from, String::kMaxCodePoint),
zone);
}
}
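
A standalone sketch of this negation loop, with plain vectors standing in for ZoneList (an assumed simplification):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    using Range = std::pair<uint32_t, uint32_t>;

    // Negate a canonical (sorted, non-overlapping, non-adjacent) range
    // list over [0, max], mirroring the loop above.
    std::vector<Range> Negate(const std::vector<Range>& ranges, uint32_t max) {
      std::vector<Range> out;
      uint32_t from = 0;
      size_t i = 0;
      if (!ranges.empty() && ranges[0].first == 0) {
        from = ranges[0].second + 1;
        i = 1;
      }
      for (; i < ranges.size(); i++) {
        out.push_back({from, ranges[i].first - 1});
        from = ranges[i].second + 1;
      }
      if (from < max) out.push_back({from, max});
      return out;
    }

    int main() {
      for (Range r : Negate({{0x00, 0x40}, {0x5B, 0x60}}, 0x7F))
        std::printf("[%02X-%02X]\n", r.first, r.second);
    }

Negating {[00-40], [5B-60]} over [0, 7F] prints [41-5A] and [61-7F], the complement within that interval.
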
@@ -5838,7 +6162,7 @@ bool OutSet::Get(unsigned value) const {
}
-const uc16 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
+const uc32 DispatchTable::Config::kNoKey = unibrow::Utf8::kBadChar;
void DispatchTable::AddRange(CharacterRange full_range, int value,
@@ -5866,8 +6190,9 @@ void DispatchTable::AddRange(CharacterRange full_range, int value,
if (entry->from() < current.from() && entry->to() >= current.from()) {
// Snap the overlapping range in half around the start point of
// the range we're adding.
- CharacterRange left(entry->from(), current.from() - 1);
- CharacterRange right(current.from(), entry->to());
+ CharacterRange left =
+ CharacterRange::Range(entry->from(), current.from() - 1);
+ CharacterRange right = CharacterRange::Range(current.from(), entry->to());
// The left part of the overlapping range doesn't overlap.
// Truncate the whole entry to be just the left part.
entry->set_to(left.to());
@@ -5919,10 +6244,6 @@ void DispatchTable::AddRange(CharacterRange full_range, int value,
// we're adding so we can just update it and move the start point
// of the range we're adding just past it.
entry->AddValue(value, zone);
- // Bail out if the last interval ended at 0xFFFF since otherwise
- // adding 1 will wrap around to 0.
- if (entry->to() == String::kMaxUtf16CodeUnit)
- break;
DCHECK(entry->to() + 1 > current.from());
current.set_from(entry->to() + 1);
} else {
@@ -5940,7 +6261,7 @@ void DispatchTable::AddRange(CharacterRange full_range, int value,
}
-OutSet* DispatchTable::Get(uc16 value) {
+OutSet* DispatchTable::Get(uc32 value) {
ZoneSplayTree<Config>::Locator loc;
if (!tree()->FindGreatestLessThan(value, &loc))
return empty();
@@ -5990,7 +6311,7 @@ void TextNode::CalculateOffsets() {
void Analysis::VisitText(TextNode* that) {
- if (ignore_case_) {
+ if (ignore_case()) {
that->MakeCaseIndependent(isolate(), is_one_byte_);
}
EnsureAnalyzed(that->on_success());
@@ -6173,8 +6494,7 @@ class AddDispatchRange {
void AddDispatchRange::Call(uc32 from, DispatchTable::Entry entry) {
- CharacterRange range(from, entry.to());
- constructor_->AddRange(range);
+ constructor_->AddRange(CharacterRange::Range(from, entry.to()));
}
@@ -6212,16 +6532,16 @@ void DispatchTableConstructor::AddInverse(ZoneList<CharacterRange>* ranges) {
for (int i = 0; i < ranges->length(); i++) {
CharacterRange range = ranges->at(i);
if (last < range.from())
- AddRange(CharacterRange(last, range.from() - 1));
+ AddRange(CharacterRange::Range(last, range.from() - 1));
if (range.to() >= last) {
- if (range.to() == String::kMaxUtf16CodeUnit) {
+ if (range.to() == String::kMaxCodePoint) {
return;
} else {
last = range.to() + 1;
}
}
}
- AddRange(CharacterRange(last, String::kMaxUtf16CodeUnit));
+ AddRange(CharacterRange::Range(last, String::kMaxCodePoint));
}
@@ -6230,7 +6550,7 @@ void DispatchTableConstructor::VisitText(TextNode* that) {
switch (elm.text_type()) {
case TextElement::ATOM: {
uc16 c = elm.atom()->data()[0];
- AddRange(CharacterRange(c, c));
+ AddRange(CharacterRange::Range(c, c));
break;
}
case TextElement::CHAR_CLASS: {
@@ -6257,14 +6577,48 @@ void DispatchTableConstructor::VisitAction(ActionNode* that) {
}
+RegExpNode* OptionallyStepBackToLeadSurrogate(RegExpCompiler* compiler,
+ RegExpNode* on_success) {
+ // If the regexp matching starts within a surrogate pair, step back
+ // to the lead surrogate and start matching from there.
+ DCHECK(!compiler->read_backward());
+ Zone* zone = compiler->zone();
+ ZoneList<CharacterRange>* lead_surrogates = CharacterRange::List(
+ zone, CharacterRange::Range(kLeadSurrogateStart, kLeadSurrogateEnd));
+ ZoneList<CharacterRange>* trail_surrogates = CharacterRange::List(
+ zone, CharacterRange::Range(kTrailSurrogateStart, kTrailSurrogateEnd));
+
+ ChoiceNode* optional_step_back = new (zone) ChoiceNode(2, zone);
+
+ int stack_register = compiler->UnicodeLookaroundStackRegister();
+ int position_register = compiler->UnicodeLookaroundPositionRegister();
+ RegExpNode* step_back = TextNode::CreateForCharacterRanges(
+ zone, lead_surrogates, true, on_success);
+ RegExpLookaround::Builder builder(true, step_back, stack_register,
+ position_register);
+ RegExpNode* match_trail = TextNode::CreateForCharacterRanges(
+ zone, trail_surrogates, false, builder.on_match_success());
+
+ optional_step_back->AddAlternative(
+ GuardedAlternative(builder.ForMatch(match_trail)));
+ optional_step_back->AddAlternative(GuardedAlternative(on_success));
+
+ return optional_step_back;
+}
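
In plain terms: when a global or sticky /u regexp resumes matching at an index that lands on a trail surrogate preceded by a lead surrogate, matching should begin at the lead surrogate instead. A direct sketch of that rule (helper names are ours, not V8 API):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    static bool IsLead(uint16_t u) { return u >= 0xD800 && u <= 0xDBFF; }
    static bool IsTrail(uint16_t u) { return u >= 0xDC00 && u <= 0xDFFF; }

    // If pos points at the trail half of a surrogate pair, step back to
    // the lead surrogate; otherwise leave pos unchanged.
    size_t StepBackToLeadSurrogate(const uint16_t* s, size_t len, size_t pos) {
      if (pos > 0 && pos < len && IsTrail(s[pos]) && IsLead(s[pos - 1])) {
        return pos - 1;
      }
      return pos;
    }

    int main() {
      const uint16_t emoji[] = {0xD83D, 0xDE00};  // U+1F600
      std::printf("%zu\n", StepBackToLeadSurrogate(emoji, 2, 1));  // 0
    }
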
+
+
RegExpEngine::CompilationResult RegExpEngine::Compile(
- Isolate* isolate, Zone* zone, RegExpCompileData* data, bool ignore_case,
- bool is_global, bool is_multiline, bool is_sticky, Handle<String> pattern,
+ Isolate* isolate, Zone* zone, RegExpCompileData* data,
+ JSRegExp::Flags flags, Handle<String> pattern,
Handle<String> sample_subject, bool is_one_byte) {
if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
return IrregexpRegExpTooBig(isolate);
}
- RegExpCompiler compiler(isolate, zone, data->capture_count, ignore_case,
+ bool ignore_case = flags & JSRegExp::kIgnoreCase;
+ bool is_sticky = flags & JSRegExp::kSticky;
+ bool is_global = flags & JSRegExp::kGlobal;
+ bool is_unicode = flags & JSRegExp::kUnicode;
+ RegExpCompiler compiler(isolate, zone, data->capture_count, flags,
is_one_byte);
if (compiler.optimize()) compiler.set_optimize(!TooMuchRegExpCode(pattern));
@@ -6316,11 +6670,13 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
if (node != NULL) {
node = node->FilterOneByte(RegExpCompiler::kMaxRecursion, ignore_case);
}
+ } else if (compiler.unicode() && (is_global || is_sticky)) {
+ node = OptionallyStepBackToLeadSurrogate(&compiler, node);
}
if (node == NULL) node = new(zone) EndNode(EndNode::BACKTRACK, zone);
data->node = node;
- Analysis analysis(isolate, ignore_case, is_one_byte);
+ Analysis analysis(isolate, flags, is_one_byte);
analysis.EnsureAnalyzed(node);
if (analysis.has_failed()) {
const char* error_message = analysis.error_message();
@@ -6381,10 +6737,13 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
}
if (is_global) {
- macro_assembler.set_global_mode(
- (data->tree->min_match() > 0)
- ? RegExpMacroAssembler::GLOBAL_NO_ZERO_LENGTH_CHECK
- : RegExpMacroAssembler::GLOBAL);
+ RegExpMacroAssembler::GlobalMode mode = RegExpMacroAssembler::GLOBAL;
+ if (data->tree->min_match() > 0) {
+ mode = RegExpMacroAssembler::GLOBAL_NO_ZERO_LENGTH_CHECK;
+ } else if (is_unicode) {
+ mode = RegExpMacroAssembler::GLOBAL_UNICODE;
+ }
+ macro_assembler.set_global_mode(mode);
}
return compiler.Assemble(&macro_assembler,
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index 0ad4b79c87..e55d650fab 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -8,6 +8,7 @@
#include "src/allocation.h"
#include "src/assembler.h"
#include "src/regexp/regexp-ast.h"
+#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
@@ -121,7 +122,6 @@ class RegExpImpl {
public:
GlobalCache(Handle<JSRegExp> regexp,
Handle<String> subject,
- bool is_global,
Isolate* isolate);
INLINE(~GlobalCache());
@@ -137,6 +137,8 @@ class RegExpImpl {
INLINE(bool HasException()) { return num_matches_ < 0; }
private:
+ int AdvanceZeroLength(int last_index);
+
int num_matches_;
int max_matches_;
int current_match_index_;
@@ -265,28 +267,30 @@ class DispatchTable : public ZoneObject {
class Entry {
public:
Entry() : from_(0), to_(0), out_set_(NULL) { }
- Entry(uc16 from, uc16 to, OutSet* out_set)
- : from_(from), to_(to), out_set_(out_set) { }
- uc16 from() { return from_; }
- uc16 to() { return to_; }
- void set_to(uc16 value) { to_ = value; }
+ Entry(uc32 from, uc32 to, OutSet* out_set)
+ : from_(from), to_(to), out_set_(out_set) {
+ DCHECK(from <= to);
+ }
+ uc32 from() { return from_; }
+ uc32 to() { return to_; }
+ void set_to(uc32 value) { to_ = value; }
void AddValue(int value, Zone* zone) {
out_set_ = out_set_->Extend(value, zone);
}
OutSet* out_set() { return out_set_; }
private:
- uc16 from_;
- uc16 to_;
+ uc32 from_;
+ uc32 to_;
OutSet* out_set_;
};
class Config {
public:
- typedef uc16 Key;
+ typedef uc32 Key;
typedef Entry Value;
- static const uc16 kNoKey;
+ static const uc32 kNoKey;
static const Entry NoValue() { return Value(); }
- static inline int Compare(uc16 a, uc16 b) {
+ static inline int Compare(uc32 a, uc32 b) {
if (a == b)
return 0;
else if (a < b)
@@ -297,7 +301,7 @@ class DispatchTable : public ZoneObject {
};
void AddRange(CharacterRange range, int value, Zone* zone);
- OutSet* Get(uc16 value);
+ OutSet* Get(uc32 value);
void Dump();
template <typename Callback>
@@ -315,6 +319,34 @@ class DispatchTable : public ZoneObject {
};
+// Categorizes character ranges into BMP, non-BMP, lead, and trail surrogates.
+class UnicodeRangeSplitter {
+ public:
+ UnicodeRangeSplitter(Zone* zone, ZoneList<CharacterRange>* base);
+ void Call(uc32 from, DispatchTable::Entry entry);
+
+ ZoneList<CharacterRange>* bmp() { return bmp_; }
+ ZoneList<CharacterRange>* lead_surrogates() { return lead_surrogates_; }
+ ZoneList<CharacterRange>* trail_surrogates() { return trail_surrogates_; }
+ ZoneList<CharacterRange>* non_bmp() const { return non_bmp_; }
+
+ private:
+ static const int kBase = 0;
+  // Separate ranges into the categories listed below.
+ static const int kBmpCodePoints = 1;
+ static const int kLeadSurrogates = 2;
+ static const int kTrailSurrogates = 3;
+ static const int kNonBmpCodePoints = 4;
+
+ Zone* zone_;
+ DispatchTable table_;
+ ZoneList<CharacterRange>* bmp_;
+ ZoneList<CharacterRange>* lead_surrogates_;
+ ZoneList<CharacterRange>* trail_surrogates_;
+ ZoneList<CharacterRange>* non_bmp_;
+};
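
A toy version of the four-way classification the splitter's dispatch table encodes (our own function, not the DispatchTable machinery above):

    #include <cstdint>
    #include <cstdio>

    enum Bucket { kBmp = 1, kLeadSurrogate, kTrailSurrogate, kNonBmp };

    Bucket Classify(uint32_t c) {
      if (c >= 0x10000) return kNonBmp;  // kNonBmpStart..kNonBmpEnd
      if (c >= 0xDC00 && c <= 0xDFFF) return kTrailSurrogate;
      if (c >= 0xD800 && c <= 0xDBFF) return kLeadSurrogate;
      return kBmp;
    }

    int main() {
      std::printf("%d %d %d %d\n", Classify('A'), Classify(0xD83D),
                  Classify(0xDE00), Classify(0x1F600));  // 1 2 3 4
    }
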
+
+
#define FOR_EACH_NODE_TYPE(VISIT) \
VISIT(End) \
VISIT(Action) \
@@ -690,6 +722,17 @@ class TextNode: public SeqRegExpNode {
read_backward_(read_backward) {
elms_->Add(TextElement::CharClass(that), zone());
}
+ // Create TextNode for a single character class for the given ranges.
+ static TextNode* CreateForCharacterRanges(Zone* zone,
+ ZoneList<CharacterRange>* ranges,
+ bool read_backward,
+ RegExpNode* on_success);
+  // Create TextNode for a surrogate pair, with one range given for the
+  // lead surrogate and one for the trail surrogate.
+ static TextNode* CreateForSurrogatePair(Zone* zone, CharacterRange lead,
+ CharacterRange trail,
+ bool read_backward,
+ RegExpNode* on_success);
virtual void Accept(NodeVisitor* visitor);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
@@ -813,8 +856,7 @@ class BackReferenceNode: public SeqRegExpNode {
class EndNode: public RegExpNode {
public:
enum Action { ACCEPT, BACKTRACK, NEGATIVE_SUBMATCH_SUCCESS };
- explicit EndNode(Action action, Zone* zone)
- : RegExpNode(zone), action_(action) { }
+ EndNode(Action action, Zone* zone) : RegExpNode(zone), action_(action) {}
virtual void Accept(NodeVisitor* visitor);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
virtual int EatsAtLeast(int still_to_find,
@@ -1440,9 +1482,9 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
// +-------+ +------------+
class Analysis: public NodeVisitor {
public:
- Analysis(Isolate* isolate, bool ignore_case, bool is_one_byte)
+ Analysis(Isolate* isolate, JSRegExp::Flags flags, bool is_one_byte)
: isolate_(isolate),
- ignore_case_(ignore_case),
+ flags_(flags),
is_one_byte_(is_one_byte),
error_message_(NULL) {}
void EnsureAnalyzed(RegExpNode* node);
@@ -1464,9 +1506,12 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT)
Isolate* isolate() const { return isolate_; }
+ bool ignore_case() const { return (flags_ & JSRegExp::kIgnoreCase) != 0; }
+ bool unicode() const { return (flags_ & JSRegExp::kUnicode) != 0; }
+
private:
Isolate* isolate_;
- bool ignore_case_;
+ JSRegExp::Flags flags_;
bool is_one_byte_;
const char* error_message_;
@@ -1505,8 +1550,8 @@ class RegExpEngine: public AllStatic {
};
static CompilationResult Compile(Isolate* isolate, Zone* zone,
- RegExpCompileData* input, bool ignore_case,
- bool global, bool multiline, bool sticky,
+ RegExpCompileData* input,
+ JSRegExp::Flags flags,
Handle<String> pattern,
Handle<String> sample_subject,
bool is_one_byte);
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 9c59328ed1..6197f45452 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -215,7 +215,7 @@ void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
__ lw(a0, register_location(start_reg)); // Index of start of capture.
__ lw(a1, register_location(start_reg + 1)); // Index of end of capture.
@@ -310,7 +310,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// a0: Address byte_offset1 - Address captured substring's start.
// a1: Address byte_offset2 - Address of current character position.
// a2: size_t byte_length - length of capture in bytes(!).
- // a3: Isolate* isolate.
+  // a3: Isolate* isolate or 0 if the unicode flag is set.
// Address of start of capture.
__ Addu(a0, a0, Operand(end_of_input_address()));
@@ -324,7 +324,14 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ Subu(a1, a1, Operand(s3));
}
// Isolate.
- __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
+#ifdef V8_I18N_SUPPORT
+ if (unicode) {
+ __ mov(a3, zero_reg);
+ } else // NOLINT
+#endif // V8_I18N_SUPPORT
+ {
+ __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
+ }
{
AllowExternalCallThatCantCauseGC scope(masm_);
@@ -801,9 +808,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Branch(&exit_label_, eq, current_input_offset(),
Operand(zero_reg));
// Advance current position after a zero-length match.
+ Label advance;
+ __ bind(&advance);
__ Addu(current_input_offset(),
current_input_offset(),
Operand((mode_ == UC16) ? 2 : 1));
+ if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
}
__ Branch(&load_char_start_regexp);
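
The label-and-branch sequence above reads as: after a zero-length match, advance one code unit, then keep advancing while the new position sits inside a surrogate pair. A C++ rendering of that reading (helper names assumed, not V8 API):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    static bool IsLead(uint16_t u) { return u >= 0xD800 && u <= 0xDBFF; }
    static bool IsTrail(uint16_t u) { return u >= 0xDC00 && u <= 0xDFFF; }

    // After a zero-length match at pos, advance so the next attempt does
    // not start in the middle of a surrogate pair (GLOBAL_UNICODE mode).
    size_t AdvanceZeroLength(const uint16_t* s, size_t len, size_t pos) {
      pos++;
      while (pos < len && IsTrail(s[pos]) && IsLead(s[pos - 1])) pos++;
      return pos;
    }

    int main() {
      const uint16_t s[] = {0xD83D, 0xDE00, 'x'};  // U+1F600 then 'x'
      std::printf("%zu\n", AdvanceZeroLength(s, 3, 0));  // 2, not 1
    }
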
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index 902e2208fe..6dedb1e748 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -37,7 +37,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 5153bd018b..bf95a9c97f 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -251,7 +251,7 @@ void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
__ ld(a0, register_location(start_reg)); // Index of start of capture.
__ ld(a1, register_location(start_reg + 1)); // Index of end of capture.
@@ -346,7 +346,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// a0: Address byte_offset1 - Address captured substring's start.
// a1: Address byte_offset2 - Address of current character position.
// a2: size_t byte_length - length of capture in bytes(!).
- // a3: Isolate* isolate.
+  // a3: Isolate* isolate or 0 if the unicode flag is set.
// Address of start of capture.
__ Daddu(a0, a0, Operand(end_of_input_address()));
@@ -360,7 +360,14 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ Dsubu(a1, a1, Operand(s3));
}
// Isolate.
- __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
+#ifdef V8_I18N_SUPPORT
+ if (unicode) {
+ __ mov(a3, zero_reg);
+ } else // NOLINT
+#endif // V8_I18N_SUPPORT
+ {
+ __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
+ }
{
AllowExternalCallThatCantCauseGC scope(masm_);
@@ -664,10 +671,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | fp.bit();
RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
- if (kMipsAbi == kN64) {
- // TODO(plind): Should probably alias a4-a7, for clarity.
- argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit();
- }
+ argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit();
__ MultiPush(argument_registers | registers_to_retain | ra.bit());
// Set frame pointer in space for it if this is not a direct call
@@ -841,9 +845,12 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Branch(&exit_label_, eq, current_input_offset(),
Operand(zero_reg));
// Advance current position after a zero-length match.
+ Label advance;
+ __ bind(&advance);
__ Daddu(current_input_offset(),
current_input_offset(),
Operand((mode_ == UC16) ? 2 : 1));
+ if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
}
__ Branch(&load_char_start_regexp);
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index 9a8ca179d5..df2c6c554f 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -37,7 +37,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
@@ -96,7 +96,6 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
void print_regexp_frame_constants();
private:
-#if defined(MIPS_ABI_N64)
// Offsets from frame_pointer() of function parameters and stored registers.
static const int kFramePointer = 0;
@@ -105,7 +104,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
-// TODO(plind): This 9 - is 8 s-regs (s0..s7) plus fp.
+ // TODO(plind): This 9 - is 8 s-regs (s0..s7) plus fp.
static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
@@ -131,43 +130,6 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// First register address. Following registers are below it on the stack.
static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
-#elif defined(MIPS_ABI_O32)
- // Offsets from frame_pointer() of function parameters and stored registers.
- static const int kFramePointer = 0;
-
- // Above the frame pointer - Stored registers and stack passed parameters.
- // Registers s0 to s7, fp, and ra.
- static const int kStoredRegisters = kFramePointer;
- // Return address (stored from link register, read into pc on return).
- static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
- static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
- // Stack frame header.
- static const int kStackFrameHeader = kReturnAddress + kPointerSize;
- // Stack parameters placed by caller.
- static const int kRegisterOutput =
- kStackFrameHeader + 4 * kPointerSize + kPointerSize;
- static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
- static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
- static const int kDirectCall = kStackHighEnd + kPointerSize;
- static const int kIsolate = kDirectCall + kPointerSize;
-
- // Below the frame pointer.
- // Register parameters stored by setup code.
- static const int kInputEnd = kFramePointer - kPointerSize;
- static const int kInputStart = kInputEnd - kPointerSize;
- static const int kStartIndex = kInputStart - kPointerSize;
- static const int kInputString = kStartIndex - kPointerSize;
- // When adding local variables remember to push space for them in
- // the frame in GetCode.
- static const int kSuccessfulCaptures = kInputString - kPointerSize;
- static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
- // First register address. Following registers are below it on the stack.
- static const int kRegisterZero = kStringStartMinusOne - kPointerSize;
-
-#else
-# error "undefined MIPS ABI"
-#endif
-
// Initial size of code buffer.
static const size_t kRegExpCodeSize = 1024;
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index f3ddf7bf98..c05c580e86 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -225,9 +225,8 @@ void RegExpMacroAssemblerPPC::CheckGreedyLoop(Label* on_equal) {
BranchOrBacktrack(eq, on_equal);
}
-
void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
__ LoadP(r3, register_location(start_reg), r0); // Index of start of capture
__ LoadP(r4, register_location(start_reg + 1), r0); // Index of end
@@ -322,7 +321,7 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
// r3: Address byte_offset1 - Address captured substring's start.
// r4: Address byte_offset2 - Address of current character position.
// r5: size_t byte_length - length of capture in bytes(!)
- // r6: Isolate* isolate
+  // r6: Isolate* isolate or 0 if the unicode flag is set.
// Address of start of capture.
__ add(r3, r3, end_of_input_address());
@@ -336,7 +335,14 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
__ sub(r4, r4, r25);
}
// Isolate.
- __ mov(r6, Operand(ExternalReference::isolate_address(isolate())));
+#ifdef V8_I18N_SUPPORT
+ if (unicode) {
+ __ li(r6, Operand::Zero());
+ } else // NOLINT
+#endif // V8_I18N_SUPPORT
+ {
+ __ mov(r6, Operand(ExternalReference::isolate_address(isolate())));
+ }
{
AllowExternalCallThatCantCauseGC scope(masm_);
@@ -845,8 +851,11 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ cmpi(current_input_offset(), Operand::Zero());
__ beq(&exit_label_);
// Advance current position after a zero-length match.
+ Label advance;
+ __ bind(&advance);
__ addi(current_input_offset(), current_input_offset(),
Operand((mode_ == UC16) ? 2 : 1));
+ if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
}
__ b(&load_char_start_regexp);
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 4d1836fc71..d2813872c4 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -38,7 +38,7 @@ class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
diff --git a/deps/v8/src/regexp/regexp-ast.cc b/deps/v8/src/regexp/regexp-ast.cc
index 31c93b114f..b5c2bb6d91 100644
--- a/deps/v8/src/regexp/regexp-ast.cc
+++ b/deps/v8/src/regexp/regexp-ast.cc
@@ -172,9 +172,9 @@ void* RegExpUnparser::VisitAlternative(RegExpAlternative* that, void* data) {
void RegExpUnparser::VisitCharacterRange(CharacterRange that) {
- os_ << AsUC16(that.from());
+ os_ << AsUC32(that.from());
if (!that.IsSingleton()) {
- os_ << "-" << AsUC16(that.to());
+ os_ << "-" << AsUC32(that.to());
}
}
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index f87778596a..0e718d3b4d 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -5,6 +5,7 @@
#ifndef V8_REGEXP_REGEXP_AST_H_
#define V8_REGEXP_REGEXP_AST_H_
+#include "src/objects.h"
#include "src/utils.h"
#include "src/zone.h"
@@ -77,33 +78,38 @@ class CharacterRange {
CharacterRange() : from_(0), to_(0) {}
// For compatibility with the CHECK_OK macro
CharacterRange(void* null) { DCHECK_NULL(null); } // NOLINT
- CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) {}
static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges,
Zone* zone);
static Vector<const int> GetWordBounds();
- static inline CharacterRange Singleton(uc16 value) {
+ static inline CharacterRange Singleton(uc32 value) {
return CharacterRange(value, value);
}
- static inline CharacterRange Range(uc16 from, uc16 to) {
- DCHECK(from <= to);
+ static inline CharacterRange Range(uc32 from, uc32 to) {
+ DCHECK(0 <= from && to <= String::kMaxCodePoint);
+ DCHECK(static_cast<uint32_t>(from) <= static_cast<uint32_t>(to));
return CharacterRange(from, to);
}
static inline CharacterRange Everything() {
- return CharacterRange(0, 0xFFFF);
+ return CharacterRange(0, String::kMaxCodePoint);
}
- bool Contains(uc16 i) { return from_ <= i && i <= to_; }
- uc16 from() const { return from_; }
- void set_from(uc16 value) { from_ = value; }
- uc16 to() const { return to_; }
- void set_to(uc16 value) { to_ = value; }
+ static inline ZoneList<CharacterRange>* List(Zone* zone,
+ CharacterRange range) {
+ ZoneList<CharacterRange>* list =
+ new (zone) ZoneList<CharacterRange>(1, zone);
+ list->Add(range, zone);
+ return list;
+ }
+ bool Contains(uc32 i) { return from_ <= i && i <= to_; }
+ uc32 from() const { return from_; }
+ void set_from(uc32 value) { from_ = value; }
+ uc32 to() const { return to_; }
+ void set_to(uc32 value) { to_ = value; }
bool is_valid() { return from_ <= to_; }
bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
bool IsSingleton() { return (from_ == to_); }
- void AddCaseEquivalents(Isolate* isolate, Zone* zone,
- ZoneList<CharacterRange>* ranges, bool is_one_byte);
- static void Split(ZoneList<CharacterRange>* base, Vector<const int> overlay,
- ZoneList<CharacterRange>** included,
- ZoneList<CharacterRange>** excluded, Zone* zone);
+ static void AddCaseEquivalents(Isolate* isolate, Zone* zone,
+ ZoneList<CharacterRange>* ranges,
+ bool is_one_byte);
// Whether a range list is in canonical form: Ranges ordered by from value,
// and ranges non-overlapping and non-adjacent.
static bool IsCanonical(ZoneList<CharacterRange>* ranges);
@@ -119,8 +125,10 @@ class CharacterRange {
static const int kPayloadMask = (1 << 24) - 1;
private:
- uc16 from_;
- uc16 to_;
+ CharacterRange(uc32 from, uc32 to) : from_(from), to_(to) {}
+
+ uc32 from_;
+ uc32 to_;
};
@@ -303,8 +311,8 @@ class RegExpCharacterClass final : public RegExpTree {
// W : non-ASCII word character
// d : ASCII digit
// D : non-ASCII digit
- // . : non-unicode non-newline
- // * : All characters
+ // . : non-newline
+  //   * : All characters, for advancing an unanchored regexp
uc16 standard_type() { return set_.standard_set_type(); }
ZoneList<CharacterRange>* ranges(Zone* zone) { return set_.ranges(zone); }
bool is_negated() { return is_negated_; }
@@ -451,6 +459,22 @@ class RegExpLookaround final : public RegExpTree {
int capture_from() { return capture_from_; }
Type type() { return type_; }
+ class Builder {
+ public:
+ Builder(bool is_positive, RegExpNode* on_success,
+ int stack_pointer_register, int position_register,
+ int capture_register_count = 0, int capture_register_start = 0);
+ RegExpNode* on_match_success() { return on_match_success_; }
+ RegExpNode* ForMatch(RegExpNode* match);
+
+ private:
+ bool is_positive_;
+ RegExpNode* on_match_success_;
+ RegExpNode* on_success_;
+ int stack_pointer_register_;
+ int position_register_;
+ };
+
private:
RegExpTree* body_;
bool is_positive_;
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h b/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
index 4d0b1bc0a7..a60212903d 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp-inl.h
@@ -5,14 +5,14 @@
#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
+#ifdef V8_INTERPRETED_REGEXP
+
#include "src/ast/ast.h"
#include "src/regexp/bytecodes-irregexp.h"
namespace v8 {
namespace internal {
-#ifdef V8_INTERPRETED_REGEXP
-
void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
uint32_t twenty_four_bits) {
uint32_t word = ((twenty_four_bits << BYTECODE_SHIFT) | byte);
@@ -54,9 +54,9 @@ void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
pc_ += 4;
}
-#endif // V8_INTERPRETED_REGEXP
-
} // namespace internal
} // namespace v8
+#endif // V8_INTERPRETED_REGEXP
+
#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_INL_H_
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
index 751ee441c8..a0bb5e7d73 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifdef V8_INTERPRETED_REGEXP
+
#include "src/regexp/regexp-macro-assembler-irregexp.h"
#include "src/ast/ast.h"
@@ -9,12 +11,9 @@
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-macro-assembler-irregexp-inl.h"
-
namespace v8 {
namespace internal {
-#ifdef V8_INTERPRETED_REGEXP
-
RegExpMacroAssemblerIrregexp::RegExpMacroAssemblerIrregexp(Isolate* isolate,
Vector<byte> buffer,
Zone* zone)
@@ -382,11 +381,13 @@ void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg,
void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_not_equal) {
+ int start_reg, bool read_backward, bool unicode, Label* on_not_equal) {
DCHECK(start_reg >= 0);
DCHECK(start_reg <= kMaxRegister);
- Emit(read_backward ? BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD
- : BC_CHECK_NOT_BACK_REF_NO_CASE,
+ Emit(read_backward ? (unicode ? BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE_BACKWARD
+ : BC_CHECK_NOT_BACK_REF_NO_CASE_BACKWARD)
+ : (unicode ? BC_CHECK_NOT_BACK_REF_NO_CASE_UNICODE
+ : BC_CHECK_NOT_BACK_REF_NO_CASE),
start_reg);
EmitOrLink(on_not_equal);
}
@@ -454,7 +455,7 @@ void RegExpMacroAssemblerIrregexp::Expand() {
}
}
-#endif // V8_INTERPRETED_REGEXP
-
} // namespace internal
} // namespace v8
+
+#endif // V8_INTERPRETED_REGEXP
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
index f1ace63a74..dad2e9ac73 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.h
@@ -5,13 +5,13 @@
#ifndef V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
+#ifdef V8_INTERPRETED_REGEXP
+
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
-#ifdef V8_INTERPRETED_REGEXP
-
// A light-weight assembler for the Irregexp byte code.
class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
public:
@@ -85,7 +85,7 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void IfRegisterLT(int register_index, int comparand, Label* if_lt);
virtual void IfRegisterGE(int register_index, int comparand, Label* if_ge);
@@ -125,9 +125,9 @@ class RegExpMacroAssemblerIrregexp: public RegExpMacroAssembler {
DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMacroAssemblerIrregexp);
};
-#endif // V8_INTERPRETED_REGEXP
-
} // namespace internal
} // namespace v8
+#endif // V8_INTERPRETED_REGEXP
+
#endif // V8_REGEXP_REGEXP_MACRO_ASSEMBLER_IRREGEXP_H_
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
index 5301ead69b..ec86526033 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -360,11 +360,11 @@ void RegExpMacroAssemblerTracer::CheckNotBackReference(int start_reg,
void RegExpMacroAssemblerTracer::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
- PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, %s, label[%08x]);\n",
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ PrintF(" CheckNotBackReferenceIgnoreCase(register=%d, %s %s, label[%08x]);\n",
start_reg, read_backward ? "backward" : "forward",
- LabelToInt(on_no_match));
- assembler_->CheckNotBackReferenceIgnoreCase(start_reg, read_backward,
+ unicode ? "unicode" : "non-unicode", LabelToInt(on_no_match));
+ assembler_->CheckNotBackReferenceIgnoreCase(start_reg, read_backward, unicode,
on_no_match);
}
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
index 77377aac31..8a9ebe3683 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.h
@@ -34,7 +34,7 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(unsigned c,
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index caf8b51fe5..9bb5073a8b 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -9,6 +9,10 @@
#include "src/regexp/regexp-stack.h"
#include "src/simulator.h"
+#ifdef V8_I18N_SUPPORT
+#include "unicode/uchar.h"
+#endif // V8_I18N_SUPPORT
+
namespace v8 {
namespace internal {
@@ -23,6 +27,80 @@ RegExpMacroAssembler::~RegExpMacroAssembler() {
}
+int RegExpMacroAssembler::CaseInsensitiveCompareUC16(Address byte_offset1,
+ Address byte_offset2,
+ size_t byte_length,
+ Isolate* isolate) {
+ unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
+ isolate->regexp_macro_assembler_canonicalize();
+ // This function is not allowed to cause a garbage collection.
+ // A GC might move the calling generated code and invalidate the
+ // return address on the stack.
+ DCHECK(byte_length % 2 == 0);
+ uc16* substring1 = reinterpret_cast<uc16*>(byte_offset1);
+ uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2);
+ size_t length = byte_length >> 1;
+
+#ifdef V8_I18N_SUPPORT
+ if (isolate == nullptr) {
+ for (size_t i = 0; i < length; i++) {
+ uc32 c1 = substring1[i];
+ uc32 c2 = substring2[i];
+ if (unibrow::Utf16::IsLeadSurrogate(c1)) {
+ // Non-BMP characters do not have case-equivalents in the BMP.
+ // Both have to be non-BMP for them to be able to match.
+ if (!unibrow::Utf16::IsLeadSurrogate(c2)) return 0;
+ if (i + 1 < length) {
+ uc16 c1t = substring1[i + 1];
+ uc16 c2t = substring2[i + 1];
+ if (unibrow::Utf16::IsTrailSurrogate(c1t) &&
+ unibrow::Utf16::IsTrailSurrogate(c2t)) {
+ c1 = unibrow::Utf16::CombineSurrogatePair(c1, c1t);
+ c2 = unibrow::Utf16::CombineSurrogatePair(c2, c2t);
+ i++;
+ }
+ }
+ }
+ c1 = u_foldCase(c1, U_FOLD_CASE_DEFAULT);
+ c2 = u_foldCase(c2, U_FOLD_CASE_DEFAULT);
+ if (c1 != c2) return 0;
+ }
+ return 1;
+ }
+#endif // V8_I18N_SUPPORT
+ DCHECK_NOT_NULL(isolate);
+ for (size_t i = 0; i < length; i++) {
+ unibrow::uchar c1 = substring1[i];
+ unibrow::uchar c2 = substring2[i];
+ if (c1 != c2) {
+ unibrow::uchar s1[1] = {c1};
+ canonicalize->get(c1, '\0', s1);
+ if (s1[0] != c2) {
+ unibrow::uchar s2[1] = {c2};
+ canonicalize->get(c2, '\0', s2);
+ if (s1[0] != s2[0]) {
+ return 0;
+ }
+ }
+ }
+ }
+ return 1;
+}
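
The ICU path folds case per code point only after combining surrogate pairs. The combination is standard UTF-16 arithmetic, equivalent to unibrow::Utf16::CombineSurrogatePair; written out (our own helper):

    #include <cstdint>
    #include <cstdio>

    // U+10000 + ((lead - 0xD800) << 10) + (trail - 0xDC00)
    uint32_t CombineSurrogatePair(uint16_t lead, uint16_t trail) {
      return 0x10000u + ((static_cast<uint32_t>(lead) - 0xD800u) << 10) +
             (static_cast<uint32_t>(trail) - 0xDC00u);
    }

    int main() {
      std::printf("%X\n", CombineSurrogatePair(0xD83D, 0xDE00));  // 1F600
    }
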
+
+
+void RegExpMacroAssembler::CheckNotInSurrogatePair(int cp_offset,
+ Label* on_failure) {
+ Label ok;
+ // Check that current character is not a trail surrogate.
+ LoadCurrentCharacter(cp_offset, &ok);
+ CheckCharacterNotInRange(kTrailSurrogateStart, kTrailSurrogateEnd, &ok);
+ // Check that previous character is not a lead surrogate.
+ LoadCurrentCharacter(cp_offset - 1, &ok);
+ CheckCharacterInRange(kLeadSurrogateStart, kLeadSurrogateEnd, on_failure);
+ Bind(&ok);
+}
+
+
#ifndef V8_INTERPRETED_REGEXP // Avoid unused code, e.g., on ARM.
NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(Isolate* isolate,
@@ -245,40 +323,6 @@ const byte NativeRegExpMacroAssembler::word_character_map[] = {
};
-int NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16(
- Address byte_offset1,
- Address byte_offset2,
- size_t byte_length,
- Isolate* isolate) {
- unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
- isolate->regexp_macro_assembler_canonicalize();
- // This function is not allowed to cause a garbage collection.
- // A GC might move the calling generated code and invalidate the
- // return address on the stack.
- DCHECK(byte_length % 2 == 0);
- uc16* substring1 = reinterpret_cast<uc16*>(byte_offset1);
- uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2);
- size_t length = byte_length >> 1;
-
- for (size_t i = 0; i < length; i++) {
- unibrow::uchar c1 = substring1[i];
- unibrow::uchar c2 = substring2[i];
- if (c1 != c2) {
- unibrow::uchar s1[1] = { c1 };
- canonicalize->get(c1, '\0', s1);
- if (s1[0] != c2) {
- unibrow::uchar s2[1] = { c2 };
- canonicalize->get(c2, '\0', s2);
- if (s1[0] != s2[0]) {
- return 0;
- }
- }
- }
- }
- return 1;
-}
-
-
Address NativeRegExpMacroAssembler::GrowStack(Address stack_pointer,
Address* stack_base,
Isolate* isolate) {
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index 20599334cd..6f79a16540 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -11,6 +11,13 @@
namespace v8 {
namespace internal {
+static const uc32 kLeadSurrogateStart = 0xd800;
+static const uc32 kLeadSurrogateEnd = 0xdbff;
+static const uc32 kTrailSurrogateStart = 0xdc00;
+static const uc32 kTrailSurrogateEnd = 0xdfff;
+static const uc32 kNonBmpStart = 0x10000;
+static const uc32 kNonBmpEnd = 0x10ffff;
+
struct DisjunctDecisionRow {
RegExpCharacterClass cc;
Label* on_match;
@@ -76,7 +83,7 @@ class RegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match) = 0;
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match) = 0;
// Check the current character for a match with a literal character. If we
// fail to match then goto the on_failure label. End of input always
@@ -146,25 +153,40 @@ class RegExpMacroAssembler {
virtual void ClearRegisters(int reg_from, int reg_to) = 0;
virtual void WriteStackPointerToRegister(int reg) = 0;
+ // Compares two-byte strings case insensitively.
+ // Called from generated RegExp code.
+ static int CaseInsensitiveCompareUC16(Address byte_offset1,
+ Address byte_offset2,
+ size_t byte_length, Isolate* isolate);
+
+ // Check that we are not in the middle of a surrogate pair.
+ void CheckNotInSurrogatePair(int cp_offset, Label* on_failure);
+
// Controls the generation of large inlined constants in the code.
void set_slow_safe(bool ssc) { slow_safe_compiler_ = ssc; }
bool slow_safe() { return slow_safe_compiler_; }
- enum GlobalMode { NOT_GLOBAL, GLOBAL, GLOBAL_NO_ZERO_LENGTH_CHECK };
+ enum GlobalMode {
+ NOT_GLOBAL,
+ GLOBAL_NO_ZERO_LENGTH_CHECK,
+ GLOBAL,
+ GLOBAL_UNICODE
+ };
// Set whether the regular expression has the global flag. Exiting due to
// a failure in a global regexp may still mean success overall.
inline void set_global_mode(GlobalMode mode) { global_mode_ = mode; }
inline bool global() { return global_mode_ != NOT_GLOBAL; }
inline bool global_with_zero_length_check() {
- return global_mode_ == GLOBAL;
+ return global_mode_ == GLOBAL || global_mode_ == GLOBAL_UNICODE;
}
+ inline bool global_unicode() { return global_mode_ == GLOBAL_UNICODE; }
Isolate* isolate() const { return isolate_; }
Zone* zone() const { return zone_; }
private:
bool slow_safe_compiler_;
- bool global_mode_;
+ GlobalMode global_mode_;
Isolate* isolate_;
Zone* zone_;
};
@@ -199,13 +221,6 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
int previous_index,
Isolate* isolate);
- // Compares two-byte strings case insensitively.
- // Called from generated RegExp code.
- static int CaseInsensitiveCompareUC16(Address byte_offset1,
- Address byte_offset2,
- size_t byte_length,
- Isolate* isolate);
-
// Called from RegExp if the backtrack stack limit is hit.
// Tries to expand the stack. Returns the new stack-pointer if
// successful, and updates the stack_top address, or returns 0 if unable
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index fa8900342c..2fe6fde82a 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -8,27 +8,32 @@
#include "src/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
+#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/utils.h"
+#ifdef V8_I18N_SUPPORT
+#include "unicode/uset.h"
+#endif // V8_I18N_SUPPORT
+
namespace v8 {
namespace internal {
RegExpParser::RegExpParser(FlatStringReader* in, Handle<String>* error,
- bool multiline, bool unicode, Isolate* isolate,
- Zone* zone)
+ JSRegExp::Flags flags, Isolate* isolate, Zone* zone)
: isolate_(isolate),
zone_(zone),
error_(error),
captures_(NULL),
in_(in),
current_(kEndMarker),
+ ignore_case_(flags & JSRegExp::kIgnoreCase),
+ multiline_(flags & JSRegExp::kMultiline),
+ unicode_(flags & JSRegExp::kUnicode),
next_pos_(0),
captures_started_(0),
capture_count_(0),
has_more_(true),
- multiline_(multiline),
- unicode_(unicode),
simple_(false),
contains_anchor_(false),
is_scanned_for_captures_(false),
@@ -36,10 +41,28 @@ RegExpParser::RegExpParser(FlatStringReader* in, Handle<String>* error,
Advance();
}
+template <bool update_position>
+inline uc32 RegExpParser::ReadNext() {
+ int position = next_pos_;
+ uc32 c0 = in()->Get(position);
+ position++;
+  // With the unicode flag, read the whole surrogate pair if possible.
+ if (unicode() && position < in()->length() &&
+ unibrow::Utf16::IsLeadSurrogate(static_cast<uc16>(c0))) {
+ uc16 c1 = in()->Get(position);
+ if (unibrow::Utf16::IsTrailSurrogate(c1)) {
+ c0 = unibrow::Utf16::CombineSurrogatePair(static_cast<uc16>(c0), c1);
+ position++;
+ }
+ }
+ if (update_position) next_pos_ = position;
+ return c0;
+}
+
uc32 RegExpParser::Next() {
if (has_next()) {
- return in()->Get(next_pos_);
+ return ReadNext<false>();
} else {
return kEndMarker;
}
@@ -47,25 +70,14 @@ uc32 RegExpParser::Next() {
void RegExpParser::Advance() {
- if (next_pos_ < in()->length()) {
+ if (has_next()) {
StackLimitCheck check(isolate());
if (check.HasOverflowed()) {
ReportError(CStrVector(Isolate::kStackOverflowMessage));
} else if (zone()->excess_allocation()) {
ReportError(CStrVector("Regular expression too large"));
} else {
- current_ = in()->Get(next_pos_);
- next_pos_++;
- // Read the whole surrogate pair in case of unicode flag, if possible.
- if (unicode_ && next_pos_ < in()->length() &&
- unibrow::Utf16::IsLeadSurrogate(static_cast<uc16>(current_))) {
- uc16 trail = in()->Get(next_pos_);
- if (unibrow::Utf16::IsTrailSurrogate(trail)) {
- current_ = unibrow::Utf16::CombineSurrogatePair(
- static_cast<uc16>(current_), trail);
- next_pos_++;
- }
- }
+ current_ = ReadNext<true>();
}
} else {
current_ = kEndMarker;
@@ -92,11 +104,28 @@ void RegExpParser::Advance(int dist) {
bool RegExpParser::simple() { return simple_; }
-
-bool RegExpParser::IsSyntaxCharacter(uc32 c) {
- return c == '^' || c == '$' || c == '\\' || c == '.' || c == '*' ||
- c == '+' || c == '?' || c == '(' || c == ')' || c == '[' || c == ']' ||
- c == '{' || c == '}' || c == '|';
+bool RegExpParser::IsSyntaxCharacterOrSlash(uc32 c) {
+ switch (c) {
+ case '^':
+ case '$':
+ case '\\':
+ case '.':
+ case '*':
+ case '+':
+ case '?':
+ case '(':
+ case ')':
+ case '[':
+ case ']':
+ case '{':
+ case '}':
+ case '|':
+ case '/':
+ return true;
+ default:
+ break;
+ }
+ return false;
}
@@ -142,7 +171,7 @@ RegExpTree* RegExpParser::ParsePattern() {
RegExpTree* RegExpParser::ParseDisjunction() {
// Used to store current state while parsing subexpressions.
RegExpParserState initial_state(NULL, INITIAL, RegExpLookaround::LOOKAHEAD, 0,
- zone());
+ ignore_case(), unicode(), zone());
RegExpParserState* state = &initial_state;
// Cache the builder in a local variable for quick access.
RegExpBuilder* builder = initial_state.builder();
@@ -151,14 +180,14 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case kEndMarker:
if (state->IsSubexpression()) {
// Inside a parenthesized group when hitting end of input.
- ReportError(CStrVector("Unterminated group") CHECK_FAILED);
+ return ReportError(CStrVector("Unterminated group"));
}
DCHECK_EQ(INITIAL, state->group_type());
// Parsing completed successfully.
return builder->ToRegExp();
case ')': {
if (!state->IsSubexpression()) {
- ReportError(CStrVector("Unmatched ')'") CHECK_FAILED);
+ return ReportError(CStrVector("Unmatched ')'"));
}
DCHECK_NE(INITIAL, state->group_type());
@@ -206,7 +235,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
return ReportError(CStrVector("Nothing to repeat"));
case '^': {
Advance();
- if (multiline_) {
+ if (multiline()) {
builder->AddAssertion(
new (zone()) RegExpAssertion(RegExpAssertion::START_OF_LINE));
} else {
@@ -219,8 +248,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '$': {
Advance();
RegExpAssertion::AssertionType assertion_type =
- multiline_ ? RegExpAssertion::END_OF_LINE
- : RegExpAssertion::END_OF_INPUT;
+ multiline() ? RegExpAssertion::END_OF_LINE
+ : RegExpAssertion::END_OF_INPUT;
builder->AddAssertion(new (zone()) RegExpAssertion(assertion_type));
continue;
}
@@ -230,8 +259,9 @@ RegExpTree* RegExpParser::ParseDisjunction() {
ZoneList<CharacterRange>* ranges =
new (zone()) ZoneList<CharacterRange>(2, zone());
CharacterRange::AddClassEscape('.', ranges, zone());
- RegExpTree* atom = new (zone()) RegExpCharacterClass(ranges, false);
- builder->AddAtom(atom);
+ RegExpCharacterClass* cc =
+ new (zone()) RegExpCharacterClass(ranges, false);
+ builder->AddCharacterClass(cc);
break;
}
case '(': {
@@ -265,25 +295,25 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
// Fall through.
default:
- ReportError(CStrVector("Invalid group") CHECK_FAILED);
- break;
+ return ReportError(CStrVector("Invalid group"));
}
Advance(2);
} else {
if (captures_started_ >= kMaxCaptures) {
- ReportError(CStrVector("Too many captures") CHECK_FAILED);
+ return ReportError(CStrVector("Too many captures"));
}
captures_started_++;
}
// Store current state and begin new disjunction parsing.
state = new (zone()) RegExpParserState(
- state, subexpr_type, lookaround_type, captures_started_, zone());
+ state, subexpr_type, lookaround_type, captures_started_,
+ ignore_case(), unicode(), zone());
builder = state->builder();
continue;
}
case '[': {
- RegExpTree* atom = ParseCharacterClass(CHECK_FAILED);
- builder->AddAtom(atom);
+ RegExpTree* cc = ParseCharacterClass(CHECK_FAILED);
+ builder->AddCharacterClass(cc->AsCharacterClass());
break;
}
// Atom ::
@@ -318,8 +348,26 @@ RegExpTree* RegExpParser::ParseDisjunction() {
ZoneList<CharacterRange>* ranges =
new (zone()) ZoneList<CharacterRange>(2, zone());
CharacterRange::AddClassEscape(c, ranges, zone());
- RegExpTree* atom = new (zone()) RegExpCharacterClass(ranges, false);
- builder->AddAtom(atom);
+ RegExpCharacterClass* cc =
+ new (zone()) RegExpCharacterClass(ranges, false);
+ builder->AddCharacterClass(cc);
+ break;
+ }
+ case 'p':
+ case 'P': {
+ uc32 p = Next();
+ Advance(2);
+ if (unicode()) {
+ ZoneList<CharacterRange>* ranges = ParsePropertyClass();
+ if (ranges == nullptr) {
+ return ReportError(CStrVector("Invalid property name"));
+ }
+ RegExpCharacterClass* cc =
+ new (zone()) RegExpCharacterClass(ranges, p == 'P');
+ builder->AddCharacterClass(cc);
+ } else {
+ builder->AddCharacter(p);
+ }
break;
}
case '1':
@@ -332,7 +380,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '8':
case '9': {
int index = 0;
- if (ParseBackReferenceIndex(&index)) {
+ bool is_backref = ParseBackReferenceIndex(&index CHECK_FAILED);
+ if (is_backref) {
if (state->IsInsideCaptureGroup(index)) {
// The back reference is inside the capture group it refers to.
// Nothing can possibly have been captured yet, so we use empty
@@ -347,24 +396,25 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
break;
}
+ // With /u, no identity escapes except for syntax characters
+ // are allowed. Otherwise, all identity escapes are allowed.
+ if (unicode()) {
+ return ReportError(CStrVector("Invalid escape"));
+ }
uc32 first_digit = Next();
if (first_digit == '8' || first_digit == '9') {
- // If the 'u' flag is present, only syntax characters can be
- // escaped,
- // no other identity escapes are allowed. If the 'u' flag is not
- // present, all identity escapes are allowed.
- if (!unicode_) {
- builder->AddCharacter(first_digit);
- Advance(2);
- } else {
- return ReportError(CStrVector("Invalid escape"));
- }
+ builder->AddCharacter(first_digit);
+ Advance(2);
break;
}
}
// FALLTHROUGH
case '0': {
Advance();
+ if (unicode() && Next() >= '0' && Next() <= '9') {
+          // With /u, decimal escapes with a leading 0 are not parsed as octal.
+ return ReportError(CStrVector("Invalid decimal escape"));
+ }
uc32 octal = ParseOctalLiteral();
builder->AddCharacter(octal);
break;
@@ -402,6 +452,10 @@ RegExpTree* RegExpParser::ParseDisjunction() {
// This is outside the specification. We match JSC in
// reading the backslash as a literal character instead
// of as starting an escape.
+ if (unicode()) {
+ // With /u, invalid escapes are not treated as identity escapes.
+ return ReportError(CStrVector("Invalid unicode escape"));
+ }
builder->AddCharacter('\\');
} else {
Advance(2);
@@ -414,11 +468,10 @@ RegExpTree* RegExpParser::ParseDisjunction() {
uc32 value;
if (ParseHexEscape(2, &value)) {
builder->AddCharacter(value);
- } else if (!unicode_) {
+ } else if (!unicode()) {
builder->AddCharacter('x');
} else {
- // If the 'u' flag is present, invalid escapes are not treated as
- // identity escapes.
+ // With /u, invalid escapes are not treated as identity escapes.
return ReportError(CStrVector("Invalid escape"));
}
break;
@@ -427,24 +480,20 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance(2);
uc32 value;
if (ParseUnicodeEscape(&value)) {
- builder->AddUnicodeCharacter(value);
- } else if (!unicode_) {
+ builder->AddEscapedUnicodeCharacter(value);
+ } else if (!unicode()) {
builder->AddCharacter('u');
} else {
- // If the 'u' flag is present, invalid escapes are not treated as
- // identity escapes.
+ // With /u, invalid escapes are not treated as identity escapes.
return ReportError(CStrVector("Invalid unicode escape"));
}
break;
}
default:
Advance();
- // If the 'u' flag is present, only syntax characters can be
- // escaped, no
- // other identity escapes are allowed. If the 'u' flag is not
- // present,
- // all identity escapes are allowed.
- if (!unicode_ || IsSyntaxCharacter(current())) {
+ // With /u, no identity escapes except for syntax characters
+ // are allowed. Otherwise, all identity escapes are allowed.
+ if (!unicode() || IsSyntaxCharacterOrSlash(current())) {
builder->AddCharacter(current());
Advance();
} else {
@@ -456,10 +505,16 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '{': {
int dummy;
if (ParseIntervalQuantifier(&dummy, &dummy)) {
- ReportError(CStrVector("Nothing to repeat") CHECK_FAILED);
+ return ReportError(CStrVector("Nothing to repeat"));
}
// fallthrough
}
+ case '}':
+ case ']':
+ if (unicode()) {
+ return ReportError(CStrVector("Lone quantifier brackets"));
+ }
+ // fallthrough
default:
builder->AddUnicodeCharacter(current());
Advance();
@@ -492,13 +547,15 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '{':
if (ParseIntervalQuantifier(&min, &max)) {
if (max < min) {
- ReportError(CStrVector("numbers out of order in {} quantifier.")
- CHECK_FAILED);
+ return ReportError(
+ CStrVector("numbers out of order in {} quantifier"));
}
break;
- } else {
- continue;
+ } else if (unicode()) {
+ // With /u, incomplete quantifiers are not allowed.
+ return ReportError(CStrVector("Incomplete quantifier"));
}
+ continue;
default:
continue;
}
@@ -511,7 +568,9 @@ RegExpTree* RegExpParser::ParseDisjunction() {
quantifier_type = RegExpQuantifier::POSSESSIVE;
Advance();
}
- builder->AddQuantifierToAtom(min, max, quantifier_type);
+ if (!builder->AddQuantifierToAtom(min, max, quantifier_type)) {
+ return ReportError(CStrVector("Invalid quantifier"));
+ }
}
}
@@ -740,12 +799,12 @@ bool RegExpParser::ParseHexEscape(int length, uc32* value) {
return true;
}
-
+// This parses RegExpUnicodeEscapeSequence as described in ECMA262.
bool RegExpParser::ParseUnicodeEscape(uc32* value) {
// Accept both \uxxxx and \u{xxxxxx} (if harmony unicode escapes are
// allowed). In the latter case, the number of hex digits between { } is
// arbitrary. \ and u have already been read.
- if (current() == '{' && unicode_) {
+ if (current() == '{' && unicode()) {
int start = position();
Advance();
if (ParseUnlimitedLengthHexNumber(0x10ffff, value)) {
@@ -758,9 +817,75 @@ bool RegExpParser::ParseUnicodeEscape(uc32* value) {
return false;
}
// \u but no {, or \u{...} escapes not allowed.
- return ParseHexEscape(4, value);
+ bool result = ParseHexEscape(4, value);
+ if (result && unicode() && unibrow::Utf16::IsLeadSurrogate(*value) &&
+ current() == '\\') {
+ // Attempt to read trail surrogate.
+ int start = position();
+ if (Next() == 'u') {
+ Advance(2);
+ uc32 trail;
+ if (ParseHexEscape(4, &trail) &&
+ unibrow::Utf16::IsTrailSurrogate(trail)) {
+ *value = unibrow::Utf16::CombineSurrogatePair(static_cast<uc16>(*value),
+ static_cast<uc16>(trail));
+ return true;
+ }
+ }
+ Reset(start);
+ }
+ return result;
}
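
Two consequences of this hunk under /u: a lead \uXXXX escape immediately followed by a trail \uXXXX escape now denotes a single code point (so \uD83D\uDE00 parses as U+1F600), and the braced form accepts any number of hex digits up to 0x10FFFF. A minimal sketch of the braced-form scan (our own helper names; 0x10FFFF is the max_value the parser passes above):

    #include <cstdint>
    #include <cstdio>

    // Parse "{hex...}" into a code point, rejecting values past 0x10FFFF.
    bool ParseBracedUnicodeEscape(const char* s, uint32_t* value) {
      if (*s++ != '{') return false;
      uint32_t x = 0;
      bool any = false;
      for (; *s != '}'; s++) {
        int d;
        if (*s >= '0' && *s <= '9') d = *s - '0';
        else if (*s >= 'a' && *s <= 'f') d = *s - 'a' + 10;
        else if (*s >= 'A' && *s <= 'F') d = *s - 'A' + 10;
        else return false;  // also rejects end of string before '}'
        x = x * 16 + d;
        if (x > 0x10FFFF) return false;
        any = true;
      }
      *value = x;
      return any;
    }

    int main() {
      uint32_t v;
      if (ParseBracedUnicodeEscape("{1F600}", &v)) std::printf("%X\n", v);
    }
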
+ZoneList<CharacterRange>* RegExpParser::ParsePropertyClass() {
+#ifdef V8_I18N_SUPPORT
+ char property_name[3];
+ memset(property_name, 0, sizeof(property_name));
+ if (current() == '{') {
+ Advance();
+ if (current() < 'A' || current() > 'Z') return nullptr;
+ property_name[0] = static_cast<char>(current());
+ Advance();
+ if (current() >= 'a' && current() <= 'z') {
+ property_name[1] = static_cast<char>(current());
+ Advance();
+ }
+ if (current() != '}') return nullptr;
+ } else if (current() >= 'A' && current() <= 'Z') {
+ property_name[0] = static_cast<char>(current());
+ } else {
+ return nullptr;
+ }
+ Advance();
+
+ int32_t category =
+ u_getPropertyValueEnum(UCHAR_GENERAL_CATEGORY_MASK, property_name);
+ if (category == UCHAR_INVALID_CODE) return nullptr;
+
+ USet* set = uset_openEmpty();
+ UErrorCode ec = U_ZERO_ERROR;
+ uset_applyIntPropertyValue(set, UCHAR_GENERAL_CATEGORY_MASK, category, &ec);
+ ZoneList<CharacterRange>* ranges = nullptr;
+ if (ec == U_ZERO_ERROR && !uset_isEmpty(set)) {
+ uset_removeAllStrings(set);
+ int item_count = uset_getItemCount(set);
+ ranges = new (zone()) ZoneList<CharacterRange>(item_count, zone());
+ int item_result = 0;
+ for (int i = 0; i < item_count; i++) {
+ uc32 start = 0;
+ uc32 end = 0;
+ item_result += uset_getItem(set, i, &start, &end, nullptr, 0, &ec);
+ ranges->Add(CharacterRange::Range(start, end), zone());
+ }
+ DCHECK_EQ(U_ZERO_ERROR, ec);
+ DCHECK_EQ(0, item_result);
+ }
+ uset_close(set);
+ return ranges;
+#else // V8_I18N_SUPPORT
+ return nullptr;
+#endif // V8_I18N_SUPPORT
+}
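
The ICU calls above can be exercised in isolation; this sketch resolves the two-letter general-category alias "Lu" the same way ParsePropertyClass does, assuming ICU4C headers are available and with error handling trimmed:

    #include <unicode/uchar.h>
    #include <unicode/uset.h>
    #include <cstdio>

    int main() {
      // Resolve the category name, then materialize its character ranges.
      int32_t category =
          u_getPropertyValueEnum(UCHAR_GENERAL_CATEGORY_MASK, "Lu");
      USet* set = uset_openEmpty();
      UErrorCode ec = U_ZERO_ERROR;
      uset_applyIntPropertyValue(set, UCHAR_GENERAL_CATEGORY_MASK, category,
                                 &ec);
      if (ec == U_ZERO_ERROR) {
        printf("ranges for \\p{Lu}: %d\n", uset_getItemCount(set));
      }
      uset_close(set);
      return 0;
    }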
bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
uc32 x = 0;
@@ -809,20 +934,35 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
case 'c': {
uc32 controlLetter = Next();
uc32 letter = controlLetter & ~('A' ^ 'a');
- // For compatibility with JSC, inside a character class
- // we also accept digits and underscore as control characters.
- if ((controlLetter >= '0' && controlLetter <= '9') ||
- controlLetter == '_' || (letter >= 'A' && letter <= 'Z')) {
+ // For compatibility with JSC, inside a character class we also accept
+ // digits and underscore as control characters, unless with /u.
+ if (letter >= 'A' && letter <= 'Z') {
Advance(2);
// Control letters mapped to ASCII control characters in the range
// 0x00-0x1f.
return controlLetter & 0x1f;
}
+ if (unicode()) {
+ // With /u, invalid escapes are not treated as identity escapes.
+ ReportError(CStrVector("Invalid class escape"));
+ return 0;
+ }
+ if ((controlLetter >= '0' && controlLetter <= '9') ||
+ controlLetter == '_') {
+ Advance(2);
+ return controlLetter & 0x1f;
+ }
// We match JSC in reading the backslash as a literal
// character instead of as starting an escape.
return '\\';
}
case '0':
+ // With /u, \0 is interpreted as NUL if not followed by another digit.
+ if (unicode() && !(Next() >= '0' && Next() <= '9')) {
+ Advance();
+ return 0;
+ }
+ // Fall through.
case '1':
case '2':
case '3':
@@ -833,43 +973,43 @@ uc32 RegExpParser::ParseClassCharacterEscape() {
// For compatibility, we interpret a decimal escape that isn't
// a back reference (and therefore either \0 or not valid according
// to the specification) as a 1..3 digit octal character code.
+ if (unicode()) {
+ // With /u, decimal escape is not interpreted as octal character code.
+ ReportError(CStrVector("Invalid class escape"));
+ return 0;
+ }
return ParseOctalLiteral();
case 'x': {
Advance();
uc32 value;
- if (ParseHexEscape(2, &value)) {
- return value;
+ if (ParseHexEscape(2, &value)) return value;
+ if (unicode()) {
+ // With /u, invalid escapes are not treated as identity escapes.
+ ReportError(CStrVector("Invalid escape"));
+ return 0;
}
- if (!unicode_) {
- // If \x is not followed by a two-digit hexadecimal, treat it
- // as an identity escape.
- return 'x';
- }
- // If the 'u' flag is present, invalid escapes are not treated as
- // identity escapes.
- ReportError(CStrVector("Invalid escape"));
- return 0;
+ // If \x is not followed by a two-digit hexadecimal, treat it
+ // as an identity escape.
+ return 'x';
}
case 'u': {
Advance();
uc32 value;
- if (ParseUnicodeEscape(&value)) {
- return value;
- }
- if (!unicode_) {
- return 'u';
+ if (ParseUnicodeEscape(&value)) return value;
+ if (unicode()) {
+ // With /u, invalid escapes are not treated as identity escapes.
+ ReportError(CStrVector("Invalid unicode escape"));
+ return 0;
}
- // If the 'u' flag is present, invalid escapes are not treated as
- // identity escapes.
- ReportError(CStrVector("Invalid unicode escape"));
- return 0;
+ // If \u is not followed by a four-digit hexadecimal, treat it
+ // as an identity escape.
+ return 'u';
}
default: {
uc32 result = current();
- // If the 'u' flag is present, only syntax characters can be escaped, no
- // other identity escapes are allowed. If the 'u' flag is not present, all
- // identity escapes are allowed.
- if (!unicode_ || IsSyntaxCharacter(result)) {
+ // With /u, no identity escapes except for syntax characters and '-' are
+ // allowed. Otherwise, all identity escapes are allowed.
+ if (!unicode() || IsSyntaxCharacterOrSlash(result) || result == '-') {
Advance();
return result;
}
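
For reference, a plausible reading of the renamed predicate, assuming it accepts exactly the ES2015 SyntaxCharacter set plus '/' (its actual definition is outside this hunk):

    #include <cassert>
    #include <cstdint>

    // Assumed membership: the ES2015 SyntaxCharacter set plus '/'.
    bool IsSyntaxCharacterOrSlash(uint32_t c) {
      switch (c) {
        case '^': case '$': case '\\': case '.': case '*': case '+':
        case '?': case '(': case ')': case '[': case ']': case '{':
        case '}': case '|': case '/':
          return true;
        default:
          return false;
      }
    }

    int main() {
      assert(IsSyntaxCharacterOrSlash('/'));   // /\//u stays a valid escape
      assert(!IsSyntaxCharacterOrSlash('a'));  // /\a/u is rejected under /u
      return 0;
    }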
@@ -899,13 +1039,13 @@ CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) {
case kEndMarker:
return ReportError(CStrVector("\\ at end of pattern"));
default:
- uc32 c = ParseClassCharacterEscape(CHECK_FAILED);
- return CharacterRange::Singleton(c);
+ first = ParseClassCharacterEscape(CHECK_FAILED);
}
} else {
Advance();
- return CharacterRange::Singleton(first);
}
+
+ return CharacterRange::Singleton(first);
}
@@ -927,6 +1067,7 @@ static inline void AddRangeOrEscape(ZoneList<CharacterRange>* ranges,
RegExpTree* RegExpParser::ParseCharacterClass() {
static const char* kUnterminated = "Unterminated character class";
+ static const char* kRangeInvalid = "Invalid character class";
static const char* kRangeOutOfOrder = "Range out of order in character class";
DCHECK_EQ(current(), '[');
@@ -956,13 +1097,18 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
CharacterRange next = ParseClassAtom(&char_class_2 CHECK_FAILED);
if (char_class != kNoCharClass || char_class_2 != kNoCharClass) {
// Either end is an escaped character class. Treat the '-' verbatim.
+ if (unicode()) {
+ // ES2015 21.2.2.15.1 step 1.
+ return ReportError(CStrVector(kRangeInvalid));
+ }
AddRangeOrEscape(ranges, char_class, first, zone());
ranges->Add(CharacterRange::Singleton('-'), zone());
AddRangeOrEscape(ranges, char_class_2, next, zone());
continue;
}
+ // ES2015 21.2.2.15.1 step 6.
if (first.from() > next.to()) {
- return ReportError(CStrVector(kRangeOutOfOrder) CHECK_FAILED);
+ return ReportError(CStrVector(kRangeOutOfOrder));
}
ranges->Add(CharacterRange::Range(first.from(), next.to()), zone());
} else {
@@ -970,7 +1116,7 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
}
}
if (!has_more()) {
- return ReportError(CStrVector(kUnterminated) CHECK_FAILED);
+ return ReportError(CStrVector(kUnterminated));
}
Advance();
if (ranges->length() == 0) {
@@ -985,10 +1131,10 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
- FlatStringReader* input, bool multiline,
- bool unicode, RegExpCompileData* result) {
+ FlatStringReader* input, JSRegExp::Flags flags,
+ RegExpCompileData* result) {
DCHECK(result != NULL);
- RegExpParser parser(input, &result->error, multiline, unicode, isolate, zone);
+ RegExpParser parser(input, &result->error, flags, isolate, zone);
RegExpTree* tree = parser.ParsePattern();
if (parser.failed()) {
DCHECK(tree == NULL);
@@ -1010,11 +1156,13 @@ bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
return !parser.failed();
}
-
-RegExpBuilder::RegExpBuilder(Zone* zone)
+RegExpBuilder::RegExpBuilder(Zone* zone, bool ignore_case, bool unicode)
: zone_(zone),
pending_empty_(false),
+ ignore_case_(ignore_case),
+ unicode_(unicode),
characters_(NULL),
+ pending_surrogate_(kNoPendingSurrogate),
terms_(),
alternatives_()
#ifdef DEBUG
@@ -1025,7 +1173,51 @@ RegExpBuilder::RegExpBuilder(Zone* zone)
}
+void RegExpBuilder::AddLeadSurrogate(uc16 lead_surrogate) {
+ DCHECK(unibrow::Utf16::IsLeadSurrogate(lead_surrogate));
+ FlushPendingSurrogate();
+ // Hold onto the lead surrogate, waiting for a trail surrogate to follow.
+ pending_surrogate_ = lead_surrogate;
+}
+
+
+void RegExpBuilder::AddTrailSurrogate(uc16 trail_surrogate) {
+ DCHECK(unibrow::Utf16::IsTrailSurrogate(trail_surrogate));
+ if (pending_surrogate_ != kNoPendingSurrogate) {
+ uc16 lead_surrogate = pending_surrogate_;
+ pending_surrogate_ = kNoPendingSurrogate;
+ DCHECK(unibrow::Utf16::IsLeadSurrogate(lead_surrogate));
+ uc32 combined =
+ unibrow::Utf16::CombineSurrogatePair(lead_surrogate, trail_surrogate);
+ if (NeedsDesugaringForIgnoreCase(combined)) {
+ AddCharacterClassForDesugaring(combined);
+ } else {
+ ZoneList<uc16> surrogate_pair(2, zone());
+ surrogate_pair.Add(lead_surrogate, zone());
+ surrogate_pair.Add(trail_surrogate, zone());
+ RegExpAtom* atom =
+ new (zone()) RegExpAtom(surrogate_pair.ToConstVector());
+ AddAtom(atom);
+ }
+ } else {
+ pending_surrogate_ = trail_surrogate;
+ FlushPendingSurrogate();
+ }
+}
+
+
+void RegExpBuilder::FlushPendingSurrogate() {
+ if (pending_surrogate_ != kNoPendingSurrogate) {
+ DCHECK(unicode());
+ uc32 c = pending_surrogate_;
+ pending_surrogate_ = kNoPendingSurrogate;
+ AddCharacterClassForDesugaring(c);
+ }
+}
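
A toy, self-contained model of this buffering: adjacent lead and trail units pair into one atom, while any lone surrogate is flushed for desugaring into a single-character class (ranges are plain UTF-16; the prints stand in for builder actions):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      // Units as parsed from a pattern: a full pair, a lone lead, then 'a'.
      std::vector<uint16_t> units = {0xD83D, 0xDE00, 0xD83D, 'a'};
      uint16_t pending = 0;  // plays the role of kNoPendingSurrogate
      for (uint16_t u : units) {
        bool is_lead = 0xD800 <= u && u <= 0xDBFF;
        bool is_trail = 0xDC00 <= u && u <= 0xDFFF;
        if (pending != 0 && is_trail) {
          // Adjacent lead+trail combine into one atom (here U+1F600).
          unsigned c = 0x10000u + ((pending - 0xD800u) << 10) + (u - 0xDC00u);
          printf("paired atom U+%X\n", c);
          pending = 0;
        } else {
          // Anything else flushes a pending lead as a lone surrogate, which
          // the builder desugars into a single-character class.
          if (pending != 0)
            printf("lone lead U+%X -> class\n", (unsigned)pending);
          pending = 0;
          if (is_lead) pending = u;
          else if (is_trail) printf("lone trail U+%X -> class\n", (unsigned)u);
          else printf("literal '%c'\n", (char)u);
        }
      }
      if (pending != 0) printf("lone lead U+%X -> class\n", (unsigned)pending);
      return 0;
    }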
+
+
void RegExpBuilder::FlushCharacters() {
+ FlushPendingSurrogate();
pending_empty_ = false;
if (characters_ != NULL) {
RegExpTree* atom = new (zone()) RegExpAtom(characters_->ToConstVector());
@@ -1053,31 +1245,61 @@ void RegExpBuilder::FlushText() {
void RegExpBuilder::AddCharacter(uc16 c) {
+ FlushPendingSurrogate();
pending_empty_ = false;
- if (characters_ == NULL) {
- characters_ = new (zone()) ZoneList<uc16>(4, zone());
+ if (NeedsDesugaringForIgnoreCase(c)) {
+ AddCharacterClassForDesugaring(c);
+ } else {
+ if (characters_ == NULL) {
+ characters_ = new (zone()) ZoneList<uc16>(4, zone());
+ }
+ characters_->Add(c, zone());
+ LAST(ADD_CHAR);
}
- characters_->Add(c, zone());
- LAST(ADD_CHAR);
}
void RegExpBuilder::AddUnicodeCharacter(uc32 c) {
if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- ZoneList<uc16> surrogate_pair(2, zone());
- surrogate_pair.Add(unibrow::Utf16::LeadSurrogate(c), zone());
- surrogate_pair.Add(unibrow::Utf16::TrailSurrogate(c), zone());
- RegExpAtom* atom = new (zone()) RegExpAtom(surrogate_pair.ToConstVector());
- AddAtom(atom);
+ DCHECK(unicode());
+ AddLeadSurrogate(unibrow::Utf16::LeadSurrogate(c));
+ AddTrailSurrogate(unibrow::Utf16::TrailSurrogate(c));
+ } else if (unicode() && unibrow::Utf16::IsLeadSurrogate(c)) {
+ AddLeadSurrogate(c);
+ } else if (unicode() && unibrow::Utf16::IsTrailSurrogate(c)) {
+ AddTrailSurrogate(c);
} else {
AddCharacter(static_cast<uc16>(c));
}
}
+void RegExpBuilder::AddEscapedUnicodeCharacter(uc32 character) {
+ // A lead or trail surrogate parsed via escape sequence will not
+ // pair up with any preceding lead or following trail surrogate.
+ FlushPendingSurrogate();
+ AddUnicodeCharacter(character);
+ FlushPendingSurrogate();
+}
void RegExpBuilder::AddEmpty() { pending_empty_ = true; }
+void RegExpBuilder::AddCharacterClass(RegExpCharacterClass* cc) {
+ if (NeedsDesugaringForUnicode(cc)) {
+ // With /u, the character class needs to be desugared, so it
+ // must be a standalone term instead of being part of a RegExpText.
+ AddTerm(cc);
+ } else {
+ AddAtom(cc);
+ }
+}
+
+void RegExpBuilder::AddCharacterClassForDesugaring(uc32 c) {
+ AddTerm(new (zone()) RegExpCharacterClass(
+ CharacterRange::List(zone(), CharacterRange::Singleton(c)), false));
+}
+
+
void RegExpBuilder::AddAtom(RegExpTree* term) {
if (term->IsEmpty()) {
AddEmpty();
@@ -1094,6 +1316,13 @@ void RegExpBuilder::AddAtom(RegExpTree* term) {
}
+void RegExpBuilder::AddTerm(RegExpTree* term) {
+ FlushText();
+ terms_.Add(term, zone());
+ LAST(ADD_ATOM);
+}
+
+
void RegExpBuilder::AddAssertion(RegExpTree* assert) {
FlushText();
terms_.Add(assert, zone());
@@ -1121,6 +1350,47 @@ void RegExpBuilder::FlushTerms() {
}
+bool RegExpBuilder::NeedsDesugaringForUnicode(RegExpCharacterClass* cc) {
+ if (!unicode()) return false;
+ switch (cc->standard_type()) {
+ case 's': // white space
+ case 'w': // ASCII word character
+ case 'd': // ASCII digit
+ return false; // These characters do not need desugaring.
+ default:
+ break;
+ }
+ ZoneList<CharacterRange>* ranges = cc->ranges(zone());
+ CharacterRange::Canonicalize(ranges);
+ for (int i = ranges->length() - 1; i >= 0; i--) {
+ uc32 from = ranges->at(i).from();
+ uc32 to = ranges->at(i).to();
+ // Check for non-BMP characters.
+ if (to >= kNonBmpStart) return true;
+ // Check for lone surrogates.
+ if (from <= kTrailSurrogateEnd && to >= kLeadSurrogateStart) return true;
+ }
+ return false;
+}
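
The two range tests above can be stated with the constants written out; their values are assumed from UTF-16 (the named constants are declared elsewhere in V8):

    #include <cassert>

    constexpr unsigned kLeadSurrogateStart = 0xD800;
    constexpr unsigned kTrailSurrogateEnd = 0xDFFF;
    constexpr unsigned kNonBmpStart = 0x10000;

    // A range forces desugaring if it reaches past the BMP or overlaps the
    // surrogate block (i.e. it could produce lone surrogates).
    bool RangeNeedsDesugaring(unsigned from, unsigned to) {
      if (to >= kNonBmpStart) return true;
      return from <= kTrailSurrogateEnd && to >= kLeadSurrogateStart;
    }

    int main() {
      assert(!RangeNeedsDesugaring('a', 'z'));         // plain BMP range
      assert(RangeNeedsDesugaring(0xD000, 0xE000));    // overlaps surrogates
      assert(RangeNeedsDesugaring(0x1F600, 0x1F64F));  // non-BMP (emoji)
      return 0;
    }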
+
+
+bool RegExpBuilder::NeedsDesugaringForIgnoreCase(uc32 c) {
+#ifdef V8_I18N_SUPPORT
+ if (unicode() && ignore_case()) {
+ USet* set = uset_open(c, c);
+ uset_closeOver(set, USET_CASE_INSENSITIVE);
+ uset_removeAllStrings(set);
+ bool result = uset_size(set) > 1;
+ uset_close(set);
+ return result;
+ }
+ // In the case where ICU is not included, we act as if the unicode flag is
+ // not set, and do not desugar.
+#endif // V8_I18N_SUPPORT
+ return false;
+}
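
The same ICU logic, lifted into a standalone program (assuming ICU4C): a character needs /iu desugaring exactly when its case-insensitive closure contains more than one code point.

    #include <unicode/uset.h>
    #include <cstdio>

    // Does case-insensitive matching of 'c' involve other code points?
    bool NeedsDesugaringForIgnoreCase(UChar32 c) {
      USet* set = uset_open(c, c);
      uset_closeOver(set, USET_CASE_INSENSITIVE);
      uset_removeAllStrings(set);
      bool result = uset_size(set) > 1;
      uset_close(set);
      return result;
    }

    int main() {
      // 'a' closes over {a, A}; U+017F (long s) over {s, S, ſ}; '3' has no
      // case forms, so only the digit stays a plain literal under /iu.
      printf("a: %d  U+017F: %d  3: %d\n", NeedsDesugaringForIgnoreCase('a'),
             NeedsDesugaringForIgnoreCase(0x017F),
             NeedsDesugaringForIgnoreCase('3'));  // expected: 1 1 0
      return 0;
    }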
+
+
RegExpTree* RegExpBuilder::ToRegExp() {
FlushTerms();
int num_alternatives = alternatives_.length();
@@ -1129,12 +1399,12 @@ RegExpTree* RegExpBuilder::ToRegExp() {
return new (zone()) RegExpDisjunction(alternatives_.GetList(zone()));
}
-
-void RegExpBuilder::AddQuantifierToAtom(
+bool RegExpBuilder::AddQuantifierToAtom(
int min, int max, RegExpQuantifier::QuantifierType quantifier_type) {
+ FlushPendingSurrogate();
if (pending_empty_) {
pending_empty_ = false;
- return;
+ return true;
}
RegExpTree* atom;
if (characters_ != NULL) {
@@ -1157,23 +1427,26 @@ void RegExpBuilder::AddQuantifierToAtom(
} else if (terms_.length() > 0) {
DCHECK(last_added_ == ADD_ATOM);
atom = terms_.RemoveLast();
+ // With /u, lookarounds are not quantifiable.
+ if (unicode() && atom->IsLookaround()) return false;
if (atom->max_match() == 0) {
// Guaranteed to only match an empty string.
LAST(ADD_TERM);
if (min == 0) {
- return;
+ return true;
}
terms_.Add(atom, zone());
- return;
+ return true;
}
} else {
// Only call immediately after adding an atom or character!
UNREACHABLE();
- return;
+ return false;
}
terms_.Add(new (zone()) RegExpQuantifier(min, max, quantifier_type, atom),
zone());
LAST(ADD_TERM);
+ return true;
}
} // namespace internal
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index af9b765fba..acf783cc41 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -99,28 +99,43 @@ class BufferedZoneList {
// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
class RegExpBuilder : public ZoneObject {
public:
- explicit RegExpBuilder(Zone* zone);
+ RegExpBuilder(Zone* zone, bool ignore_case, bool unicode);
void AddCharacter(uc16 character);
void AddUnicodeCharacter(uc32 character);
+ void AddEscapedUnicodeCharacter(uc32 character);
// "Adds" an empty expression. Does nothing except consume a
// following quantifier.
void AddEmpty();
+ void AddCharacterClass(RegExpCharacterClass* cc);
+ void AddCharacterClassForDesugaring(uc32 c);
void AddAtom(RegExpTree* tree);
+ void AddTerm(RegExpTree* tree);
void AddAssertion(RegExpTree* tree);
void NewAlternative(); // '|'
- void AddQuantifierToAtom(int min, int max,
+ bool AddQuantifierToAtom(int min, int max,
RegExpQuantifier::QuantifierType type);
RegExpTree* ToRegExp();
private:
+ static const uc16 kNoPendingSurrogate = 0;
+ void AddLeadSurrogate(uc16 lead_surrogate);
+ void AddTrailSurrogate(uc16 trail_surrogate);
+ void FlushPendingSurrogate();
void FlushCharacters();
void FlushText();
void FlushTerms();
+ bool NeedsDesugaringForUnicode(RegExpCharacterClass* cc);
+ bool NeedsDesugaringForIgnoreCase(uc32 c);
Zone* zone() const { return zone_; }
+ bool ignore_case() const { return ignore_case_; }
+ bool unicode() const { return unicode_; }
Zone* zone_;
bool pending_empty_;
+ bool ignore_case_;
+ bool unicode_;
ZoneList<uc16>* characters_;
+ uc16 pending_surrogate_;
BufferedZoneList<RegExpTree, 2> terms_;
BufferedZoneList<RegExpTree, 2> text_;
BufferedZoneList<RegExpTree, 2> alternatives_;
@@ -135,12 +150,11 @@ class RegExpBuilder : public ZoneObject {
class RegExpParser BASE_EMBEDDED {
public:
- RegExpParser(FlatStringReader* in, Handle<String>* error, bool multiline_mode,
- bool unicode, Isolate* isolate, Zone* zone);
+ RegExpParser(FlatStringReader* in, Handle<String>* error,
+ JSRegExp::Flags flags, Isolate* isolate, Zone* zone);
static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
- bool multiline, bool unicode,
- RegExpCompileData* result);
+ JSRegExp::Flags flags, RegExpCompileData* result);
RegExpTree* ParsePattern();
RegExpTree* ParseDisjunction();
@@ -160,6 +174,7 @@ class RegExpParser BASE_EMBEDDED {
bool ParseHexEscape(int length, uc32* value);
bool ParseUnicodeEscape(uc32* value);
bool ParseUnlimitedLengthHexNumber(int max_value, uc32* value);
+ ZoneList<CharacterRange>* ParsePropertyClass();
uc32 ParseOctalLiteral();
@@ -183,8 +198,11 @@ class RegExpParser BASE_EMBEDDED {
int captures_started() { return captures_started_; }
int position() { return next_pos_ - 1; }
bool failed() { return failed_; }
+ bool ignore_case() const { return ignore_case_; }
+ bool multiline() const { return multiline_; }
+ bool unicode() const { return unicode_; }
- static bool IsSyntaxCharacter(uc32 c);
+ static bool IsSyntaxCharacterOrSlash(uc32 c);
static const int kMaxCaptures = 1 << 16;
static const uc32 kEndMarker = (1 << 21);
@@ -203,9 +221,10 @@ class RegExpParser BASE_EMBEDDED {
RegExpParserState(RegExpParserState* previous_state,
SubexpressionType group_type,
RegExpLookaround::Type lookaround_type,
- int disjunction_capture_index, Zone* zone)
+ int disjunction_capture_index, bool ignore_case,
+ bool unicode, Zone* zone)
: previous_state_(previous_state),
- builder_(new (zone) RegExpBuilder(zone)),
+ builder_(new (zone) RegExpBuilder(zone, ignore_case, unicode)),
group_type_(group_type),
lookaround_type_(lookaround_type),
disjunction_capture_index_(disjunction_capture_index) {}
@@ -249,6 +268,8 @@ class RegExpParser BASE_EMBEDDED {
bool has_more() { return has_more_; }
bool has_next() { return next_pos_ < in()->length(); }
uc32 Next();
+ template <bool update_position>
+ uc32 ReadNext();
FlatStringReader* in() { return in_; }
void ScanForCaptures();
@@ -258,13 +279,14 @@ class RegExpParser BASE_EMBEDDED {
ZoneList<RegExpCapture*>* captures_;
FlatStringReader* in_;
uc32 current_;
+ bool ignore_case_;
+ bool multiline_;
+ bool unicode_;
int next_pos_;
int captures_started_;
// The capture count is only valid after we have scanned for captures.
int capture_count_;
bool has_more_;
- bool multiline_;
- bool unicode_;
bool simple_;
bool contains_anchor_;
bool is_scanned_for_captures_;
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 286f159cc8..952034fb0c 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -203,7 +203,7 @@ void RegExpMacroAssemblerX64::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
ReadPositionFromRegister(rdx, start_reg); // Offset of start of capture
ReadPositionFromRegister(rbx, start_reg + 1); // Offset of end of capture
@@ -308,8 +308,10 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Address byte_offset1 - Address captured substring's start.
// Address byte_offset2 - Address of current character position.
// size_t byte_length - length of capture in bytes(!)
- // Isolate* isolate
+ // Isolate* isolate - or 0 if the unicode flag is set.
#ifdef _WIN64
+ DCHECK(rcx.is(arg_reg_1));
+ DCHECK(rdx.is(arg_reg_2));
// Compute and set byte_offset1 (start of capture).
__ leap(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
@@ -317,11 +319,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
if (read_backward) {
__ subq(rdx, rbx);
}
- // Set byte_length.
- __ movp(r8, rbx);
- // Isolate.
- __ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#else // AMD64 calling convention
+ DCHECK(rdi.is(arg_reg_1));
+ DCHECK(rsi.is(arg_reg_2));
// Compute byte_offset2 (current position = rsi+rdi).
__ leap(rax, Operand(rsi, rdi, times_1, 0));
// Compute and set byte_offset1 (start of capture).
@@ -331,11 +331,19 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
if (read_backward) {
__ subq(rsi, rbx);
}
+#endif // _WIN64
+
// Set byte_length.
- __ movp(rdx, rbx);
+ __ movp(arg_reg_3, rbx);
// Isolate.
- __ LoadAddress(rcx, ExternalReference::isolate_address(isolate()));
-#endif
+#ifdef V8_I18N_SUPPORT
+ if (unicode) {
+ __ movp(arg_reg_4, Immediate(0));
+ } else // NOLINT
+#endif // V8_I18N_SUPPORT
+ {
+ __ LoadAddress(arg_reg_4, ExternalReference::isolate_address(isolate()));
+ }
{ // NOLINT: Can't find a way to open this scope without confusing the
// linter.
@@ -869,11 +877,14 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ testp(rdi, rdi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
+ Label advance;
+ __ bind(&advance);
if (mode_ == UC16) {
__ addq(rdi, Immediate(2));
} else {
__ incq(rdi);
}
+ if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
}
__ jmp(&load_char_start_regexp);
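
What the emitted code aims for, sketched at the C level with hypothetical names: after an empty match, never resume matching between the halves of a surrogate pair.

    #include <cassert>
    #include <cstdint>

    // After an empty match, step one UC16 unit; if that lands between the
    // two halves of a surrogate pair, step again so matching resumes on a
    // code point boundary (the check CheckNotInSurrogatePair performs).
    int AdvanceAfterEmptyMatch(const uint16_t* subject, int length, int pos) {
      ++pos;
      if (pos < length && 0xDC00 <= subject[pos] && subject[pos] <= 0xDFFF &&
          0xD800 <= subject[pos - 1] && subject[pos - 1] <= 0xDBFF) {
        ++pos;
      }
      return pos;
    }

    int main() {
      const uint16_t s[] = {0xD83D, 0xDE00, 'a'};   // one emoji, then 'a'
      assert(AdvanceAfterEmptyMatch(s, 3, 0) == 2); // skips the whole pair
      assert(AdvanceAfterEmptyMatch(s, 3, 2) == 3); // plain BMP step
      return 0;
    }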
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
index 257804739f..4c37771d38 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.h
@@ -38,7 +38,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
diff --git a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
index 01d0b249b6..6e6209282f 100644
--- a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
+++ b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
@@ -187,9 +187,8 @@ void RegExpMacroAssemblerX87::CheckGreedyLoop(Label* on_equal) {
__ bind(&fallthrough);
}
-
void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
- int start_reg, bool read_backward, Label* on_no_match) {
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
__ mov(edx, register_location(start_reg)); // Index of start of capture
__ mov(ebx, register_location(start_reg + 1)); // Index of end of capture
@@ -296,11 +295,18 @@ void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
// Address byte_offset1 - Address captured substring's start.
// Address byte_offset2 - Address of current character position.
// size_t byte_length - length of capture in bytes(!)
- // Isolate* isolate
+ // Isolate* isolate - or 0 if the unicode flag is set.
// Set isolate.
- __ mov(Operand(esp, 3 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
+#ifdef V8_I18N_SUPPORT
+ if (unicode) {
+ __ mov(Operand(esp, 3 * kPointerSize), Immediate(0));
+ } else // NOLINT
+#endif // V8_I18N_SUPPORT
+ {
+ __ mov(Operand(esp, 3 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ }
// Set byte_length.
__ mov(Operand(esp, 2 * kPointerSize), ebx);
// Set byte_offset2.
@@ -822,13 +828,15 @@ Handle<HeapObject> RegExpMacroAssemblerX87::GetCode(Handle<String> source) {
__ test(edi, edi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
+ Label advance;
+ __ bind(&advance);
if (mode_ == UC16) {
__ add(edi, Immediate(2));
} else {
__ inc(edi);
}
+ if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
}
-
__ jmp(&load_char_start_regexp);
} else {
__ mov(eax, Immediate(SUCCESS));
diff --git a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h
index c95541224f..2f689612b7 100644
--- a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h
+++ b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.h
@@ -37,7 +37,7 @@ class RegExpMacroAssemblerX87: public NativeRegExpMacroAssembler {
virtual void CheckNotBackReference(int start_reg, bool read_backward,
Label* on_no_match);
virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
- bool read_backward,
+ bool read_backward, bool unicode,
Label* on_no_match);
virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
virtual void CheckNotCharacterAfterAnd(uint32_t c,
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 2d4ee9c1a8..e17cbb1d6b 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -58,16 +58,18 @@ static void GetICCounts(SharedFunctionInfo* shared,
int* ic_with_type_info_count, int* ic_generic_count,
int* ic_total_count, int* type_info_percentage,
int* generic_percentage) {
- Code* shared_code = shared->code();
*ic_total_count = 0;
*ic_generic_count = 0;
*ic_with_type_info_count = 0;
- Object* raw_info = shared_code->type_feedback_info();
- if (raw_info->IsTypeFeedbackInfo()) {
- TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
- *ic_with_type_info_count = info->ic_with_type_info_count();
- *ic_generic_count = info->ic_generic_count();
- *ic_total_count = info->ic_total_count();
+ if (shared->code()->kind() == Code::FUNCTION) {
+ Code* shared_code = shared->code();
+ Object* raw_info = shared_code->type_feedback_info();
+ if (raw_info->IsTypeFeedbackInfo()) {
+ TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
+ *ic_with_type_info_count = info->ic_with_type_info_count();
+ *ic_generic_count = info->ic_generic_count();
+ *ic_total_count = info->ic_total_count();
+ }
}
// Harvest vector-ics as well
@@ -136,8 +138,160 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
}
}
+void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
+ int frame_count,
+ bool frame_optimized) {
+ SharedFunctionInfo* shared = function->shared();
+ Code* shared_code = shared->code();
+ if (shared_code->kind() != Code::FUNCTION) return;
+ if (function->IsInOptimizationQueue()) return;
+
+ if (FLAG_always_osr) {
+ AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker);
+ // Fall through and do a normal optimized compile as well.
+ } else if (!frame_optimized &&
+ (function->IsMarkedForOptimization() ||
+ function->IsMarkedForConcurrentOptimization() ||
+ function->IsOptimized())) {
+ // Attempt OSR if we are still running unoptimized code even though the
+ // function has long been marked or even already been optimized.
+ int ticks = shared_code->profiler_ticks();
+ int64_t allowance =
+ kOSRCodeSizeAllowanceBase +
+ static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick;
+ if (shared_code->CodeSize() > allowance &&
+ ticks < Code::ProfilerTicksField::kMax) {
+ shared_code->set_profiler_ticks(ticks + 1);
+ } else {
+ AttemptOnStackReplacement(function);
+ }
+ return;
+ }
+
+ // Only record top-level code on top of the execution stack and
+ // avoid optimizing excessively large scripts since top-level code
+ // will be executed only once.
+ const int kMaxToplevelSourceSize = 10 * 1024;
+ if (shared->is_toplevel() &&
+ (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
+ return;
+ }
+
+ // Do not record non-optimizable functions.
+ if (shared->optimization_disabled()) {
+ if (shared->deopt_count() >= FLAG_max_opt_count) {
+ // If optimization was disabled due to many deoptimizations,
+ // then check if the function is hot and try to reenable optimization.
+ int ticks = shared_code->profiler_ticks();
+ if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
+ shared_code->set_profiler_ticks(0);
+ shared->TryReenableOptimization();
+ } else {
+ shared_code->set_profiler_ticks(ticks + 1);
+ }
+ }
+ return;
+ }
+ if (function->IsOptimized()) return;
+
+ int ticks = shared_code->profiler_ticks();
+
+ if (ticks >= kProfilerTicksBeforeOptimization) {
+ int typeinfo, generic, total, type_percentage, generic_percentage;
+ GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
+ &generic_percentage);
+ if (type_percentage >= FLAG_type_info_threshold &&
+ generic_percentage <= FLAG_generic_ic_threshold) {
+ // If this particular function hasn't had any ICs patched for enough
+ // ticks, optimize it now.
+ Optimize(function, "hot and stable");
+ } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
+ Optimize(function, "not much type info but very hot");
+ } else {
+ shared_code->set_profiler_ticks(ticks + 1);
+ if (FLAG_trace_opt_verbose) {
+ PrintF("[not yet optimizing ");
+ function->PrintName();
+ PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
+ type_percentage);
+ }
+ }
+ } else if (!any_ic_changed_ &&
+ shared_code->instruction_size() < kMaxSizeEarlyOpt) {
+ // If no IC was patched since the last tick and this function is very
+ // small, optimistically optimize it now.
+ int typeinfo, generic, total, type_percentage, generic_percentage;
+ GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
+ &generic_percentage);
+ if (type_percentage >= FLAG_type_info_threshold &&
+ generic_percentage <= FLAG_generic_ic_threshold) {
+ Optimize(function, "small function");
+ } else {
+ shared_code->set_profiler_ticks(ticks + 1);
+ }
+ } else {
+ shared_code->set_profiler_ticks(ticks + 1);
+ }
+}
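
A worked example of the OSR allowance arithmetic above, with placeholder constants (the real kOSRCodeSizeAllowance* values live in runtime-profiler.h and are not shown in this diff): the budget grows linearly with ticks, so larger functions must stay hot longer before OSR is attempted.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical values, for illustration only.
      const int64_t kOSRCodeSizeAllowanceBase = 32 * 1024;
      const int64_t kOSRCodeSizeAllowancePerTick = 4 * 1024;
      const int64_t code_size = 100 * 1024;
      for (int ticks = 0; ticks < 100; ++ticks) {
        int64_t allowance =
            kOSRCodeSizeAllowanceBase + ticks * kOSRCodeSizeAllowancePerTick;
        if (code_size <= allowance) {
          printf("OSR attempted at tick %d\n", ticks);  // prints 17 here
          break;
        }
      }
      return 0;
    }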
+
+void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
+ bool frame_optimized) {
+ if (function->IsInOptimizationQueue()) return;
-void RuntimeProfiler::OptimizeNow() {
+ SharedFunctionInfo* shared = function->shared();
+ int ticks = shared->profiler_ticks();
+
+ // TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
+ // than kMaxToplevelSourceSize.
+ // TODO(rmcilroy): Consider whether we should optimize small functions when
+ // they are first seen on the stack (e.g., kMaxSizeEarlyOpt).
+
+ if (!frame_optimized && (function->IsMarkedForOptimization() ||
+ function->IsMarkedForConcurrentOptimization() ||
+ function->IsOptimized())) {
+ // TODO(rmcilroy): Support OSR in these cases.
+
+ return;
+ }
+
+ // Do not optimize non-optimizable functions.
+ if (shared->optimization_disabled()) {
+ if (shared->deopt_count() >= FLAG_max_opt_count) {
+ // If optimization was disabled due to many deoptimizations,
+ // then check if the function is hot and try to reenable optimization.
+ if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
+ shared->set_profiler_ticks(0);
+ shared->TryReenableOptimization();
+ }
+ }
+ return;
+ }
+
+ if (function->IsOptimized()) return;
+
+ if (ticks >= kProfilerTicksBeforeOptimization) {
+ int typeinfo, generic, total, type_percentage, generic_percentage;
+ GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
+ &generic_percentage);
+ if (type_percentage >= FLAG_type_info_threshold &&
+ generic_percentage <= FLAG_generic_ic_threshold) {
+ // If this particular function hasn't had any ICs patched for enough
+ // ticks, optimize it now.
+ Optimize(function, "hot and stable");
+ } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
+ Optimize(function, "not much type info but very hot");
+ } else {
+ if (FLAG_trace_opt_verbose) {
+ PrintF("[not yet optimizing ");
+ function->PrintName();
+ PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
+ type_percentage);
+ }
+ }
+ }
+}
+
+void RuntimeProfiler::MarkCandidatesForOptimization() {
HandleScope scope(isolate_);
if (!isolate_->use_crankshaft()) return;
@@ -155,9 +309,6 @@ void RuntimeProfiler::OptimizeNow() {
JavaScriptFrame* frame = it.frame();
JSFunction* function = frame->function();
- SharedFunctionInfo* shared = function->shared();
- Code* shared_code = shared->code();
-
List<JSFunction*> functions(4);
frame->GetFunctions(&functions);
for (int i = functions.length(); --i >= 0; ) {
@@ -168,94 +319,10 @@ void RuntimeProfiler::OptimizeNow() {
}
}
- if (shared_code->kind() != Code::FUNCTION) continue;
- if (function->IsInOptimizationQueue()) continue;
-
- if (FLAG_always_osr) {
- AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker);
- // Fall through and do a normal optimized compile as well.
- } else if (!frame->is_optimized() &&
- (function->IsMarkedForOptimization() ||
- function->IsMarkedForConcurrentOptimization() ||
- function->IsOptimized())) {
- // Attempt OSR if we are still running unoptimized code even though the
- // the function has long been marked or even already been optimized.
- int ticks = shared_code->profiler_ticks();
- int64_t allowance =
- kOSRCodeSizeAllowanceBase +
- static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick;
- if (shared_code->CodeSize() > allowance &&
- ticks < Code::ProfilerTicksField::kMax) {
- shared_code->set_profiler_ticks(ticks + 1);
- } else {
- AttemptOnStackReplacement(function);
- }
- continue;
- }
-
- // Only record top-level code on top of the execution stack and
- // avoid optimizing excessively large scripts since top-level code
- // will be executed only once.
- const int kMaxToplevelSourceSize = 10 * 1024;
- if (shared->is_toplevel() &&
- (frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
- continue;
- }
-
- // Do not record non-optimizable functions.
- if (shared->optimization_disabled()) {
- if (shared->deopt_count() >= FLAG_max_opt_count) {
- // If optimization was disabled due to many deoptimizations,
- // then check if the function is hot and try to reenable optimization.
- int ticks = shared_code->profiler_ticks();
- if (ticks >= kProfilerTicksBeforeReenablingOptimization) {
- shared_code->set_profiler_ticks(0);
- shared->TryReenableOptimization();
- } else {
- shared_code->set_profiler_ticks(ticks + 1);
- }
- }
- continue;
- }
- if (function->IsOptimized()) continue;
-
- int ticks = shared_code->profiler_ticks();
-
- if (ticks >= kProfilerTicksBeforeOptimization) {
- int typeinfo, generic, total, type_percentage, generic_percentage;
- GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
- &generic_percentage);
- if (type_percentage >= FLAG_type_info_threshold &&
- generic_percentage <= FLAG_generic_ic_threshold) {
- // If this particular function hasn't had any ICs patched for enough
- // ticks, optimize it now.
- Optimize(function, "hot and stable");
- } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
- Optimize(function, "not much type info but very hot");
- } else {
- shared_code->set_profiler_ticks(ticks + 1);
- if (FLAG_trace_opt_verbose) {
- PrintF("[not yet optimizing ");
- function->PrintName();
- PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
- type_percentage);
- }
- }
- } else if (!any_ic_changed_ &&
- shared_code->instruction_size() < kMaxSizeEarlyOpt) {
- // If no IC was patched since the last tick and this function is very
- // small, optimistically optimize it now.
- int typeinfo, generic, total, type_percentage, generic_percentage;
- GetICCounts(shared, &typeinfo, &generic, &total, &type_percentage,
- &generic_percentage);
- if (type_percentage >= FLAG_type_info_threshold &&
- generic_percentage <= FLAG_generic_ic_threshold) {
- Optimize(function, "small function");
- } else {
- shared_code->set_profiler_ticks(ticks + 1);
- }
+ if (FLAG_ignition) {
+ MaybeOptimizeIgnition(function, frame->is_optimized());
} else {
- shared_code->set_profiler_ticks(ticks + 1);
+ MaybeOptimizeFullCodegen(function, frame_count, frame->is_optimized());
}
}
any_ic_changed_ = false;
diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h
index 0d57929d06..aa2f65eb29 100644
--- a/deps/v8/src/runtime-profiler.h
+++ b/deps/v8/src/runtime-profiler.h
@@ -23,13 +23,16 @@ class RuntimeProfiler {
public:
explicit RuntimeProfiler(Isolate* isolate);
- void OptimizeNow();
+ void MarkCandidatesForOptimization();
void NotifyICChanged() { any_ic_changed_ = true; }
void AttemptOnStackReplacement(JSFunction* function, int nesting_levels = 1);
private:
+ void MaybeOptimizeFullCodegen(JSFunction* function, int frame_count,
+ bool frame_optimized);
+ void MaybeOptimizeIgnition(JSFunction* function, bool frame_optimized);
void Optimize(JSFunction* function, const char* reason);
bool CodeSizeOKForOSR(Code* shared_code);
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index f2a217d7f7..f651ed40e1 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -201,6 +201,15 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
+ if (array->HasFastStringWrapperElements()) {
+ int string_length =
+ String::cast(Handle<JSValue>::cast(array)->value())->length();
+ int backing_store_length = array->elements()->length();
+ return *isolate->factory()->NewNumberFromUint(
+ Min(length,
+ static_cast<uint32_t>(Max(string_length, backing_store_length))));
+ }
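
A worked example of the new bound, with assumed numbers: for a wrapped "abc" that later gained indexed properties out to index 7, the reported key bound is the backing-store length.

    #include <algorithm>
    #include <cstdio>

    int main() {
      unsigned length = 100;         // requested key limit
      int string_length = 3;         // characters of the wrapped string
      int backing_store_length = 8;  // elements backing store
      unsigned bound = std::min(
          length, (unsigned)std::max(string_length, backing_store_length));
      printf("key bound: %u\n", bound);  // prints 8
      return 0;
    }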
+
if (!array->elements()->IsDictionary()) {
RUNTIME_ASSERT(array->HasFastSmiOrObjectElements() ||
array->HasFastDoubleElements());
@@ -208,8 +217,8 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
return *isolate->factory()->NewNumberFromUint(Min(actual_length, length));
}
- KeyAccumulator accumulator(isolate, ALL_PROPERTIES);
- // No need to separate protoype levels since we only get numbers/element keys
+ KeyAccumulator accumulator(isolate, OWN_ONLY, ALL_PROPERTIES);
+ // No need to separate prototype levels since we only get element keys.
for (PrototypeIterator iter(isolate, array,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
@@ -482,15 +491,6 @@ RUNTIME_FUNCTION(Runtime_GetCachedArrayIndex) {
}
-RUNTIME_FUNCTION(Runtime_FastOneByteArrayJoin) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- // Returning undefined means that this fast path fails and one has to resort
- // to a slow path.
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_ArraySpeciesConstructor) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index ccd15e8b5d..e27685dd3f 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -81,8 +81,7 @@ RUNTIME_FUNCTION(Runtime_HomeObjectSymbol) {
return isolate->heap()->home_object_symbol();
}
-
-static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
+static MaybeHandle<Object> DefineClass(Isolate* isolate,
Handle<Object> super_class,
Handle<JSFunction> constructor,
int start_position, int end_position) {
@@ -105,8 +104,7 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, prototype_parent,
Runtime::GetObjectProperty(isolate, super_class,
- isolate->factory()->prototype_string(),
- SLOPPY),
+ isolate->factory()->prototype_string()),
Object);
if (!prototype_parent->IsNull() && !prototype_parent->IsJSReceiver()) {
THROW_NEW_ERROR(
@@ -138,17 +136,12 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
map->SetConstructor(*constructor);
Handle<JSObject> prototype = isolate->factory()->NewJSObjectFromMap(map);
- Handle<String> name_string = name->IsString()
- ? Handle<String>::cast(name)
- : isolate->factory()->empty_string();
- constructor->shared()->set_name(*name_string);
-
if (!super_class->IsTheHole()) {
// Derived classes, just like builtins, don't create implicit receivers in
// [[construct]]. Instead they just set up new.target and call into the
// constructor. Hence we can reuse the builtins construct stub for derived
// classes.
- Handle<Code> stub(isolate->builtins()->JSBuiltinsConstructStub());
+ Handle<Code> stub(isolate->builtins()->JSBuiltinsConstructStubForDerived());
constructor->shared()->set_construct_stub(*stub);
}
@@ -195,35 +188,20 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
RUNTIME_FUNCTION(Runtime_DefineClass) {
HandleScope scope(isolate);
- DCHECK(args.length() == 5);
- CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 2);
- CONVERT_SMI_ARG_CHECKED(start_position, 3);
- CONVERT_SMI_ARG_CHECKED(end_position, 4);
+ DCHECK(args.length() == 4);
+ CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 1);
+ CONVERT_SMI_ARG_CHECKED(start_position, 2);
+ CONVERT_SMI_ARG_CHECKED(end_position, 3);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, DefineClass(isolate, name, super_class, constructor,
+ isolate, result, DefineClass(isolate, super_class, constructor,
start_position, end_position));
return *result;
}
-RUNTIME_FUNCTION(Runtime_DefineClassMethod) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 2);
-
- RETURN_FAILURE_ON_EXCEPTION(isolate,
- JSObject::DefinePropertyOrElementIgnoreAttributes(
- object, name, function, DONT_ENUM));
- return isolate->heap()->undefined_value();
-}
-
-
RUNTIME_FUNCTION(Runtime_FinalizeClassDefinition) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -244,12 +222,10 @@ RUNTIME_FUNCTION(Runtime_FinalizeClassDefinition) {
return *constructor;
}
-
static MaybeHandle<Object> LoadFromSuper(Isolate* isolate,
Handle<Object> receiver,
Handle<JSObject> home_object,
- Handle<Name> name,
- LanguageMode language_mode) {
+ Handle<Name> name) {
if (home_object->IsAccessCheckNeeded() &&
!isolate->MayAccess(handle(isolate->context()), home_object)) {
isolate->ReportFailedAccessCheck(home_object);
@@ -259,22 +235,19 @@ static MaybeHandle<Object> LoadFromSuper(Isolate* isolate,
PrototypeIterator iter(isolate, home_object);
Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
if (!proto->IsJSReceiver()) {
- return Object::ReadAbsentProperty(isolate, proto, name, language_mode);
+ return Object::ReadAbsentProperty(isolate, proto, name);
}
LookupIterator it(receiver, name, Handle<JSReceiver>::cast(proto));
Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
- Object::GetProperty(&it, language_mode), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result, Object::GetProperty(&it), Object);
return result;
}
-
static MaybeHandle<Object> LoadElementFromSuper(Isolate* isolate,
Handle<Object> receiver,
Handle<JSObject> home_object,
- uint32_t index,
- LanguageMode language_mode) {
+ uint32_t index) {
if (home_object->IsAccessCheckNeeded() &&
!isolate->MayAccess(handle(isolate->context()), home_object)) {
isolate->ReportFailedAccessCheck(home_object);
@@ -285,50 +258,44 @@ static MaybeHandle<Object> LoadElementFromSuper(Isolate* isolate,
Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
if (!proto->IsJSReceiver()) {
Handle<Object> name = isolate->factory()->NewNumberFromUint(index);
- return Object::ReadAbsentProperty(isolate, proto, name, language_mode);
+ return Object::ReadAbsentProperty(isolate, proto, name);
}
LookupIterator it(isolate, receiver, index, Handle<JSReceiver>::cast(proto));
Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
- Object::GetProperty(&it, language_mode), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result, Object::GetProperty(&it), Object);
return result;
}
-// TODO(conradw): It would be more efficient to have a separate runtime function
-// for strong mode.
RUNTIME_FUNCTION(Runtime_LoadFromSuper) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- LoadFromSuper(isolate, receiver, home_object, name, language_mode));
+ isolate, result, LoadFromSuper(isolate, receiver, home_object, name));
return *result;
}
RUNTIME_FUNCTION(Runtime_LoadKeyedFromSuper) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
uint32_t index = 0;
Handle<Object> result;
if (key->ToArrayIndex(&index)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, LoadElementFromSuper(isolate, receiver, home_object,
- index, language_mode));
+ isolate, result,
+ LoadElementFromSuper(isolate, receiver, home_object, index));
return *result;
}
@@ -338,13 +305,12 @@ RUNTIME_FUNCTION(Runtime_LoadKeyedFromSuper) {
// TODO(verwaest): Unify using LookupIterator.
if (name->AsArrayIndex(&index)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, LoadElementFromSuper(isolate, receiver, home_object,
- index, language_mode));
+ isolate, result,
+ LoadElementFromSuper(isolate, receiver, home_object, index));
return *result;
}
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- LoadFromSuper(isolate, receiver, home_object, name, language_mode));
+ isolate, result, LoadFromSuper(isolate, receiver, home_object, name));
return *result;
}
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 15a3a14156..263c4f9e77 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -52,8 +52,7 @@ Object* CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
Handle<Code> code;
- Handle<Code> unoptimized(function->shared()->code());
- if (Compiler::GetOptimizedCode(function, unoptimized, mode).ToHandle(&code)) {
+ if (Compiler::GetOptimizedCode(function, mode).ToHandle(&code)) {
// Optimization succeeded, return optimized code.
function->ReplaceCode(*code);
} else {
@@ -72,6 +71,8 @@ Object* CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
DCHECK(function->code()->kind() == Code::FUNCTION ||
function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
+ (function->code()->is_interpreter_entry_trampoline() &&
+ function->shared()->HasBytecodeArray()) ||
function->IsInOptimizationQueue());
return function->code();
}
@@ -135,6 +136,8 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
static_cast<Deoptimizer::BailoutType>(type_arg);
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
DCHECK(AllowHeapAllocation::IsAllowed());
+ TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
+ TRACE_EVENT0("v8", "V8.DeoptimizeCode");
Handle<JSFunction> function = deoptimizer->function();
Handle<Code> optimized_code = deoptimizer->compiled_code();
@@ -247,7 +250,6 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
function->shared()->ast_node_count() > 512)
? Compiler::CONCURRENT
: Compiler::NOT_CONCURRENT;
- Handle<Code> result = Handle<Code>::null();
OptimizedCompileJob* job = NULL;
if (mode == Compiler::CONCURRENT) {
@@ -268,22 +270,24 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
job = dispatcher->FindReadyOSRCandidate(function, ast_id);
}
+ MaybeHandle<Code> maybe_result;
if (job != NULL) {
if (FLAG_trace_osr) {
PrintF("[OSR - Found ready: ");
function->PrintName();
PrintF(" at AST id %d]\n", ast_id.ToInt());
}
- result = Compiler::GetConcurrentlyOptimizedCode(job);
+ maybe_result = Compiler::GetConcurrentlyOptimizedCode(job);
} else if (IsSuitableForOnStackReplacement(isolate, function)) {
if (FLAG_trace_osr) {
PrintF("[OSR - Compiling: ");
function->PrintName();
PrintF(" at AST id %d]\n", ast_id.ToInt());
}
- MaybeHandle<Code> maybe_result = Compiler::GetOptimizedCode(
- function, caller_code, mode, ast_id,
+ maybe_result = Compiler::GetOptimizedCode(
+ function, mode, ast_id,
(mode == Compiler::NOT_CONCURRENT) ? frame : nullptr);
+ Handle<Code> result;
if (maybe_result.ToHandle(&result) &&
result.is_identical_to(isolate->builtins()->InOptimizationQueue())) {
// Optimization is queued. Return to check later.
@@ -295,7 +299,9 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
BackEdgeTable::Revert(isolate, *caller_code);
// Check whether we ended up with usable optimized code.
- if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
+ Handle<Code> result;
+ if (maybe_result.ToHandle(&result) &&
+ result->kind() == Code::OPTIMIZED_FUNCTION) {
DeoptimizationInputData* data =
DeoptimizationInputData::cast(result->deoptimization_data());
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 80791dea76..c29ea9a35d 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -22,8 +22,7 @@ RUNTIME_FUNCTION(Runtime_DebugBreak) {
// Get the top-most JavaScript frame.
JavaScriptFrameIterator it(isolate);
isolate->debug()->Break(args, it.frame());
- isolate->debug()->SetAfterBreakTarget(it.frame());
- return isolate->heap()->undefined_value();
+ return isolate->debug()->SetAfterBreakTarget(it.frame());
}
@@ -82,7 +81,7 @@ static Handle<Object> DebugGetProperty(LookupIterator* it,
return it->isolate()->factory()->undefined_value();
}
MaybeHandle<Object> maybe_result =
- JSObject::GetPropertyWithAccessor(it, SLOPPY);
+ JSObject::GetPropertyWithAccessor(it);
Handle<Object> result;
if (!maybe_result.ToHandle(&result)) {
result = handle(it->isolate()->pending_exception(), it->isolate());
@@ -334,10 +333,14 @@ RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
details->set(
2, isolate->heap()->ToBoolean(it.state() == LookupIterator::INTERCEPTOR));
if (has_js_accessors) {
- AccessorPair* accessors = AccessorPair::cast(*maybe_pair);
+ Handle<AccessorPair> accessors = Handle<AccessorPair>::cast(maybe_pair);
details->set(3, isolate->heap()->ToBoolean(has_caught));
- details->set(4, accessors->GetComponent(ACCESSOR_GETTER));
- details->set(5, accessors->GetComponent(ACCESSOR_SETTER));
+ Handle<Object> getter =
+ AccessorPair::GetComponent(accessors, ACCESSOR_GETTER);
+ Handle<Object> setter =
+ AccessorPair::GetComponent(accessors, ACCESSOR_SETTER);
+ details->set(4, *getter);
+ details->set(5, *setter);
}
return *isolate->factory()->NewJSArrayWithElements(details);
@@ -526,7 +529,8 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
bool constructor = frame_inspector.IsConstructor();
// Get scope info and read from it for local variable information.
- Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
+ Handle<JSFunction> function =
+ Handle<JSFunction>::cast(frame_inspector.GetFunction());
RUNTIME_ASSERT(function->shared()->IsSubjectToDebugging());
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
@@ -550,13 +554,13 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// Use the value from the stack.
if (scope_info->LocalIsSynthetic(i)) continue;
locals->set(local * 2, scope_info->LocalName(i));
- locals->set(local * 2 + 1, frame_inspector.GetExpression(i));
+ locals->set(local * 2 + 1, *(frame_inspector.GetExpression(i)));
local++;
}
if (local < local_count) {
// Get the context containing declarations.
Handle<Context> context(
- Context::cast(frame_inspector.GetContext())->declaration_context());
+ Handle<Context>::cast(frame_inspector.GetContext())->closure_context());
for (; i < scope_info->LocalCount(); ++i) {
if (scope_info->LocalIsSynthetic(i)) continue;
Handle<String> name(scope_info->LocalName(i));
@@ -635,7 +639,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
details->set(kFrameDetailsFrameIdIndex, *frame_id);
// Add the function (same as in function frame).
- details->set(kFrameDetailsFunctionIndex, frame_inspector.GetFunction());
+ details->set(kFrameDetailsFunctionIndex, *(frame_inspector.GetFunction()));
// Add the arguments count.
details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(argument_count));
@@ -685,7 +689,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// Parameter value.
if (i < frame_inspector.GetParametersCount()) {
// Get the value from the stack.
- details->set(details_index++, frame_inspector.GetParameter(i));
+ details->set(details_index++, *(frame_inspector.GetParameter(i)));
} else {
details->set(details_index++, heap->undefined_value());
}
@@ -704,25 +708,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// Add the receiver (same as in function frame).
Handle<Object> receiver(it.frame()->receiver(), isolate);
DCHECK(!function->shared()->IsBuiltin());
- if (!receiver->IsJSObject() && is_sloppy(shared->language_mode())) {
- // If the receiver is not a JSObject and the function is not a builtin or
- // strict-mode we have hit an optimization where a value object is not
- // converted into a wrapped JS objects. To hide this optimization from the
- // debugger, we wrap the receiver by creating correct wrapper object based
- // on the function's native context.
- // See ECMA-262 6.0, 9.2.1.2, 6 b iii.
- if (receiver->IsUndefined()) {
- receiver = handle(function->global_proxy());
- } else {
- Context* context = function->context();
- Handle<Context> native_context(Context::cast(context->native_context()));
- if (!Object::ToObject(isolate, receiver, native_context)
- .ToHandle(&receiver)) {
- // This only happens if the receiver is forcibly set in %_CallFunction.
- return heap->undefined_value();
- }
- }
- }
+ DCHECK_IMPLIES(is_sloppy(shared->language_mode()), receiver->IsJSReceiver());
details->set(kFrameDetailsReceiverIndex, *receiver);
DCHECK_EQ(details_size, details_index);
@@ -1329,14 +1315,14 @@ RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) {
return *result;
}
-
-static bool HasInPrototypeChainIgnoringProxies(Isolate* isolate, Object* object,
+static bool HasInPrototypeChainIgnoringProxies(Isolate* isolate,
+ JSObject* object,
Object* proto) {
PrototypeIterator iter(isolate, object, PrototypeIterator::START_AT_RECEIVER);
while (true) {
iter.AdvanceIgnoringProxies();
if (iter.IsAtEnd()) return false;
- if (iter.IsAtEnd(proto)) return true;
+ if (iter.GetCurrent() == proto) return true;
}
}
@@ -1443,7 +1429,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetPrototype) {
// TODO(1543): Come up with a solution for clients to handle potential errors
// thrown by an intermediate proxy.
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
- Object::GetPrototype(isolate, obj));
+ JSReceiver::GetPrototype(isolate, obj));
return *prototype;
}
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index ff6804c8fb..c44945c94c 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -5,11 +5,90 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/factory.h"
+#include "src/isolate-inl.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
+namespace {
+
+// Returns either a FixedArray or, if the given {receiver} has an enum cache
+// that contains all enumerable properties of the {receiver} and its prototypes
+// have none, the map of the {receiver}. This is used to speed up the check for
+// deletions during a for-in.
+MaybeHandle<HeapObject> Enumerate(Handle<JSReceiver> receiver) {
+ Isolate* const isolate = receiver->GetIsolate();
+ // Test if we have an enum cache for {receiver}.
+ if (!receiver->IsSimpleEnum()) {
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, keys,
+ JSReceiver::GetKeys(receiver, INCLUDE_PROTOS, ENUMERABLE_STRINGS),
+ HeapObject);
+ // Test again, since cache may have been built by GetKeys() calls above.
+ if (!receiver->IsSimpleEnum()) return keys;
+ }
+ return handle(receiver->map(), isolate);
+}
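
A toy model of why returning the map works as a cache marker: any mutation that could change the key set (adds, deletes, shape transitions) also changes the receiver's map, so map identity certifies the cached keys. Integer ids stand in for map pointers here; names are illustrative.

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      int map_id = 1;                 // receiver's current "map"
      const int cache_type = map_id;  // snapshot taken at ForInPrepare time
      std::vector<std::string> keys = {"a", "b", "c"};
      for (const std::string& key : keys) {
        if (key == "b") map_id = 2;   // a delete in the loop changes the map
        if (map_id == cache_type) {
          printf("%s (unfiltered fast path)\n", key.c_str());
        } else {
          printf("%s (re-validated, as in Runtime_ForInFilter)\n",
                 key.c_str());
        }
      }
      return 0;
    }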
+
+
+MaybeHandle<Object> Filter(Handle<JSReceiver> receiver, Handle<Object> key) {
+ Isolate* const isolate = receiver->GetIsolate();
+ // TODO(turbofan): Fast case for array indices.
+ Handle<Name> name;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
+ Object);
+ Maybe<bool> result = JSReceiver::HasProperty(receiver, name);
+ MAYBE_RETURN_NULL(result);
+ if (result.FromJust()) return name;
+ return isolate->factory()->undefined_value();
+}
+
+} // namespace
+
+
+RUNTIME_FUNCTION(Runtime_ForInEnumerate) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ Handle<HeapObject> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Enumerate(receiver));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION_RETURN_TRIPLE(Runtime_ForInPrepare) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
+ Handle<Object> cache_type;
+ if (!Enumerate(receiver).ToHandle(&cache_type)) {
+ return MakeTriple(isolate->heap()->exception(), nullptr, nullptr);
+ }
+ Handle<FixedArray> cache_array;
+ int cache_length;
+ if (cache_type->IsMap()) {
+ Handle<Map> cache_map = Handle<Map>::cast(cache_type);
+ Handle<DescriptorArray> descriptors(cache_map->instance_descriptors(),
+ isolate);
+ cache_length = cache_map->EnumLength();
+ if (cache_length && descriptors->HasEnumCache()) {
+ cache_array = handle(descriptors->GetEnumCache(), isolate);
+ } else {
+ cache_array = isolate->factory()->empty_fixed_array();
+ cache_length = 0;
+ }
+ } else {
+ cache_array = Handle<FixedArray>::cast(cache_type);
+ cache_length = cache_array->length();
+ cache_type = handle(Smi::FromInt(1), isolate);
+ }
+ return MakeTriple(*cache_type, *cache_array, Smi::FromInt(cache_length));
+}
+
+
RUNTIME_FUNCTION(Runtime_ForInDone) {
SealHandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -26,15 +105,9 @@ RUNTIME_FUNCTION(Runtime_ForInFilter) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- // TODO(turbofan): Fast case for array indices.
- Handle<Name> name;
- if (!Object::ToName(isolate, key).ToHandle(&name)) {
- return isolate->heap()->exception();
- }
- Maybe<bool> result = JSReceiver::HasProperty(receiver, name);
- if (!result.IsJust()) return isolate->heap()->exception();
- if (result.FromJust()) return *name;
- return isolate->heap()->undefined_value();
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Filter(receiver, key));
+ return *result;
}
@@ -46,20 +119,13 @@ RUNTIME_FUNCTION(Runtime_ForInNext) {
CONVERT_ARG_HANDLE_CHECKED(Object, cache_type, 2);
CONVERT_SMI_ARG_CHECKED(index, 3);
Handle<Object> key = handle(cache_array->get(index), isolate);
- // Don't need filtering if expected map still matches that of the receiver,
- // and neither for proxies.
- if (receiver->map() == *cache_type || *cache_type == Smi::FromInt(0)) {
+ // Don't need filtering if expected map still matches that of the receiver.
+ if (receiver->map() == *cache_type) {
return *key;
}
- // TODO(turbofan): Fast case for array indices.
- Handle<Name> name;
- if (!Object::ToName(isolate, key).ToHandle(&name)) {
- return isolate->heap()->exception();
- }
- Maybe<bool> result = JSReceiver::HasProperty(receiver, name);
- if (!result.IsJust()) return isolate->heap()->exception();
- if (result.FromJust()) return *name;
- return isolate->heap()->undefined_value();
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Filter(receiver, key));
+ return *result;
}
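
The new ForInPrepare/ForInFilter/ForInNext trio above encodes a contract: ForInPrepare returns a triple of cache_type (the receiver's map when the enum cache covers all keys, otherwise the Smi 1), cache_array, and cache_length, and ForInNext may skip the per-key deletion check exactly while the receiver's map is still identical to cache_type. A toy standalone model of that contract (all names here are illustrative, nothing below is V8 API):

```cpp
#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

// Toy for-in: snapshot the keys once, then skip the per-key deletion check
// for as long as the object's shape is provably unchanged since the snapshot.
struct ToyObject {
  std::unordered_map<std::string, int> properties;
  int shape = 0;  // stands in for the map; bumped on any delete
  void Delete(const std::string& key) {
    properties.erase(key);
    ++shape;
  }
};

int main() {
  ToyObject o;
  o.properties = {{"a", 1}, {"b", 2}, {"c", 3}};

  // "ForInPrepare": cache_array plus the shape it was computed for.
  std::vector<std::string> cache_array;
  for (const auto& p : o.properties) cache_array.push_back(p.first);
  const int cache_type = o.shape;

  for (const std::string& key : cache_array) {
    // "ForInNext" fast path: shape unchanged, key cannot have been deleted.
    // Slow path ("ForInFilter"): re-check that the property still exists.
    if (o.shape != cache_type && o.properties.count(key) == 0) continue;
    std::printf("%s\n", key.c_str());
    if (key == cache_array.front()) o.Delete(cache_array.back());
  }
  return 0;
}
```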
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index befd337098..d424a9ebfe 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -55,11 +55,14 @@ RUNTIME_FUNCTION(Runtime_FunctionGetScript) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
- if (function->IsJSBoundFunction()) return isolate->heap()->undefined_value();
- Handle<Object> script(Handle<JSFunction>::cast(function)->shared()->script(),
- isolate);
- if (!script->IsScript()) return isolate->heap()->undefined_value();
- return *Script::GetWrapper(Handle<Script>::cast(script));
+ if (function->IsJSFunction()) {
+ Handle<Object> script(
+ Handle<JSFunction>::cast(function)->shared()->script(), isolate);
+ if (script->IsScript()) {
+ return *Script::GetWrapper(Handle<Script>::cast(script));
+ }
+ }
+ return isolate->heap()->undefined_value();
}
@@ -67,8 +70,10 @@ RUNTIME_FUNCTION(Runtime_FunctionGetSourceCode) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
- if (function->IsJSBoundFunction()) return isolate->heap()->undefined_value();
- return *Handle<JSFunction>::cast(function)->shared()->GetSourceCode();
+ if (function->IsJSFunction()) {
+ return *Handle<JSFunction>::cast(function)->shared()->GetSourceCode();
+ }
+ return isolate->heap()->undefined_value();
}
@@ -86,13 +91,9 @@ RUNTIME_FUNCTION(Runtime_FunctionGetPositionForOffset) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
- CONVERT_ARG_CHECKED(Code, code, 0);
+ CONVERT_ARG_CHECKED(AbstractCode, abstract_code, 0);
CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]);
-
- RUNTIME_ASSERT(0 <= offset && offset < code->Size());
-
- Address pc = code->address() + offset;
- return Smi::FromInt(code->SourcePosition(pc));
+ return Smi::FromInt(abstract_code->SourcePosition(offset));
}
@@ -166,6 +167,9 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
// Set the code, scope info, formal parameter count, and the length
// of the target shared function info.
target_shared->ReplaceCode(source_shared->code());
+ if (source_shared->HasBytecodeArray()) {
+ target_shared->set_function_data(source_shared->bytecode_array());
+ }
target_shared->set_scope_info(source_shared->scope_info());
target_shared->set_length(source_shared->length());
target_shared->set_feedback_vector(source_shared->feedback_vector());
@@ -232,7 +236,6 @@ RUNTIME_FUNCTION(Runtime_IsConstructor) {
return isolate->heap()->ToBoolean(object->IsConstructor());
}
-
RUNTIME_FUNCTION(Runtime_SetForceInlineFlag) {
SealHandleScope shs(isolate);
RUNTIME_ASSERT(args.length() == 1);
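
The Runtime_FunctionGetPositionForOffset change above is a recurring theme of this upgrade: source positions are now looked up by code offset through AbstractCode (which works for machine code and bytecode alike) instead of reconstructing a raw pc first. A minimal sketch of such an offset-keyed lookup, assuming a table sorted by code offset (the encoding is illustrative, not V8's):

```cpp
#include <utility>
#include <vector>

// Minimal sketch of an offset-keyed position table like the one
// AbstractCode::SourcePosition(offset) consults: entries are sorted by code
// offset, and a lookup returns the last position at or before the offset.
int SourcePositionForOffset(
    const std::vector<std::pair<int, int>>& table,  // (code offset, position)
    int offset) {
  int position = -1;  // stand-in for "no position known"
  for (const auto& entry : table) {
    if (entry.first > offset) break;
    position = entry.second;
  }
  return position;
}
```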
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index 926cd3ce2d..dab0621592 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -22,11 +22,8 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
RUNTIME_ASSERT(function->shared()->is_generator());
Handle<JSGeneratorObject> generator;
- if (frame->IsConstructor()) {
- generator = handle(JSGeneratorObject::cast(frame->receiver()));
- } else {
- generator = isolate->factory()->NewJSGeneratorObject(function);
- }
+ DCHECK(!frame->IsConstructor());
+ generator = isolate->factory()->NewJSGeneratorObject(function);
generator->set_function(*function);
generator->set_context(Context::cast(frame->context()));
generator->set_receiver(frame->receiver());
@@ -39,7 +36,7 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
HandleScope handle_scope(isolate);
- DCHECK(args.length() == 1 || args.length() == 2);
+ DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator_object, 0);
JavaScriptFrameIterator stack_iterator(isolate);
@@ -58,18 +55,6 @@ RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
DCHECK_GE(operands_count, 1 + args.length());
operands_count -= 1 + args.length();
- // Second argument indicates that we need to patch the handler table because
- // a delegating yield introduced a try-catch statement at expression level,
- // hence the operand count was off when we statically computed it.
- // TODO(mstarzinger): This special case disappears with do-expressions.
- if (args.length() == 2) {
- CONVERT_SMI_ARG_CHECKED(handler_index, 1);
- Handle<Code> code(frame->unchecked_code());
- Handle<HandlerTable> table(HandlerTable::cast(code->handler_table()));
- int handler_depth = operands_count - TryBlockConstant::kElementCount;
- table->SetRangeDepth(handler_index, handler_depth);
- }
-
if (operands_count == 0) {
// Although it's semantically harmless to call this function with an
// operands_count of zero, it is also unnecessary.
@@ -90,9 +75,9 @@ RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
// called if the suspended activation had operands on the stack, stack handlers
// needing rewinding, or if the resume should throw an exception. The fast path
// is handled directly in FullCodeGenerator::EmitGeneratorResume(), which is
-// inlined into GeneratorNext and GeneratorThrow. EmitGeneratorResumeResume is
-// called in any case, as it needs to reconstruct the stack frame and make space
-// for arguments and operands.
+// inlined into GeneratorNext, GeneratorReturn, and GeneratorThrow.
+// EmitGeneratorResume is called in any case, as it needs to reconstruct the
+// stack frame and make space for arguments and operands.
RUNTIME_FUNCTION(Runtime_ResumeJSGeneratorObject) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 3);
@@ -128,7 +113,10 @@ RUNTIME_FUNCTION(Runtime_ResumeJSGeneratorObject) {
JSGeneratorObject::ResumeMode resume_mode =
static_cast<JSGeneratorObject::ResumeMode>(resume_mode_int);
switch (resume_mode) {
+    // Note: NEXT and RETURN look identical here, but RETURN receives special
+    // treatment in the generator code (to which we return here).
case JSGeneratorObject::NEXT:
+ case JSGeneratorObject::RETURN:
return value;
case JSGeneratorObject::THROW:
return isolate->Throw(value);
@@ -180,6 +168,16 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetReceiver) {
}
+// Returns input of generator activation.
+RUNTIME_FUNCTION(Runtime_GeneratorGetInput) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+ return generator->input();
+}
+
+
// Returns generator continuation as a PC offset, or the magic -1 or 0 values.
RUNTIME_FUNCTION(Runtime_GeneratorGetContinuation) {
HandleScope scope(isolate);
@@ -198,26 +196,33 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetSourcePosition) {
if (generator->is_suspended()) {
Handle<Code> code(generator->function()->code(), isolate);
int offset = generator->continuation();
-
- RUNTIME_ASSERT(0 <= offset && offset < code->Size());
- Address pc = code->address() + offset;
-
- return Smi::FromInt(code->SourcePosition(pc));
+ RUNTIME_ASSERT(0 <= offset && offset < code->instruction_size());
+ return Smi::FromInt(code->SourcePosition(offset));
}
return isolate->heap()->undefined_value();
}
+// Optimization for the following three functions is disabled in
+// js/generator.js and compiler/ast-graph-builder.cc.
+
+
RUNTIME_FUNCTION(Runtime_GeneratorNext) {
- UNREACHABLE(); // Optimization disabled in SetUpGenerators().
- return NULL;
+ UNREACHABLE();
+ return nullptr;
+}
+
+
+RUNTIME_FUNCTION(Runtime_GeneratorReturn) {
+ UNREACHABLE();
+ return nullptr;
}
RUNTIME_FUNCTION(Runtime_GeneratorThrow) {
- UNREACHABLE(); // Optimization disabled in SetUpGenerators().
- return NULL;
+ UNREACHABLE();
+ return nullptr;
}
} // namespace internal
} // namespace v8
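
For the resume-mode switch added above, a reduced standalone dispatch makes the comment concrete: NEXT and RETURN both hand the value back to the resumed frame (the generated code distinguishes them), while THROW raises it. Types and names below are illustrative only:

```cpp
#include <stdexcept>
#include <string>

enum class ResumeMode { kNext, kReturn, kThrow };

// Toy dispatch mirroring the switch in Runtime_ResumeJSGeneratorObject:
// NEXT and RETURN both return the value to the resumed generator frame,
// THROW raises it instead.
std::string Resume(ResumeMode mode, const std::string& value) {
  switch (mode) {
    case ResumeMode::kNext:
    case ResumeMode::kReturn:
      return value;
    case ResumeMode::kThrow:
      throw std::runtime_error(value);
  }
  return value;  // unreachable; keeps compilers quiet
}
```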
diff --git a/deps/v8/src/runtime/runtime-i18n.cc b/deps/v8/src/runtime/runtime-i18n.cc
index e1f0c8e959..e57f8d3626 100644
--- a/deps/v8/src/runtime/runtime-i18n.cc
+++ b/deps/v8/src/runtime/runtime-i18n.cc
@@ -586,8 +586,9 @@ RUNTIME_FUNCTION(Runtime_StringNormalize) {
// TODO(mnita): check Normalizer2 (not available in ICU 46)
UErrorCode status = U_ZERO_ERROR;
+ icu::UnicodeString input(false, u_value, string_value.length());
icu::UnicodeString result;
- icu::Normalizer::normalize(u_value, normalizationForms[form_id], 0, result,
+ icu::Normalizer::normalize(input, normalizationForms[form_id], 0, result,
status);
if (U_FAILURE(status)) {
return isolate->heap()->undefined_value();
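
The one-line i18n fix above routes the raw UChar buffer through UnicodeString's read-only aliasing constructor rather than relying on an implicit conversion. A minimal sketch of that constructor's use, assuming a caller-owned buffer that is not NUL-terminated (hence the false first argument):

```cpp
#include <unicode/normlzr.h>
#include <unicode/unistr.h>

// Sketch of the read-only-alias constructor used above: isTerminated = false
// tells ICU the buffer is not NUL-terminated, and the UnicodeString merely
// points into it (no copy; the caller keeps ownership of {buffer}).
void NormalizeNfc(const UChar* buffer, int32_t length,
                  icu::UnicodeString* result, UErrorCode* status) {
  icu::UnicodeString input(false, buffer, length);  // aliases {buffer}
  icu::Normalizer::normalize(input, UNORM_NFC, 0, *result, *status);
}
```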
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index ee664645d4..0ca2e84d3d 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -395,7 +395,7 @@ bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
it.frame()->Summarize(&frames);
FrameSummary& summary = frames.last();
- int pos = summary.code()->SourcePosition(summary.pc());
+ int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
*target = MessageLocation(casted_script, pos, pos + 1, handle(fun));
return true;
}
@@ -448,6 +448,14 @@ RUNTIME_FUNCTION(Runtime_ThrowConstructedNonConstructable) {
}
+RUNTIME_FUNCTION(Runtime_ThrowDerivedConstructorReturnedNonObject) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDerivedConstructorReturn));
+}
+
+
// ES6 section 7.3.17 CreateListFromArrayLike (obj)
RUNTIME_FUNCTION(Runtime_CreateListFromArrayLike) {
HandleScope scope(isolate);
@@ -469,5 +477,17 @@ RUNTIME_FUNCTION(Runtime_IncrementUseCounter) {
return isolate->heap()->undefined_value();
}
+
+RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ std::stringstream stats_stream;
+ isolate->counters()->runtime_call_stats()->Print(stats_stream);
+ Handle<String> result =
+ isolate->factory()->NewStringFromAsciiChecked(stats_stream.str().c_str());
+ isolate->counters()->runtime_call_stats()->Reset();
+ return *result;
+}
+
} // namespace internal
} // namespace v8
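
Runtime_GetAndResetRuntimeCallStats above follows a simple capture-and-reset pattern: render the counters into a stringstream, hand the string to the JS heap, then clear. Stripped of V8 types, the pattern reduces to this sketch (ToyStats is illustrative):

```cpp
#include <sstream>
#include <string>

// Capture-and-reset, as in Runtime_GetAndResetRuntimeCallStats: anything
// with a Print(std::ostream&) can be rendered into a std::string and then
// cleared in one step.
struct ToyStats {
  int runtime_calls = 0;
  void Print(std::ostream& os) const { os << "calls: " << runtime_calls; }
  void Reset() { runtime_calls = 0; }
};

std::string GetAndReset(ToyStats* stats) {
  std::stringstream stream;
  stats->Print(stream);
  stats->Reset();
  return stream.str();
}
```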
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index d061a4916d..cbacb554ad 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -4,8 +4,14 @@
#include "src/runtime/runtime-utils.h"
+#include <iomanip>
+
#include "src/arguments.h"
+#include "src/frames-inl.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecodes.h"
#include "src/isolate-inl.h"
+#include "src/ostreams.h"
namespace v8 {
namespace internal {
@@ -130,7 +136,7 @@ RUNTIME_FUNCTION(Runtime_InterpreterLogicalNot) {
RUNTIME_FUNCTION(Runtime_InterpreterTypeOf) {
- SealHandleScope shs(isolate);
+ HandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
return Object::cast(*Object::TypeOf(isolate, x));
@@ -147,55 +153,121 @@ RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
shared, context, static_cast<PretenureFlag>(pretenured_flag));
}
-
-RUNTIME_FUNCTION(Runtime_InterpreterForInPrepare) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
-
- Object* property_names = Runtime_GetPropertyNamesFast(
- 1, Handle<Object>::cast(receiver).location(), isolate);
- if (isolate->has_pending_exception()) {
- return property_names;
+namespace {
+
+void PrintRegisters(std::ostream& os, bool is_input,
+ Handle<BytecodeArray> bytecode_array, int bytecode_offset,
+ Handle<Object> accumulator) {
+ static const int kRegFieldWidth = static_cast<int>(strlen("accumulator"));
+ static const char* kInputColourCode = "\033[0;36m";
+ static const char* kOutputColourCode = "\033[0;35m";
+ static const char* kNormalColourCode = "\033[0;m";
+ const char* kArrowDirection = is_input ? " -> " : " <- ";
+ if (FLAG_log_colour) {
+ os << (is_input ? kInputColourCode : kOutputColourCode);
}
- Handle<Object> cache_type(property_names, isolate);
- Handle<FixedArray> cache_array;
- int cache_length;
-
- Handle<Map> receiver_map = handle(receiver->map(), isolate);
- if (cache_type->IsMap()) {
- Handle<Map> cache_type_map =
- handle(Handle<Map>::cast(cache_type)->map(), isolate);
- DCHECK(cache_type_map.is_identical_to(isolate->factory()->meta_map()));
- int enum_length = cache_type_map->EnumLength();
- DescriptorArray* descriptors = receiver_map->instance_descriptors();
- if (enum_length > 0 && descriptors->HasEnumCache()) {
- cache_array = handle(descriptors->GetEnumCache(), isolate);
- cache_length = cache_array->length();
- } else {
- cache_array = isolate->factory()->empty_fixed_array();
- cache_length = 0;
- }
- } else {
- cache_array = Handle<FixedArray>::cast(cache_type);
- cache_length = cache_array->length();
-
- STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
- if (receiver_map->instance_type() == JS_PROXY_TYPE) {
- // Zero indicates proxy
- cache_type = Handle<Object>(Smi::FromInt(0), isolate);
- } else {
- // One entails slow check
- cache_type = Handle<Object>(Smi::FromInt(1), isolate);
+ // Print accumulator.
+ os << " [ accumulator" << kArrowDirection;
+ accumulator->ShortPrint();
+ os << " ]" << std::endl;
+
+ // Find the location of the register file.
+ JavaScriptFrameIterator frame_iterator(bytecode_array->GetIsolate());
+ JavaScriptFrame* frame = frame_iterator.frame();
+ Address register_file =
+ frame->fp() + InterpreterFrameConstants::kRegisterFilePointerFromFp;
+
+ // Print the registers.
+ interpreter::BytecodeArrayIterator bytecode_iterator(bytecode_array);
+ bytecode_iterator.set_current_offset(
+ bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag);
+ interpreter::Bytecode bytecode = bytecode_iterator.current_bytecode();
+ int operand_count = interpreter::Bytecodes::NumberOfOperands(bytecode);
+ for (int operand_index = 0; operand_index < operand_count; operand_index++) {
+ interpreter::OperandType operand_type =
+ interpreter::Bytecodes::GetOperandType(bytecode, operand_index);
+ bool should_print =
+ is_input
+ ? interpreter::Bytecodes::IsRegisterInputOperandType(operand_type)
+ : interpreter::Bytecodes::IsRegisterOutputOperandType(operand_type);
+ if (should_print) {
+ interpreter::Register first_reg =
+ bytecode_iterator.GetRegisterOperand(operand_index);
+ int range = bytecode_iterator.GetRegisterOperandRange(operand_index);
+ for (int reg_index = first_reg.index();
+ reg_index < first_reg.index() + range; reg_index++) {
+ Address reg_location = register_file - reg_index * kPointerSize;
+ Object* reg_object = Memory::Object_at(reg_location);
+ os << " [ " << std::setw(kRegFieldWidth)
+ << interpreter::Register(reg_index).ToString(
+ bytecode_array->parameter_count())
+ << kArrowDirection;
+ reg_object->ShortPrint(os);
+ os << " ]" << std::endl;
+ }
}
}
+ if (FLAG_log_colour) {
+ os << kNormalColourCode;
+ }
+}
+
+} // namespace
- Handle<FixedArray> result = isolate->factory()->NewFixedArray(3);
- result->set(0, *cache_type);
- result->set(1, *cache_array);
- result->set(2, Smi::FromInt(cache_length));
- return *result;
+RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeEntry) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
+ CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, accumulator, 2);
+ OFStream os(stdout);
+
+ // Print bytecode.
+ const uint8_t* bytecode_address =
+ reinterpret_cast<const uint8_t*>(*bytecode_array) + bytecode_offset;
+ Vector<char> buf = Vector<char>::New(50);
+ SNPrintF(buf, "%p", bytecode_address);
+ os << " -> " << buf.start() << " (" << bytecode_offset << ") : ";
+ interpreter::Bytecodes::Decode(os, bytecode_address,
+ bytecode_array->parameter_count());
+ os << std::endl;
+
+ // Print all input registers and accumulator.
+ PrintRegisters(os, true, bytecode_array, bytecode_offset, accumulator);
+
+ os << std::flush;
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
+ CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, accumulator, 2);
+ OFStream os(stdout);
+
+ // Print all output registers and accumulator.
+ PrintRegisters(os, false, bytecode_array, bytecode_offset, accumulator);
+ os << std::flush;
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_InterpreterClearPendingMessage) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ Object* message = isolate->thread_local_top()->pending_message_obj_;
+ isolate->clear_pending_message();
+ return message;
+}
+
+RUNTIME_FUNCTION(Runtime_InterpreterSetPendingMessage) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, message, 0);
+ isolate->thread_local_top()->pending_message_obj_ = *message;
+ return isolate->heap()->undefined_value();
}
} // namespace internal
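
The tracing helpers above locate interpreter registers purely by pointer arithmetic: the register file sits at a fixed offset from the frame pointer, and register i lives one machine word below register i-1. An illustration with made-up constants (the real offsets come from InterpreterFrameConstants, not from anything below):

```cpp
#include <cstdint>
#include <cstdio>

// Illustration (fake numbers) of the addressing PrintRegisters performs:
// registers grow downward from the register-file pointer, one word apart.
int main() {
  const std::intptr_t kPointerSize = sizeof(void*);
  const std::intptr_t kRegisterFilePointerFromFp = -3 * kPointerSize;  // fake
  std::intptr_t fp = 0x7ffc0000;                                      // fake
  std::intptr_t register_file = fp + kRegisterFilePointerFromFp;
  for (int reg_index = 0; reg_index < 4; ++reg_index) {
    std::intptr_t reg_location = register_file - reg_index * kPointerSize;
    std::printf("r%d @ %p\n", reg_index,
                reinterpret_cast<void*>(reg_location));
  }
  return 0;
}
```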
diff --git a/deps/v8/src/runtime/runtime-json.cc b/deps/v8/src/runtime/runtime-json.cc
index 45f8183052..07232d59b8 100644
--- a/deps/v8/src/runtime/runtime-json.cc
+++ b/deps/v8/src/runtime/runtime-json.cc
@@ -7,9 +7,9 @@
#include "src/arguments.h"
#include "src/char-predicates-inl.h"
#include "src/isolate-inl.h"
+#include "src/json-parser.h"
#include "src/json-stringifier.h"
#include "src/objects-inl.h"
-#include "src/parsing/json-parser.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index b0e41dcdaa..e73095720e 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -400,59 +400,5 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteralStubBailout) {
return *result;
}
-
-RUNTIME_FUNCTION(Runtime_StoreArrayLiteralElement) {
- HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 5);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_SMI_ARG_CHECKED(store_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 3);
- CONVERT_SMI_ARG_CHECKED(literal_index, 4);
-
- Object* raw_literal_cell = literals->literal(literal_index);
- JSArray* boilerplate = NULL;
- if (raw_literal_cell->IsAllocationSite()) {
- AllocationSite* site = AllocationSite::cast(raw_literal_cell);
- boilerplate = JSArray::cast(site->transition_info());
- } else {
- boilerplate = JSArray::cast(raw_literal_cell);
- }
- Handle<JSArray> boilerplate_object(boilerplate);
- ElementsKind elements_kind = object->GetElementsKind();
- DCHECK(IsFastElementsKind(elements_kind));
- // Smis should never trigger transitions.
- DCHECK(!value->IsSmi());
-
- if (value->IsNumber()) {
- DCHECK(IsFastSmiElementsKind(elements_kind));
- ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
- ? FAST_HOLEY_DOUBLE_ELEMENTS
- : FAST_DOUBLE_ELEMENTS;
- if (IsMoreGeneralElementsKindTransition(
- boilerplate_object->GetElementsKind(), transitioned_kind)) {
- JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
- }
- JSObject::TransitionElementsKind(object, transitioned_kind);
- DCHECK(IsFastDoubleElementsKind(object->GetElementsKind()));
- FixedDoubleArray* double_array = FixedDoubleArray::cast(object->elements());
- HeapNumber* number = HeapNumber::cast(*value);
- double_array->set(store_index, number->Number());
- } else {
- if (!IsFastObjectElementsKind(elements_kind)) {
- ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
- ? FAST_HOLEY_ELEMENTS
- : FAST_ELEMENTS;
- JSObject::TransitionElementsKind(object, transitioned_kind);
- if (IsMoreGeneralElementsKindTransition(
- boilerplate_object->GetElementsKind(), transitioned_kind)) {
- JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
- }
- }
- FixedArray* object_array = FixedArray::cast(object->elements());
- object_array->set(store_index, *value);
- }
- return *object;
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
index 427d2b868a..9c4fde1cef 100644
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -14,13 +14,13 @@
namespace v8 {
namespace internal {
-#define RUNTIME_UNARY_MATH(Name, name) \
- RUNTIME_FUNCTION(Runtime_Math##Name) { \
- HandleScope scope(isolate); \
- DCHECK(args.length() == 1); \
- isolate->counters()->math_##name()->Increment(); \
- CONVERT_DOUBLE_ARG_CHECKED(x, 0); \
- return *isolate->factory()->NewHeapNumber(std::name(x)); \
+#define RUNTIME_UNARY_MATH(Name, name) \
+ RUNTIME_FUNCTION(Runtime_Math##Name) { \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 1); \
+ isolate->counters()->math_##name##_runtime()->Increment(); \
+ CONVERT_DOUBLE_ARG_CHECKED(x, 0); \
+ return *isolate->factory()->NewHeapNumber(std::name(x)); \
}
RUNTIME_UNARY_MATH(Acos, acos)
@@ -81,8 +81,7 @@ static const double kPiDividedBy4 = 0.78539816339744830962;
RUNTIME_FUNCTION(Runtime_MathAtan2) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- isolate->counters()->math_atan2()->Increment();
-
+ isolate->counters()->math_atan2_runtime()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
double result;
@@ -104,7 +103,7 @@ RUNTIME_FUNCTION(Runtime_MathAtan2) {
RUNTIME_FUNCTION(Runtime_MathExpRT) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- isolate->counters()->math_exp()->Increment();
+ isolate->counters()->math_exp_runtime()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
lazily_initialize_fast_exp(isolate);
@@ -115,7 +114,7 @@ RUNTIME_FUNCTION(Runtime_MathExpRT) {
RUNTIME_FUNCTION(Runtime_MathClz32) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- isolate->counters()->math_clz32()->Increment();
+ isolate->counters()->math_clz32_runtime()->Increment();
CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
return *isolate->factory()->NewNumberFromUint(
@@ -126,7 +125,7 @@ RUNTIME_FUNCTION(Runtime_MathClz32) {
RUNTIME_FUNCTION(Runtime_MathFloor) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- isolate->counters()->math_floor()->Increment();
+ isolate->counters()->math_floor_runtime()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
return *isolate->factory()->NewNumber(Floor(x));
@@ -138,7 +137,7 @@ RUNTIME_FUNCTION(Runtime_MathFloor) {
RUNTIME_FUNCTION(Runtime_MathPow) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- isolate->counters()->math_pow()->Increment();
+ isolate->counters()->math_pow_runtime()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -161,7 +160,7 @@ RUNTIME_FUNCTION(Runtime_MathPow) {
RUNTIME_FUNCTION(Runtime_MathPowRT) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- isolate->counters()->math_pow()->Increment();
+ isolate->counters()->math_pow_runtime()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
@@ -179,7 +178,7 @@ RUNTIME_FUNCTION(Runtime_RoundNumber) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(input, 0);
- isolate->counters()->math_round()->Increment();
+ isolate->counters()->math_round_runtime()->Increment();
if (!input->IsHeapNumber()) {
DCHECK(input->IsSmi());
@@ -221,7 +220,7 @@ RUNTIME_FUNCTION(Runtime_RoundNumber) {
RUNTIME_FUNCTION(Runtime_MathSqrt) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- isolate->counters()->math_sqrt()->Increment();
+ isolate->counters()->math_sqrt_runtime()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
lazily_initialize_fast_sqrt(isolate);
@@ -239,16 +238,6 @@ RUNTIME_FUNCTION(Runtime_MathFround) {
}
-RUNTIME_FUNCTION(Runtime_IsMinusZero) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, obj, 0);
- if (!obj->IsHeapNumber()) return isolate->heap()->false_value();
- HeapNumber* number = HeapNumber::cast(obj);
- return isolate->heap()->ToBoolean(IsMinusZero(number->value()));
-}
-
-
RUNTIME_FUNCTION(Runtime_GenerateRandomNumbers) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
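
The runtime-maths.cc hunks above are a mechanical rename: every counter gains a _runtime suffix so runtime-function hits can be told apart from builtin hits. The token-pasting mechanism that lets one rename fan out across all the unary functions can be sketched standalone like this (the counter and macro names below are illustrative):

```cpp
#include <cmath>
#include <cstdio>

static int counter_sqrt_runtime = 0;

// Sketch of the ##-pasting pattern behind RUNTIME_UNARY_MATH: one macro
// stamps out a wrapper per operation and bumps a counter whose name is
// derived from the operation name, now carrying the _runtime suffix.
#define DEFINE_UNARY_MATH(Name, name)  \
  double Runtime_##Name(double x) {    \
    ++counter_##name##_runtime;        \
    return std::name(x);               \
  }

DEFINE_UNARY_MATH(Sqrt, sqrt)

int main() {
  std::printf("%f (%d call)\n", Runtime_Sqrt(4.0), counter_sqrt_runtime);
  return 0;
}
```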
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 75ddb7bc22..415920d6c6 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -15,11 +15,9 @@
namespace v8 {
namespace internal {
-
MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
Handle<Object> object,
- Handle<Object> key,
- LanguageMode language_mode) {
+ Handle<Object> key) {
if (object->IsUndefined() || object->IsNull()) {
THROW_NEW_ERROR(
isolate,
@@ -32,14 +30,12 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
LookupIterator::PropertyOrElement(isolate, object, key, &success);
if (!success) return MaybeHandle<Object>();
- return Object::GetProperty(&it, language_mode);
+ return Object::GetProperty(&it);
}
-
static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
Handle<Object> receiver_obj,
- Handle<Object> key_obj,
- LanguageMode language_mode) {
+ Handle<Object> key_obj) {
// Fast cases for getting named properties of the receiver JSObject
// itself.
//
@@ -113,8 +109,7 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
}
// Fall back to GetObjectProperty.
- return Runtime::GetObjectProperty(isolate, receiver_obj, key_obj,
- language_mode);
+ return Runtime::GetObjectProperty(isolate, receiver_obj, key_obj);
}
@@ -158,10 +153,10 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
RUNTIME_FUNCTION(Runtime_GetPrototype) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
Handle<Object> prototype;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, prototype,
- Object::GetPrototype(isolate, obj));
+ JSReceiver::GetPrototype(isolate, obj));
return *prototype;
}
@@ -230,8 +225,10 @@ MUST_USE_RESULT static MaybeHandle<Object> GetOwnProperty(Isolate* isolate,
if (is_accessor_pair) {
Handle<AccessorPair> accessors =
Handle<AccessorPair>::cast(it.GetAccessors());
- Handle<Object> getter(accessors->GetComponent(ACCESSOR_GETTER), isolate);
- Handle<Object> setter(accessors->GetComponent(ACCESSOR_SETTER), isolate);
+ Handle<Object> getter =
+ AccessorPair::GetComponent(accessors, ACCESSOR_GETTER);
+ Handle<Object> setter =
+ AccessorPair::GetComponent(accessors, ACCESSOR_SETTER);
elms->set(GETTER_INDEX, *getter);
elms->set(SETTER_INDEX, *setter);
} else {
@@ -266,31 +263,6 @@ RUNTIME_FUNCTION(Runtime_GetOwnProperty_Legacy) {
}
-// ES6 19.1.2.6
-RUNTIME_FUNCTION(Runtime_GetOwnProperty) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, raw_name, 1);
- // 1. Let obj be ? ToObject(O).
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, object,
- Execution::ToObject(isolate, object));
- // 2. Let key be ? ToPropertyKey(P).
- Handle<Name> key;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
- Object::ToName(isolate, raw_name));
-
- // 3. Let desc be ? obj.[[GetOwnProperty]](key).
- PropertyDescriptor desc;
- Maybe<bool> found = JSReceiver::GetOwnPropertyDescriptor(
- isolate, Handle<JSReceiver>::cast(object), key, &desc);
- MAYBE_RETURN(found, isolate->heap()->exception());
- // 4. Return FromPropertyDescriptor(desc).
- if (!found.FromJust()) return isolate->heap()->undefined_value();
- return *desc.ToObject(isolate);
-}
-
-
RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -409,23 +381,7 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Runtime::GetObjectProperty(isolate, object, key, SLOPPY));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetPropertyStrong) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Runtime::GetObjectProperty(isolate, object, key, STRONG));
+ isolate, result, Runtime::GetObjectProperty(isolate, object, key));
return *result;
}
@@ -440,23 +396,7 @@ RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- KeyedGetObjectProperty(isolate, receiver_obj, key_obj, SLOPPY));
- return *result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_KeyedGetPropertyStrong) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver_obj, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1);
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- KeyedGetObjectProperty(isolate, receiver_obj, key_obj, STRONG));
+ isolate, result, KeyedGetObjectProperty(isolate, receiver_obj, key_obj));
return *result;
}
@@ -563,10 +503,8 @@ namespace {
Object* DeleteProperty(Isolate* isolate, Handle<Object> object,
Handle<Object> key, LanguageMode language_mode) {
Handle<JSReceiver> receiver;
- if (!JSReceiver::ToObject(isolate, object).ToHandle(&receiver)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
- }
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
Maybe<bool> result =
Runtime::DeleteObjectProperty(isolate, receiver, key, language_mode);
MAYBE_RETURN(result, isolate->heap()->exception());
@@ -603,11 +541,10 @@ static Object* HasOwnPropertyImplementation(Isolate* isolate,
// Handle hidden prototypes. If there's a hidden prototype above this thing
// then we have to check it for properties, because they are supposed to
// look like they are on this object.
- PrototypeIterator iter(isolate, object);
- if (!iter.IsAtEnd() &&
- PrototypeIterator::GetCurrent<HeapObject>(iter)
- ->map()
- ->is_hidden_prototype()) {
+ if (object->map()->has_hidden_prototype()) {
+ PrototypeIterator iter(isolate, object);
+ DCHECK(!iter.IsAtEnd());
+
// TODO(verwaest): The recursion is not necessary for keys that are array
// indices. Removing this.
// Casting to JSObject is fine because JSProxies are never used as
@@ -652,7 +589,7 @@ RUNTIME_FUNCTION(Runtime_HasOwnProperty) {
}
Map* map = js_obj->map();
if (!key_is_array_index && !map->has_named_interceptor() &&
- !HeapObject::cast(map->prototype())->map()->is_hidden_prototype()) {
+ !map->has_hidden_prototype()) {
return isolate->heap()->false_value();
}
// Slow case.
@@ -716,32 +653,6 @@ RUNTIME_FUNCTION(Runtime_PropertyIsEnumerable) {
}
-// Returns either a FixedArray or, if the given object has an enum cache that
-// contains all enumerable properties of the object and its prototypes have
-// none, the map of the object. This is used to speed up the check for
-// deletions during a for-in.
-RUNTIME_FUNCTION(Runtime_GetPropertyNamesFast) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSReceiver, raw_object, 0);
-
- if (raw_object->IsSimpleEnum()) return raw_object->map();
-
- HandleScope scope(isolate);
- Handle<JSReceiver> object(raw_object);
- Handle<FixedArray> content;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, content, JSReceiver::GetKeys(object, JSReceiver::INCLUDE_PROTOS,
- ENUMERABLE_STRINGS));
-
- // Test again, since cache may have been built by preceding call.
- if (object->IsSimpleEnum()) return object->map();
-
- return *content;
-}
-
-
RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -751,8 +662,8 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
Handle<FixedArray> keys;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, keys, JSReceiver::GetKeys(object, JSReceiver::OWN_ONLY, filter,
- CONVERT_TO_STRING));
+ isolate, keys,
+ JSReceiver::GetKeys(object, OWN_ONLY, filter, CONVERT_TO_STRING));
return *isolate->factory()->NewJSArrayWithElements(keys);
}
@@ -943,6 +854,30 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyUnchecked) {
return *result;
}
+RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 5);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+ CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
+ CONVERT_SMI_ARG_CHECKED(set_function_name, 4);
+
+ if (FLAG_harmony_function_name && set_function_name) {
+ DCHECK(value->IsJSFunction());
+ JSFunction::SetName(Handle<JSFunction>::cast(value), name,
+ isolate->factory()->empty_string());
+ }
+
+ LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name,
+ LookupIterator::OWN);
+ // Cannot fail since this should only be called when
+ // creating an object literal.
+ CHECK(JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attrs,
+ Object::DONT_THROW)
+ .IsJust());
+ return *object;
+}
// Return property without being observable by accessors or interceptors.
RUNTIME_FUNCTION(Runtime_GetDataProperty) {
@@ -972,34 +907,6 @@ RUNTIME_FUNCTION(Runtime_ValueOf) {
}
-RUNTIME_FUNCTION(Runtime_SetValueOf) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_CHECKED(Object, obj, 0);
- CONVERT_ARG_CHECKED(Object, value, 1);
- if (!obj->IsJSValue()) return value;
- JSValue::cast(obj)->set_value(value);
- return value;
-}
-
-
-RUNTIME_FUNCTION(Runtime_JSValueGetValue) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSValue, obj, 0);
- return JSValue::cast(obj)->value();
-}
-
-
-RUNTIME_FUNCTION(Runtime_ObjectEquals) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_CHECKED(Object, obj1, 0);
- CONVERT_ARG_CHECKED(Object, obj2, 1);
- return isolate->heap()->ToBoolean(obj1 == obj2);
-}
-
-
RUNTIME_FUNCTION(Runtime_IsJSReceiver) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -1034,6 +941,11 @@ RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, getter, 2);
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
+ if (FLAG_harmony_function_name &&
+ String::cast(getter->shared()->name())->length() == 0) {
+ JSFunction::SetName(getter, name, isolate->factory()->get_string());
+ }
+
RETURN_FAILURE_ON_EXCEPTION(
isolate,
JSObject::DefineAccessor(object, name, getter,
@@ -1050,6 +962,11 @@ RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, setter, 2);
CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
+ if (FLAG_harmony_function_name &&
+ String::cast(setter->shared()->name())->length() == 0) {
+ JSFunction::SetName(setter, name, isolate->factory()->set_string());
+ }
+
RETURN_FAILURE_ON_EXCEPTION(
isolate,
JSObject::DefineAccessor(object, name, isolate->factory()->null_value(),
@@ -1063,11 +980,9 @@ RUNTIME_FUNCTION(Runtime_ToObject) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
Handle<JSReceiver> receiver;
- if (JSReceiver::ToObject(isolate, object).ToHandle(&receiver)) {
- return *receiver;
- }
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+ return *receiver;
}
@@ -1180,41 +1095,33 @@ RUNTIME_FUNCTION(Runtime_StrictEquals) {
}
-// TODO(bmeurer): Kill this special wrapper and use TF compatible LessThan,
-// GreaterThan, etc. which return true or false.
-RUNTIME_FUNCTION(Runtime_Compare) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, ncr, 2);
- Maybe<ComparisonResult> result = Object::Compare(x, y);
- if (result.IsJust()) {
- switch (result.FromJust()) {
- case ComparisonResult::kLessThan:
- return Smi::FromInt(LESS);
- case ComparisonResult::kEqual:
- return Smi::FromInt(EQUAL);
- case ComparisonResult::kGreaterThan:
- return Smi::FromInt(GREATER);
- case ComparisonResult::kUndefined:
- return *ncr;
- }
- UNREACHABLE();
- }
- return isolate->heap()->exception();
+RUNTIME_FUNCTION(Runtime_SameValue) {
+ SealHandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_CHECKED(Object, x, 0);
+ CONVERT_ARG_CHECKED(Object, y, 1);
+ return isolate->heap()->ToBoolean(x->SameValue(y));
+}
+
+
+RUNTIME_FUNCTION(Runtime_SameValueZero) {
+ SealHandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_CHECKED(Object, x, 0);
+ CONVERT_ARG_CHECKED(Object, y, 1);
+ return isolate->heap()->ToBoolean(x->SameValueZero(y));
}
// TODO(bmeurer): Kill this special wrapper and use TF compatible LessThan,
// GreaterThan, etc. which return true or false.
-RUNTIME_FUNCTION(Runtime_Compare_Strong) {
+RUNTIME_FUNCTION(Runtime_Compare) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, ncr, 2);
- Maybe<ComparisonResult> result = Object::Compare(x, y, Strength::STRONG);
+ Maybe<ComparisonResult> result = Object::Compare(x, y);
if (result.IsJust()) {
switch (result.FromJust()) {
case ComparisonResult::kLessThan:
@@ -1268,7 +1175,9 @@ RUNTIME_FUNCTION(Runtime_InstanceOf) {
NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
}
// Return whether or not {prototype} is in the prototype chain of {object}.
- Maybe<bool> result = Object::HasInPrototypeChain(isolate, object, prototype);
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
+ Maybe<bool> result =
+ JSReceiver::HasInPrototypeChain(isolate, receiver, prototype);
MAYBE_RETURN(result, isolate->heap()->exception());
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -1277,9 +1186,10 @@ RUNTIME_FUNCTION(Runtime_InstanceOf) {
RUNTIME_FUNCTION(Runtime_HasInPrototypeChain) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- Maybe<bool> result = Object::HasInPrototypeChain(isolate, object, prototype);
+ Maybe<bool> result =
+ JSReceiver::HasInPrototypeChain(isolate, object, prototype);
MAYBE_RETURN(result, isolate->heap()->exception());
return isolate->heap()->ToBoolean(result.FromJust());
}
@@ -1291,7 +1201,11 @@ RUNTIME_FUNCTION(Runtime_CreateIterResultObject) {
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, done, 1);
- return *isolate->factory()->NewJSIteratorResult(value, done);
+ Handle<JSObject> result =
+ isolate->factory()->NewJSObjectFromMap(isolate->iterator_result_map());
+ result->InObjectPropertyAtPut(JSIteratorResult::kValueIndex, *value);
+ result->InObjectPropertyAtPut(JSIteratorResult::kDoneIndex, *done);
+ return *result;
}
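
The new Runtime_SameValue/Runtime_SameValueZero pair above differs only in zero handling, per the ES2015 SameValue and SameValueZero abstract operations. A double-only standalone sketch of the distinction:

```cpp
#include <cmath>
#include <cstdio>

// Double-only sketch of the two predicates: SameValue treats NaN equal to
// NaN but distinguishes +0 from -0; SameValueZero is identical except that
// +0 and -0 compare equal.
bool SameValueDouble(double x, double y) {
  if (std::isnan(x) && std::isnan(y)) return true;
  if (x == 0.0 && y == 0.0) return std::signbit(x) == std::signbit(y);
  return x == y;
}

bool SameValueZeroDouble(double x, double y) {
  if (std::isnan(x) && std::isnan(y)) return true;
  return x == y;  // +0 == -0 holds here
}

int main() {
  std::printf("%d %d\n", SameValueDouble(0.0, -0.0),   // 0
              SameValueZeroDouble(0.0, -0.0));         // 1
  std::printf("%d %d\n", SameValueDouble(NAN, NAN),    // 1
              SameValueZeroDouble(NAN, NAN));          // 1
  return 0;
}
```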
diff --git a/deps/v8/src/runtime/runtime-operators.cc b/deps/v8/src/runtime/runtime-operators.cc
index b5e92af8f6..02fd0cd332 100644
--- a/deps/v8/src/runtime/runtime-operators.cc
+++ b/deps/v8/src/runtime/runtime-operators.cc
@@ -21,18 +21,6 @@ RUNTIME_FUNCTION(Runtime_Multiply) {
}
-RUNTIME_FUNCTION(Runtime_Multiply_Strong) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::Multiply(isolate, lhs, rhs, Strength::STRONG));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_Divide) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -45,18 +33,6 @@ RUNTIME_FUNCTION(Runtime_Divide) {
}
-RUNTIME_FUNCTION(Runtime_Divide_Strong) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::Divide(isolate, lhs, rhs, Strength::STRONG));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_Modulus) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -69,18 +45,6 @@ RUNTIME_FUNCTION(Runtime_Modulus) {
}
-RUNTIME_FUNCTION(Runtime_Modulus_Strong) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::Modulus(isolate, lhs, rhs, Strength::STRONG));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_Add) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -93,18 +57,6 @@ RUNTIME_FUNCTION(Runtime_Add) {
}
-RUNTIME_FUNCTION(Runtime_Add_Strong) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::Add(isolate, lhs, rhs, Strength::STRONG));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_Subtract) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -117,18 +69,6 @@ RUNTIME_FUNCTION(Runtime_Subtract) {
}
-RUNTIME_FUNCTION(Runtime_Subtract_Strong) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::Subtract(isolate, lhs, rhs, Strength::STRONG));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_ShiftLeft) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -141,18 +81,6 @@ RUNTIME_FUNCTION(Runtime_ShiftLeft) {
}
-RUNTIME_FUNCTION(Runtime_ShiftLeft_Strong) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::ShiftLeft(isolate, lhs, rhs, Strength::STRONG));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_ShiftRight) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -165,18 +93,6 @@ RUNTIME_FUNCTION(Runtime_ShiftRight) {
}
-RUNTIME_FUNCTION(Runtime_ShiftRight_Strong) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::ShiftRight(isolate, lhs, rhs, Strength::STRONG));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_ShiftRightLogical) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -189,19 +105,6 @@ RUNTIME_FUNCTION(Runtime_ShiftRightLogical) {
}
-RUNTIME_FUNCTION(Runtime_ShiftRightLogical_Strong) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Object::ShiftRightLogical(isolate, lhs, rhs, Strength::STRONG));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_BitwiseAnd) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -214,18 +117,6 @@ RUNTIME_FUNCTION(Runtime_BitwiseAnd) {
}
-RUNTIME_FUNCTION(Runtime_BitwiseAnd_Strong) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::BitwiseAnd(isolate, lhs, rhs, Strength::STRONG));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_BitwiseOr) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -238,18 +129,6 @@ RUNTIME_FUNCTION(Runtime_BitwiseOr) {
}
-RUNTIME_FUNCTION(Runtime_BitwiseOr_Strong) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::BitwiseOr(isolate, lhs, rhs, Strength::STRONG));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_BitwiseXor) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -261,17 +140,5 @@ RUNTIME_FUNCTION(Runtime_BitwiseXor) {
return *result;
}
-
-RUNTIME_FUNCTION(Runtime_BitwiseXor_Strong) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Object::BitwiseXor(isolate, lhs, rhs, Strength::STRONG));
- return *result;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 3a521c6b7c..7764d25c58 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -58,9 +58,8 @@ RUNTIME_FUNCTION(Runtime_JSProxyCall) {
ElementsAccessor* accessor = arg_array->GetElementsAccessor();
{
DisallowHeapAllocation no_gc;
- FixedArrayBase* elements = arg_array->elements();
for (int i = 0; i < arguments_length; i++) {
- accessor->Set(elements, i, args[i + 1]);
+ accessor->Set(arg_array, i, args[i + 1]);
}
}
   // 8. Return Call(trap, handler, «target, thisArgument, argArray»).
@@ -119,9 +118,8 @@ RUNTIME_FUNCTION(Runtime_JSProxyConstruct) {
ElementsAccessor* accessor = arg_array->GetElementsAccessor();
{
DisallowHeapAllocation no_gc;
- FixedArrayBase* elements = arg_array->elements();
for (int i = 0; i < arguments_length; i++) {
- accessor->Set(elements, i, args[i + 1]);
+ accessor->Set(arg_array, i, args[i + 1]);
}
}
   // 8. Let newObj be ? Call(trap, handler, «target, argArray, newTarget »).
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 138b4dc71c..df86aa870c 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -492,7 +492,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithString(
}
}
- RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+ RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
if (global_cache.HasException()) return isolate->heap()->exception();
int32_t* current_match = global_cache.FetchNext();
@@ -568,7 +568,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
}
}
- RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+ RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
if (global_cache.HasException()) return isolate->heap()->exception();
int32_t* current_match = global_cache.FetchNext();
@@ -876,7 +876,7 @@ static Object* SearchRegExpMultiple(Isolate* isolate, Handle<String> subject,
}
}
- RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+ RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
if (global_cache.HasException()) return isolate->heap()->exception();
// Ensured in Runtime_RegExpExecMultiple.
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 094f1a10ed..a8f3a74918 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -65,21 +65,27 @@ static Object* DeclareGlobals(Isolate* isolate, Handle<JSGlobalObject> global,
// Check whether we can reconfigure the existing property into a
// function.
PropertyDetails old_details = it.property_details();
- // TODO(verwaest): ACCESSOR_CONSTANT invalidly includes
- // ExecutableAccessInfo,
- // which are actually data properties, not accessor properties.
if (old_details.IsReadOnly() || old_details.IsDontEnum() ||
- old_details.type() == ACCESSOR_CONSTANT) {
+ (it.state() == LookupIterator::ACCESSOR &&
+ it.GetAccessors()->IsAccessorPair())) {
return ThrowRedeclarationError(isolate, name);
}
// If the existing property is not configurable, keep its attributes. Do
attr = old_attributes;
}
+
+  // If the current state is ACCESSOR, this could mean it's an AccessorInfo
+  // type property. We are not allowed to call into such setters during global
+  // function declaration, since this would break e.g. onload: declaring
+  // 'function onload() {}' would invalidly register that function as the
+  // onload callback. To avoid this, we first delete the property before
+  // re-adding it as a regular data property below.
+ if (it.state() == LookupIterator::ACCESSOR) it.Delete();
}
// Define or redefine own property.
- RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- global, name, value, attr));
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attr));
return isolate->heap()->undefined_value();
}
@@ -196,8 +202,8 @@ RUNTIME_FUNCTION(Runtime_InitializeConstGlobal) {
}
}
- RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- global, name, value, attr));
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attr));
return *value;
}
@@ -414,10 +420,8 @@ RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
namespace {
// Find the arguments of the JavaScript function invocation that called
-// into C++ code. Collect these in a newly allocated array of handles (possibly
-// prefixed by a number of empty handles).
+// into C++ code. Collect these in a newly allocated array of handles.
base::SmartArrayPointer<Handle<Object>> GetCallerArguments(Isolate* isolate,
- int prefix_argc,
int* total_argc) {
// Find frame containing arguments passed to the caller.
JavaScriptFrameIterator it(isolate);
@@ -442,14 +446,14 @@ base::SmartArrayPointer<Handle<Object>> GetCallerArguments(Isolate* isolate,
iter++;
argument_count--;
- *total_argc = prefix_argc + argument_count;
+ *total_argc = argument_count;
base::SmartArrayPointer<Handle<Object>> param_data(
NewArray<Handle<Object>>(*total_argc));
bool should_deoptimize = false;
for (int i = 0; i < argument_count; i++) {
should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
Handle<Object> value = iter->GetValue();
- param_data[prefix_argc + i] = value;
+ param_data[i] = value;
iter++;
}
@@ -463,12 +467,12 @@ base::SmartArrayPointer<Handle<Object>> GetCallerArguments(Isolate* isolate,
frame = it.frame();
int args_count = frame->ComputeParametersCount();
- *total_argc = prefix_argc + args_count;
+ *total_argc = args_count;
base::SmartArrayPointer<Handle<Object>> param_data(
NewArray<Handle<Object>>(*total_argc));
for (int i = 0; i < args_count; i++) {
Handle<Object> val = Handle<Object>(frame->GetParameter(i), isolate);
- param_data[prefix_argc + i] = val;
+ param_data[i] = val;
}
return param_data;
}
@@ -564,46 +568,6 @@ Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
}
-template <typename T>
-Handle<JSObject> NewStrictArguments(Isolate* isolate, Handle<JSFunction> callee,
- T parameters, int argument_count) {
- Handle<JSObject> result =
- isolate->factory()->NewArgumentsObject(callee, argument_count);
-
- if (argument_count > 0) {
- Handle<FixedArray> array =
- isolate->factory()->NewUninitializedFixedArray(argument_count);
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < argument_count; i++) {
- array->set(i, parameters[i], mode);
- }
- result->set_elements(*array);
- }
- return result;
-}
-
-
-template <typename T>
-Handle<JSObject> NewRestArguments(Isolate* isolate, Handle<JSFunction> callee,
- T parameters, int argument_count,
- int start_index) {
- int num_elements = std::max(0, argument_count - start_index);
- Handle<JSObject> result = isolate->factory()->NewJSArray(
- FAST_ELEMENTS, num_elements, num_elements, Strength::WEAK,
- DONT_INITIALIZE_ARRAY_ELEMENTS);
- {
- DisallowHeapAllocation no_gc;
- FixedArray* elements = FixedArray::cast(result->elements());
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < num_elements; i++) {
- elements->set(i, parameters[i + start_index], mode);
- }
- }
- return result;
-}
-
-
class HandleArguments BASE_EMBEDDED {
public:
explicit HandleArguments(Handle<Object>* array) : array_(array) {}
@@ -634,39 +598,60 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments_Generic) {
// inlined, we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
base::SmartArrayPointer<Handle<Object>> arguments =
- GetCallerArguments(isolate, 0, &argument_count);
+ GetCallerArguments(isolate, &argument_count);
HandleArguments argument_getter(arguments.get());
return *NewSloppyArguments(isolate, callee, argument_getter, argument_count);
}
-RUNTIME_FUNCTION(Runtime_NewStrictArguments_Generic) {
+RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
// This generic runtime function can also be used when the caller has been
// inlined, we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
base::SmartArrayPointer<Handle<Object>> arguments =
- GetCallerArguments(isolate, 0, &argument_count);
- HandleArguments argument_getter(arguments.get());
- return *NewStrictArguments(isolate, callee, argument_getter, argument_count);
+ GetCallerArguments(isolate, &argument_count);
+ Handle<JSObject> result =
+ isolate->factory()->NewArgumentsObject(callee, argument_count);
+ if (argument_count) {
+ Handle<FixedArray> array =
+ isolate->factory()->NewUninitializedFixedArray(argument_count);
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < argument_count; i++) {
+ array->set(i, *arguments[i], mode);
+ }
+ result->set_elements(*array);
+ }
+ return *result;
}
-RUNTIME_FUNCTION(Runtime_NewRestArguments_Generic) {
+RUNTIME_FUNCTION(Runtime_NewRestParameter) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
- CONVERT_SMI_ARG_CHECKED(start_index, 1);
+ int start_index = callee->shared()->internal_formal_parameter_count();
// This generic runtime function can also be used when the caller has been
// inlined; we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
base::SmartArrayPointer<Handle<Object>> arguments =
- GetCallerArguments(isolate, 0, &argument_count);
- HandleArguments argument_getter(arguments.get());
- return *NewRestArguments(isolate, callee, argument_getter, argument_count,
- start_index);
+ GetCallerArguments(isolate, &argument_count);
+ int num_elements = std::max(0, argument_count - start_index);
+ Handle<JSObject> result = isolate->factory()->NewJSArray(
+ FAST_ELEMENTS, num_elements, num_elements, Strength::WEAK,
+ DONT_INITIALIZE_ARRAY_ELEMENTS);
+ {
+ DisallowHeapAllocation no_gc;
+ FixedArray* elements = FixedArray::cast(result->elements());
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < num_elements; i++) {
+ elements->set(i, *arguments[i + start_index], mode);
+ }
+ }
+ return *result;
}
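
Runtime_NewRestParameter now derives start_index from the callee's declared parameter count instead of receiving it as a runtime argument. A sketch of the tail-slice logic under that assumption (std::vector instead of FixedArray):

    #include <algorithm>
    #include <vector>

    // start_index models internal_formal_parameter_count(): everything the
    // caller passed beyond the declared parameters lands in the rest array.
    std::vector<int> NewRestParameter(const std::vector<int>& args,
                                      int start_index) {
      int argument_count = static_cast<int>(args.size());
      int num_elements = std::max(0, argument_count - start_index);
      std::vector<int> rest(num_elements);
      for (int i = 0; i < num_elements; i++) {
        rest[i] = args[i + start_index];
      }
      return rest;
    }
    // For function f(a, b, ...r) called as f(1, 2, 3, 4): start_index == 2,
    // so r == [3, 4]. Called as f(1), num_elements clamps to 0 and r == [].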
@@ -687,42 +672,6 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
}
-RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0)
- Object** parameters = reinterpret_cast<Object**>(args[1]);
- CONVERT_SMI_ARG_CHECKED(argument_count, 2);
-#ifdef DEBUG
- // This runtime function does not materialize the correct arguments when the
- // caller has been inlined, better make sure we are not hitting that case.
- JavaScriptFrameIterator it(isolate);
- DCHECK(!it.frame()->HasInlinedFrames());
-#endif // DEBUG
- ParameterArguments argument_getter(parameters);
- return *NewStrictArguments(isolate, callee, argument_getter, argument_count);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NewRestParam) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_SMI_ARG_CHECKED(num_params, 0);
- Object** parameters = reinterpret_cast<Object**>(args[1]);
- CONVERT_SMI_ARG_CHECKED(rest_index, 2);
-#ifdef DEBUG
- // This runtime function does not materialize the correct arguments when the
- // caller has been inlined, better make sure we are not hitting that case.
- JavaScriptFrameIterator it(isolate);
- DCHECK(!it.frame()->HasInlinedFrames());
-#endif // DEBUG
- Handle<JSFunction> callee;
- ParameterArguments argument_getter(parameters);
- return *NewRestArguments(isolate, callee, argument_getter, num_params,
- rest_index);
-}
-
-
RUNTIME_FUNCTION(Runtime_NewClosure) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -957,17 +906,14 @@ RUNTIME_FUNCTION(Runtime_DeclareModules) {
RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
int index;
PropertyAttributes attributes;
- ContextLookupFlags flags = FOLLOW_CHAINS;
- BindingFlags binding_flags;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes, &binding_flags);
+ BindingFlags flags;
+ Handle<Object> holder = isolate->context()->Lookup(
+ name, FOLLOW_CHAINS, &index, &attributes, &flags);
// If the slot was not found the result is true.
if (holder.is_null()) {
@@ -991,161 +937,158 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
}
-static Object* ComputeReceiverForNonGlobal(Isolate* isolate, JSObject* holder) {
- DCHECK(!holder->IsJSGlobalObject());
-
- // If the holder isn't a context extension object, we just return it
- // as the receiver. This allows arguments objects to be used as
- // receivers, but only if they are put in the context scope chain
- // explicitly via a with-statement.
- if (holder->map()->instance_type() != JS_CONTEXT_EXTENSION_OBJECT_TYPE) {
- return holder;
- }
- // Fall back to using the global object as the implicit receiver if
- // the property turns out to be a local variable allocated in a
- // context extension object - introduced via eval.
- return isolate->heap()->undefined_value();
-}
-
-
-static ObjectPair LoadLookupSlotHelper(Arguments args, Isolate* isolate,
- bool throw_error) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+namespace {
- if (!args[0]->IsContext() || !args[1]->IsString()) {
- return MakePair(isolate->ThrowIllegalOperation(), NULL);
- }
- Handle<Context> context = args.at<Context>(0);
- Handle<String> name = args.at<String>(1);
+MaybeHandle<Object> LoadLookupSlot(Handle<String> name,
+ Object::ShouldThrow should_throw,
+ Handle<Object>* receiver_return = nullptr) {
+ Isolate* const isolate = name->GetIsolate();
int index;
PropertyAttributes attributes;
- ContextLookupFlags flags = FOLLOW_CHAINS;
- BindingFlags binding_flags;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes, &binding_flags);
- if (isolate->has_pending_exception()) {
- return MakePair(isolate->heap()->exception(), NULL);
- }
+ BindingFlags flags;
+ Handle<Object> holder = isolate->context()->Lookup(
+ name, FOLLOW_CHAINS, &index, &attributes, &flags);
+ if (isolate->has_pending_exception()) return MaybeHandle<Object>();
if (index != Context::kNotFound) {
DCHECK(holder->IsContext());
// If the "property" we were looking for is a local variable, the
// receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3.
Handle<Object> receiver = isolate->factory()->undefined_value();
- Object* value = Context::cast(*holder)->get(index);
+ Handle<Object> value = handle(Context::cast(*holder)->get(index), isolate);
// Check for uninitialized bindings.
- switch (binding_flags) {
+ switch (flags) {
case MUTABLE_CHECK_INITIALIZED:
case IMMUTABLE_CHECK_INITIALIZED_HARMONY:
if (value->IsTheHole()) {
- Handle<Object> error = isolate->factory()->NewReferenceError(
- MessageTemplate::kNotDefined, name);
- isolate->Throw(*error);
- return MakePair(isolate->heap()->exception(), NULL);
+ THROW_NEW_ERROR(isolate,
+ NewReferenceError(MessageTemplate::kNotDefined, name),
+ Object);
+ }
+ // FALLTHROUGH
+ case IMMUTABLE_CHECK_INITIALIZED:
+ if (value->IsTheHole()) {
+ DCHECK(attributes & READ_ONLY);
+ value = isolate->factory()->undefined_value();
}
// FALLTHROUGH
case MUTABLE_IS_INITIALIZED:
case IMMUTABLE_IS_INITIALIZED:
case IMMUTABLE_IS_INITIALIZED_HARMONY:
DCHECK(!value->IsTheHole());
- return MakePair(value, *receiver);
- case IMMUTABLE_CHECK_INITIALIZED:
- if (value->IsTheHole()) {
- DCHECK((attributes & READ_ONLY) != 0);
- value = isolate->heap()->undefined_value();
- }
- return MakePair(value, *receiver);
+ if (receiver_return) *receiver_return = receiver;
+ return value;
case MISSING_BINDING:
- UNREACHABLE();
- return MakePair(NULL, NULL);
+ break;
}
+ UNREACHABLE();
}
// Otherwise, if the slot was found the holder is a context extension
// object, subject of a with, or a global object. We read the named
// property from it.
if (!holder.is_null()) {
- Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
- // GetProperty below can cause GC.
- Handle<Object> receiver_handle(
- object->IsJSGlobalObject()
- ? Object::cast(isolate->heap()->undefined_value())
- : object->IsJSProxy() ? static_cast<Object*>(*object)
- : ComputeReceiverForNonGlobal(
- isolate, JSObject::cast(*object)),
- isolate);
-
// No need to unhole the value here. This is taken care of by the
// GetProperty function.
Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value, Object::GetProperty(object, name),
- MakePair(isolate->heap()->exception(), NULL));
- return MakePair(*value, *receiver_handle);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value, Object::GetProperty(holder, name),
+ Object);
+ if (receiver_return) {
+ *receiver_return =
+ (holder->IsJSGlobalObject() || holder->IsJSContextExtensionObject())
+ ? Handle<Object>::cast(isolate->factory()->undefined_value())
+ : holder;
+ }
+ return value;
}
- if (throw_error) {
+ if (should_throw == Object::THROW_ON_ERROR) {
// The property doesn't exist - throw exception.
- Handle<Object> error = isolate->factory()->NewReferenceError(
- MessageTemplate::kNotDefined, name);
- isolate->Throw(*error);
- return MakePair(isolate->heap()->exception(), NULL);
- } else {
- // The property doesn't exist - return undefined.
- return MakePair(isolate->heap()->undefined_value(),
- isolate->heap()->undefined_value());
+ THROW_NEW_ERROR(
+ isolate, NewReferenceError(MessageTemplate::kNotDefined, name), Object);
}
+
+ // The property doesn't exist - return undefined.
+ if (receiver_return) *receiver_return = isolate->factory()->undefined_value();
+ return isolate->factory()->undefined_value();
}
+} // namespace
-RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlot) {
- return LoadLookupSlotHelper(args, isolate, true);
+
+RUNTIME_FUNCTION(Runtime_LoadLookupSlot) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<Object> value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, value, LoadLookupSlot(name, Object::THROW_ON_ERROR));
+ return *value;
}
-RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlotNoReferenceError) {
- return LoadLookupSlotHelper(args, isolate, false);
+RUNTIME_FUNCTION(Runtime_LoadLookupSlotInsideTypeof) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<Object> value;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, value, LoadLookupSlot(name, Object::DONT_THROW));
+ return *value;
}
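
The two entries above share LoadLookupSlot and differ only in error policy: a plain load throws a ReferenceError for a missing binding, while the typeof variant returns undefined so that typeof on an undeclared name stays exception-free. A rough scalar model, with one flat scope map instead of V8's context chain:

    #include <map>
    #include <optional>
    #include <stdexcept>
    #include <string>

    enum class ShouldThrow { kThrowOnError, kDontThrow };

    std::optional<int> LoadLookupSlot(const std::map<std::string, int>& scope,
                                      const std::string& name,
                                      ShouldThrow should_throw) {
      auto it = scope.find(name);
      if (it != scope.end()) return it->second;
      if (should_throw == ShouldThrow::kThrowOnError) {
        // Models THROW_NEW_ERROR(..., kNotDefined, ...).
        throw std::runtime_error(name + " is not defined");
      }
      return std::nullopt;  // typeof sees "undefined", not an exception
    }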
-RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
+RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlotForCall) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(1, args.length());
+ DCHECK(args[0]->IsString());
+ Handle<String> name = args.at<String>(0);
+ Handle<Object> value;
+ Handle<Object> receiver;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, value, LoadLookupSlot(name, Object::THROW_ON_ERROR, &receiver),
+ MakePair(isolate->heap()->exception(), nullptr));
+ return MakePair(*value, *receiver);
+}
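
LoadLookupSlotForCall keeps the two-word return because a call through a lookup slot needs both the callee and the implicit receiver: for example, f() inside with (o) must pass o as `this` when f resolves on o. A shape-only sketch:

    #include <string>
    #include <utility>

    struct Value { std::string repr; };  // stand-in for a tagged JS value

    // Models the pair-returning entry: callee and receiver travel together
    // so the generated call site can use the receiver as `this`.
    std::pair<Value, Value> LoadLookupSlotForCall(const std::string& name) {
      Value callee{name};           // the looked-up function
      Value receiver{"undefined"};  // or the with-object in the full version
      return {callee, receiver};
    }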
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
- CONVERT_ARG_HANDLE_CHECKED(Context, context, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
- CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 3);
+
+namespace {
+
+MaybeHandle<Object> StoreLookupSlot(Handle<String> name, Handle<Object> value,
+ LanguageMode language_mode) {
+ Isolate* const isolate = name->GetIsolate();
+ Handle<Context> context(isolate->context(), isolate);
int index;
PropertyAttributes attributes;
- ContextLookupFlags flags = FOLLOW_CHAINS;
- BindingFlags binding_flags;
+ BindingFlags flags;
Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes, &binding_flags);
+ context->Lookup(name, FOLLOW_CHAINS, &index, &attributes, &flags);
if (holder.is_null()) {
// In case of JSProxy, an exception might have been thrown.
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ if (isolate->has_pending_exception()) return MaybeHandle<Object>();
}
// The property was found in a context slot.
if (index != Context::kNotFound) {
- if ((binding_flags == MUTABLE_CHECK_INITIALIZED ||
- binding_flags == IMMUTABLE_CHECK_INITIALIZED_HARMONY) &&
+ if ((flags == MUTABLE_CHECK_INITIALIZED ||
+ flags == IMMUTABLE_CHECK_INITIALIZED_HARMONY) &&
Handle<Context>::cast(holder)->is_the_hole(index)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
+ THROW_NEW_ERROR(isolate,
+ NewReferenceError(MessageTemplate::kNotDefined, name),
+ Object);
}
if ((attributes & READ_ONLY) == 0) {
Handle<Context>::cast(holder)->set(index, *value);
} else if (is_strict(language_mode)) {
// Setting read only property in strict mode.
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kStrictCannotAssign, name));
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kStrictCannotAssign, name),
+ Object);
}
- return *value;
+ return value;
}
// Slow case: The property is not in a context slot. It is either in a
@@ -1157,101 +1100,42 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
object = Handle<JSReceiver>::cast(holder);
} else if (is_strict(language_mode)) {
// If absent in strict mode: throw.
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
+ THROW_NEW_ERROR(
+ isolate, NewReferenceError(MessageTemplate::kNotDefined, name), Object);
} else {
// If absent in sloppy mode: add the property to the global object.
object = Handle<JSReceiver>(context->global_object());
}
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, Object::SetProperty(object, name, value, language_mode));
-
- return *value;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, value, Object::SetProperty(object, name, value, language_mode),
+ Object);
+ return value;
}
+} // namespace
+
-RUNTIME_FUNCTION(Runtime_ArgumentsLength) {
+RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Sloppy) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- int argument_count = 0;
- GetCallerArguments(isolate, 0, &argument_count);
- return Smi::FromInt(argument_count);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ StoreLookupSlot(name, value, SLOPPY));
+ return *value;
}
-RUNTIME_FUNCTION(Runtime_Arguments) {
+RUNTIME_FUNCTION(Runtime_StoreLookupSlot_Strict) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, raw_key, 0);
-
- // Determine the actual arguments passed to the function.
- int argument_count_signed = 0;
- base::SmartArrayPointer<Handle<Object>> arguments =
- GetCallerArguments(isolate, 0, &argument_count_signed);
- const uint32_t argument_count = argument_count_signed;
-
- // Try to convert the key to an index. If successful and within
- // index return the the argument from the frame.
- uint32_t index = 0;
- if (raw_key->ToArrayIndex(&index) && index < argument_count) {
- return *arguments[index];
- }
-
- if (raw_key->IsSymbol()) {
- Handle<Symbol> symbol = Handle<Symbol>::cast(raw_key);
- if (Name::Equals(symbol, isolate->factory()->iterator_symbol())) {
- return isolate->native_context()->array_values_iterator();
- }
- // Lookup in the initial Object.prototype object.
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Object::GetProperty(isolate->initial_object_prototype(),
- Handle<Symbol>::cast(raw_key)));
- return *result;
- }
-
- // Convert the key to a string.
- Handle<Object> converted;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, converted,
- Object::ToString(isolate, raw_key));
- Handle<String> key = Handle<String>::cast(converted);
-
- // Try to convert the string key into an array index.
- if (key->AsArrayIndex(&index)) {
- if (index < argument_count) {
- return *arguments[index];
- } else {
- Handle<Object> initial_prototype(isolate->initial_object_prototype());
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Object::GetElement(isolate, initial_prototype, index));
- return *result;
- }
- }
-
- // Handle special arguments properties.
- if (String::Equals(isolate->factory()->length_string(), key)) {
- return Smi::FromInt(argument_count);
- }
- if (String::Equals(isolate->factory()->callee_string(), key)) {
- JavaScriptFrameIterator it(isolate);
- JSFunction* function = it.frame()->function();
- if (is_strict(function->shared()->language_mode())) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kStrictPoisonPill));
- }
- return function;
- }
-
- // Lookup in the initial Object.prototype object.
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Object::GetProperty(isolate->initial_object_prototype(), key));
- return *result;
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
+ StoreLookupSlot(name, value, STRICT));
+ return *value;
}
+
} // namespace internal
} // namespace v8
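
The single StoreLookupSlot entry that took a language-mode Smi is split into _Sloppy and _Strict entries over a shared helper; the compiler knows the mode statically, so it selects an entry instead of passing a flag. The pattern, reduced to standard C++ with hypothetical names (found stands in for the context-chain lookup result):

    #include <stdexcept>
    #include <string>

    enum class LanguageMode { kSloppy, kStrict };

    // Shared helper, mirroring the anonymous-namespace StoreLookupSlot.
    void StoreLookupSlotImpl(const std::string& name, LanguageMode mode,
                             bool found) {
      if (!found) {
        if (mode == LanguageMode::kStrict) {
          // Strict mode: assignment to an undeclared name throws.
          throw std::runtime_error(name + " is not defined");
        }
        // Sloppy mode: fall through and create a global property instead.
      }
      // ... perform the store ...
    }

    // Two thin entries replace one entry plus a mode argument.
    void StoreLookupSlot_Sloppy(const std::string& n, bool found) {
      StoreLookupSlotImpl(n, LanguageMode::kSloppy, found);
    }
    void StoreLookupSlot_Strict(const std::string& n, bool found) {
      StoreLookupSlotImpl(n, LanguageMode::kStrict, found);
    }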
diff --git a/deps/v8/src/runtime/runtime-simd.cc b/deps/v8/src/runtime/runtime-simd.cc
index 59e4fa1edb..9e5614242a 100644
--- a/deps/v8/src/runtime/runtime-simd.cc
+++ b/deps/v8/src/runtime/runtime-simd.cc
@@ -164,46 +164,6 @@ RUNTIME_FUNCTION(Runtime_IsSimdValue) {
}
-RUNTIME_FUNCTION(Runtime_SimdSameValue) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(Simd128Value, a, 0);
- bool result = false;
- // args[1] is of unknown type.
- if (args[1]->IsSimd128Value()) {
- Simd128Value* b = Simd128Value::cast(args[1]);
- if (a->map() == b->map()) {
- if (a->IsFloat32x4()) {
- result = Float32x4::cast(*a)->SameValue(Float32x4::cast(b));
- } else {
- result = a->BitwiseEquals(b);
- }
- }
- }
- return isolate->heap()->ToBoolean(result);
-}
-
-
-RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(Simd128Value, a, 0);
- bool result = false;
- // args[1] is of unknown type.
- if (args[1]->IsSimd128Value()) {
- Simd128Value* b = Simd128Value::cast(args[1]);
- if (a->map() == b->map()) {
- if (a->IsFloat32x4()) {
- result = Float32x4::cast(*a)->SameValueZero(Float32x4::cast(b));
- } else {
- result = a->BitwiseEquals(b);
- }
- }
- }
- return isolate->heap()->ToBoolean(result);
-}
-
-
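
For reference, the two equality notions the deleted entries implemented lanewise differ only on NaN and signed zero (ES2015 7.2.9 and 7.2.10). Scalar sketches of the two predicates, not the SIMD code itself:

    #include <cmath>

    bool SameValue(double a, double b) {
      if (std::isnan(a) && std::isnan(b)) return true;  // NaN matches NaN
      if (a == 0 && b == 0) {
        return std::signbit(a) == std::signbit(b);      // +0 and -0 differ
      }
      return a == b;
    }

    bool SameValueZero(double a, double b) {
      if (std::isnan(a) && std::isnan(b)) return true;  // NaN matches NaN
      return a == b;                                    // +0 equals -0 here
    }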
//-------------------------------------------------------------------
// Utility macros.
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index bd4dd699b4..fcec47ddab 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -341,7 +341,7 @@ RUNTIME_FUNCTION(Runtime_StringMatch) {
RUNTIME_ASSERT(regexp_info->HasFastObjectElements());
- RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
+ RegExpImpl::GlobalCache global_cache(regexp, subject, isolate);
if (global_cache.HasException()) return isolate->heap()->exception();
int capture_count = regexp->CaptureCount();
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 3b92d7f6ee..5f27a609a6 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -377,8 +377,7 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
RUNTIME_FUNCTION(Runtime_NativeScriptsCount) {
DCHECK(args.length() == 0);
- return Smi::FromInt(Natives::GetBuiltinsCount() +
- ExtraNatives::GetBuiltinsCount());
+ return Smi::FromInt(Natives::GetBuiltinsCount());
}
@@ -409,53 +408,54 @@ RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
return isolate->heap()->undefined_value();
}
+namespace {
-static int StackSize(Isolate* isolate) {
+int StackSize(Isolate* isolate) {
int n = 0;
for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) n++;
return n;
}
-
-static void PrintTransition(Isolate* isolate, Object* result) {
- // indentation
- {
- const int nmax = 80;
- int n = StackSize(isolate);
- if (n <= nmax)
- PrintF("%4d:%*s", n, n, "");
- else
- PrintF("%4d:%*s", n, nmax, "...");
- }
-
- if (result == NULL) {
- JavaScriptFrame::PrintTop(isolate, stdout, true, false);
- PrintF(" {\n");
+void PrintIndentation(Isolate* isolate) {
+ const int nmax = 80;
+ int n = StackSize(isolate);
+ if (n <= nmax) {
+ PrintF("%4d:%*s", n, n, "");
} else {
- // function result
- PrintF("} -> ");
- result->ShortPrint();
- PrintF("\n");
+ PrintF("%4d:%*s", n, nmax, "...");
}
}
+} // namespace
RUNTIME_FUNCTION(Runtime_TraceEnter) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
- PrintTransition(isolate, NULL);
+ DCHECK_EQ(0, args.length());
+ PrintIndentation(isolate);
+ JavaScriptFrame::PrintTop(isolate, stdout, true, false);
+ PrintF(" {\n");
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_TraceExit) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
- PrintTransition(isolate, obj);
+ PrintIndentation(isolate);
+ PrintF("} -> ");
+ obj->ShortPrint();
+ PrintF("\n");
return obj; // return TOS
}
+RUNTIME_FUNCTION(Runtime_TraceTailCall) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ PrintIndentation(isolate);
+ PrintF("} -> tail call ->\n");
+ return isolate->heap()->undefined_value();
+}
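
All three trace entries share PrintIndentation, which pads proportionally to the JavaScript stack depth and caps at 80 columns; the printf width specifier %*s does the padding. A directly runnable copy of that logic:

    #include <cstdio>

    void PrintIndentation(int stack_depth) {
      const int nmax = 80;  // cap so deep recursion stays readable
      if (stack_depth <= nmax) {
        std::printf("%4d:%*s", stack_depth, stack_depth, "");
      } else {
        std::printf("%4d:%*s", stack_depth, nmax, "...");
      }
    }
    // TraceEnter then prints "<function> {", TraceExit prints "} -> <result>",
    // and the new TraceTailCall prints "} -> tail call ->" at the same depth.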
RUNTIME_FUNCTION(Runtime_HaveSameMap) {
SealHandleScope shs(isolate);
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index a82b71ddf2..bf0ee9f1c1 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -414,42 +414,6 @@ RUNTIME_FUNCTION(Runtime_IsSharedInteger32TypedArray) {
}
-RUNTIME_FUNCTION(Runtime_DataViewInitialize) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 1);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_offset, 2);
- CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length, 3);
-
- DCHECK_EQ(v8::ArrayBufferView::kInternalFieldCount,
- holder->GetInternalFieldCount());
- for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
- holder->SetInternalField(i, Smi::FromInt(0));
- }
- size_t buffer_length = 0;
- size_t offset = 0;
- size_t length = 0;
- RUNTIME_ASSERT(
- TryNumberToSize(isolate, buffer->byte_length(), &buffer_length));
- RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_offset, &offset));
- RUNTIME_ASSERT(TryNumberToSize(isolate, *byte_length, &length));
-
- // TODO(jkummerow): When we have a "safe numerics" helper class, use it here.
- // Entire range [offset, offset + length] must be in bounds.
- RUNTIME_ASSERT(offset <= buffer_length);
- RUNTIME_ASSERT(offset + length <= buffer_length);
- // No overflow.
- RUNTIME_ASSERT(offset + length >= offset);
-
- holder->set_buffer(*buffer);
- holder->set_byte_offset(*byte_offset);
- holder->set_byte_length(*byte_length);
-
- return isolate->heap()->undefined_value();
-}
-
-
inline static bool NeedToFlipBytes(bool is_little_endian) {
#ifdef V8_TARGET_LITTLE_ENDIAN
return !is_little_endian;
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index ded2c090c8..c673b5a155 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -162,6 +162,22 @@ static inline ObjectPair MakePair(Object* x, Object* y) {
}
#endif
+
+// A mechanism to return a triple of Object pointers. In all calling
+// conventions, a struct of three pointers is returned in memory,
+// allocated by the caller, and passed as a pointer in a hidden first parameter.
+struct ObjectTriple {
+ Object* x;
+ Object* y;
+ Object* z;
+};
+
+static inline ObjectTriple MakeTriple(Object* x, Object* y, Object* z) {
+ ObjectTriple result = {x, y, z};
+  // The ObjectTriple result is written through the hidden first argument.
+ return result;
+}
+
} // namespace internal
} // namespace v8
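
The comment on ObjectTriple describes struct-return (sret) lowering: a by-value struct of three pointers is too large for return registers on the targets V8 supports, so the caller allocates it and passes its address as a hidden first parameter. A standalone sketch you can compile and inspect under common ABIs (an assumption, not V8 code):

    #include <cstdio>

    struct ObjectTriple {
      void* x;
      void* y;
      void* z;
    };

    // Under common ABIs the compiler rewrites this roughly as
    //   void MakeTriple(ObjectTriple* sret, void* x, void* y, void* z);
    ObjectTriple MakeTriple(void* x, void* y, void* z) {
      ObjectTriple result = {x, y, z};
      return result;  // written through the hidden sret pointer
    }

    int main() {
      int a = 1, b = 2, c = 3;
      ObjectTriple t = MakeTriple(&a, &b, &c);
      std::printf("%p %p %p\n", t.x, t.y, t.z);
      return 0;
    }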
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 90f4e4ce33..151e240f25 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -27,6 +27,12 @@ FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
FOR_EACH_INTRINSIC_RETURN_PAIR(P)
#undef P
+#define T(name, number_of_args, result_size) \
+ ObjectTriple Runtime_##name(int args_length, Object** args_object, \
+ Isolate* isolate);
+FOR_EACH_INTRINSIC_RETURN_TRIPLE(T)
+#undef T
+
#define F(name, number_of_args, result_size) \
{ \
@@ -124,5 +130,6 @@ std::ostream& operator<<(std::ostream& os, Runtime::FunctionId id) {
return os << Runtime::FunctionForId(id)->name;
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 283087ae06..3b97bb2b36 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -6,6 +6,7 @@
#define V8_RUNTIME_RUNTIME_H_
#include "src/allocation.h"
+#include "src/base/platform/time.h"
#include "src/objects.h"
#include "src/unicode.h"
#include "src/zone.h"
@@ -50,7 +51,6 @@ namespace internal {
F(GetCachedArrayIndex, 1, 1) \
F(FixedArrayGet, 2, 1) \
F(FixedArraySet, 3, 1) \
- F(FastOneByteArrayJoin, 2, 1) \
F(ArraySpeciesConstructor, 1, 1)
@@ -73,7 +73,6 @@ namespace internal {
F(AtomicsFutexWakeOrRequeue, 5, 1) \
F(AtomicsFutexNumWaitersForTesting, 2, 1)
-
#define FOR_EACH_INTRINSIC_CLASSES(F) \
F(ThrowNonMethodError, 0, 1) \
F(ThrowUnsupportedSuperError, 0, 1) \
@@ -82,18 +81,16 @@ namespace internal {
F(ThrowStaticPrototypeError, 0, 1) \
F(ThrowIfStaticPrototype, 1, 1) \
F(HomeObjectSymbol, 0, 1) \
- F(DefineClass, 5, 1) \
+ F(DefineClass, 4, 1) \
F(FinalizeClassDefinition, 2, 1) \
- F(DefineClassMethod, 3, 1) \
- F(LoadFromSuper, 4, 1) \
- F(LoadKeyedFromSuper, 4, 1) \
+ F(LoadFromSuper, 3, 1) \
+ F(LoadKeyedFromSuper, 3, 1) \
F(StoreToSuper_Strict, 4, 1) \
F(StoreToSuper_Sloppy, 4, 1) \
F(StoreKeyedToSuper_Strict, 4, 1) \
F(StoreKeyedToSuper_Sloppy, 4, 1) \
F(GetSuperConstructor, 1, 1)
-
#define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
F(StringGetRawHashField, 1, 1) \
F(TheHole, 0, 1) \
@@ -203,11 +200,11 @@ namespace internal {
#define FOR_EACH_INTRINSIC_FORIN(F) \
F(ForInDone, 2, 1) \
+ F(ForInEnumerate, 1, 1) \
F(ForInFilter, 2, 1) \
F(ForInNext, 4, 1) \
F(ForInStep, 1, 1)
-
#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
F(InterpreterEquals, 2, 1) \
F(InterpreterNotEquals, 2, 1) \
@@ -221,8 +218,10 @@ namespace internal {
F(InterpreterLogicalNot, 1, 1) \
F(InterpreterTypeOf, 1, 1) \
F(InterpreterNewClosure, 2, 1) \
- F(InterpreterForInPrepare, 1, 1)
-
+ F(InterpreterTraceBytecodeEntry, 3, 1) \
+ F(InterpreterTraceBytecodeExit, 3, 1) \
+ F(InterpreterClearPendingMessage, 0, 1) \
+ F(InterpreterSetPendingMessage, 1, 1)
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
F(FunctionGetName, 1, 1) \
@@ -248,18 +247,19 @@ namespace internal {
F(IsFunction, 1, 1) \
F(FunctionToString, 1, 1)
-
#define FOR_EACH_INTRINSIC_GENERATOR(F) \
F(CreateJSGeneratorObject, 0, 1) \
- F(SuspendJSGeneratorObject, -1, 1) \
+ F(SuspendJSGeneratorObject, 1, 1) \
F(ResumeJSGeneratorObject, 3, 1) \
F(GeneratorClose, 1, 1) \
F(GeneratorGetFunction, 1, 1) \
F(GeneratorGetContext, 1, 1) \
F(GeneratorGetReceiver, 1, 1) \
+ F(GeneratorGetInput, 1, 1) \
F(GeneratorGetContinuation, 1, 1) \
F(GeneratorGetSourcePosition, 1, 1) \
F(GeneratorNext, 2, 1) \
+ F(GeneratorReturn, 2, 1) \
F(GeneratorThrow, 2, 1)
@@ -293,50 +293,52 @@ namespace internal {
#endif
-#define FOR_EACH_INTRINSIC_INTERNAL(F) \
- F(CheckIsBootstrapping, 0, 1) \
- F(ExportFromRuntime, 1, 1) \
- F(ExportExperimentalFromRuntime, 1, 1) \
- F(InstallToContext, 1, 1) \
- F(Throw, 1, 1) \
- F(ReThrow, 1, 1) \
- F(UnwindAndFindExceptionHandler, 0, 1) \
- F(PromoteScheduledException, 0, 1) \
- F(ThrowReferenceError, 1, 1) \
- F(ThrowApplyNonFunction, 1, 1) \
- F(NewTypeError, 2, 1) \
- F(NewSyntaxError, 2, 1) \
- F(NewReferenceError, 2, 1) \
- F(ThrowIllegalInvocation, 0, 1) \
- F(ThrowIteratorResultNotAnObject, 1, 1) \
- F(ThrowStackOverflow, 0, 1) \
- F(ThrowStrongModeImplicitConversion, 0, 1) \
- F(PromiseRejectEvent, 3, 1) \
- F(PromiseRevokeReject, 1, 1) \
- F(StackGuard, 0, 1) \
- F(Interrupt, 0, 1) \
- F(AllocateInNewSpace, 1, 1) \
- F(AllocateInTargetSpace, 2, 1) \
- F(CollectStackTrace, 2, 1) \
- F(MessageGetStartPosition, 1, 1) \
- F(MessageGetScript, 1, 1) \
- F(FormatMessageString, 4, 1) \
- F(CallSiteGetFileNameRT, 1, 1) \
- F(CallSiteGetFunctionNameRT, 1, 1) \
- F(CallSiteGetScriptNameOrSourceUrlRT, 1, 1) \
- F(CallSiteGetMethodNameRT, 1, 1) \
- F(CallSiteGetLineNumberRT, 1, 1) \
- F(CallSiteGetColumnNumberRT, 1, 1) \
- F(CallSiteIsNativeRT, 1, 1) \
- F(CallSiteIsToplevelRT, 1, 1) \
- F(CallSiteIsEvalRT, 1, 1) \
- F(CallSiteIsConstructorRT, 1, 1) \
- F(IS_VAR, 1, 1) \
- F(IncrementStatsCounter, 1, 1) \
- F(ThrowConstructedNonConstructable, 1, 1) \
- F(ThrowCalledNonCallable, 1, 1) \
- F(CreateListFromArrayLike, 1, 1) \
- F(IncrementUseCounter, 1, 1)
+#define FOR_EACH_INTRINSIC_INTERNAL(F) \
+ F(CheckIsBootstrapping, 0, 1) \
+ F(ExportFromRuntime, 1, 1) \
+ F(ExportExperimentalFromRuntime, 1, 1) \
+ F(InstallToContext, 1, 1) \
+ F(Throw, 1, 1) \
+ F(ReThrow, 1, 1) \
+ F(UnwindAndFindExceptionHandler, 0, 1) \
+ F(PromoteScheduledException, 0, 1) \
+ F(ThrowReferenceError, 1, 1) \
+ F(ThrowApplyNonFunction, 1, 1) \
+ F(NewTypeError, 2, 1) \
+ F(NewSyntaxError, 2, 1) \
+ F(NewReferenceError, 2, 1) \
+ F(ThrowIllegalInvocation, 0, 1) \
+ F(ThrowIteratorResultNotAnObject, 1, 1) \
+ F(ThrowStackOverflow, 0, 1) \
+ F(ThrowStrongModeImplicitConversion, 0, 1) \
+ F(PromiseRejectEvent, 3, 1) \
+ F(PromiseRevokeReject, 1, 1) \
+ F(StackGuard, 0, 1) \
+ F(Interrupt, 0, 1) \
+ F(AllocateInNewSpace, 1, 1) \
+ F(AllocateInTargetSpace, 2, 1) \
+ F(CollectStackTrace, 2, 1) \
+ F(MessageGetStartPosition, 1, 1) \
+ F(MessageGetScript, 1, 1) \
+ F(FormatMessageString, 4, 1) \
+ F(CallSiteGetFileNameRT, 1, 1) \
+ F(CallSiteGetFunctionNameRT, 1, 1) \
+ F(CallSiteGetScriptNameOrSourceUrlRT, 1, 1) \
+ F(CallSiteGetMethodNameRT, 1, 1) \
+ F(CallSiteGetLineNumberRT, 1, 1) \
+ F(CallSiteGetColumnNumberRT, 1, 1) \
+ F(CallSiteIsNativeRT, 1, 1) \
+ F(CallSiteIsToplevelRT, 1, 1) \
+ F(CallSiteIsEvalRT, 1, 1) \
+ F(CallSiteIsConstructorRT, 1, 1) \
+ F(IS_VAR, 1, 1) \
+ F(IncrementStatsCounter, 1, 1) \
+ F(ThrowConstructedNonConstructable, 1, 1) \
+ F(ThrowDerivedConstructorReturnedNonObject, 0, 1) \
+ F(ThrowCalledNonCallable, 1, 1) \
+ F(CreateListFromArrayLike, 1, 1) \
+ F(IncrementUseCounter, 1, 1) \
+ F(GetAndResetRuntimeCallStats, 0, 1)
#define FOR_EACH_INTRINSIC_JSON(F) \
@@ -345,12 +347,11 @@ namespace internal {
F(ParseJson, 1, 1)
-#define FOR_EACH_INTRINSIC_LITERALS(F) \
- F(CreateRegExpLiteral, 4, 1) \
- F(CreateObjectLiteral, 4, 1) \
- F(CreateArrayLiteral, 4, 1) \
- F(CreateArrayLiteralStubBailout, 3, 1) \
- F(StoreArrayLiteralElement, 5, 1)
+#define FOR_EACH_INTRINSIC_LITERALS(F) \
+ F(CreateRegExpLiteral, 4, 1) \
+ F(CreateObjectLiteral, 4, 1) \
+ F(CreateArrayLiteral, 4, 1) \
+ F(CreateArrayLiteralStubBailout, 3, 1)
#define FOR_EACH_INTRINSIC_LIVEEDIT(F) \
@@ -385,7 +386,6 @@ namespace internal {
F(RoundNumber, 1, 1) \
F(MathSqrt, 1, 1) \
F(MathFround, 1, 1) \
- F(IsMinusZero, 1, 1) \
F(GenerateRandomNumbers, 1, 1)
@@ -410,18 +410,14 @@ namespace internal {
F(GetHoleNaNUpper, 0, 1) \
F(GetHoleNaNLower, 0, 1)
-
#define FOR_EACH_INTRINSIC_OBJECT(F) \
F(GetPrototype, 1, 1) \
F(InternalSetPrototype, 2, 1) \
F(SetPrototype, 2, 1) \
- F(GetOwnProperty, 2, 1) \
F(GetOwnProperty_Legacy, 2, 1) \
F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
F(GetProperty, 2, 1) \
- F(GetPropertyStrong, 2, 1) \
F(KeyedGetProperty, 2, 1) \
- F(KeyedGetPropertyStrong, 2, 1) \
F(LoadGlobalViaContext, 1, 1) \
F(StoreGlobalViaContext_Sloppy, 2, 1) \
F(StoreGlobalViaContext_Strict, 2, 1) \
@@ -434,7 +430,6 @@ namespace internal {
F(HasOwnProperty, 2, 1) \
F(HasProperty, 2, 1) \
F(PropertyIsEnumerable, 2, 1) \
- F(GetPropertyNamesFast, 1, 1) \
F(GetOwnPropertyKeys, 2, 1) \
F(GetInterceptorInfo, 1, 1) \
F(ToFastProperties, 1, 1) \
@@ -448,12 +443,10 @@ namespace internal {
F(IsJSGlobalProxy, 1, 1) \
F(DefineAccessorPropertyUnchecked, 5, 1) \
F(DefineDataPropertyUnchecked, 4, 1) \
+ F(DefineDataPropertyInLiteral, 5, 1) \
F(GetDataProperty, 2, 1) \
F(HasFastPackedElements, 1, 1) \
F(ValueOf, 1, 1) \
- F(SetValueOf, 2, 1) \
- F(JSValueGetValue, 1, 1) \
- F(ObjectEquals, 2, 1) \
F(IsJSReceiver, 1, 1) \
F(IsStrong, 1, 1) \
F(ClassOf, 1, 1) \
@@ -470,8 +463,9 @@ namespace internal {
F(ToName, 1, 1) \
F(Equals, 2, 1) \
F(StrictEquals, 2, 1) \
+ F(SameValue, 2, 1) \
+ F(SameValueZero, 2, 1) \
F(Compare, 3, 1) \
- F(Compare_Strong, 3, 1) \
F(InstanceOf, 2, 1) \
F(HasInPrototypeChain, 2, 1) \
F(CreateIterResultObject, 2, 1) \
@@ -479,7 +473,6 @@ namespace internal {
F(ObjectDefineProperties, 2, 1) \
F(ObjectDefineProperty, 3, 1)
-
#define FOR_EACH_INTRINSIC_OBSERVE(F) \
F(IsObserved, 1, 1) \
F(SetIsObserved, 1, 1) \
@@ -493,30 +486,18 @@ namespace internal {
F(GetObjectContextObjectGetNotifier, 1, 1) \
F(GetObjectContextNotifierPerformChange, 1, 1)
-
#define FOR_EACH_INTRINSIC_OPERATORS(F) \
F(Multiply, 2, 1) \
- F(Multiply_Strong, 2, 1) \
F(Divide, 2, 1) \
- F(Divide_Strong, 2, 1) \
F(Modulus, 2, 1) \
- F(Modulus_Strong, 2, 1) \
F(Add, 2, 1) \
- F(Add_Strong, 2, 1) \
F(Subtract, 2, 1) \
- F(Subtract_Strong, 2, 1) \
F(ShiftLeft, 2, 1) \
- F(ShiftLeft_Strong, 2, 1) \
F(ShiftRight, 2, 1) \
- F(ShiftRight_Strong, 2, 1) \
F(ShiftRightLogical, 2, 1) \
- F(ShiftRightLogical_Strong, 2, 1) \
F(BitwiseAnd, 2, 1) \
- F(BitwiseAnd_Strong, 2, 1) \
F(BitwiseOr, 2, 1) \
- F(BitwiseOr_Strong, 2, 1) \
- F(BitwiseXor, 2, 1) \
- F(BitwiseXor_Strong, 2, 1)
+ F(BitwiseXor, 2, 1)
#define FOR_EACH_INTRINSIC_PROXY(F) \
F(IsJSProxy, 1, 1) \
@@ -538,7 +519,6 @@ namespace internal {
F(RegExpExecReThrow, 4, 1) \
F(IsRegExp, 1, 1)
-
#define FOR_EACH_INTRINSIC_SCOPES(F) \
F(ThrowConstAssignError, 0, 1) \
F(DeclareGlobals, 2, 1) \
@@ -547,11 +527,9 @@ namespace internal {
F(DeclareLookupSlot, 3, 1) \
F(InitializeLegacyConstLookupSlot, 3, 1) \
F(NewSloppyArguments_Generic, 1, 1) \
- F(NewStrictArguments_Generic, 1, 1) \
- F(NewRestArguments_Generic, 2, 1) \
+ F(NewStrictArguments, 1, 1) \
+ F(NewRestParameter, 1, 1) \
F(NewSloppyArguments, 3, 1) \
- F(NewStrictArguments, 3, 1) \
- F(NewRestParam, 3, 1) \
F(NewClosure, 1, 1) \
F(NewClosure_Tenured, 1, 1) \
F(NewScriptContext, 2, 1) \
@@ -562,317 +540,321 @@ namespace internal {
F(IsJSModule, 1, 1) \
F(PushModuleContext, 2, 1) \
F(DeclareModules, 1, 1) \
- F(DeleteLookupSlot, 2, 1) \
- F(StoreLookupSlot, 4, 1) \
- F(ArgumentsLength, 0, 1) \
- F(Arguments, 1, 1)
-
-
-#define FOR_EACH_INTRINSIC_SIMD(F) \
- F(IsSimdValue, 1, 1) \
- F(SimdSameValue, 2, 1) \
- F(SimdSameValueZero, 2, 1) \
- F(CreateFloat32x4, 4, 1) \
- F(CreateInt32x4, 4, 1) \
- F(CreateUint32x4, 4, 1) \
- F(CreateBool32x4, 4, 1) \
- F(CreateInt16x8, 8, 1) \
- F(CreateUint16x8, 8, 1) \
- F(CreateBool16x8, 8, 1) \
- F(CreateInt8x16, 16, 1) \
- F(CreateUint8x16, 16, 1) \
- F(CreateBool8x16, 16, 1) \
- F(Float32x4Check, 1, 1) \
- F(Float32x4ExtractLane, 2, 1) \
- F(Float32x4ReplaceLane, 3, 1) \
- F(Float32x4Abs, 1, 1) \
- F(Float32x4Neg, 1, 1) \
- F(Float32x4Sqrt, 1, 1) \
- F(Float32x4RecipApprox, 1, 1) \
- F(Float32x4RecipSqrtApprox, 1, 1) \
- F(Float32x4Add, 2, 1) \
- F(Float32x4Sub, 2, 1) \
- F(Float32x4Mul, 2, 1) \
- F(Float32x4Div, 2, 1) \
- F(Float32x4Min, 2, 1) \
- F(Float32x4Max, 2, 1) \
- F(Float32x4MinNum, 2, 1) \
- F(Float32x4MaxNum, 2, 1) \
- F(Float32x4Equal, 2, 1) \
- F(Float32x4NotEqual, 2, 1) \
- F(Float32x4LessThan, 2, 1) \
- F(Float32x4LessThanOrEqual, 2, 1) \
- F(Float32x4GreaterThan, 2, 1) \
- F(Float32x4GreaterThanOrEqual, 2, 1) \
- F(Float32x4Select, 3, 1) \
- F(Float32x4Swizzle, 5, 1) \
- F(Float32x4Shuffle, 6, 1) \
- F(Float32x4FromInt32x4, 1, 1) \
- F(Float32x4FromUint32x4, 1, 1) \
- F(Float32x4FromInt32x4Bits, 1, 1) \
- F(Float32x4FromUint32x4Bits, 1, 1) \
- F(Float32x4FromInt16x8Bits, 1, 1) \
- F(Float32x4FromUint16x8Bits, 1, 1) \
- F(Float32x4FromInt8x16Bits, 1, 1) \
- F(Float32x4FromUint8x16Bits, 1, 1) \
- F(Float32x4Load, 2, 1) \
- F(Float32x4Load1, 2, 1) \
- F(Float32x4Load2, 2, 1) \
- F(Float32x4Load3, 2, 1) \
- F(Float32x4Store, 3, 1) \
- F(Float32x4Store1, 3, 1) \
- F(Float32x4Store2, 3, 1) \
- F(Float32x4Store3, 3, 1) \
- F(Int32x4Check, 1, 1) \
- F(Int32x4ExtractLane, 2, 1) \
- F(Int32x4ReplaceLane, 3, 1) \
- F(Int32x4Neg, 1, 1) \
- F(Int32x4Add, 2, 1) \
- F(Int32x4Sub, 2, 1) \
- F(Int32x4Mul, 2, 1) \
- F(Int32x4Min, 2, 1) \
- F(Int32x4Max, 2, 1) \
- F(Int32x4And, 2, 1) \
- F(Int32x4Or, 2, 1) \
- F(Int32x4Xor, 2, 1) \
- F(Int32x4Not, 1, 1) \
- F(Int32x4ShiftLeftByScalar, 2, 1) \
- F(Int32x4ShiftRightByScalar, 2, 1) \
- F(Int32x4Equal, 2, 1) \
- F(Int32x4NotEqual, 2, 1) \
- F(Int32x4LessThan, 2, 1) \
- F(Int32x4LessThanOrEqual, 2, 1) \
- F(Int32x4GreaterThan, 2, 1) \
- F(Int32x4GreaterThanOrEqual, 2, 1) \
- F(Int32x4Select, 3, 1) \
- F(Int32x4Swizzle, 5, 1) \
- F(Int32x4Shuffle, 6, 1) \
- F(Int32x4FromFloat32x4, 1, 1) \
- F(Int32x4FromUint32x4, 1, 1) \
- F(Int32x4FromFloat32x4Bits, 1, 1) \
- F(Int32x4FromUint32x4Bits, 1, 1) \
- F(Int32x4FromInt16x8Bits, 1, 1) \
- F(Int32x4FromUint16x8Bits, 1, 1) \
- F(Int32x4FromInt8x16Bits, 1, 1) \
- F(Int32x4FromUint8x16Bits, 1, 1) \
- F(Int32x4Load, 2, 1) \
- F(Int32x4Load1, 2, 1) \
- F(Int32x4Load2, 2, 1) \
- F(Int32x4Load3, 2, 1) \
- F(Int32x4Store, 3, 1) \
- F(Int32x4Store1, 3, 1) \
- F(Int32x4Store2, 3, 1) \
- F(Int32x4Store3, 3, 1) \
- F(Uint32x4Check, 1, 1) \
- F(Uint32x4ExtractLane, 2, 1) \
- F(Uint32x4ReplaceLane, 3, 1) \
- F(Uint32x4Add, 2, 1) \
- F(Uint32x4Sub, 2, 1) \
- F(Uint32x4Mul, 2, 1) \
- F(Uint32x4Min, 2, 1) \
- F(Uint32x4Max, 2, 1) \
- F(Uint32x4And, 2, 1) \
- F(Uint32x4Or, 2, 1) \
- F(Uint32x4Xor, 2, 1) \
- F(Uint32x4Not, 1, 1) \
- F(Uint32x4ShiftLeftByScalar, 2, 1) \
- F(Uint32x4ShiftRightByScalar, 2, 1) \
- F(Uint32x4Equal, 2, 1) \
- F(Uint32x4NotEqual, 2, 1) \
- F(Uint32x4LessThan, 2, 1) \
- F(Uint32x4LessThanOrEqual, 2, 1) \
- F(Uint32x4GreaterThan, 2, 1) \
- F(Uint32x4GreaterThanOrEqual, 2, 1) \
- F(Uint32x4Select, 3, 1) \
- F(Uint32x4Swizzle, 5, 1) \
- F(Uint32x4Shuffle, 6, 1) \
- F(Uint32x4FromFloat32x4, 1, 1) \
- F(Uint32x4FromInt32x4, 1, 1) \
- F(Uint32x4FromFloat32x4Bits, 1, 1) \
- F(Uint32x4FromInt32x4Bits, 1, 1) \
- F(Uint32x4FromInt16x8Bits, 1, 1) \
- F(Uint32x4FromUint16x8Bits, 1, 1) \
- F(Uint32x4FromInt8x16Bits, 1, 1) \
- F(Uint32x4FromUint8x16Bits, 1, 1) \
- F(Uint32x4Load, 2, 1) \
- F(Uint32x4Load1, 2, 1) \
- F(Uint32x4Load2, 2, 1) \
- F(Uint32x4Load3, 2, 1) \
- F(Uint32x4Store, 3, 1) \
- F(Uint32x4Store1, 3, 1) \
- F(Uint32x4Store2, 3, 1) \
- F(Uint32x4Store3, 3, 1) \
- F(Bool32x4Check, 1, 1) \
- F(Bool32x4ExtractLane, 2, 1) \
- F(Bool32x4ReplaceLane, 3, 1) \
- F(Bool32x4And, 2, 1) \
- F(Bool32x4Or, 2, 1) \
- F(Bool32x4Xor, 2, 1) \
- F(Bool32x4Not, 1, 1) \
- F(Bool32x4AnyTrue, 1, 1) \
- F(Bool32x4AllTrue, 1, 1) \
- F(Bool32x4Swizzle, 5, 1) \
- F(Bool32x4Shuffle, 6, 1) \
- F(Int16x8Check, 1, 1) \
- F(Int16x8ExtractLane, 2, 1) \
- F(Int16x8ReplaceLane, 3, 1) \
- F(Int16x8Neg, 1, 1) \
- F(Int16x8Add, 2, 1) \
- F(Int16x8AddSaturate, 2, 1) \
- F(Int16x8Sub, 2, 1) \
- F(Int16x8SubSaturate, 2, 1) \
- F(Int16x8Mul, 2, 1) \
- F(Int16x8Min, 2, 1) \
- F(Int16x8Max, 2, 1) \
- F(Int16x8And, 2, 1) \
- F(Int16x8Or, 2, 1) \
- F(Int16x8Xor, 2, 1) \
- F(Int16x8Not, 1, 1) \
- F(Int16x8ShiftLeftByScalar, 2, 1) \
- F(Int16x8ShiftRightByScalar, 2, 1) \
- F(Int16x8Equal, 2, 1) \
- F(Int16x8NotEqual, 2, 1) \
- F(Int16x8LessThan, 2, 1) \
- F(Int16x8LessThanOrEqual, 2, 1) \
- F(Int16x8GreaterThan, 2, 1) \
- F(Int16x8GreaterThanOrEqual, 2, 1) \
- F(Int16x8Select, 3, 1) \
- F(Int16x8Swizzle, 9, 1) \
- F(Int16x8Shuffle, 10, 1) \
- F(Int16x8FromUint16x8, 1, 1) \
- F(Int16x8FromFloat32x4Bits, 1, 1) \
- F(Int16x8FromInt32x4Bits, 1, 1) \
- F(Int16x8FromUint32x4Bits, 1, 1) \
- F(Int16x8FromUint16x8Bits, 1, 1) \
- F(Int16x8FromInt8x16Bits, 1, 1) \
- F(Int16x8FromUint8x16Bits, 1, 1) \
- F(Int16x8Load, 2, 1) \
- F(Int16x8Store, 3, 1) \
- F(Uint16x8Check, 1, 1) \
- F(Uint16x8ExtractLane, 2, 1) \
- F(Uint16x8ReplaceLane, 3, 1) \
- F(Uint16x8Add, 2, 1) \
- F(Uint16x8AddSaturate, 2, 1) \
- F(Uint16x8Sub, 2, 1) \
- F(Uint16x8SubSaturate, 2, 1) \
- F(Uint16x8Mul, 2, 1) \
- F(Uint16x8Min, 2, 1) \
- F(Uint16x8Max, 2, 1) \
- F(Uint16x8And, 2, 1) \
- F(Uint16x8Or, 2, 1) \
- F(Uint16x8Xor, 2, 1) \
- F(Uint16x8Not, 1, 1) \
- F(Uint16x8ShiftLeftByScalar, 2, 1) \
- F(Uint16x8ShiftRightByScalar, 2, 1) \
- F(Uint16x8Equal, 2, 1) \
- F(Uint16x8NotEqual, 2, 1) \
- F(Uint16x8LessThan, 2, 1) \
- F(Uint16x8LessThanOrEqual, 2, 1) \
- F(Uint16x8GreaterThan, 2, 1) \
- F(Uint16x8GreaterThanOrEqual, 2, 1) \
- F(Uint16x8Select, 3, 1) \
- F(Uint16x8Swizzle, 9, 1) \
- F(Uint16x8Shuffle, 10, 1) \
- F(Uint16x8FromInt16x8, 1, 1) \
- F(Uint16x8FromFloat32x4Bits, 1, 1) \
- F(Uint16x8FromInt32x4Bits, 1, 1) \
- F(Uint16x8FromUint32x4Bits, 1, 1) \
- F(Uint16x8FromInt16x8Bits, 1, 1) \
- F(Uint16x8FromInt8x16Bits, 1, 1) \
- F(Uint16x8FromUint8x16Bits, 1, 1) \
- F(Uint16x8Load, 2, 1) \
- F(Uint16x8Store, 3, 1) \
- F(Bool16x8Check, 1, 1) \
- F(Bool16x8ExtractLane, 2, 1) \
- F(Bool16x8ReplaceLane, 3, 1) \
- F(Bool16x8And, 2, 1) \
- F(Bool16x8Or, 2, 1) \
- F(Bool16x8Xor, 2, 1) \
- F(Bool16x8Not, 1, 1) \
- F(Bool16x8AnyTrue, 1, 1) \
- F(Bool16x8AllTrue, 1, 1) \
- F(Bool16x8Swizzle, 9, 1) \
- F(Bool16x8Shuffle, 10, 1) \
- F(Int8x16Check, 1, 1) \
- F(Int8x16ExtractLane, 2, 1) \
- F(Int8x16ReplaceLane, 3, 1) \
- F(Int8x16Neg, 1, 1) \
- F(Int8x16Add, 2, 1) \
- F(Int8x16AddSaturate, 2, 1) \
- F(Int8x16Sub, 2, 1) \
- F(Int8x16SubSaturate, 2, 1) \
- F(Int8x16Mul, 2, 1) \
- F(Int8x16Min, 2, 1) \
- F(Int8x16Max, 2, 1) \
- F(Int8x16And, 2, 1) \
- F(Int8x16Or, 2, 1) \
- F(Int8x16Xor, 2, 1) \
- F(Int8x16Not, 1, 1) \
- F(Int8x16ShiftLeftByScalar, 2, 1) \
- F(Int8x16ShiftRightByScalar, 2, 1) \
- F(Int8x16Equal, 2, 1) \
- F(Int8x16NotEqual, 2, 1) \
- F(Int8x16LessThan, 2, 1) \
- F(Int8x16LessThanOrEqual, 2, 1) \
- F(Int8x16GreaterThan, 2, 1) \
- F(Int8x16GreaterThanOrEqual, 2, 1) \
- F(Int8x16Select, 3, 1) \
- F(Int8x16Swizzle, 17, 1) \
- F(Int8x16Shuffle, 18, 1) \
- F(Int8x16FromUint8x16, 1, 1) \
- F(Int8x16FromFloat32x4Bits, 1, 1) \
- F(Int8x16FromInt32x4Bits, 1, 1) \
- F(Int8x16FromUint32x4Bits, 1, 1) \
- F(Int8x16FromInt16x8Bits, 1, 1) \
- F(Int8x16FromUint16x8Bits, 1, 1) \
- F(Int8x16FromUint8x16Bits, 1, 1) \
- F(Int8x16Load, 2, 1) \
- F(Int8x16Store, 3, 1) \
- F(Uint8x16Check, 1, 1) \
- F(Uint8x16ExtractLane, 2, 1) \
- F(Uint8x16ReplaceLane, 3, 1) \
- F(Uint8x16Add, 2, 1) \
- F(Uint8x16AddSaturate, 2, 1) \
- F(Uint8x16Sub, 2, 1) \
- F(Uint8x16SubSaturate, 2, 1) \
- F(Uint8x16Mul, 2, 1) \
- F(Uint8x16Min, 2, 1) \
- F(Uint8x16Max, 2, 1) \
- F(Uint8x16And, 2, 1) \
- F(Uint8x16Or, 2, 1) \
- F(Uint8x16Xor, 2, 1) \
- F(Uint8x16Not, 1, 1) \
- F(Uint8x16ShiftLeftByScalar, 2, 1) \
- F(Uint8x16ShiftRightByScalar, 2, 1) \
- F(Uint8x16Equal, 2, 1) \
- F(Uint8x16NotEqual, 2, 1) \
- F(Uint8x16LessThan, 2, 1) \
- F(Uint8x16LessThanOrEqual, 2, 1) \
- F(Uint8x16GreaterThan, 2, 1) \
- F(Uint8x16GreaterThanOrEqual, 2, 1) \
- F(Uint8x16Select, 3, 1) \
- F(Uint8x16Swizzle, 17, 1) \
- F(Uint8x16Shuffle, 18, 1) \
- F(Uint8x16FromInt8x16, 1, 1) \
- F(Uint8x16FromFloat32x4Bits, 1, 1) \
- F(Uint8x16FromInt32x4Bits, 1, 1) \
- F(Uint8x16FromUint32x4Bits, 1, 1) \
- F(Uint8x16FromInt16x8Bits, 1, 1) \
- F(Uint8x16FromUint16x8Bits, 1, 1) \
- F(Uint8x16FromInt8x16Bits, 1, 1) \
- F(Uint8x16Load, 2, 1) \
- F(Uint8x16Store, 3, 1) \
- F(Bool8x16Check, 1, 1) \
- F(Bool8x16ExtractLane, 2, 1) \
- F(Bool8x16ReplaceLane, 3, 1) \
- F(Bool8x16And, 2, 1) \
- F(Bool8x16Or, 2, 1) \
- F(Bool8x16Xor, 2, 1) \
- F(Bool8x16Not, 1, 1) \
- F(Bool8x16AnyTrue, 1, 1) \
- F(Bool8x16AllTrue, 1, 1) \
- F(Bool8x16Swizzle, 17, 1) \
- F(Bool8x16Shuffle, 18, 1)
+ F(DeleteLookupSlot, 1, 1) \
+ F(LoadLookupSlot, 1, 1) \
+ F(LoadLookupSlotInsideTypeof, 1, 1) \
+ F(StoreLookupSlot_Sloppy, 2, 1) \
+ F(StoreLookupSlot_Strict, 2, 1)
+
+#define FOR_EACH_INTRINSIC_SIMD(F) \
+ F(IsSimdValue, 1, 1) \
+ F(CreateFloat32x4, 4, 1) \
+ F(CreateInt32x4, 4, 1) \
+ F(CreateUint32x4, 4, 1) \
+ F(CreateBool32x4, 4, 1) \
+ F(CreateInt16x8, 8, 1) \
+ F(CreateUint16x8, 8, 1) \
+ F(CreateBool16x8, 8, 1) \
+ F(CreateInt8x16, 16, 1) \
+ F(CreateUint8x16, 16, 1) \
+ F(CreateBool8x16, 16, 1) \
+ F(Float32x4Check, 1, 1) \
+ F(Float32x4ExtractLane, 2, 1) \
+ F(Float32x4ReplaceLane, 3, 1) \
+ F(Float32x4Abs, 1, 1) \
+ F(Float32x4Neg, 1, 1) \
+ F(Float32x4Sqrt, 1, 1) \
+ F(Float32x4RecipApprox, 1, 1) \
+ F(Float32x4RecipSqrtApprox, 1, 1) \
+ F(Float32x4Add, 2, 1) \
+ F(Float32x4Sub, 2, 1) \
+ F(Float32x4Mul, 2, 1) \
+ F(Float32x4Div, 2, 1) \
+ F(Float32x4Min, 2, 1) \
+ F(Float32x4Max, 2, 1) \
+ F(Float32x4MinNum, 2, 1) \
+ F(Float32x4MaxNum, 2, 1) \
+ F(Float32x4Equal, 2, 1) \
+ F(Float32x4NotEqual, 2, 1) \
+ F(Float32x4LessThan, 2, 1) \
+ F(Float32x4LessThanOrEqual, 2, 1) \
+ F(Float32x4GreaterThan, 2, 1) \
+ F(Float32x4GreaterThanOrEqual, 2, 1) \
+ F(Float32x4Select, 3, 1) \
+ F(Float32x4Swizzle, 5, 1) \
+ F(Float32x4Shuffle, 6, 1) \
+ F(Float32x4FromInt32x4, 1, 1) \
+ F(Float32x4FromUint32x4, 1, 1) \
+ F(Float32x4FromInt32x4Bits, 1, 1) \
+ F(Float32x4FromUint32x4Bits, 1, 1) \
+ F(Float32x4FromInt16x8Bits, 1, 1) \
+ F(Float32x4FromUint16x8Bits, 1, 1) \
+ F(Float32x4FromInt8x16Bits, 1, 1) \
+ F(Float32x4FromUint8x16Bits, 1, 1) \
+ F(Float32x4Load, 2, 1) \
+ F(Float32x4Load1, 2, 1) \
+ F(Float32x4Load2, 2, 1) \
+ F(Float32x4Load3, 2, 1) \
+ F(Float32x4Store, 3, 1) \
+ F(Float32x4Store1, 3, 1) \
+ F(Float32x4Store2, 3, 1) \
+ F(Float32x4Store3, 3, 1) \
+ F(Int32x4Check, 1, 1) \
+ F(Int32x4ExtractLane, 2, 1) \
+ F(Int32x4ReplaceLane, 3, 1) \
+ F(Int32x4Neg, 1, 1) \
+ F(Int32x4Add, 2, 1) \
+ F(Int32x4Sub, 2, 1) \
+ F(Int32x4Mul, 2, 1) \
+ F(Int32x4Min, 2, 1) \
+ F(Int32x4Max, 2, 1) \
+ F(Int32x4And, 2, 1) \
+ F(Int32x4Or, 2, 1) \
+ F(Int32x4Xor, 2, 1) \
+ F(Int32x4Not, 1, 1) \
+ F(Int32x4ShiftLeftByScalar, 2, 1) \
+ F(Int32x4ShiftRightByScalar, 2, 1) \
+ F(Int32x4Equal, 2, 1) \
+ F(Int32x4NotEqual, 2, 1) \
+ F(Int32x4LessThan, 2, 1) \
+ F(Int32x4LessThanOrEqual, 2, 1) \
+ F(Int32x4GreaterThan, 2, 1) \
+ F(Int32x4GreaterThanOrEqual, 2, 1) \
+ F(Int32x4Select, 3, 1) \
+ F(Int32x4Swizzle, 5, 1) \
+ F(Int32x4Shuffle, 6, 1) \
+ F(Int32x4FromFloat32x4, 1, 1) \
+ F(Int32x4FromUint32x4, 1, 1) \
+ F(Int32x4FromFloat32x4Bits, 1, 1) \
+ F(Int32x4FromUint32x4Bits, 1, 1) \
+ F(Int32x4FromInt16x8Bits, 1, 1) \
+ F(Int32x4FromUint16x8Bits, 1, 1) \
+ F(Int32x4FromInt8x16Bits, 1, 1) \
+ F(Int32x4FromUint8x16Bits, 1, 1) \
+ F(Int32x4Load, 2, 1) \
+ F(Int32x4Load1, 2, 1) \
+ F(Int32x4Load2, 2, 1) \
+ F(Int32x4Load3, 2, 1) \
+ F(Int32x4Store, 3, 1) \
+ F(Int32x4Store1, 3, 1) \
+ F(Int32x4Store2, 3, 1) \
+ F(Int32x4Store3, 3, 1) \
+ F(Uint32x4Check, 1, 1) \
+ F(Uint32x4ExtractLane, 2, 1) \
+ F(Uint32x4ReplaceLane, 3, 1) \
+ F(Uint32x4Add, 2, 1) \
+ F(Uint32x4Sub, 2, 1) \
+ F(Uint32x4Mul, 2, 1) \
+ F(Uint32x4Min, 2, 1) \
+ F(Uint32x4Max, 2, 1) \
+ F(Uint32x4And, 2, 1) \
+ F(Uint32x4Or, 2, 1) \
+ F(Uint32x4Xor, 2, 1) \
+ F(Uint32x4Not, 1, 1) \
+ F(Uint32x4ShiftLeftByScalar, 2, 1) \
+ F(Uint32x4ShiftRightByScalar, 2, 1) \
+ F(Uint32x4Equal, 2, 1) \
+ F(Uint32x4NotEqual, 2, 1) \
+ F(Uint32x4LessThan, 2, 1) \
+ F(Uint32x4LessThanOrEqual, 2, 1) \
+ F(Uint32x4GreaterThan, 2, 1) \
+ F(Uint32x4GreaterThanOrEqual, 2, 1) \
+ F(Uint32x4Select, 3, 1) \
+ F(Uint32x4Swizzle, 5, 1) \
+ F(Uint32x4Shuffle, 6, 1) \
+ F(Uint32x4FromFloat32x4, 1, 1) \
+ F(Uint32x4FromInt32x4, 1, 1) \
+ F(Uint32x4FromFloat32x4Bits, 1, 1) \
+ F(Uint32x4FromInt32x4Bits, 1, 1) \
+ F(Uint32x4FromInt16x8Bits, 1, 1) \
+ F(Uint32x4FromUint16x8Bits, 1, 1) \
+ F(Uint32x4FromInt8x16Bits, 1, 1) \
+ F(Uint32x4FromUint8x16Bits, 1, 1) \
+ F(Uint32x4Load, 2, 1) \
+ F(Uint32x4Load1, 2, 1) \
+ F(Uint32x4Load2, 2, 1) \
+ F(Uint32x4Load3, 2, 1) \
+ F(Uint32x4Store, 3, 1) \
+ F(Uint32x4Store1, 3, 1) \
+ F(Uint32x4Store2, 3, 1) \
+ F(Uint32x4Store3, 3, 1) \
+ F(Bool32x4Check, 1, 1) \
+ F(Bool32x4ExtractLane, 2, 1) \
+ F(Bool32x4ReplaceLane, 3, 1) \
+ F(Bool32x4And, 2, 1) \
+ F(Bool32x4Or, 2, 1) \
+ F(Bool32x4Xor, 2, 1) \
+ F(Bool32x4Not, 1, 1) \
+ F(Bool32x4AnyTrue, 1, 1) \
+ F(Bool32x4AllTrue, 1, 1) \
+ F(Bool32x4Swizzle, 5, 1) \
+ F(Bool32x4Shuffle, 6, 1) \
+ F(Bool32x4Equal, 2, 1) \
+ F(Bool32x4NotEqual, 2, 1) \
+ F(Int16x8Check, 1, 1) \
+ F(Int16x8ExtractLane, 2, 1) \
+ F(Int16x8ReplaceLane, 3, 1) \
+ F(Int16x8Neg, 1, 1) \
+ F(Int16x8Add, 2, 1) \
+ F(Int16x8AddSaturate, 2, 1) \
+ F(Int16x8Sub, 2, 1) \
+ F(Int16x8SubSaturate, 2, 1) \
+ F(Int16x8Mul, 2, 1) \
+ F(Int16x8Min, 2, 1) \
+ F(Int16x8Max, 2, 1) \
+ F(Int16x8And, 2, 1) \
+ F(Int16x8Or, 2, 1) \
+ F(Int16x8Xor, 2, 1) \
+ F(Int16x8Not, 1, 1) \
+ F(Int16x8ShiftLeftByScalar, 2, 1) \
+ F(Int16x8ShiftRightByScalar, 2, 1) \
+ F(Int16x8Equal, 2, 1) \
+ F(Int16x8NotEqual, 2, 1) \
+ F(Int16x8LessThan, 2, 1) \
+ F(Int16x8LessThanOrEqual, 2, 1) \
+ F(Int16x8GreaterThan, 2, 1) \
+ F(Int16x8GreaterThanOrEqual, 2, 1) \
+ F(Int16x8Select, 3, 1) \
+ F(Int16x8Swizzle, 9, 1) \
+ F(Int16x8Shuffle, 10, 1) \
+ F(Int16x8FromUint16x8, 1, 1) \
+ F(Int16x8FromFloat32x4Bits, 1, 1) \
+ F(Int16x8FromInt32x4Bits, 1, 1) \
+ F(Int16x8FromUint32x4Bits, 1, 1) \
+ F(Int16x8FromUint16x8Bits, 1, 1) \
+ F(Int16x8FromInt8x16Bits, 1, 1) \
+ F(Int16x8FromUint8x16Bits, 1, 1) \
+ F(Int16x8Load, 2, 1) \
+ F(Int16x8Store, 3, 1) \
+ F(Uint16x8Check, 1, 1) \
+ F(Uint16x8ExtractLane, 2, 1) \
+ F(Uint16x8ReplaceLane, 3, 1) \
+ F(Uint16x8Add, 2, 1) \
+ F(Uint16x8AddSaturate, 2, 1) \
+ F(Uint16x8Sub, 2, 1) \
+ F(Uint16x8SubSaturate, 2, 1) \
+ F(Uint16x8Mul, 2, 1) \
+ F(Uint16x8Min, 2, 1) \
+ F(Uint16x8Max, 2, 1) \
+ F(Uint16x8And, 2, 1) \
+ F(Uint16x8Or, 2, 1) \
+ F(Uint16x8Xor, 2, 1) \
+ F(Uint16x8Not, 1, 1) \
+ F(Uint16x8ShiftLeftByScalar, 2, 1) \
+ F(Uint16x8ShiftRightByScalar, 2, 1) \
+ F(Uint16x8Equal, 2, 1) \
+ F(Uint16x8NotEqual, 2, 1) \
+ F(Uint16x8LessThan, 2, 1) \
+ F(Uint16x8LessThanOrEqual, 2, 1) \
+ F(Uint16x8GreaterThan, 2, 1) \
+ F(Uint16x8GreaterThanOrEqual, 2, 1) \
+ F(Uint16x8Select, 3, 1) \
+ F(Uint16x8Swizzle, 9, 1) \
+ F(Uint16x8Shuffle, 10, 1) \
+ F(Uint16x8FromInt16x8, 1, 1) \
+ F(Uint16x8FromFloat32x4Bits, 1, 1) \
+ F(Uint16x8FromInt32x4Bits, 1, 1) \
+ F(Uint16x8FromUint32x4Bits, 1, 1) \
+ F(Uint16x8FromInt16x8Bits, 1, 1) \
+ F(Uint16x8FromInt8x16Bits, 1, 1) \
+ F(Uint16x8FromUint8x16Bits, 1, 1) \
+ F(Uint16x8Load, 2, 1) \
+ F(Uint16x8Store, 3, 1) \
+ F(Bool16x8Check, 1, 1) \
+ F(Bool16x8ExtractLane, 2, 1) \
+ F(Bool16x8ReplaceLane, 3, 1) \
+ F(Bool16x8And, 2, 1) \
+ F(Bool16x8Or, 2, 1) \
+ F(Bool16x8Xor, 2, 1) \
+ F(Bool16x8Not, 1, 1) \
+ F(Bool16x8AnyTrue, 1, 1) \
+ F(Bool16x8AllTrue, 1, 1) \
+ F(Bool16x8Swizzle, 9, 1) \
+ F(Bool16x8Shuffle, 10, 1) \
+ F(Bool16x8Equal, 2, 1) \
+ F(Bool16x8NotEqual, 2, 1) \
+ F(Int8x16Check, 1, 1) \
+ F(Int8x16ExtractLane, 2, 1) \
+ F(Int8x16ReplaceLane, 3, 1) \
+ F(Int8x16Neg, 1, 1) \
+ F(Int8x16Add, 2, 1) \
+ F(Int8x16AddSaturate, 2, 1) \
+ F(Int8x16Sub, 2, 1) \
+ F(Int8x16SubSaturate, 2, 1) \
+ F(Int8x16Mul, 2, 1) \
+ F(Int8x16Min, 2, 1) \
+ F(Int8x16Max, 2, 1) \
+ F(Int8x16And, 2, 1) \
+ F(Int8x16Or, 2, 1) \
+ F(Int8x16Xor, 2, 1) \
+ F(Int8x16Not, 1, 1) \
+ F(Int8x16ShiftLeftByScalar, 2, 1) \
+ F(Int8x16ShiftRightByScalar, 2, 1) \
+ F(Int8x16Equal, 2, 1) \
+ F(Int8x16NotEqual, 2, 1) \
+ F(Int8x16LessThan, 2, 1) \
+ F(Int8x16LessThanOrEqual, 2, 1) \
+ F(Int8x16GreaterThan, 2, 1) \
+ F(Int8x16GreaterThanOrEqual, 2, 1) \
+ F(Int8x16Select, 3, 1) \
+ F(Int8x16Swizzle, 17, 1) \
+ F(Int8x16Shuffle, 18, 1) \
+ F(Int8x16FromUint8x16, 1, 1) \
+ F(Int8x16FromFloat32x4Bits, 1, 1) \
+ F(Int8x16FromInt32x4Bits, 1, 1) \
+ F(Int8x16FromUint32x4Bits, 1, 1) \
+ F(Int8x16FromInt16x8Bits, 1, 1) \
+ F(Int8x16FromUint16x8Bits, 1, 1) \
+ F(Int8x16FromUint8x16Bits, 1, 1) \
+ F(Int8x16Load, 2, 1) \
+ F(Int8x16Store, 3, 1) \
+ F(Uint8x16Check, 1, 1) \
+ F(Uint8x16ExtractLane, 2, 1) \
+ F(Uint8x16ReplaceLane, 3, 1) \
+ F(Uint8x16Add, 2, 1) \
+ F(Uint8x16AddSaturate, 2, 1) \
+ F(Uint8x16Sub, 2, 1) \
+ F(Uint8x16SubSaturate, 2, 1) \
+ F(Uint8x16Mul, 2, 1) \
+ F(Uint8x16Min, 2, 1) \
+ F(Uint8x16Max, 2, 1) \
+ F(Uint8x16And, 2, 1) \
+ F(Uint8x16Or, 2, 1) \
+ F(Uint8x16Xor, 2, 1) \
+ F(Uint8x16Not, 1, 1) \
+ F(Uint8x16ShiftLeftByScalar, 2, 1) \
+ F(Uint8x16ShiftRightByScalar, 2, 1) \
+ F(Uint8x16Equal, 2, 1) \
+ F(Uint8x16NotEqual, 2, 1) \
+ F(Uint8x16LessThan, 2, 1) \
+ F(Uint8x16LessThanOrEqual, 2, 1) \
+ F(Uint8x16GreaterThan, 2, 1) \
+ F(Uint8x16GreaterThanOrEqual, 2, 1) \
+ F(Uint8x16Select, 3, 1) \
+ F(Uint8x16Swizzle, 17, 1) \
+ F(Uint8x16Shuffle, 18, 1) \
+ F(Uint8x16FromInt8x16, 1, 1) \
+ F(Uint8x16FromFloat32x4Bits, 1, 1) \
+ F(Uint8x16FromInt32x4Bits, 1, 1) \
+ F(Uint8x16FromUint32x4Bits, 1, 1) \
+ F(Uint8x16FromInt16x8Bits, 1, 1) \
+ F(Uint8x16FromUint16x8Bits, 1, 1) \
+ F(Uint8x16FromInt8x16Bits, 1, 1) \
+ F(Uint8x16Load, 2, 1) \
+ F(Uint8x16Store, 3, 1) \
+ F(Bool8x16Check, 1, 1) \
+ F(Bool8x16ExtractLane, 2, 1) \
+ F(Bool8x16ReplaceLane, 3, 1) \
+ F(Bool8x16And, 2, 1) \
+ F(Bool8x16Or, 2, 1) \
+ F(Bool8x16Xor, 2, 1) \
+ F(Bool8x16Not, 1, 1) \
+ F(Bool8x16AnyTrue, 1, 1) \
+ F(Bool8x16AllTrue, 1, 1) \
+ F(Bool8x16Swizzle, 17, 1) \
+ F(Bool8x16Shuffle, 18, 1) \
+ F(Bool8x16Equal, 2, 1) \
+ F(Bool8x16NotEqual, 2, 1)
#define FOR_EACH_INTRINSIC_STRINGS(F) \
@@ -914,7 +896,6 @@ namespace internal {
F(SymbolRegistry, 0, 1) \
F(SymbolIsPrivate, 1, 1)
-
#define FOR_EACH_INTRINSIC_TEST(F) \
F(DeoptimizeFunction, 1, 1) \
F(DeoptimizeNow, 0, 1) \
@@ -942,6 +923,7 @@ namespace internal {
F(DisassembleFunction, 1, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
+ F(TraceTailCall, 0, 1) \
F(HaveSameMap, 2, 1) \
F(InNewSpace, 1, 1) \
F(HasFastSmiElements, 1, 1) \
@@ -963,7 +945,6 @@ namespace internal {
F(HasFixedFloat64Elements, 1, 1) \
F(HasFixedUint8ClampedElements, 1, 1)
-
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
F(ArrayBufferGetByteLength, 1, 1) \
F(ArrayBufferSliceImpl, 4, 1) \
@@ -981,7 +962,6 @@ namespace internal {
F(IsSharedTypedArray, 1, 1) \
F(IsSharedIntegerTypedArray, 1, 1) \
F(IsSharedInteger32TypedArray, 1, 1) \
- F(DataViewInitialize, 4, 1) \
F(DataViewGetUint8, 3, 1) \
F(DataViewGetInt8, 3, 1) \
F(DataViewGetUint16, 3, 1) \
@@ -1004,11 +984,11 @@ namespace internal {
F(URIEscape, 1, 1) \
F(URIUnescape, 1, 1)
-
#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
- F(LoadLookupSlot, 2, 2) \
- F(LoadLookupSlotNoReferenceError, 2, 2)
+ F(LoadLookupSlotForCall, 1, 2)
+#define FOR_EACH_INTRINSIC_RETURN_TRIPLE(F) \
+ F(ForInPrepare, 1, 3)
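
These F(name, nargs, result_size) tables are X-macros: each list expands several times with a different definition of F to generate declarations, enum values, and lookup tables from one source of truth (runtime.cc above expands the new triple list with T for forward declarations). A self-contained model with hypothetical names:

    #include <cstdio>

    // One list, reused with different expansions of F(name, nargs, results).
    #define FOR_EACH_DEMO_INTRINSIC(F) \
      F(Add, 2, 1)                     \
      F(ForInPrepare, 1, 3)

    // Expansion 1: an enum of intrinsic IDs.
    #define AS_ENUM(name, nargs, results) k##name,
    enum DemoId { FOR_EACH_DEMO_INTRINSIC(AS_ENUM) kNumIntrinsics };
    #undef AS_ENUM

    // Expansion 2: a descriptor table.
    struct Row { const char* name; int nargs; int result_size; };
    #define AS_ROW(name, nargs, results) {#name, nargs, results},
    const Row kTable[] = { FOR_EACH_DEMO_INTRINSIC(AS_ROW) };
    #undef AS_ROW

    int main() {
      for (const Row& r : kTable) {
        std::printf("%s: %d arg(s), %d result word(s)\n", r.name, r.nargs,
                    r.result_size);
      }
      return 0;
    }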
// Most intrinsics are implemented in the runtime/ directory, but ICs are
// implemented in ic.cc for now.
@@ -1029,7 +1009,7 @@ namespace internal {
F(LoadIC_MissFromStubFailure, 4, 1) \
F(LoadPropertyWithInterceptor, 3, 1) \
F(LoadPropertyWithInterceptorOnly, 3, 1) \
- F(StoreCallbackProperty, 5, 1) \
+ F(StoreCallbackProperty, 6, 1) \
F(StoreIC_Miss, 5, 1) \
F(StoreIC_MissFromStubFailure, 5, 1) \
F(StoreIC_Slow, 5, 1) \
@@ -1074,8 +1054,9 @@ namespace internal {
// FOR_EACH_INTRINSIC defines the list of all intrinsics, coming in 3 flavors,
// either returning an object, a pair, or a triple.
-#define FOR_EACH_INTRINSIC(F) \
- FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
+#define FOR_EACH_INTRINSIC(F) \
+ FOR_EACH_INTRINSIC_RETURN_TRIPLE(F) \
+ FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
@@ -1148,8 +1129,7 @@ class Runtime : public AllStatic {
Handle<Object> value, LanguageMode language_mode);
MUST_USE_RESULT static MaybeHandle<Object> GetObjectProperty(
- Isolate* isolate, Handle<Object> object, Handle<Object> key,
- LanguageMode language_mode = SLOPPY);
+ Isolate* isolate, Handle<Object> object, Handle<Object> key);
enum TypedArrayId {
// arrayIds below should be synchronized with typedarray.js natives.
@@ -1203,6 +1183,7 @@ class RuntimeState {
unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
+
base::SmartArrayPointer<Runtime::Function> redirected_intrinsic_functions_;
friend class Isolate;
diff --git a/deps/v8/src/snapshot/serialize.cc b/deps/v8/src/snapshot/serialize.cc
index 421cf0721c..4868abd520 100644
--- a/deps/v8/src/snapshot/serialize.cc
+++ b/deps/v8/src/snapshot/serialize.cc
@@ -54,8 +54,6 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
"StackGuard::address_of_real_jslimit()");
Add(ExternalReference::new_space_start(isolate).address(),
"Heap::NewSpaceStart()");
- Add(ExternalReference::new_space_mask(isolate).address(),
- "Heap::NewSpaceMask()");
Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
"Heap::NewSpaceAllocationLimitAddress()");
Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
@@ -82,6 +80,8 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
Add(ExternalReference::address_of_one_half().address(),
"LDoubleConstant::one_half");
Add(ExternalReference::isolate_address(isolate).address(), "isolate");
+ Add(ExternalReference::interpreter_dispatch_table_address(isolate).address(),
+ "Interpreter::dispatch_table_address");
Add(ExternalReference::address_of_negative_infinity().address(),
"LDoubleConstant::negative_infinity");
Add(ExternalReference::power_double_double_function(isolate).address(),
@@ -119,6 +119,22 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
"InvokeFunctionCallback");
Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
"InvokeAccessorGetterCallback");
+ Add(ExternalReference::f32_trunc_wrapper_function(isolate).address(),
+ "f32_trunc_wrapper");
+ Add(ExternalReference::f32_floor_wrapper_function(isolate).address(),
+ "f32_floor_wrapper");
+ Add(ExternalReference::f32_ceil_wrapper_function(isolate).address(),
+ "f32_ceil_wrapper");
+ Add(ExternalReference::f32_nearest_int_wrapper_function(isolate).address(),
+ "f32_nearest_int_wrapper");
+ Add(ExternalReference::f64_trunc_wrapper_function(isolate).address(),
+ "f64_trunc_wrapper");
+ Add(ExternalReference::f64_floor_wrapper_function(isolate).address(),
+ "f64_floor_wrapper");
+ Add(ExternalReference::f64_ceil_wrapper_function(isolate).address(),
+ "f64_ceil_wrapper");
+ Add(ExternalReference::f64_nearest_int_wrapper_function(isolate).address(),
+ "f64_nearest_int_wrapper");
Add(ExternalReference::log_enter_external_function(isolate).address(),
"Logger::EnterExternal");
Add(ExternalReference::log_leave_external_function(isolate).address(),
@@ -268,9 +284,14 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
static const AccessorRefTable accessors[] = {
#define ACCESSOR_INFO_DECLARATION(name) \
{ FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter" } \
- , {FUNCTION_ADDR(&Accessors::name##Setter), "Accessors::" #name "Setter"},
+ ,
ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
#undef ACCESSOR_INFO_DECLARATION
+#define ACCESSOR_SETTER_DECLARATION(name) \
+ { FUNCTION_ADDR(&Accessors::name), "Accessors::" #name } \
+ ,
+ ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
+#undef ACCESSOR_SETTER_DECLARATION
};
for (unsigned i = 0; i < arraysize(accessors); ++i) {
@@ -299,6 +320,10 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
Add(ExternalReference::incremental_marking_record_write_function(isolate)
.address(),
"IncrementalMarking::RecordWrite");
+ Add(ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate)
+ .address(),
+ "IncrementalMarking::RecordWriteOfCodeEntryFromCode");
Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
"StoreBuffer::StoreBufferOverflow");
@@ -622,6 +647,10 @@ void Deserializer::VisitPointers(Object** start, Object** end) {
ReadData(start, end, NEW_SPACE, NULL);
}
+void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
+ static const byte expected = kSynchronize;
+ CHECK_EQ(expected, source_.Get());
+}
void Deserializer::DeserializeDeferredObjects() {
for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
@@ -985,9 +1014,11 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
} \
if (emit_write_barrier && write_barrier_needed) { \
Address current_address = reinterpret_cast<Address>(current); \
+ SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address)); \
isolate->heap()->RecordWrite( \
- current_object_address, \
- static_cast<int>(current_address - current_object_address)); \
+ HeapObject::FromAddress(current_object_address), \
+ static_cast<int>(current_address - current_object_address), \
+ *reinterpret_cast<Object**>(current_address)); \
} \
if (!current_was_incremented) { \
current++; \
@@ -1219,11 +1250,13 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
int index = data & kHotObjectMask;
Object* hot_object = hot_objects_.Get(index);
UnalignedCopy(current, &hot_object);
- if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
+ if (write_barrier_needed) {
Address current_address = reinterpret_cast<Address>(current);
+ SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));
isolate->heap()->RecordWrite(
- current_object_address,
- static_cast<int>(current_address - current_object_address));
+ HeapObject::FromAddress(current_object_address),
+ static_cast<int>(current_address - current_object_address),
+ hot_object);
}
current++;
break;
@@ -1567,7 +1600,7 @@ bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject* o) {
// would cause dupes.
DCHECK(!o->IsScript());
return o->IsName() || o->IsSharedFunctionInfo() || o->IsHeapNumber() ||
- o->IsCode() || o->IsScopeInfo() || o->IsExecutableAccessorInfo() ||
+ o->IsCode() || o->IsScopeInfo() || o->IsAccessorInfo() ||
o->map() ==
startup_serializer_->isolate()->heap()->fixed_cow_array_map();
}
@@ -1655,9 +1688,10 @@ bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
return false;
}
-
StartupSerializer::StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
- : Serializer(isolate, sink), root_index_wave_front_(0) {
+ : Serializer(isolate, sink),
+ root_index_wave_front_(0),
+ serializing_builtins_(false) {
// Clear the cache of objects used by the partial snapshot. After the
// strong roots have been serialized we can create a partial snapshot
// which will repopulate the cache with objects needed by that partial
@@ -1671,17 +1705,30 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
DCHECK(!obj->IsJSFunction());
+ if (obj->IsCode()) {
+ Code* code = Code::cast(obj);
+ // If the function code is compiled (either as native code or bytecode),
+ // replace it with the lazy-compile builtin. The only exception is when we
+ // are serializing the canonical interpreter-entry-trampoline builtin.
+ if (code->kind() == Code::FUNCTION ||
+ (!serializing_builtins_ && code->is_interpreter_entry_trampoline())) {
+ obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
+ }
+ } else if (obj->IsBytecodeArray()) {
+ obj = isolate()->heap()->undefined_value();
+ }
+
int root_index = root_index_map_.Lookup(obj);
+ bool is_immortal_immovable_root = false;
// We can only encode roots as such if they have already been serialized.
// That applies to root indices below the wave front.
- if (root_index != RootIndexMap::kInvalidRootIndex &&
- root_index < root_index_wave_front_) {
- PutRoot(root_index, obj, how_to_code, where_to_point, skip);
- return;
- }
-
- if (obj->IsCode() && Code::cast(obj)->kind() == Code::FUNCTION) {
- obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
+ if (root_index != RootIndexMap::kInvalidRootIndex) {
+ if (root_index < root_index_wave_front_) {
+ PutRoot(root_index, obj, how_to_code, where_to_point, skip);
+ return;
+ } else {
+ is_immortal_immovable_root = Heap::RootIsImmortalImmovable(root_index);
+ }
}
if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
@@ -1692,6 +1739,14 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
ObjectSerializer object_serializer(this, obj, sink_, how_to_code,
where_to_point);
object_serializer.Serialize();
+
+ if (is_immortal_immovable_root) {
+ // Make sure that the immortal immovable root has been included in the first
+ // chunk of its reserved space, so that it is deserialized onto the first
+ // page of its space and stays immortal immovable.
+ BackReference ref = back_reference_map_.Lookup(obj);
+ CHECK(ref.is_valid() && ref.chunk_index() == 0);
+ }
}
@@ -1708,6 +1763,12 @@ void StartupSerializer::SerializeWeakReferencesAndDeferred() {
Pad();
}
+void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
+ // We expect the builtins tag after builtins have been serialized.
+ DCHECK(!serializing_builtins_ || tag == VisitorSynchronization::kBuiltins);
+ serializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
+ sink_->Put(kSynchronize, "Synchronize");
+}
void Serializer::PutRoot(int root_index,
HeapObject* object,
@@ -1911,24 +1972,36 @@ void Serializer::ObjectSerializer::SerializeExternalString() {
sink_->PutInt(bytes_to_output, "SkipDistance");
}
-
-// Clear and later restore the next link in the weak cell, if the object is one.
-class UnlinkWeakCellScope {
+// Clear and later restore the next link in the weak cell or allocation site.
+// TODO(all): replace this with proper iteration of weak slots in serializer.
+class UnlinkWeakNextScope {
public:
- explicit UnlinkWeakCellScope(HeapObject* object) : weak_cell_(NULL) {
+ explicit UnlinkWeakNextScope(HeapObject* object) : object_(nullptr) {
if (object->IsWeakCell()) {
- weak_cell_ = WeakCell::cast(object);
- next_ = weak_cell_->next();
- weak_cell_->clear_next(object->GetHeap()->the_hole_value());
+ object_ = object;
+ next_ = WeakCell::cast(object)->next();
+ WeakCell::cast(object)->clear_next(object->GetHeap()->the_hole_value());
+ } else if (object->IsAllocationSite()) {
+ object_ = object;
+ next_ = AllocationSite::cast(object)->weak_next();
+ AllocationSite::cast(object)
+ ->set_weak_next(object->GetHeap()->undefined_value());
}
}
- ~UnlinkWeakCellScope() {
- if (weak_cell_) weak_cell_->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
+ ~UnlinkWeakNextScope() {
+ if (object_ != nullptr) {
+ if (object_->IsWeakCell()) {
+ WeakCell::cast(object_)->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
+ } else {
+ AllocationSite::cast(object_)
+ ->set_weak_next(next_, UPDATE_WEAK_WRITE_BARRIER);
+ }
+ }
}
private:
- WeakCell* weak_cell_;
+ HeapObject* object_;
Object* next_;
DisallowHeapAllocation no_gc_;
};
@@ -1986,7 +2059,7 @@ void Serializer::ObjectSerializer::Serialize() {
return;
}
- UnlinkWeakCellScope unlink_weak_cell(object_);
+ UnlinkWeakNextScope unlink_weak_next(object_);
object_->IterateBody(map->instance_type(), size, this);
OutputRawData(object_->address() + size);
@@ -2013,7 +2086,7 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
serializer_->PutBackReference(object_, reference);
sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
- UnlinkWeakCellScope unlink_weak_cell(object_);
+ UnlinkWeakNextScope unlink_weak_next(object_);
object_->IterateBody(map->instance_type(), size, this);
OutputRawData(object_->address() + size);
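// A minimal sketch, with made-up types, of the RAII idiom behind
// UnlinkWeakNextScope above: the constructor detaches the weak link so the
// body visitor does not follow it, and the destructor restores it on every
// exit path, including early returns.
struct DemoNode {
  DemoNode* weak_next = nullptr;
};

class DemoUnlinkWeakNextScope {
 public:
  explicit DemoUnlinkWeakNextScope(DemoNode* node)
      : node_(node), saved_next_(node->weak_next) {
    node_->weak_next = nullptr;  // Hide the link while the body is visited.
  }
  ~DemoUnlinkWeakNextScope() {
    node_->weak_next = saved_next_;  // Restore unconditionally.
  }

 private:
  DemoNode* node_;
  DemoNode* saved_next_;
};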
diff --git a/deps/v8/src/snapshot/serialize.h b/deps/v8/src/snapshot/serialize.h
index 7f4676eafa..f7420efea9 100644
--- a/deps/v8/src/snapshot/serialize.h
+++ b/deps/v8/src/snapshot/serialize.h
@@ -383,6 +383,8 @@ class Deserializer: public SerializerDeserializer {
private:
void VisitPointers(Object** start, Object** end) override;
+ void Synchronize(VisitorSynchronization::SyncTag tag) override;
+
void VisitRuntimeEntry(RelocInfo* rinfo) override { UNREACHABLE(); }
void Initialize(Isolate* isolate);
@@ -471,7 +473,6 @@ class Serializer : public SerializerDeserializer {
public:
Serializer(Isolate* isolate, SnapshotByteSink* sink);
~Serializer() override;
- void VisitPointers(Object** start, Object** end) override;
void EncodeReservations(List<SerializedData::Reservation>* out) const;
@@ -578,6 +579,8 @@ class Serializer : public SerializerDeserializer {
friend class SnapshotData;
private:
+ void VisitPointers(Object** start, Object** end) override;
+
CodeAddressMap* code_address_map_;
// Objects from the same space are put into chunks for bulk-allocation
// when deserializing. We have to make sure that each chunk fits into a
@@ -620,10 +623,11 @@ class PartialSerializer : public Serializer {
// Serialize the objects reachable from a single object pointer.
void Serialize(Object** o);
+
+ private:
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
- private:
int PartialSnapshotCacheIndex(HeapObject* o);
bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
@@ -639,21 +643,23 @@ class StartupSerializer : public Serializer {
StartupSerializer(Isolate* isolate, SnapshotByteSink* sink);
~StartupSerializer() override { OutputStatistics("StartupSerializer"); }
- // The StartupSerializer has to serialize the root array, which is slightly
- // different.
- void VisitPointers(Object** start, Object** end) override;
-
// Serialize the current state of the heap. The order is:
// 1) Strong references.
// 2) Partial snapshot cache.
// 3) Weak references (e.g. the string table).
- virtual void SerializeStrongReferences();
- void SerializeObject(HeapObject* o, HowToCode how_to_code,
- WhereToPoint where_to_point, int skip) override;
+ void SerializeStrongReferences();
void SerializeWeakReferencesAndDeferred();
private:
+ // The StartupSerializer has to serialize the root array, which is slightly
+ // different.
+ void VisitPointers(Object** start, Object** end) override;
+ void SerializeObject(HeapObject* o, HowToCode how_to_code,
+ WhereToPoint where_to_point, int skip) override;
+ void Synchronize(VisitorSynchronization::SyncTag tag) override;
+
intptr_t root_index_wave_front_;
+ bool serializing_builtins_;
DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
};
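// A standalone sketch of the Synchronize handshake declared above, with
// illustrative stream types: the serializer emits a marker byte at section
// boundaries and the deserializer checks that it reads the same marker at
// the same point, so any drift between the two passes fails fast.
#include <cassert>
#include <cstdint>
#include <vector>

constexpr uint8_t kDemoSynchronize = 0xAB;

void EmitDemoSection(std::vector<uint8_t>* sink,
                     const std::vector<uint8_t>& payload) {
  sink->insert(sink->end(), payload.begin(), payload.end());
  sink->push_back(kDemoSynchronize);  // Serializer: sink_->Put(kSynchronize).
}

size_t ReadDemoSection(const std::vector<uint8_t>& source, size_t pos,
                       size_t payload_size) {
  pos += payload_size;                         // Consume the payload.
  assert(source.at(pos) == kDemoSynchronize);  // Deserializer: CHECK_EQ.
  return pos + 1;
}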
diff --git a/deps/v8/src/source-position.h b/deps/v8/src/source-position.h
new file mode 100644
index 0000000000..46ee9820b2
--- /dev/null
+++ b/deps/v8/src/source-position.h
@@ -0,0 +1,87 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SOURCE_POSITION_H_
+#define V8_SOURCE_POSITION_H_
+
+#include <ostream>
+
+#include "src/assembler.h"
+#include "src/flags.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// This class encapsulates the encoding and decoding of source positions from
+// which hydrogen values originated.
+// When FLAG_hydrogen_track_positions is set, this object encodes the
+// identifier of the inlining and the absolute offset from the start of the
+// inlined function.
+// When the flag is not set, we simply track the absolute offset from the
+// script start.
+class SourcePosition {
+ public:
+ static SourcePosition Unknown() {
+ return SourcePosition::FromRaw(kNoPosition);
+ }
+
+ bool IsUnknown() const { return value_ == kNoPosition; }
+
+ uint32_t position() const { return PositionField::decode(value_); }
+ void set_position(uint32_t position) {
+ if (FLAG_hydrogen_track_positions) {
+ value_ = static_cast<uint32_t>(PositionField::update(value_, position));
+ } else {
+ value_ = position;
+ }
+ }
+
+ uint32_t inlining_id() const { return InliningIdField::decode(value_); }
+ void set_inlining_id(uint32_t inlining_id) {
+ if (FLAG_hydrogen_track_positions) {
+ value_ =
+ static_cast<uint32_t>(InliningIdField::update(value_, inlining_id));
+ }
+ }
+
+ uint32_t raw() const { return value_; }
+
+ private:
+ static const uint32_t kNoPosition =
+ static_cast<uint32_t>(RelocInfo::kNoPosition);
+ typedef BitField<uint32_t, 0, 9> InliningIdField;
+
+ // Offset from the start of the inlined function.
+ typedef BitField<uint32_t, 9, 23> PositionField;
+
+ friend class HPositionInfo;
+ friend class Deoptimizer;
+
+ static SourcePosition FromRaw(uint32_t raw_position) {
+ SourcePosition position;
+ position.value_ = raw_position;
+ return position;
+ }
+
+ // If FLAG_hydrogen_track_positions is set, this contains the bitfields
+ // InliningIdField and PositionField.
+ // Otherwise it contains the absolute offset from the script start.
+ uint32_t value_;
+};
+
+inline std::ostream& operator<<(std::ostream& os, const SourcePosition& p) {
+ if (p.IsUnknown()) {
+ return os << "<?>";
+ } else if (FLAG_hydrogen_track_positions) {
+ return os << "<" << p.inlining_id() << ":" << p.position() << ">";
+ } else {
+ return os << "<0:" << p.raw() << ">";
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SOURCE_POSITION_H_
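// A standalone sketch of the 9/23 bitfield split SourcePosition uses when
// FLAG_hydrogen_track_positions is set: the low 9 bits hold the inlining id
// and the upper 23 bits hold the offset into the inlined function. The
// helper names are illustrative.
#include <cstdint>

constexpr uint32_t kDemoInliningIdBits = 9;
constexpr uint32_t kDemoInliningIdMask = (1u << kDemoInliningIdBits) - 1;

uint32_t EncodeDemoPosition(uint32_t inlining_id, uint32_t position) {
  // Assumes position < 2^23 and inlining_id < 2^9.
  return (position << kDemoInliningIdBits) |
         (inlining_id & kDemoInliningIdMask);
}

uint32_t DecodeDemoInliningId(uint32_t value) {
  return value & kDemoInliningIdMask;
}

uint32_t DecodeDemoPosition(uint32_t value) {
  return value >> kDemoInliningIdBits;
}

// EncodeDemoPosition(3, 17) would print as <3:17> via the operator<< above.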
diff --git a/deps/v8/src/startup-data-util.cc b/deps/v8/src/startup-data-util.cc
index 4e0ad97a0c..e20ec218d5 100644
--- a/deps/v8/src/startup-data-util.cc
+++ b/deps/v8/src/startup-data-util.cc
@@ -9,6 +9,7 @@
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
+#include "src/flags.h"
#include "src/utils.h"
@@ -107,7 +108,9 @@ void InitializeExternalStartupData(const char* directory_path) {
char* natives;
char* snapshot;
LoadFromFiles(RelativePath(&natives, directory_path, "natives_blob.bin"),
- RelativePath(&snapshot, directory_path, "snapshot_blob.bin"));
+ RelativePath(&snapshot, directory_path,
+ FLAG_ignition ? "snapshot_blob_ignition.bin"
+ : "snapshot_blob.bin"));
free(natives);
free(snapshot);
#endif // V8_USE_EXTERNAL_STARTUP_DATA
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 2801d23cda..43be8f1f18 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -527,12 +527,18 @@ void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
Object* name = fun->shared()->name();
bool print_name = false;
Isolate* isolate = fun->GetIsolate();
- for (PrototypeIterator iter(isolate, receiver,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(); iter.Advance()) {
- if (iter.GetCurrent()->IsJSObject()) {
+ if (receiver->IsNull() || receiver->IsUndefined() || receiver->IsJSProxy()) {
+ print_name = true;
+ } else {
+ if (!receiver->IsJSObject()) {
+ receiver = receiver->GetRootMap(isolate)->prototype();
+ }
+
+ for (PrototypeIterator iter(isolate, JSObject::cast(receiver),
+ PrototypeIterator::START_AT_RECEIVER);
+ !iter.IsAtEnd(); iter.Advance()) {
Object* key = iter.GetCurrent<JSObject>()->SlowReverseLookup(fun);
- if (key != isolate->heap()->undefined_value()) {
+ if (!key->IsUndefined()) {
if (!name->IsString() ||
!key->IsString() ||
!String::cast(name)->Equals(String::cast(key))) {
@@ -542,9 +548,8 @@ void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
print_name = false;
}
name = key;
+ break;
}
- } else {
- print_name = true;
}
}
PrintName(name);
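// A minimal sketch of the lookup strategy above, with stand-in types for
// JSObject/JSFunction: walk the receiver's prototype chain and reverse-map
// the function to the property name it is stored under, falling back to the
// function's own shared name when no holder is found.
#include <map>
#include <string>

struct DemoObject {
  std::map<std::string, const void*> properties;
  DemoObject* prototype = nullptr;
};

std::string DemoNameOnChain(const DemoObject* receiver, const void* fun,
                            const std::string& own_name) {
  for (const DemoObject* o = receiver; o != nullptr; o = o->prototype) {
    for (const auto& entry : o->properties) {
      if (entry.second == fun) return entry.first;  // cf. SlowReverseLookup.
    }
  }
  return own_name;  // The print_name fallback.
}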
diff --git a/deps/v8/src/tracing/trace-event.h b/deps/v8/src/tracing/trace-event.h
index d2d423c3be..d17f78589f 100644
--- a/deps/v8/src/tracing/trace-event.h
+++ b/deps/v8/src/tracing/trace-event.h
@@ -24,8 +24,6 @@
enum CategoryGroupEnabledFlags {
// Category group enabled for the recording mode.
kEnabledForRecording_CategoryGroupEnabledFlags = 1 << 0,
- // Category group enabled for the monitoring mode.
- kEnabledForMonitoring_CategoryGroupEnabledFlags = 1 << 1,
// Category group enabled by SetEventCallbackEnabled().
kEnabledForEventCallback_CategoryGroupEnabledFlags = 1 << 2,
// Category group enabled to export events to ETW.
@@ -101,9 +99,7 @@ enum CategoryGroupEnabledFlags {
// Get the number of times traces have been recorded. This is used to implement
// the TRACE_EVENT_IS_NEW_TRACE facility.
// unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
-#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED \
- v8::internal::tracing::TraceEventHelper::GetCurrentPlatform() \
- ->getNumTracesRecorded
+#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED UNIMPLEMENTED()
// Add a trace event to the platform tracing system.
// uint64_t TRACE_EVENT_API_ADD_TRACE_EVENT(
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index 96d9495bf4..9424497e19 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -100,7 +100,9 @@ void TransitionArray::SetTarget(int transition_number, Map* value) {
int TransitionArray::SearchName(Name* name, int* out_insertion_index) {
- return internal::Search<ALL_ENTRIES>(this, name, 0, out_insertion_index);
+ DCHECK(name->IsUniqueName());
+ return internal::Search<ALL_ENTRIES>(this, name, number_of_entries(),
+ out_insertion_index);
}
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index fc24b28867..e63769e4af 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -159,20 +159,21 @@ void TransitionArray::Insert(Handle<Map> map, Handle<Name> name,
// static
Map* TransitionArray::SearchTransition(Map* map, PropertyKind kind, Name* name,
PropertyAttributes attributes) {
+ DCHECK(name->IsUniqueName());
Object* raw_transitions = map->raw_transitions();
if (IsSimpleTransition(raw_transitions)) {
Map* target = GetSimpleTransition(raw_transitions);
Name* key = GetSimpleTransitionKey(target);
- if (!key->Equals(name)) return NULL;
+ if (key != name) return nullptr;
PropertyDetails details = GetSimpleTargetDetails(target);
- if (details.attributes() != attributes) return NULL;
- if (details.kind() != kind) return NULL;
+ if (details.attributes() != attributes) return nullptr;
+ if (details.kind() != kind) return nullptr;
return target;
}
if (IsFullTransitionArray(raw_transitions)) {
TransitionArray* transitions = TransitionArray::cast(raw_transitions);
int transition = transitions->Search(kind, name, attributes);
- if (transition == kNotFound) return NULL;
+ if (transition == kNotFound) return nullptr;
return transitions->GetTarget(transition);
}
return NULL;
@@ -195,6 +196,7 @@ Map* TransitionArray::SearchSpecial(Map* map, Symbol* name) {
// static
Handle<Map> TransitionArray::FindTransitionToField(Handle<Map> map,
Handle<Name> name) {
+ DCHECK(name->IsUniqueName());
DisallowHeapAllocation no_gc;
Map* target = SearchTransition(*map, kData, *name, NONE);
if (target == NULL) return Handle<Map>::null();
@@ -545,9 +547,7 @@ int TransitionArray::Search(PropertyKind kind, Name* name,
PropertyAttributes attributes,
int* out_insertion_index) {
int transition = SearchName(name, out_insertion_index);
- if (transition == kNotFound) {
- return kNotFound;
- }
+ if (transition == kNotFound) return kNotFound;
return SearchDetails(transition, kind, attributes, out_insertion_index);
}
} // namespace internal
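// A standalone sketch of why the Equals() calls above could become raw
// pointer comparisons: once names are unique (interned), equal names share
// one object, so identity implies equality. The interning table here is
// illustrative, not V8's string table.
#include <string>
#include <unordered_set>

const std::string* DemoIntern(std::unordered_set<std::string>* table,
                              const std::string& s) {
  return &*table->insert(s).first;  // One canonical object per value.
}

bool DemoSameName(const std::string* a, const std::string* b) {
  return a == b;  // Pointer identity; no character comparison needed.
}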
diff --git a/deps/v8/src/type-cache.cc b/deps/v8/src/type-cache.cc
index 9ed8621487..d05aaa1f4d 100644
--- a/deps/v8/src/type-cache.cc
+++ b/deps/v8/src/type-cache.cc
@@ -5,7 +5,6 @@
#include "src/type-cache.h"
#include "src/base/lazy-instance.h"
-#include "src/types-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/type-cache.h b/deps/v8/src/type-cache.h
index 1b3a26033b..8bd35c0161 100644
--- a/deps/v8/src/type-cache.h
+++ b/deps/v8/src/type-cache.h
@@ -112,10 +112,6 @@ class TypeCache final {
Type* const kStringLengthType =
CreateNative(CreateRange(0.0, String::kMaxLength), Type::TaggedSigned());
- // When initializing arrays, we'll unfold the loop if the number of
- // elements is known to be of this type.
- Type* const kElementLoopUnrollType = CreateRange(0.0, 16.0);
-
#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
Type* const k##TypeName##Array = CreateArray(k##TypeName);
TYPED_ARRAYS(TYPED_ARRAY)
diff --git a/deps/v8/src/type-feedback-vector.cc b/deps/v8/src/type-feedback-vector.cc
index 698f2a6d17..9e60fcffa7 100644
--- a/deps/v8/src/type-feedback-vector.cc
+++ b/deps/v8/src/type-feedback-vector.cc
@@ -15,8 +15,13 @@ namespace internal {
static bool IsPropertyNameFeedback(Object* feedback) {
- return feedback->IsString() ||
- (feedback->IsSymbol() && !Symbol::cast(feedback)->is_private());
+ if (feedback->IsString()) return true;
+ if (!feedback->IsSymbol()) return false;
+ Symbol* symbol = Symbol::cast(feedback);
+ Heap* heap = symbol->GetHeap();
+ return symbol != heap->uninitialized_symbol() &&
+ symbol != heap->premonomorphic_symbol() &&
+ symbol != heap->megamorphic_symbol();
}
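// A minimal sketch of the sentinel test above, with made-up types: feedback
// slots reuse a few well-known singleton symbols to mean "uninitialized",
// "premonomorphic" and "megamorphic", so property-name feedback must exclude
// exactly those singletons rather than all private symbols.
struct DemoSymbol {};

struct DemoHeap {
  DemoSymbol uninitialized, premonomorphic, megamorphic;
};

bool IsDemoPropertyNameFeedback(const DemoHeap& heap, const DemoSymbol* s) {
  return s != &heap.uninitialized && s != &heap.premonomorphic &&
         s != &heap.megamorphic;
}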
diff --git a/deps/v8/src/type-feedback-vector.h b/deps/v8/src/type-feedback-vector.h
index d83b77fa3e..d1c31a285f 100644
--- a/deps/v8/src/type-feedback-vector.h
+++ b/deps/v8/src/type-feedback-vector.h
@@ -343,6 +343,7 @@ class FeedbackNexus {
FeedbackVectorSlot slot() const { return slot_; }
InlineCacheState ic_state() const { return StateFromFeedback(); }
+ bool IsUninitialized() const { return StateFromFeedback() == UNINITIALIZED; }
Map* FindFirstMap() const {
MapHandleList maps;
ExtractMaps(&maps);
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index a8a406efde..eca5eccda4 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -200,7 +200,7 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
Handle<Object> info = GetInfo(id);
if (!info->IsCode()) {
// For some comparisons we don't have ICs, e.g. LiteralCompareTypeof.
- *left_type = *right_type = *combined_type = Type::None(zone());
+ *left_type = *right_type = *combined_type = Type::None();
return;
}
Handle<Code> code = Handle<Code>::cast(info);
@@ -235,7 +235,7 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
// operations covered by the BinaryOpIC we should always have them.
DCHECK(op < BinaryOpICState::FIRST_TOKEN ||
op > BinaryOpICState::LAST_TOKEN);
- *left = *right = *result = Type::None(zone());
+ *left = *right = *result = Type::None();
*fixed_right_arg = Nothing<int>();
*allocation_site = Handle<AllocationSite>::null();
return;
@@ -261,7 +261,7 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
Type* TypeFeedbackOracle::CountType(TypeFeedbackId id) {
Handle<Object> object = GetInfo(id);
- if (!object->IsCode()) return Type::None(zone());
+ if (!object->IsCode()) return Type::None();
Handle<Code> code = Handle<Code>::cast(object);
DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
BinaryOpICState state(isolate(), code->extra_ic_state());
diff --git a/deps/v8/src/types-inl.h b/deps/v8/src/types-inl.h
deleted file mode 100644
index 9af4bccd2e..0000000000
--- a/deps/v8/src/types-inl.h
+++ /dev/null
@@ -1,487 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TYPES_INL_H_
-#define V8_TYPES_INL_H_
-
-#include "src/types.h"
-
-#include "src/factory.h"
-#include "src/handles-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// TypeImpl
-
-template<class Config>
-typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::SignedSmall() {
- return i::SmiValuesAre31Bits() ? kSigned31 : kSigned32;
-}
-
-
-template<class Config>
-typename TypeImpl<Config>::bitset
-TypeImpl<Config>::BitsetType::UnsignedSmall() {
- return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
-}
-
-
-#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
-template<class Config> \
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Name( \
- Isolate* isolate, Region* region) { \
- return Class(i::handle(isolate->heap()->name##_map()), region); \
-}
-SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
-#undef CONSTRUCT_SIMD_TYPE
-
-
-template<class Config>
-TypeImpl<Config>* TypeImpl<Config>::cast(typename Config::Base* object) {
- TypeImpl* t = static_cast<TypeImpl*>(object);
- DCHECK(t->IsBitset() || t->IsClass() || t->IsConstant() || t->IsRange() ||
- t->IsUnion() || t->IsArray() || t->IsFunction() || t->IsContext());
- return t;
-}
-
-
-// Most precise _current_ type of a value (usually its class).
-template<class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NowOf(
- i::Object* value, Region* region) {
- if (value->IsSmi() ||
- i::HeapObject::cast(value)->map()->instance_type() == HEAP_NUMBER_TYPE) {
- return Of(value, region);
- }
- return Class(i::handle(i::HeapObject::cast(value)->map()), region);
-}
-
-
-template<class Config>
-bool TypeImpl<Config>::NowContains(i::Object* value) {
- DisallowHeapAllocation no_allocation;
- if (this->IsAny()) return true;
- if (value->IsHeapObject()) {
- i::Map* map = i::HeapObject::cast(value)->map();
- for (Iterator<i::Map> it = this->Classes(); !it.Done(); it.Advance()) {
- if (*it.Current() == map) return true;
- }
- }
- return this->Contains(value);
-}
-
-
-// -----------------------------------------------------------------------------
-// ZoneTypeConfig
-
-// static
-template<class T>
-T* ZoneTypeConfig::handle(T* type) {
- return type;
-}
-
-
-// static
-template<class T>
-T* ZoneTypeConfig::cast(Type* type) {
- return static_cast<T*>(type);
-}
-
-
-// static
-bool ZoneTypeConfig::is_bitset(Type* type) {
- return reinterpret_cast<uintptr_t>(type) & 1;
-}
-
-
-// static
-bool ZoneTypeConfig::is_struct(Type* type, int tag) {
- DCHECK(tag != kRangeStructTag);
- if (is_bitset(type)) return false;
- int type_tag = struct_tag(as_struct(type));
- return type_tag == tag;
-}
-
-
-// static
-bool ZoneTypeConfig::is_range(Type* type) {
- if (is_bitset(type)) return false;
- int type_tag = struct_tag(as_struct(type));
- return type_tag == kRangeStructTag;
-}
-
-
-// static
-bool ZoneTypeConfig::is_class(Type* type) {
- return false;
-}
-
-
-// static
-ZoneTypeConfig::Type::bitset ZoneTypeConfig::as_bitset(Type* type) {
- DCHECK(is_bitset(type));
- return static_cast<Type::bitset>(reinterpret_cast<uintptr_t>(type) ^ 1u);
-}
-
-
-// static
-ZoneTypeConfig::Struct* ZoneTypeConfig::as_struct(Type* type) {
- DCHECK(!is_bitset(type));
- return reinterpret_cast<Struct*>(type);
-}
-
-
-// static
-ZoneTypeConfig::Range* ZoneTypeConfig::as_range(Type* type) {
- DCHECK(!is_bitset(type));
- return reinterpret_cast<Range*>(type);
-}
-
-
-// static
-i::Handle<i::Map> ZoneTypeConfig::as_class(Type* type) {
- UNREACHABLE();
- return i::Handle<i::Map>();
-}
-
-
-// static
-ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(Type::bitset bitset) {
- return reinterpret_cast<Type*>(static_cast<uintptr_t>(bitset | 1u));
-}
-
-
-// static
-ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(
- Type::bitset bitset, Zone* Zone) {
- return from_bitset(bitset);
-}
-
-
-// static
-ZoneTypeConfig::Type* ZoneTypeConfig::from_struct(Struct* structure) {
- return reinterpret_cast<Type*>(structure);
-}
-
-
-// static
-ZoneTypeConfig::Type* ZoneTypeConfig::from_range(Range* range) {
- return reinterpret_cast<Type*>(range);
-}
-
-
-// static
-ZoneTypeConfig::Type* ZoneTypeConfig::from_class(
- i::Handle<i::Map> map, Zone* zone) {
- return from_bitset(0);
-}
-
-
-// static
-ZoneTypeConfig::Struct* ZoneTypeConfig::struct_create(
- int tag, int length, Zone* zone) {
- DCHECK(tag != kRangeStructTag);
- Struct* structure = reinterpret_cast<Struct*>(
- zone->New(sizeof(void*) * (length + 2))); // NOLINT
- structure[0] = reinterpret_cast<void*>(tag);
- structure[1] = reinterpret_cast<void*>(length);
- return structure;
-}
-
-
-// static
-void ZoneTypeConfig::struct_shrink(Struct* structure, int length) {
- DCHECK(0 <= length && length <= struct_length(structure));
- structure[1] = reinterpret_cast<void*>(length);
-}
-
-
-// static
-int ZoneTypeConfig::struct_tag(Struct* structure) {
- return static_cast<int>(reinterpret_cast<intptr_t>(structure[0]));
-}
-
-
-// static
-int ZoneTypeConfig::struct_length(Struct* structure) {
- return static_cast<int>(reinterpret_cast<intptr_t>(structure[1]));
-}
-
-
-// static
-Type* ZoneTypeConfig::struct_get(Struct* structure, int i) {
- DCHECK(0 <= i && i <= struct_length(structure));
- return static_cast<Type*>(structure[2 + i]);
-}
-
-
-// static
-void ZoneTypeConfig::struct_set(Struct* structure, int i, Type* x) {
- DCHECK(0 <= i && i <= struct_length(structure));
- structure[2 + i] = x;
-}
-
-
-// static
-template<class V>
-i::Handle<V> ZoneTypeConfig::struct_get_value(Struct* structure, int i) {
- DCHECK(0 <= i && i <= struct_length(structure));
- return i::Handle<V>(static_cast<V**>(structure[2 + i]));
-}
-
-
-// static
-template<class V>
-void ZoneTypeConfig::struct_set_value(
- Struct* structure, int i, i::Handle<V> x) {
- DCHECK(0 <= i && i <= struct_length(structure));
- structure[2 + i] = x.location();
-}
-
-
-// static
-ZoneTypeConfig::Range* ZoneTypeConfig::range_create(Zone* zone) {
- Range* range = reinterpret_cast<Range*>(zone->New(sizeof(Range))); // NOLINT
- range->tag = reinterpret_cast<void*>(kRangeStructTag);
- range->bitset = 0;
- range->limits[0] = 1;
- range->limits[1] = 0;
- return range;
-}
-
-
-// static
-int ZoneTypeConfig::range_get_bitset(ZoneTypeConfig::Range* range) {
- return range->bitset;
-}
-
-
-// static
-void ZoneTypeConfig::range_set_bitset(ZoneTypeConfig::Range* range, int value) {
- range->bitset = value;
-}
-
-
-// static
-double ZoneTypeConfig::range_get_double(ZoneTypeConfig::Range* range,
- int index) {
- DCHECK(index >= 0 && index < 2);
- return range->limits[index];
-}
-
-
-// static
-void ZoneTypeConfig::range_set_double(ZoneTypeConfig::Range* range, int index,
- double value, Zone*) {
- DCHECK(index >= 0 && index < 2);
- range->limits[index] = value;
-}
-
-
-// -----------------------------------------------------------------------------
-// HeapTypeConfig
-
-// static
-template<class T>
-i::Handle<T> HeapTypeConfig::handle(T* type) {
- return i::handle(type, i::HeapObject::cast(type)->GetIsolate());
-}
-
-
-// static
-template<class T>
-i::Handle<T> HeapTypeConfig::cast(i::Handle<Type> type) {
- return i::Handle<T>::cast(type);
-}
-
-
-// static
-bool HeapTypeConfig::is_bitset(Type* type) {
- return type->IsSmi();
-}
-
-
-// static
-bool HeapTypeConfig::is_class(Type* type) {
- return type->IsMap();
-}
-
-
-// static
-bool HeapTypeConfig::is_struct(Type* type, int tag) {
- DCHECK(tag != kRangeStructTag);
- return type->IsFixedArray() && struct_tag(as_struct(type)) == tag;
-}
-
-
-// static
-bool HeapTypeConfig::is_range(Type* type) {
- return type->IsFixedArray() && struct_tag(as_struct(type)) == kRangeStructTag;
-}
-
-
-// static
-HeapTypeConfig::Type::bitset HeapTypeConfig::as_bitset(Type* type) {
- // TODO(rossberg): Breaks the Smi abstraction. Fix once there is a better way.
- return static_cast<Type::bitset>(reinterpret_cast<uintptr_t>(type));
-}
-
-
-// static
-i::Handle<i::Map> HeapTypeConfig::as_class(Type* type) {
- return i::handle(i::Map::cast(type));
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Struct> HeapTypeConfig::as_struct(Type* type) {
- return i::handle(Struct::cast(type));
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Range> HeapTypeConfig::as_range(Type* type) {
- return i::handle(Range::cast(type));
-}
-
-
-// static
-HeapTypeConfig::Type* HeapTypeConfig::from_bitset(Type::bitset bitset) {
- // TODO(rossberg): Breaks the Smi abstraction. Fix once there is a better way.
- return reinterpret_cast<Type*>(static_cast<uintptr_t>(bitset));
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_bitset(
- Type::bitset bitset, Isolate* isolate) {
- return i::handle(from_bitset(bitset), isolate);
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_class(
- i::Handle<i::Map> map, Isolate* isolate) {
- return i::Handle<Type>::cast(i::Handle<Object>::cast(map));
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_struct(
- i::Handle<Struct> structure) {
- return i::Handle<Type>::cast(i::Handle<Object>::cast(structure));
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_range(
- i::Handle<Range> range) {
- return i::Handle<Type>::cast(i::Handle<Object>::cast(range));
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Struct> HeapTypeConfig::struct_create(
- int tag, int length, Isolate* isolate) {
- i::Handle<Struct> structure = isolate->factory()->NewFixedArray(length + 1);
- structure->set(0, i::Smi::FromInt(tag));
- return structure;
-}
-
-
-// static
-void HeapTypeConfig::struct_shrink(i::Handle<Struct> structure, int length) {
- structure->Shrink(length + 1);
-}
-
-
-// static
-int HeapTypeConfig::struct_tag(i::Handle<Struct> structure) {
- return static_cast<i::Smi*>(structure->get(0))->value();
-}
-
-
-// static
-int HeapTypeConfig::struct_length(i::Handle<Struct> structure) {
- return structure->length() - 1;
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Type> HeapTypeConfig::struct_get(
- i::Handle<Struct> structure, int i) {
- Type* type = static_cast<Type*>(structure->get(i + 1));
- return i::handle(type, structure->GetIsolate());
-}
-
-
-// static
-void HeapTypeConfig::struct_set(
- i::Handle<Struct> structure, int i, i::Handle<Type> type) {
- structure->set(i + 1, *type);
-}
-
-
-// static
-template<class V>
-i::Handle<V> HeapTypeConfig::struct_get_value(
- i::Handle<Struct> structure, int i) {
- V* x = static_cast<V*>(structure->get(i + 1));
- return i::handle(x, structure->GetIsolate());
-}
-
-
-// static
-template<class V>
-void HeapTypeConfig::struct_set_value(
- i::Handle<Struct> structure, int i, i::Handle<V> x) {
- structure->set(i + 1, *x);
-}
-
-
-// static
-i::Handle<HeapTypeConfig::Range> HeapTypeConfig::range_create(
- Isolate* isolate) {
- i::Handle<Range> range = isolate->factory()->NewFixedArray(4);
- range->set(0, i::Smi::FromInt(kRangeStructTag));
- return range;
-}
-
-
-// static
-int HeapTypeConfig::range_get_bitset(i::Handle<HeapTypeConfig::Range> range) {
- Type* v = static_cast<Type*>(range->get(1));
- return as_bitset(v);
-}
-
-
-// static
-void HeapTypeConfig::range_set_bitset(i::Handle<HeapTypeConfig::Range> range,
- int value) {
- range->set(1, from_bitset(value));
-}
-
-
-// static
-double HeapTypeConfig::range_get_double(i::Handle<HeapTypeConfig::Range> range,
- int index) {
- DCHECK(index >= 0 && index < 2);
- return range->get(index + 2)->Number();
-}
-
-
-// static
-void HeapTypeConfig::range_set_double(i::Handle<HeapTypeConfig::Range> range,
- int index, double value,
- Isolate* isolate) {
- DCHECK(index >= 0 && index < 2);
- i::Handle<Object> number = isolate->factory()->NewNumber(value);
- range->set(index + 2, *number);
-}
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TYPES_INL_H_
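// A standalone sketch of the pointer-tagging trick from the deleted
// ZoneTypeConfig above: a word either points at an aligned heap structure
// (low bit 0) or carries an inline bitset with the low bit forced to 1, and
// decoding XORs the tag back out. DemoType is an illustrative stand-in.
#include <cstdint>

using DemoType = void;

bool IsDemoBitset(DemoType* t) {
  return (reinterpret_cast<uintptr_t>(t) & 1u) != 0;
}

DemoType* FromDemoBitset(uint32_t bits) {
  return reinterpret_cast<DemoType*>(static_cast<uintptr_t>(bits | 1u));
}

uint32_t AsDemoBitset(DemoType* t) {
  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(t) ^ 1u);
}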
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index 92610606d5..d54826e34e 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -6,8 +6,8 @@
#include "src/types.h"
+#include "src/handles-inl.h"
#include "src/ostreams.h"
-#include "src/types-inl.h"
namespace v8 {
namespace internal {
@@ -16,19 +16,17 @@ namespace internal {
// NOTE: If code is marked as being a "shortcut", this means that removing
// the code won't affect the semantics of the surrounding function definition.
+// static
+bool Type::IsInteger(i::Object* x) {
+ return x->IsNumber() && Type::IsInteger(x->Number());
+}
// -----------------------------------------------------------------------------
// Range-related helper functions.
-template <class Config>
-bool TypeImpl<Config>::Limits::IsEmpty() {
- return this->min > this->max;
-}
+bool RangeType::Limits::IsEmpty() { return this->min > this->max; }
-
-template<class Config>
-typename TypeImpl<Config>::Limits TypeImpl<Config>::Limits::Intersect(
- Limits lhs, Limits rhs) {
+RangeType::Limits RangeType::Limits::Intersect(Limits lhs, Limits rhs) {
DisallowHeapAllocation no_allocation;
Limits result(lhs);
if (lhs.min < rhs.min) result.min = rhs.min;
@@ -36,10 +34,7 @@ typename TypeImpl<Config>::Limits TypeImpl<Config>::Limits::Intersect(
return result;
}
-
-template <class Config>
-typename TypeImpl<Config>::Limits TypeImpl<Config>::Limits::Union(
- Limits lhs, Limits rhs) {
+RangeType::Limits RangeType::Limits::Union(Limits lhs, Limits rhs) {
DisallowHeapAllocation no_allocation;
if (lhs.IsEmpty()) return rhs;
if (rhs.IsEmpty()) return lhs;
@@ -49,38 +44,26 @@ typename TypeImpl<Config>::Limits TypeImpl<Config>::Limits::Union(
return result;
}
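// A minimal sketch of the Limits lattice used above: an interval with
// min > max is the canonical empty interval, Intersect tightens both bounds,
// and Union (really a convex hull) widens them after filtering out empties.
struct DemoLimits {
  double min, max;
  bool IsEmpty() const { return min > max; }
};

DemoLimits DemoIntersect(DemoLimits a, DemoLimits b) {
  return {a.min < b.min ? b.min : a.min, a.max > b.max ? b.max : a.max};
}

DemoLimits DemoUnion(DemoLimits a, DemoLimits b) {
  if (a.IsEmpty()) return b;
  if (b.IsEmpty()) return a;
  return {a.min < b.min ? a.min : b.min, a.max > b.max ? a.max : b.max};
}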
-
-template<class Config>
-bool TypeImpl<Config>::Overlap(
- typename TypeImpl<Config>::RangeType* lhs,
- typename TypeImpl<Config>::RangeType* rhs) {
+bool Type::Overlap(RangeType* lhs, RangeType* rhs) {
DisallowHeapAllocation no_allocation;
- return !Limits::Intersect(Limits(lhs), Limits(rhs)).IsEmpty();
+ return !RangeType::Limits::Intersect(RangeType::Limits(lhs),
+ RangeType::Limits(rhs))
+ .IsEmpty();
}
-
-template<class Config>
-bool TypeImpl<Config>::Contains(
- typename TypeImpl<Config>::RangeType* lhs,
- typename TypeImpl<Config>::RangeType* rhs) {
+bool Type::Contains(RangeType* lhs, RangeType* rhs) {
DisallowHeapAllocation no_allocation;
return lhs->Min() <= rhs->Min() && rhs->Max() <= lhs->Max();
}
-
-template <class Config>
-bool TypeImpl<Config>::Contains(typename TypeImpl<Config>::RangeType* lhs,
- typename TypeImpl<Config>::ConstantType* rhs) {
+bool Type::Contains(RangeType* lhs, ConstantType* rhs) {
DisallowHeapAllocation no_allocation;
return IsInteger(*rhs->Value()) &&
lhs->Min() <= rhs->Value()->Number() &&
rhs->Value()->Number() <= lhs->Max();
}
-
-template<class Config>
-bool TypeImpl<Config>::Contains(
- typename TypeImpl<Config>::RangeType* range, i::Object* val) {
+bool Type::Contains(RangeType* range, i::Object* val) {
DisallowHeapAllocation no_allocation;
return IsInteger(val) &&
range->Min() <= val->Number() && val->Number() <= range->Max();
@@ -90,8 +73,7 @@ bool TypeImpl<Config>::Contains(
// -----------------------------------------------------------------------------
// Min and Max computation.
-template<class Config>
-double TypeImpl<Config>::Min() {
+double Type::Min() {
DCHECK(this->SemanticIs(Number()));
if (this->IsBitset()) return BitsetType::Min(this->AsBitset());
if (this->IsUnion()) {
@@ -107,9 +89,7 @@ double TypeImpl<Config>::Min() {
return 0;
}
-
-template<class Config>
-double TypeImpl<Config>::Max() {
+double Type::Max() {
DCHECK(this->SemanticIs(Number()));
if (this->IsBitset()) return BitsetType::Max(this->AsBitset());
if (this->IsUnion()) {
@@ -131,12 +111,10 @@ double TypeImpl<Config>::Max() {
// The largest bitset subsumed by this type.
-template<class Config>
-typename TypeImpl<Config>::bitset
-TypeImpl<Config>::BitsetType::Glb(TypeImpl* type) {
+Type::bitset BitsetType::Glb(Type* type) {
DisallowHeapAllocation no_allocation;
// Fast case.
- if (type->IsBitset()) {
+ if (IsBitset(type)) {
return type->AsBitset();
} else if (type->IsUnion()) {
SLOW_DCHECK(type->AsUnion()->Wellformed());
@@ -153,11 +131,9 @@ TypeImpl<Config>::BitsetType::Glb(TypeImpl* type) {
// The smallest bitset subsuming this type, possibly not a proper one.
-template<class Config>
-typename TypeImpl<Config>::bitset
-TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
+Type::bitset BitsetType::Lub(Type* type) {
DisallowHeapAllocation no_allocation;
- if (type->IsBitset()) return type->AsBitset();
+ if (IsBitset(type)) return type->AsBitset();
if (type->IsUnion()) {
// Take the representation from the first element, which is always
// a bitset.
@@ -174,14 +150,12 @@ TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
if (type->IsContext()) return kInternal & kTaggedPointer;
if (type->IsArray()) return kOtherObject;
if (type->IsFunction()) return kFunction;
+ if (type->IsTuple()) return kInternal;
UNREACHABLE();
return kNone;
}
-
-template<class Config>
-typename TypeImpl<Config>::bitset
-TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
+Type::bitset BitsetType::Lub(i::Map* map) {
DisallowHeapAllocation no_allocation;
switch (map->instance_type()) {
case STRING_TYPE:
@@ -241,7 +215,6 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case JS_MAP_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
- case JS_ITERATOR_RESULT_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
case JS_PROMISE_TYPE:
@@ -267,8 +240,7 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
// over type or class variables, esp ones with bounds...
return kDetectable & kTaggedPointer;
case ALLOCATION_SITE_TYPE:
- case DECLARED_ACCESSOR_INFO_TYPE:
- case EXECUTABLE_ACCESSOR_INFO_TYPE:
+ case ACCESSOR_INFO_TYPE:
case SHARED_FUNCTION_INFO_TYPE:
case ACCESSOR_PAIR_TYPE:
case FIXED_ARRAY_TYPE:
@@ -292,7 +264,6 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
#undef FIXED_TYPED_ARRAY_CASE
case FILLER_TYPE:
- case DECLARED_ACCESSOR_DESCRIPTOR_TYPE:
case ACCESS_CHECK_INFO_TYPE:
case INTERCEPTOR_INFO_TYPE:
case CALL_HANDLER_INFO_TYPE:
@@ -319,10 +290,7 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
return kNone;
}
-
-template<class Config>
-typename TypeImpl<Config>::bitset
-TypeImpl<Config>::BitsetType::Lub(i::Object* value) {
+Type::bitset BitsetType::Lub(i::Object* value) {
DisallowHeapAllocation no_allocation;
if (value->IsNumber()) {
return Lub(value->Number()) &
@@ -331,10 +299,7 @@ TypeImpl<Config>::BitsetType::Lub(i::Object* value) {
return Lub(i::HeapObject::cast(value)->map());
}
-
-template<class Config>
-typename TypeImpl<Config>::bitset
-TypeImpl<Config>::BitsetType::Lub(double value) {
+Type::bitset BitsetType::Lub(double value) {
DisallowHeapAllocation no_allocation;
if (i::IsMinusZero(value)) return kMinusZero;
if (std::isnan(value)) return kNaN;
@@ -344,36 +309,24 @@ TypeImpl<Config>::BitsetType::Lub(double value) {
// Minimum values of plain numeric bitsets.
-template <class Config>
-const typename TypeImpl<Config>::BitsetType::Boundary
-TypeImpl<Config>::BitsetType::BoundariesArray[] = {
- {kOtherNumber, kPlainNumber, -V8_INFINITY},
- {kOtherSigned32, kNegative32, kMinInt},
- {kNegative31, kNegative31, -0x40000000},
- {kUnsigned30, kUnsigned30, 0},
- {kOtherUnsigned31, kUnsigned31, 0x40000000},
- {kOtherUnsigned32, kUnsigned32, 0x80000000},
- {kOtherNumber, kPlainNumber, static_cast<double>(kMaxUInt32) + 1}};
-
-
-template <class Config>
-const typename TypeImpl<Config>::BitsetType::Boundary*
-TypeImpl<Config>::BitsetType::Boundaries() {
- return BoundariesArray;
-}
-
-
-template <class Config>
-size_t TypeImpl<Config>::BitsetType::BoundariesSize() {
+const BitsetType::Boundary BitsetType::BoundariesArray[] = {
+ {kOtherNumber, kPlainNumber, -V8_INFINITY},
+ {kOtherSigned32, kNegative32, kMinInt},
+ {kNegative31, kNegative31, -0x40000000},
+ {kUnsigned30, kUnsigned30, 0},
+ {kOtherUnsigned31, kUnsigned31, 0x40000000},
+ {kOtherUnsigned32, kUnsigned32, 0x80000000},
+ {kOtherNumber, kPlainNumber, static_cast<double>(kMaxUInt32) + 1}};
+
+const BitsetType::Boundary* BitsetType::Boundaries() { return BoundariesArray; }
+
+size_t BitsetType::BoundariesSize() {
// Windows doesn't like arraysize here.
// return arraysize(BoundariesArray);
return 7;
}
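// A standalone sketch of how a Boundaries() table like the one above drives
// Lub(min, max): each row opens a numeric sub-bitset at its minimum, so the
// least upper bound of a range ORs together every bucket the range touches.
// The three buckets here are illustrative, not V8's.
#include <cstddef>

struct DemoBoundary {
  unsigned bits;  // Bitset bucket that starts at this minimum.
  double min;     // Inclusive lower bound of the bucket.
};

static const DemoBoundary kDemoBoundaries[] = {
    {1u << 0, -1e12}, {1u << 1, 0.0}, {1u << 2, 1e12}};

unsigned DemoLub(double min, double max) {  // Assumes min <= max.
  unsigned lub = 0;
  const size_t n = sizeof(kDemoBoundaries) / sizeof(kDemoBoundaries[0]);
  for (size_t i = 1; i < n; ++i) {
    if (min < kDemoBoundaries[i].min) lub |= kDemoBoundaries[i - 1].bits;
    if (max < kDemoBoundaries[i].min) return lub;
  }
  return lub | kDemoBoundaries[n - 1].bits;
}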
-
-template <class Config>
-typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::ExpandInternals(
- typename TypeImpl<Config>::bitset bits) {
+Type::bitset BitsetType::ExpandInternals(Type::bitset bits) {
DisallowHeapAllocation no_allocation;
if (!(bits & SEMANTIC(kPlainNumber))) return bits; // Shortcut.
const Boundary* boundaries = Boundaries();
@@ -385,10 +338,7 @@ typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::ExpandInternals(
return bits;
}
-
-template<class Config>
-typename TypeImpl<Config>::bitset
-TypeImpl<Config>::BitsetType::Lub(double min, double max) {
+Type::bitset BitsetType::Lub(double min, double max) {
DisallowHeapAllocation no_allocation;
int lub = kNone;
const Boundary* mins = Boundaries();
@@ -402,17 +352,11 @@ TypeImpl<Config>::BitsetType::Lub(double min, double max) {
return lub | mins[BoundariesSize() - 1].internal;
}
-
-template <class Config>
-typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::NumberBits(
- bitset bits) {
+Type::bitset BitsetType::NumberBits(bitset bits) {
return SEMANTIC(bits & kPlainNumber);
}
-
-template <class Config>
-typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::Glb(
- double min, double max) {
+Type::bitset BitsetType::Glb(double min, double max) {
DisallowHeapAllocation no_allocation;
int glb = kNone;
const Boundary* mins = Boundaries();
@@ -431,9 +375,7 @@ typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::Glb(
return glb & ~(SEMANTIC(kOtherNumber));
}
-
-template <class Config>
-double TypeImpl<Config>::BitsetType::Min(bitset bits) {
+double BitsetType::Min(bitset bits) {
DisallowHeapAllocation no_allocation;
DCHECK(Is(SEMANTIC(bits), kNumber));
const Boundary* mins = Boundaries();
@@ -447,9 +389,7 @@ double TypeImpl<Config>::BitsetType::Min(bitset bits) {
return std::numeric_limits<double>::quiet_NaN();
}
-
-template<class Config>
-double TypeImpl<Config>::BitsetType::Max(bitset bits) {
+double BitsetType::Max(bitset bits) {
DisallowHeapAllocation no_allocation;
DCHECK(Is(SEMANTIC(bits), kNumber));
const Boundary* mins = Boundaries();
@@ -471,9 +411,7 @@ double TypeImpl<Config>::BitsetType::Max(bitset bits) {
// -----------------------------------------------------------------------------
// Predicates.
-
-template<class Config>
-bool TypeImpl<Config>::SimplyEquals(TypeImpl* that) {
+bool Type::SimplyEquals(Type* that) {
DisallowHeapAllocation no_allocation;
if (this->IsClass()) {
return that->IsClass()
@@ -505,20 +443,29 @@ bool TypeImpl<Config>::SimplyEquals(TypeImpl* that) {
}
return true;
}
+ if (this->IsTuple()) {
+ if (!that->IsTuple()) return false;
+ TupleType* this_tuple = this->AsTuple();
+ TupleType* that_tuple = that->AsTuple();
+ if (this_tuple->Arity() != that_tuple->Arity()) {
+ return false;
+ }
+ for (int i = 0, n = this_tuple->Arity(); i < n; ++i) {
+ if (!this_tuple->Element(i)->Equals(that_tuple->Element(i))) return false;
+ }
+ return true;
+ }
UNREACHABLE();
return false;
}
-
-template <class Config>
-typename TypeImpl<Config>::bitset TypeImpl<Config>::Representation() {
+Type::bitset Type::Representation() {
return REPRESENTATION(this->BitsetLub());
}
// Check if [this] <= [that].
-template<class Config>
-bool TypeImpl<Config>::SlowIs(TypeImpl* that) {
+bool Type::SlowIs(Type* that) {
DisallowHeapAllocation no_allocation;
// Fast bitset cases
@@ -542,8 +489,7 @@ bool TypeImpl<Config>::SlowIs(TypeImpl* that) {
// Check if SEMANTIC([this]) <= SEMANTIC([that]). The result of the method
// should be independent of the representation axis of the types.
-template <class Config>
-bool TypeImpl<Config>::SemanticIs(TypeImpl* that) {
+bool Type::SemanticIs(Type* that) {
DisallowHeapAllocation no_allocation;
if (this == that) return true;
@@ -566,7 +512,7 @@ bool TypeImpl<Config>::SemanticIs(TypeImpl* that) {
// T <= (T1 \/ ... \/ Tn) if (T <= T1) \/ ... \/ (T <= Tn)
if (that->IsUnion()) {
for (int i = 0, n = that->AsUnion()->Length(); i < n; ++i) {
- if (this->SemanticIs(that->AsUnion()->Get(i)->unhandle())) return true;
+ if (this->SemanticIs(that->AsUnion()->Get(i))) return true;
if (i > 1 && this->IsRange()) return false; // Shortcut.
}
return false;
@@ -582,9 +528,28 @@ bool TypeImpl<Config>::SemanticIs(TypeImpl* that) {
return this->SimplyEquals(that);
}
+// Most precise _current_ type of a value (usually its class).
+Type* Type::NowOf(i::Object* value, Zone* zone) {
+ if (value->IsSmi() ||
+ i::HeapObject::cast(value)->map()->instance_type() == HEAP_NUMBER_TYPE) {
+ return Of(value, zone);
+ }
+ return Class(i::handle(i::HeapObject::cast(value)->map()), zone);
+}
-template<class Config>
-bool TypeImpl<Config>::NowIs(TypeImpl* that) {
+bool Type::NowContains(i::Object* value) {
+ DisallowHeapAllocation no_allocation;
+ if (this->IsAny()) return true;
+ if (value->IsHeapObject()) {
+ i::Map* map = i::HeapObject::cast(value)->map();
+ for (Iterator<i::Map> it = this->Classes(); !it.Done(); it.Advance()) {
+ if (*it.Current() == map) return true;
+ }
+ }
+ return this->Contains(value);
+}
+
+bool Type::NowIs(Type* that) {
DisallowHeapAllocation no_allocation;
// TODO(rossberg): this is incorrect for
@@ -604,16 +569,14 @@ bool TypeImpl<Config>::NowIs(TypeImpl* that) {
// Check if [this] contains only (currently) stable classes.
-template<class Config>
-bool TypeImpl<Config>::NowStable() {
+bool Type::NowStable() {
DisallowHeapAllocation no_allocation;
return !this->IsClass() || this->AsClass()->Map()->is_stable();
}
// Check if [this] and [that] overlap.
-template<class Config>
-bool TypeImpl<Config>::Maybe(TypeImpl* that) {
+bool Type::Maybe(Type* that) {
DisallowHeapAllocation no_allocation;
// Take care of the representation part (and also approximate
@@ -624,8 +587,7 @@ bool TypeImpl<Config>::Maybe(TypeImpl* that) {
return SemanticMaybe(that);
}
-template <class Config>
-bool TypeImpl<Config>::SemanticMaybe(TypeImpl* that) {
+bool Type::SemanticMaybe(Type* that) {
DisallowHeapAllocation no_allocation;
// (T1 \/ ... \/ Tn) overlaps T if (T1 overlaps T) \/ ... \/ (Tn overlaps T)
@@ -639,7 +601,7 @@ bool TypeImpl<Config>::SemanticMaybe(TypeImpl* that) {
// T overlaps (T1 \/ ... \/ Tn) if (T overlaps T1) \/ ... \/ (T overlaps Tn)
if (that->IsUnion()) {
for (int i = 0, n = that->AsUnion()->Length(); i < n; ++i) {
- if (this->SemanticMaybe(that->AsUnion()->Get(i)->unhandle())) return true;
+ if (this->SemanticMaybe(that->AsUnion()->Get(i))) return true;
}
return false;
}
@@ -679,33 +641,28 @@ bool TypeImpl<Config>::SemanticMaybe(TypeImpl* that) {
// Return the range in [this], or [NULL].
-template<class Config>
-typename TypeImpl<Config>::RangeType* TypeImpl<Config>::GetRange() {
+Type* Type::GetRange() {
DisallowHeapAllocation no_allocation;
- if (this->IsRange()) return this->AsRange();
+ if (this->IsRange()) return this;
if (this->IsUnion() && this->AsUnion()->Get(1)->IsRange()) {
- return this->AsUnion()->Get(1)->AsRange();
+ return this->AsUnion()->Get(1);
}
return NULL;
}
-
-template<class Config>
-bool TypeImpl<Config>::Contains(i::Object* value) {
+bool Type::Contains(i::Object* value) {
DisallowHeapAllocation no_allocation;
for (Iterator<i::Object> it = this->Constants(); !it.Done(); it.Advance()) {
if (*it.Current() == value) return true;
}
if (IsInteger(value)) {
- RangeType* range = this->GetRange();
- if (range != NULL && Contains(range, value)) return true;
+ Type* range = this->GetRange();
+ if (range != NULL && Contains(range->AsRange(), value)) return true;
}
return BitsetType::New(BitsetType::Lub(value))->Is(this);
}
-
-template<class Config>
-bool TypeImpl<Config>::UnionType::Wellformed() {
+bool UnionType::Wellformed() {
DisallowHeapAllocation no_allocation;
// This checks the invariants of the union representation:
// 1. There are at least two elements.
@@ -724,7 +681,7 @@ bool TypeImpl<Config>::UnionType::Wellformed() {
DCHECK(!this->Get(i)->IsUnion()); // (4)
for (int j = 0; j < this->Length(); ++j) {
if (i != j && i != 0)
- DCHECK(!this->Get(i)->SemanticIs(this->Get(j)->unhandle())); // (5)
+ DCHECK(!this->Get(i)->SemanticIs(this->Get(j))); // (5)
}
}
DCHECK(!this->Get(1)->IsRange() ||
@@ -744,14 +701,10 @@ static bool AddIsSafe(int x, int y) {
y >= std::numeric_limits<int>::min() - x;
}
-
-template<class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
- TypeHandle type1, TypeHandle type2, Region* region) {
-
+Type* Type::Intersect(Type* type1, Type* type2, Zone* zone) {
// Fast case: bit sets.
if (type1->IsBitset() && type2->IsBitset()) {
- return BitsetType::New(type1->AsBitset() & type2->AsBitset(), region);
+ return BitsetType::New(type1->AsBitset() & type2->AsBitset());
}
// Fast case: top or bottom types.
@@ -775,47 +728,45 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
// semi-fast case above - we should behave the same way regardless of
// representations. Intersection with a universal bitset should only update
// the representations.
- if (type1->SemanticIs(type2->unhandle())) {
- type2 = Any(region);
- } else if (type2->SemanticIs(type1->unhandle())) {
- type1 = Any(region);
+ if (type1->SemanticIs(type2)) {
+ type2 = Any();
+ } else if (type2->SemanticIs(type1)) {
+ type1 = Any();
}
bitset bits =
SEMANTIC(type1->BitsetGlb() & type2->BitsetGlb()) | representation;
int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
- if (!AddIsSafe(size1, size2)) return Any(region);
+ if (!AddIsSafe(size1, size2)) return Any();
int size = size1 + size2;
- if (!AddIsSafe(size, 2)) return Any(region);
+ if (!AddIsSafe(size, 2)) return Any();
size += 2;
- UnionHandle result = UnionType::New(size, region);
+ Type* result_type = UnionType::New(size, zone);
+ UnionType* result = result_type->AsUnion();
size = 0;
// Deal with bitsets.
- result->Set(size++, BitsetType::New(bits, region));
+ result->Set(size++, BitsetType::New(bits));
- Limits lims = Limits::Empty();
- size = IntersectAux(type1, type2, result, size, &lims, region);
+ RangeType::Limits lims = RangeType::Limits::Empty();
+ size = IntersectAux(type1, type2, result, size, &lims, zone);
// If the range is not empty, then insert it into the union and
// remove the number bits from the bitset.
if (!lims.IsEmpty()) {
- size = UpdateRange(RangeType::New(lims, representation, region), result,
- size, region);
+ size = UpdateRange(RangeType::New(lims, representation, zone), result, size,
+ zone);
// Remove the number bits.
bitset number_bits = BitsetType::NumberBits(bits);
bits &= ~number_bits;
- result->Set(0, BitsetType::New(bits, region));
+ result->Set(0, BitsetType::New(bits));
}
- return NormalizeUnion(result, size, region);
+ return NormalizeUnion(result_type, size, zone);
}
-
-template<class Config>
-int TypeImpl<Config>::UpdateRange(
- RangeHandle range, UnionHandle result, int size, Region* region) {
+int Type::UpdateRange(Type* range, UnionType* result, int size, Zone* zone) {
if (size == 1) {
result->Set(size++, range);
} else {
@@ -826,7 +777,7 @@ int TypeImpl<Config>::UpdateRange(
// Remove any components that just got subsumed.
for (int i = 2; i < size; ) {
- if (result->Get(i)->SemanticIs(range->unhandle())) {
+ if (result->Get(i)->SemanticIs(range)) {
result->Set(i, result->Get(--size));
} else {
++i;
@@ -835,44 +786,37 @@ int TypeImpl<Config>::UpdateRange(
return size;
}
-
-template <class Config>
-typename TypeImpl<Config>::Limits TypeImpl<Config>::ToLimits(bitset bits,
- Region* region) {
+RangeType::Limits Type::ToLimits(bitset bits, Zone* zone) {
bitset number_bits = BitsetType::NumberBits(bits);
if (number_bits == BitsetType::kNone) {
- return Limits::Empty();
+ return RangeType::Limits::Empty();
}
- return Limits(BitsetType::Min(number_bits), BitsetType::Max(number_bits));
+ return RangeType::Limits(BitsetType::Min(number_bits),
+ BitsetType::Max(number_bits));
}
-
-template <class Config>
-typename TypeImpl<Config>::Limits TypeImpl<Config>::IntersectRangeAndBitset(
- TypeHandle range, TypeHandle bitset, Region* region) {
- Limits range_lims(range->AsRange());
- Limits bitset_lims = ToLimits(bitset->AsBitset(), region);
- return Limits::Intersect(range_lims, bitset_lims);
+RangeType::Limits Type::IntersectRangeAndBitset(Type* range, Type* bitset,
+ Zone* zone) {
+ RangeType::Limits range_lims(range->AsRange());
+ RangeType::Limits bitset_lims = ToLimits(bitset->AsBitset(), zone);
+ return RangeType::Limits::Intersect(range_lims, bitset_lims);
}
-
-template <class Config>
-int TypeImpl<Config>::IntersectAux(TypeHandle lhs, TypeHandle rhs,
- UnionHandle result, int size, Limits* lims,
- Region* region) {
+int Type::IntersectAux(Type* lhs, Type* rhs, UnionType* result, int size,
+ RangeType::Limits* lims, Zone* zone) {
if (lhs->IsUnion()) {
for (int i = 0, n = lhs->AsUnion()->Length(); i < n; ++i) {
size =
- IntersectAux(lhs->AsUnion()->Get(i), rhs, result, size, lims, region);
+ IntersectAux(lhs->AsUnion()->Get(i), rhs, result, size, lims, zone);
}
return size;
}
if (rhs->IsUnion()) {
for (int i = 0, n = rhs->AsUnion()->Length(); i < n; ++i) {
size =
- IntersectAux(lhs, rhs->AsUnion()->Get(i), result, size, lims, region);
+ IntersectAux(lhs, rhs->AsUnion()->Get(i), result, size, lims, zone);
}
return size;
}
@@ -883,40 +827,41 @@ int TypeImpl<Config>::IntersectAux(TypeHandle lhs, TypeHandle rhs,
if (lhs->IsRange()) {
if (rhs->IsBitset()) {
- Limits lim = IntersectRangeAndBitset(lhs, rhs, region);
+ RangeType::Limits lim = IntersectRangeAndBitset(lhs, rhs, zone);
if (!lim.IsEmpty()) {
- *lims = Limits::Union(lim, *lims);
+ *lims = RangeType::Limits::Union(lim, *lims);
}
return size;
}
if (rhs->IsClass()) {
- *lims = Limits::Union(Limits(lhs->AsRange()), *lims);
+ *lims =
+ RangeType::Limits::Union(RangeType::Limits(lhs->AsRange()), *lims);
}
if (rhs->IsConstant() && Contains(lhs->AsRange(), rhs->AsConstant())) {
- return AddToUnion(rhs, result, size, region);
+ return AddToUnion(rhs, result, size, zone);
}
if (rhs->IsRange()) {
- Limits lim = Limits::Intersect(
- Limits(lhs->AsRange()), Limits(rhs->AsRange()));
+ RangeType::Limits lim = RangeType::Limits::Intersect(
+ RangeType::Limits(lhs->AsRange()), RangeType::Limits(rhs->AsRange()));
if (!lim.IsEmpty()) {
- *lims = Limits::Union(lim, *lims);
+ *lims = RangeType::Limits::Union(lim, *lims);
}
}
return size;
}
if (rhs->IsRange()) {
// This case is handled symmetrically above.
- return IntersectAux(rhs, lhs, result, size, lims, region);
+ return IntersectAux(rhs, lhs, result, size, lims, zone);
}
if (lhs->IsBitset() || rhs->IsBitset()) {
- return AddToUnion(lhs->IsBitset() ? rhs : lhs, result, size, region);
+ return AddToUnion(lhs->IsBitset() ? rhs : lhs, result, size, zone);
}
if (lhs->IsClass() != rhs->IsClass()) {
- return AddToUnion(lhs->IsClass() ? rhs : lhs, result, size, region);
+ return AddToUnion(lhs->IsClass() ? rhs : lhs, result, size, zone);
}
- if (lhs->SimplyEquals(rhs->unhandle())) {
- return AddToUnion(lhs, result, size, region);
+ if (lhs->SimplyEquals(rhs)) {
+ return AddToUnion(lhs, result, size, zone);
}
return size;
}
@@ -926,9 +871,7 @@ int TypeImpl<Config>::IntersectAux(TypeHandle lhs, TypeHandle rhs,
// If the range is non-empty, the number bits in the bitset should be
// clear. Moreover, if we have a canonical range (such as Signed32),
// we want to produce a bitset rather than a range.
-template <class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeRangeAndBitset(
- RangeHandle range, bitset* bits, Region* region) {
+Type* Type::NormalizeRangeAndBitset(Type* range, bitset* bits, Zone* zone) {
// Fast path: If the bitset does not mention numbers, we can just keep the
// range.
bitset number_bits = BitsetType::NumberBits(*bits);
@@ -940,7 +883,7 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeRangeAndBitset(
// leave the bitset untouched.
bitset range_lub = SEMANTIC(range->BitsetLub());
if (BitsetType::Is(range_lub, *bits)) {
- return None(region);
+ return None();
}
// Slow path: reconcile the bitset range and the range.
@@ -966,17 +909,13 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeRangeAndBitset(
if (bitset_max > range_max) {
range_max = bitset_max;
}
- return RangeType::New(range_min, range_max,
- BitsetType::New(BitsetType::kNone, region), region);
+ return RangeType::New(range_min, range_max, BitsetType::kNone, zone);
}
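
A worked example for the cases above (hypothetical values): a bitset with no number bits leaves Range(0, 10) untouched; a bitset covering Signed32 subsumes Range(0, 10), so the function returns None and the bitset stands alone; and a bitset whose number bits span [-2^31, 2^31 - 1] next to Range(0, 2^32) takes the slow path, which widens the range to [-2^31, 2^32] so that the range alone can account for all the number values.
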
-
-template<class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union(
- TypeHandle type1, TypeHandle type2, Region* region) {
+Type* Type::Union(Type* type1, Type* type2, Zone* zone) {
// Fast case: bit sets.
if (type1->IsBitset() && type2->IsBitset()) {
- return BitsetType::New(type1->AsBitset() | type2->AsBitset(), region);
+ return BitsetType::New(type1->AsBitset() | type2->AsBitset());
}
// Fast case: top or bottom types.
@@ -997,63 +936,62 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union(
// Slow case: create union.
int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
- if (!AddIsSafe(size1, size2)) return Any(region);
+ if (!AddIsSafe(size1, size2)) return Any();
int size = size1 + size2;
- if (!AddIsSafe(size, 2)) return Any(region);
+ if (!AddIsSafe(size, 2)) return Any();
size += 2;
- UnionHandle result = UnionType::New(size, region);
+ Type* result_type = UnionType::New(size, zone);
+ UnionType* result = result_type->AsUnion();
size = 0;
// Compute the new bitset.
bitset new_bitset = SEMANTIC(type1->BitsetGlb() | type2->BitsetGlb());
// Deal with ranges.
- TypeHandle range = None(region);
- RangeType* range1 = type1->GetRange();
- RangeType* range2 = type2->GetRange();
+ Type* range = None();
+ Type* range1 = type1->GetRange();
+ Type* range2 = type2->GetRange();
if (range1 != NULL && range2 != NULL) {
- Limits lims = Limits::Union(Limits(range1), Limits(range2));
- RangeHandle union_range = RangeType::New(lims, representation, region);
- range = NormalizeRangeAndBitset(union_range, &new_bitset, region);
+ RangeType::Limits lims =
+ RangeType::Limits::Union(RangeType::Limits(range1->AsRange()),
+ RangeType::Limits(range2->AsRange()));
+ Type* union_range = RangeType::New(lims, representation, zone);
+ range = NormalizeRangeAndBitset(union_range, &new_bitset, zone);
} else if (range1 != NULL) {
- range = NormalizeRangeAndBitset(handle(range1), &new_bitset, region);
+ range = NormalizeRangeAndBitset(range1, &new_bitset, zone);
} else if (range2 != NULL) {
- range = NormalizeRangeAndBitset(handle(range2), &new_bitset, region);
+ range = NormalizeRangeAndBitset(range2, &new_bitset, zone);
}
new_bitset = SEMANTIC(new_bitset) | representation;
- TypeHandle bits = BitsetType::New(new_bitset, region);
+ Type* bits = BitsetType::New(new_bitset);
result->Set(size++, bits);
if (!range->IsNone()) result->Set(size++, range);
- size = AddToUnion(type1, result, size, region);
- size = AddToUnion(type2, result, size, region);
- return NormalizeUnion(result, size, region);
+ size = AddToUnion(type1, result, size, zone);
+ size = AddToUnion(type2, result, size, zone);
+ return NormalizeUnion(result_type, size, zone);
}
// Add [type] to [result] unless [type] is bitset, range, or already subsumed.
// Return new size of [result].
-template<class Config>
-int TypeImpl<Config>::AddToUnion(
- TypeHandle type, UnionHandle result, int size, Region* region) {
+int Type::AddToUnion(Type* type, UnionType* result, int size, Zone* zone) {
if (type->IsBitset() || type->IsRange()) return size;
if (type->IsUnion()) {
for (int i = 0, n = type->AsUnion()->Length(); i < n; ++i) {
- size = AddToUnion(type->AsUnion()->Get(i), result, size, region);
+ size = AddToUnion(type->AsUnion()->Get(i), result, size, zone);
}
return size;
}
for (int i = 0; i < size; ++i) {
- if (type->SemanticIs(result->Get(i)->unhandle())) return size;
+ if (type->SemanticIs(result->Get(i))) return size;
}
result->Set(size++, type);
return size;
}
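
For example (hypothetical inputs): passing a constant type that already SemanticIs one of the existing elements returns size unchanged, and bitsets and ranges are rejected up front because slots 0 and 1 of the union are reserved for them by the Intersect and Union builders above.
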
-
-template <class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeUnion(
- UnionHandle unioned, int size, Region* region) {
+Type* Type::NormalizeUnion(Type* union_type, int size, Zone* zone) {
+ UnionType* unioned = union_type->AsUnion();
DCHECK(size >= 1);
DCHECK(unioned->Get(0)->IsBitset());
// If the union has just one element, return it.
@@ -1069,13 +1007,13 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeUnion(
}
if (unioned->Get(1)->IsRange()) {
return RangeType::New(unioned->Get(1)->AsRange()->Min(),
- unioned->Get(1)->AsRange()->Max(), unioned->Get(0),
- region);
+ unioned->Get(1)->AsRange()->Max(),
+ unioned->Get(0)->AsBitset(), zone);
}
}
unioned->Shrink(size);
SLOW_DCHECK(unioned->Wellformed());
- return unioned;
+ return union_type;
}
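
Concretely (hypothetical two-element union): with a bitset in slot 0 and Range(0, 10) in slot 1, the union collapses to RangeType::New(0, 10, <slot-0 bitset>, zone); this is why the rewritten branch reads the representation through unioned->Get(0)->AsBitset() rather than passing a type handle.
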
@@ -1083,26 +1021,21 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeUnion(
// Component extraction
// static
-template <class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Representation(
- TypeHandle t, Region* region) {
- return BitsetType::New(t->Representation(), region);
+Type* Type::Representation(Type* t, Zone* zone) {
+ return BitsetType::New(t->Representation());
}
// static
-template <class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Semantic(
- TypeHandle t, Region* region) {
- return Intersect(t, BitsetType::New(BitsetType::kSemantic, region), region);
+Type* Type::Semantic(Type* t, Zone* zone) {
+ return Intersect(t, BitsetType::New(BitsetType::kSemantic), zone);
}
// -----------------------------------------------------------------------------
// Iteration.
-template<class Config>
-int TypeImpl<Config>::NumClasses() {
+int Type::NumClasses() {
DisallowHeapAllocation no_allocation;
if (this->IsClass()) {
return 1;
@@ -1117,9 +1050,7 @@ int TypeImpl<Config>::NumClasses() {
}
}
-
-template<class Config>
-int TypeImpl<Config>::NumConstants() {
+int Type::NumConstants() {
DisallowHeapAllocation no_allocation;
if (this->IsConstant()) {
return 1;
@@ -1134,10 +1065,8 @@ int TypeImpl<Config>::NumConstants() {
}
}
-
-template<class Config> template<class T>
-typename TypeImpl<Config>::TypeHandle
-TypeImpl<Config>::Iterator<T>::get_type() {
+template <class T>
+Type* Type::Iterator<T>::get_type() {
DCHECK(!Done());
return type_->IsUnion() ? type_->AsUnion()->Get(index_) : type_;
}
@@ -1145,46 +1074,40 @@ TypeImpl<Config>::Iterator<T>::get_type() {
// C++ cannot specialise nested templates, so we have to go through this
// contortion with an auxiliary template to simulate it.
-template<class Config, class T>
+template <class T>
struct TypeImplIteratorAux {
- static bool matches(typename TypeImpl<Config>::TypeHandle type);
- static i::Handle<T> current(typename TypeImpl<Config>::TypeHandle type);
+ static bool matches(Type* type);
+ static i::Handle<T> current(Type* type);
};
-template<class Config>
-struct TypeImplIteratorAux<Config, i::Map> {
- static bool matches(typename TypeImpl<Config>::TypeHandle type) {
- return type->IsClass();
- }
- static i::Handle<i::Map> current(typename TypeImpl<Config>::TypeHandle type) {
+template <>
+struct TypeImplIteratorAux<i::Map> {
+ static bool matches(Type* type) { return type->IsClass(); }
+ static i::Handle<i::Map> current(Type* type) {
return type->AsClass()->Map();
}
};
-template<class Config>
-struct TypeImplIteratorAux<Config, i::Object> {
- static bool matches(typename TypeImpl<Config>::TypeHandle type) {
- return type->IsConstant();
- }
- static i::Handle<i::Object> current(
- typename TypeImpl<Config>::TypeHandle type) {
+template <>
+struct TypeImplIteratorAux<i::Object> {
+ static bool matches(Type* type) { return type->IsConstant(); }
+ static i::Handle<i::Object> current(Type* type) {
return type->AsConstant()->Value();
}
};
-template<class Config> template<class T>
-bool TypeImpl<Config>::Iterator<T>::matches(TypeHandle type) {
- return TypeImplIteratorAux<Config, T>::matches(type);
+template <class T>
+bool Type::Iterator<T>::matches(Type* type) {
+ return TypeImplIteratorAux<T>::matches(type);
}
-template<class Config> template<class T>
-i::Handle<T> TypeImpl<Config>::Iterator<T>::Current() {
- return TypeImplIteratorAux<Config, T>::current(get_type());
+template <class T>
+i::Handle<T> Type::Iterator<T>::Current() {
+ return TypeImplIteratorAux<T>::current(get_type());
}
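
The contortion in miniature (standalone C++, independent of the V8 types; all names here are illustrative): an explicit specialization of a member template is ill-formed while the enclosing class template remains unspecialized, so per-T behaviour is delegated to a free auxiliary template that can be specialized on its own.

    template <class Config>
    struct Outer {
      template <class T>
      static bool matches();  // wants per-T behaviour
    };

    // Specializing Outer<Config>::matches<int> directly is not allowed while
    // Config remains a free parameter; the helper below can be specialized.
    template <class Config, class T>
    struct Aux {
      static bool matches() { return false; }
    };
    template <class Config>
    struct Aux<Config, int> {
      static bool matches() { return true; }
    };

    template <class Config>
    template <class T>
    bool Outer<Config>::matches() {
      return Aux<Config, T>::matches();
    }
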
-
-template<class Config> template<class T>
-void TypeImpl<Config>::Iterator<T>::Advance() {
+template <class T>
+void Type::Iterator<T>::Advance() {
DisallowHeapAllocation no_allocation;
++index_;
if (type_->IsUnion()) {
@@ -1199,59 +1122,9 @@ void TypeImpl<Config>::Iterator<T>::Advance() {
// -----------------------------------------------------------------------------
-// Conversion between low-level representations.
-
-template<class Config>
-template<class OtherType>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Convert(
- typename OtherType::TypeHandle type, Region* region) {
- if (type->IsBitset()) {
- return BitsetType::New(type->AsBitset(), region);
- } else if (type->IsClass()) {
- return ClassType::New(type->AsClass()->Map(), region);
- } else if (type->IsConstant()) {
- return ConstantType::New(type->AsConstant()->Value(), region);
- } else if (type->IsRange()) {
- return RangeType::New(
- type->AsRange()->Min(), type->AsRange()->Max(),
- BitsetType::New(REPRESENTATION(type->BitsetLub()), region), region);
- } else if (type->IsContext()) {
- TypeHandle outer = Convert<OtherType>(type->AsContext()->Outer(), region);
- return ContextType::New(outer, region);
- } else if (type->IsUnion()) {
- int length = type->AsUnion()->Length();
- UnionHandle unioned = UnionType::New(length, region);
- for (int i = 0; i < length; ++i) {
- TypeHandle t = Convert<OtherType>(type->AsUnion()->Get(i), region);
- unioned->Set(i, t);
- }
- return unioned;
- } else if (type->IsArray()) {
- TypeHandle element = Convert<OtherType>(type->AsArray()->Element(), region);
- return ArrayType::New(element, region);
- } else if (type->IsFunction()) {
- TypeHandle res = Convert<OtherType>(type->AsFunction()->Result(), region);
- TypeHandle rcv = Convert<OtherType>(type->AsFunction()->Receiver(), region);
- FunctionHandle function = FunctionType::New(
- res, rcv, type->AsFunction()->Arity(), region);
- for (int i = 0; i < function->Arity(); ++i) {
- TypeHandle param = Convert<OtherType>(
- type->AsFunction()->Parameter(i), region);
- function->InitParameter(i, param);
- }
- return function;
- } else {
- UNREACHABLE();
- return None(region);
- }
-}
-
-
-// -----------------------------------------------------------------------------
// Printing.
-template<class Config>
-const char* TypeImpl<Config>::BitsetType::Name(bitset bits) {
+const char* BitsetType::Name(bitset bits) {
switch (bits) {
case REPRESENTATION(kAny): return "Any";
#define RETURN_NAMED_REPRESENTATION_TYPE(type, value) \
@@ -1270,10 +1143,8 @@ const char* TypeImpl<Config>::BitsetType::Name(bitset bits) {
}
}
-
-template <class Config>
-void TypeImpl<Config>::BitsetType::Print(std::ostream& os, // NOLINT
- bitset bits) {
+void BitsetType::Print(std::ostream& os, // NOLINT
+ bitset bits) {
DisallowHeapAllocation no_allocation;
const char* name = Name(bits);
if (name != NULL) {
@@ -1309,9 +1180,7 @@ void TypeImpl<Config>::BitsetType::Print(std::ostream& os, // NOLINT
os << ")";
}
-
-template <class Config>
-void TypeImpl<Config>::PrintTo(std::ostream& os, PrintDimension dim) {
+void Type::PrintTo(std::ostream& os, PrintDimension dim) {
DisallowHeapAllocation no_allocation;
if (dim != REPRESENTATION_DIM) {
if (this->IsBitset()) {
@@ -1336,7 +1205,7 @@ void TypeImpl<Config>::PrintTo(std::ostream& os, PrintDimension dim) {
} else if (this->IsUnion()) {
os << "(";
for (int i = 0, n = this->AsUnion()->Length(); i < n; ++i) {
- TypeHandle type_i = this->AsUnion()->Get(i);
+ Type* type_i = this->AsUnion()->Get(i);
if (i > 0) os << " | ";
type_i->PrintTo(os, dim);
}
@@ -1357,6 +1226,14 @@ void TypeImpl<Config>::PrintTo(std::ostream& os, PrintDimension dim) {
}
os << ")->";
this->AsFunction()->Result()->PrintTo(os, dim);
+ } else if (this->IsTuple()) {
+ os << "<";
+ for (int i = 0, n = this->AsTuple()->Arity(); i < n; ++i) {
+ Type* type_i = this->AsTuple()->Element(i);
+ if (i > 0) os << ", ";
+ type_i->PrintTo(os, dim);
+ }
+ os << ">";
} else {
UNREACHABLE();
}
@@ -1369,38 +1246,38 @@ void TypeImpl<Config>::PrintTo(std::ostream& os, PrintDimension dim) {
#ifdef DEBUG
-template <class Config>
-void TypeImpl<Config>::Print() {
+void Type::Print() {
OFStream os(stdout);
PrintTo(os);
os << std::endl;
}
-template <class Config>
-void TypeImpl<Config>::BitsetType::Print(bitset bits) {
+void BitsetType::Print(bitset bits) {
OFStream os(stdout);
Print(os, bits);
os << std::endl;
}
#endif
+BitsetType::bitset BitsetType::SignedSmall() {
+ return i::SmiValuesAre31Bits() ? kSigned31 : kSigned32;
+}
-// -----------------------------------------------------------------------------
-// Instantiations.
+BitsetType::bitset BitsetType::UnsignedSmall() {
+ return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
+}
-template class TypeImpl<ZoneTypeConfig>;
-template class TypeImpl<ZoneTypeConfig>::Iterator<i::Map>;
-template class TypeImpl<ZoneTypeConfig>::Iterator<i::Object>;
+#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
+ Type* Type::Name(Isolate* isolate, Zone* zone) { \
+ return Class(i::handle(isolate->heap()->name##_map()), zone); \
+ }
+SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
+#undef CONSTRUCT_SIMD_TYPE
-template class TypeImpl<HeapTypeConfig>;
-template class TypeImpl<HeapTypeConfig>::Iterator<i::Map>;
-template class TypeImpl<HeapTypeConfig>::Iterator<i::Object>;
+// -----------------------------------------------------------------------------
+// Instantiations.
-template TypeImpl<ZoneTypeConfig>::TypeHandle
- TypeImpl<ZoneTypeConfig>::Convert<HeapType>(
- TypeImpl<HeapTypeConfig>::TypeHandle, TypeImpl<ZoneTypeConfig>::Region*);
-template TypeImpl<HeapTypeConfig>::TypeHandle
- TypeImpl<HeapTypeConfig>::Convert<Type>(
- TypeImpl<ZoneTypeConfig>::TypeHandle, TypeImpl<HeapTypeConfig>::Region*);
+template class Type::Iterator<i::Map>;
+template class Type::Iterator<i::Object>;
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index 9ce650d943..9984ad8378 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -143,14 +143,6 @@ namespace internal {
// bitsets. Bit 0 is reserved for tagging. Class is a heap pointer to the
// respective map. Only structured types require allocation.
// Note that the bitset representation is closed under both Union and Intersect.
-//
-// There are two type representations, using different allocation:
-//
-// - class Type (zone-allocated, for compiler and concurrent compilation)
-// - class HeapType (heap-allocated, for persistent types)
-//
-// Both provide the same API, and the Convert method can be used to interconvert
-// them. For zone types, no query method touches the heap, only constructors do.
// -----------------------------------------------------------------------------
@@ -159,20 +151,21 @@ namespace internal {
// clang-format off
#define MASK_BITSET_TYPE_LIST(V) \
- V(Representation, 0xff800000u) \
- V(Semantic, 0x007ffffeu)
+ V(Representation, 0xffc00000u) \
+ V(Semantic, 0x003ffffeu)
#define REPRESENTATION(k) ((k) & BitsetType::kRepresentation)
#define SEMANTIC(k) ((k) & BitsetType::kSemantic)
#define REPRESENTATION_BITSET_TYPE_LIST(V) \
V(None, 0) \
- V(UntaggedBit, 1u << 23 | kSemantic) \
- V(UntaggedIntegral8, 1u << 24 | kSemantic) \
- V(UntaggedIntegral16, 1u << 25 | kSemantic) \
- V(UntaggedIntegral32, 1u << 26 | kSemantic) \
- V(UntaggedFloat32, 1u << 27 | kSemantic) \
- V(UntaggedFloat64, 1u << 28 | kSemantic) \
+ V(UntaggedBit, 1u << 22 | kSemantic) \
+ V(UntaggedIntegral8, 1u << 23 | kSemantic) \
+ V(UntaggedIntegral16, 1u << 24 | kSemantic) \
+ V(UntaggedIntegral32, 1u << 25 | kSemantic) \
+ V(UntaggedFloat32, 1u << 26 | kSemantic) \
+ V(UntaggedFloat64, 1u << 27 | kSemantic) \
+ V(UntaggedSimd128, 1u << 28 | kSemantic) \
V(UntaggedPointer, 1u << 29 | kSemantic) \
V(TaggedSigned, 1u << 30 | kSemantic) \
V(TaggedPointer, 1u << 31 | kSemantic) \
@@ -267,337 +260,188 @@ namespace internal {
INTERNAL_BITSET_TYPE_LIST(V) \
SEMANTIC_BITSET_TYPE_LIST(V)
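
Arithmetic check on the resized masks in the previous hunk (bit 0 stays reserved for tagging): 0xffc00000 selects bits 22 through 31, growing the representation dimension from nine bits to ten, while 0x003ffffe selects bits 1 through 21 for the semantic dimension; shifting UntaggedBit through UntaggedFloat64 down one position is what frees bit 28 for the new UntaggedSimd128.
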
+class Type;
// -----------------------------------------------------------------------------
-// The abstract Type class, parameterized over the low-level representation.
-
-// struct Config {
-// typedef TypeImpl<Config> Type;
-// typedef Base;
-// typedef Struct;
-// typedef Range;
-// typedef Region;
-// template<class> struct Handle { typedef type; } // No template typedefs...
-//
-// template<class T> static Handle<T>::type null_handle();
-// template<class T> static Handle<T>::type handle(T* t); // !is_bitset(t)
-// template<class T> static Handle<T>::type cast(Handle<Type>::type);
-//
-// static bool is_bitset(Type*);
-// static bool is_class(Type*);
-// static bool is_struct(Type*, int tag);
-// static bool is_range(Type*);
-//
-// static bitset as_bitset(Type*);
-// static i::Handle<i::Map> as_class(Type*);
-// static Handle<Struct>::type as_struct(Type*);
-// static Handle<Range>::type as_range(Type*);
-//
-// static Type* from_bitset(bitset);
-// static Handle<Type>::type from_bitset(bitset, Region*);
-// static Handle<Type>::type from_class(i::Handle<Map>, Region*);
-// static Handle<Type>::type from_struct(Handle<Struct>::type, int tag);
-// static Handle<Type>::type from_range(Handle<Range>::type);
-//
-// static Handle<Struct>::type struct_create(int tag, int length, Region*);
-// static void struct_shrink(Handle<Struct>::type, int length);
-// static int struct_tag(Handle<Struct>::type);
-// static int struct_length(Handle<Struct>::type);
-// static Handle<Type>::type struct_get(Handle<Struct>::type, int);
-// static void struct_set(Handle<Struct>::type, int, Handle<Type>::type);
-// template<class V>
-// static i::Handle<V> struct_get_value(Handle<Struct>::type, int);
-// template<class V>
-// static void struct_set_value(Handle<Struct>::type, int, i::Handle<V>);
-//
-// static Handle<Range>::type range_create(Region*);
-// static int range_get_bitset(Handle<Range>::type);
-// static void range_set_bitset(Handle<Range>::type, int);
-// static double range_get_double(Handle<Range>::type, int);
-// static void range_set_double(Handle<Range>::type, int, double, Region*);
-// }
-template<class Config>
-class TypeImpl : public Config::Base {
- public:
- // Auxiliary types.
+// Bitset types (internal).
+class BitsetType {
+ public:
typedef uint32_t bitset; // Internal
- class BitsetType; // Internal
- class StructuralType; // Internal
- class UnionType; // Internal
-
- class ClassType;
- class ConstantType;
- class RangeType;
- class ContextType;
- class ArrayType;
- class FunctionType;
-
- typedef typename Config::template Handle<TypeImpl>::type TypeHandle;
- typedef typename Config::template Handle<ClassType>::type ClassHandle;
- typedef typename Config::template Handle<ConstantType>::type ConstantHandle;
- typedef typename Config::template Handle<RangeType>::type RangeHandle;
- typedef typename Config::template Handle<ContextType>::type ContextHandle;
- typedef typename Config::template Handle<ArrayType>::type ArrayHandle;
- typedef typename Config::template Handle<FunctionType>::type FunctionHandle;
- typedef typename Config::template Handle<UnionType>::type UnionHandle;
- typedef typename Config::Region Region;
-
- // Constructors.
-
- #define DEFINE_TYPE_CONSTRUCTOR(type, value) \
- static TypeImpl* type() { \
- return BitsetType::New(BitsetType::k##type); \
- } \
- static TypeHandle type(Region* region) { \
- return BitsetType::New(BitsetType::k##type, region); \
- }
- PROPER_BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
- #undef DEFINE_TYPE_CONSTRUCTOR
- static TypeImpl* SignedSmall() {
- return BitsetType::New(BitsetType::SignedSmall());
- }
- static TypeHandle SignedSmall(Region* region) {
- return BitsetType::New(BitsetType::SignedSmall(), region);
- }
- static TypeImpl* UnsignedSmall() {
- return BitsetType::New(BitsetType::UnsignedSmall());
- }
- static TypeHandle UnsignedSmall(Region* region) {
- return BitsetType::New(BitsetType::UnsignedSmall(), region);
- }
-
- static TypeHandle Class(i::Handle<i::Map> map, Region* region) {
- return ClassType::New(map, region);
- }
- static TypeHandle Constant(i::Handle<i::Object> value, Region* region) {
- return ConstantType::New(value, region);
- }
- static TypeHandle Range(double min, double max, Region* region) {
- return RangeType::New(
- min, max, BitsetType::New(REPRESENTATION(BitsetType::kTagged |
- BitsetType::kUntaggedNumber),
- region),
- region);
- }
- static TypeHandle Context(TypeHandle outer, Region* region) {
- return ContextType::New(outer, region);
- }
- static TypeHandle Array(TypeHandle element, Region* region) {
- return ArrayType::New(element, region);
- }
- static FunctionHandle Function(
- TypeHandle result, TypeHandle receiver, int arity, Region* region) {
- return FunctionType::New(result, receiver, arity, region);
- }
- static TypeHandle Function(TypeHandle result, Region* region) {
- return Function(result, Any(region), 0, region);
- }
- static TypeHandle Function(
- TypeHandle result, TypeHandle param0, Region* region) {
- FunctionHandle function = Function(result, Any(region), 1, region);
- function->InitParameter(0, param0);
- return function;
- }
- static TypeHandle Function(
- TypeHandle result, TypeHandle param0, TypeHandle param1, Region* region) {
- FunctionHandle function = Function(result, Any(region), 2, region);
- function->InitParameter(0, param0);
- function->InitParameter(1, param1);
- return function;
- }
- static TypeHandle Function(
- TypeHandle result, TypeHandle param0, TypeHandle param1,
- TypeHandle param2, Region* region) {
- FunctionHandle function = Function(result, Any(region), 3, region);
- function->InitParameter(0, param0);
- function->InitParameter(1, param1);
- function->InitParameter(2, param2);
- return function;
- }
- static TypeHandle Function(TypeHandle result, int arity, TypeHandle* params,
- Region* region) {
- FunctionHandle function = Function(result, Any(region), arity, region);
- for (int i = 0; i < arity; ++i) {
- function->InitParameter(i, params[i]);
- }
- return function;
- }
-
-#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
- static TypeHandle Name(Isolate* isolate, Region* region);
- SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
-#undef CONSTRUCT_SIMD_TYPE
+ enum : uint32_t {
+#define DECLARE_TYPE(type, value) k##type = (value),
+ BITSET_TYPE_LIST(DECLARE_TYPE)
+#undef DECLARE_TYPE
+ kUnusedEOL = 0
+ };
- static TypeHandle Union(TypeHandle type1, TypeHandle type2, Region* reg);
- static TypeHandle Intersect(TypeHandle type1, TypeHandle type2, Region* reg);
+ static bitset SignedSmall();
+ static bitset UnsignedSmall();
- static TypeHandle Of(double value, Region* region) {
- return Config::from_bitset(BitsetType::ExpandInternals(
- BitsetType::Lub(value)), region);
+ bitset Bitset() {
+ return static_cast<bitset>(reinterpret_cast<uintptr_t>(this) ^ 1u);
}
- static TypeHandle Of(i::Object* value, Region* region) {
- return Config::from_bitset(BitsetType::ExpandInternals(
- BitsetType::Lub(value)), region);
- }
- static TypeHandle Of(i::Handle<i::Object> value, Region* region) {
- return Of(*value, region);
+
+ static bool IsInhabited(bitset bits) {
+ return SEMANTIC(bits) != kNone && REPRESENTATION(bits) != kNone;
}
- // Extraction of components.
- static TypeHandle Representation(TypeHandle t, Region* region);
- static TypeHandle Semantic(TypeHandle t, Region* region);
+ static bool SemanticIsInhabited(bitset bits) {
+ return SEMANTIC(bits) != kNone;
+ }
- // Predicates.
- bool IsInhabited() { return BitsetType::IsInhabited(this->BitsetLub()); }
+ static bool Is(bitset bits1, bitset bits2) {
+ return (bits1 | bits2) == bits2;
+ }
- bool Is(TypeImpl* that) { return this == that || this->SlowIs(that); }
- template<class TypeHandle>
- bool Is(TypeHandle that) { return this->Is(*that); }
+ static double Min(bitset);
+ static double Max(bitset);
- bool Maybe(TypeImpl* that);
- template<class TypeHandle>
- bool Maybe(TypeHandle that) { return this->Maybe(*that); }
+ static bitset Glb(Type* type); // greatest lower bound that's a bitset
+ static bitset Glb(double min, double max);
+ static bitset Lub(Type* type); // least upper bound that's a bitset
+ static bitset Lub(i::Map* map);
+ static bitset Lub(i::Object* value);
+ static bitset Lub(double value);
+ static bitset Lub(double min, double max);
+ static bitset ExpandInternals(bitset bits);
- bool Equals(TypeImpl* that) { return this->Is(that) && that->Is(this); }
- template<class TypeHandle>
- bool Equals(TypeHandle that) { return this->Equals(*that); }
+ static const char* Name(bitset);
+ static void Print(std::ostream& os, bitset); // NOLINT
+#ifdef DEBUG
+ static void Print(bitset);
+#endif
- // Equivalent to Constant(val)->Is(this), but avoiding allocation.
- bool Contains(i::Object* val);
- bool Contains(i::Handle<i::Object> val) { return this->Contains(*val); }
+ static bitset NumberBits(bitset bits);
- // State-dependent versions of the above that consider subtyping between
- // a constant and its map class.
- inline static TypeHandle NowOf(i::Object* value, Region* region);
- static TypeHandle NowOf(i::Handle<i::Object> value, Region* region) {
- return NowOf(*value, region);
+ static bool IsBitset(Type* type) {
+ return reinterpret_cast<uintptr_t>(type) & 1;
}
- bool NowIs(TypeImpl* that);
- template<class TypeHandle>
- bool NowIs(TypeHandle that) { return this->NowIs(*that); }
- inline bool NowContains(i::Object* val);
- bool NowContains(i::Handle<i::Object> val) { return this->NowContains(*val); }
- bool NowStable();
+ static Type* NewForTesting(bitset bits) { return New(bits); }
- // Inspection.
+ private:
+ friend class Type;
- bool IsRange() { return Config::is_range(this); }
- bool IsClass() {
- return Config::is_class(this)
- || Config::is_struct(this, StructuralType::kClassTag);
- }
- bool IsConstant() {
- return Config::is_struct(this, StructuralType::kConstantTag);
- }
- bool IsContext() {
- return Config::is_struct(this, StructuralType::kContextTag);
- }
- bool IsArray() {
- return Config::is_struct(this, StructuralType::kArrayTag);
- }
- bool IsFunction() {
- return Config::is_struct(this, StructuralType::kFunctionTag);
+ static Type* New(bitset bits) {
+ return reinterpret_cast<Type*>(static_cast<uintptr_t>(bits | 1u));
}
- ClassType* AsClass() { return ClassType::cast(this); }
- ConstantType* AsConstant() { return ConstantType::cast(this); }
- RangeType* AsRange() { return RangeType::cast(this); }
- ContextType* AsContext() { return ContextType::cast(this); }
- ArrayType* AsArray() { return ArrayType::cast(this); }
- FunctionType* AsFunction() { return FunctionType::cast(this); }
+ struct Boundary {
+ bitset internal;
+ bitset external;
+ double min;
+ };
+ static const Boundary BoundariesArray[];
+ static inline const Boundary* Boundaries();
+ static inline size_t BoundariesSize();
+};
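
Spelled out, the smi-style tagging behind New, IsBitset and Bitset above (a minimal standalone sketch; it assumes only that zone-allocated TypeBase objects are at least two-byte aligned, leaving bit 0 free, and the function names are hypothetical):

    #include <stdint.h>

    using bitset = uint32_t;
    struct Type;  // opaque for this sketch

    // Encode: bitset constants travel as odd fake pointers (bit 0 set).
    Type* FromBitset(bitset bits) {
      return reinterpret_cast<Type*>(static_cast<uintptr_t>(bits | 1u));
    }
    // Classify: genuine TypeBase pointers are even, tagged bitsets are odd.
    bool IsBitsetWord(Type* type) {
      return (reinterpret_cast<uintptr_t>(type) & 1) != 0;
    }
    // Decode: xor clears exactly the tag bit that FromBitset set.
    bitset ToBitset(Type* type) {
      return static_cast<bitset>(reinterpret_cast<uintptr_t>(type) ^ 1u);
    }
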
- // Minimum and maximum of a numeric type.
- // These functions do not distinguish between -0 and +0. If the type equals
- // kNaN, they return NaN; otherwise kNaN is ignored. Only call these
- // functions on subtypes of Number.
- double Min();
- double Max();
+// -----------------------------------------------------------------------------
+// Superclass for non-bitset types (internal).
+class TypeBase {
+ protected:
+ friend class Type;
+
+ enum Kind {
+ kClass,
+ kConstant,
+ kContext,
+ kArray,
+ kFunction,
+ kTuple,
+ kUnion,
+ kRange
+ };
- // Extracts a range from the type: if the type is a range or a union
- // containing a range, that range is returned; otherwise, NULL is returned.
- RangeType* GetRange();
+ Kind kind() const { return kind_; }
+ explicit TypeBase(Kind kind) : kind_(kind) {}
- static bool IsInteger(double x) {
- return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
- }
- static bool IsInteger(i::Object* x) {
- return x->IsNumber() && IsInteger(x->Number());
+ static bool IsKind(Type* type, Kind kind) {
+ if (BitsetType::IsBitset(type)) return false;
+ TypeBase* base = reinterpret_cast<TypeBase*>(type);
+ return base->kind() == kind;
}
- int NumClasses();
- int NumConstants();
-
- template<class T> class Iterator;
- Iterator<i::Map> Classes() {
- if (this->IsBitset()) return Iterator<i::Map>();
- return Iterator<i::Map>(Config::handle(this));
- }
- Iterator<i::Object> Constants() {
- if (this->IsBitset()) return Iterator<i::Object>();
- return Iterator<i::Object>(Config::handle(this));
+ // The hacky conversion to/from Type*.
+ static Type* AsType(TypeBase* type) { return reinterpret_cast<Type*>(type); }
+ static TypeBase* FromType(Type* type) {
+ return reinterpret_cast<TypeBase*>(type);
}
- // Casting and conversion.
-
- static inline TypeImpl* cast(typename Config::Base* object);
+ private:
+ Kind kind_;
+};
- template<class OtherTypeImpl>
- static TypeHandle Convert(
- typename OtherTypeImpl::TypeHandle type, Region* region);
+// -----------------------------------------------------------------------------
+// Class types.
- // Printing.
+class ClassType : public TypeBase {
+ public:
+ i::Handle<i::Map> Map() { return map_; }
- enum PrintDimension { BOTH_DIMS, SEMANTIC_DIM, REPRESENTATION_DIM };
+ private:
+ friend class Type;
+ friend class BitsetType;
- void PrintTo(std::ostream& os, PrintDimension dim = BOTH_DIMS); // NOLINT
+ static Type* New(i::Handle<i::Map> map, Zone* zone) {
+ return AsType(new (zone->New(sizeof(ClassType)))
+ ClassType(BitsetType::Lub(*map), map));
+ }
-#ifdef DEBUG
- void Print();
-#endif
+ static ClassType* cast(Type* type) {
+ DCHECK(IsKind(type, kClass));
+ return static_cast<ClassType*>(FromType(type));
+ }
- bool IsUnionForTesting() { return IsUnion(); }
+ ClassType(BitsetType::bitset bitset, i::Handle<i::Map> map)
+ : TypeBase(kClass), bitset_(bitset), map_(map) {}
- protected:
- // Friends.
+ BitsetType::bitset Lub() { return bitset_; }
- template<class> friend class Iterator;
- template<class> friend class TypeImpl;
+ BitsetType::bitset bitset_;
+ Handle<i::Map> map_;
+};
- // Handle conversion.
+// -----------------------------------------------------------------------------
+// Constant types.
- template<class T>
- static typename Config::template Handle<T>::type handle(T* type) {
- return Config::handle(type);
- }
- TypeImpl* unhandle() { return this; }
+class ConstantType : public TypeBase {
+ public:
+ i::Handle<i::Object> Value() { return object_; }
- // Internal inspection.
+ private:
+ friend class Type;
+ friend class BitsetType;
- bool IsNone() { return this == None(); }
- bool IsAny() { return this == Any(); }
- bool IsBitset() { return Config::is_bitset(this); }
- bool IsUnion() { return Config::is_struct(this, StructuralType::kUnionTag); }
+ static Type* New(i::Handle<i::Object> value, Zone* zone) {
+ BitsetType::bitset bitset = BitsetType::Lub(*value);
+ return AsType(new (zone->New(sizeof(ConstantType)))
+ ConstantType(bitset, value));
+ }
- bitset AsBitset() {
- DCHECK(this->IsBitset());
- return static_cast<BitsetType*>(this)->Bitset();
+ static ConstantType* cast(Type* type) {
+ DCHECK(IsKind(type, kConstant));
+ return static_cast<ConstantType*>(FromType(type));
}
- UnionType* AsUnion() { return UnionType::cast(this); }
- bitset Representation();
+ ConstantType(BitsetType::bitset bitset, i::Handle<i::Object> object)
+ : TypeBase(kConstant), bitset_(bitset), object_(object) {}
- // Auxiliary functions.
- bool SemanticMaybe(TypeImpl* that);
+ BitsetType::bitset Lub() { return bitset_; }
- bitset BitsetGlb() { return BitsetType::Glb(this); }
- bitset BitsetLub() { return BitsetType::Lub(this); }
+ BitsetType::bitset bitset_;
+ Handle<i::Object> object_;
+};
+// TODO(neis): Also cache value if numerical.
+// TODO(neis): Allow restricting the representation.
- bool SlowIs(TypeImpl* that);
- bool SemanticIs(TypeImpl* that);
+// -----------------------------------------------------------------------------
+// Range types.
+class RangeType : public TypeBase {
+ public:
struct Limits {
double min;
double max;
@@ -609,154 +453,187 @@ class TypeImpl : public Config::Base {
static Limits Union(Limits lhs, Limits rhs);
};
- static bool Overlap(RangeType* lhs, RangeType* rhs);
- static bool Contains(RangeType* lhs, RangeType* rhs);
- static bool Contains(RangeType* range, ConstantType* constant);
- static bool Contains(RangeType* range, i::Object* val);
+ double Min() { return limits_.min; }
+ double Max() { return limits_.max; }
- static int UpdateRange(
- RangeHandle type, UnionHandle result, int size, Region* region);
-
- static Limits IntersectRangeAndBitset(TypeHandle range, TypeHandle bits,
- Region* region);
- static Limits ToLimits(bitset bits, Region* region);
-
- bool SimplyEquals(TypeImpl* that);
- template<class TypeHandle>
- bool SimplyEquals(TypeHandle that) { return this->SimplyEquals(*that); }
-
- static int AddToUnion(
- TypeHandle type, UnionHandle result, int size, Region* region);
- static int IntersectAux(TypeHandle type, TypeHandle other, UnionHandle result,
- int size, Limits* limits, Region* region);
- static TypeHandle NormalizeUnion(UnionHandle unioned, int size,
- Region* region);
- static TypeHandle NormalizeRangeAndBitset(RangeHandle range, bitset* bits,
- Region* region);
-};
+ private:
+ friend class Type;
+ friend class BitsetType;
+ friend class UnionType;
+ static Type* New(double min, double max, BitsetType::bitset representation,
+ Zone* zone) {
+ return New(Limits(min, max), representation, zone);
+ }
-// -----------------------------------------------------------------------------
-// Bitset types (internal).
+ static bool IsInteger(double x) {
+ return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
+ }
-template<class Config>
-class TypeImpl<Config>::BitsetType : public TypeImpl<Config> {
- protected:
- friend class TypeImpl<Config>;
+ static Type* New(Limits lim, BitsetType::bitset representation, Zone* zone) {
+ DCHECK(IsInteger(lim.min) && IsInteger(lim.max));
+ DCHECK(lim.min <= lim.max);
+ DCHECK(REPRESENTATION(representation) == representation);
+ BitsetType::bitset bits =
+ SEMANTIC(BitsetType::Lub(lim.min, lim.max)) | representation;
- enum : uint32_t {
- #define DECLARE_TYPE(type, value) k##type = (value),
- BITSET_TYPE_LIST(DECLARE_TYPE)
- #undef DECLARE_TYPE
- kUnusedEOL = 0
- };
+ return AsType(new (zone->New(sizeof(RangeType))) RangeType(bits, lim));
+ }
- static bitset SignedSmall();
- static bitset UnsignedSmall();
+ static RangeType* cast(Type* type) {
+ DCHECK(IsKind(type, kRange));
+ return static_cast<RangeType*>(FromType(type));
+ }
- bitset Bitset() { return Config::as_bitset(this); }
+ RangeType(BitsetType::bitset bitset, Limits limits)
+ : TypeBase(kRange), bitset_(bitset), limits_(limits) {}
- static TypeImpl* New(bitset bits) {
- return Config::from_bitset(bits);
- }
- static TypeHandle New(bitset bits, Region* region) {
- return Config::from_bitset(bits, region);
- }
+ BitsetType::bitset Lub() { return bitset_; }
- static bool IsInhabited(bitset bits) {
- return SEMANTIC(bits) != kNone && REPRESENTATION(bits) != kNone;
- }
+ BitsetType::bitset bitset_;
+ Limits limits_;
+};
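
For context, the public entry point is Type::Range(min, max, zone) further down in this header, which forwards here with REPRESENTATION(kTagged | kUntaggedNumber); the DCHECKs above then enforce integer endpoints (infinities pass the nearbyint test) and a representation-only bitset argument.
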
- static bool SemanticIsInhabited(bitset bits) {
- return SEMANTIC(bits) != kNone;
+// -----------------------------------------------------------------------------
+// Context types.
+
+class ContextType : public TypeBase {
+ public:
+ Type* Outer() { return outer_; }
+
+ private:
+ friend class Type;
+
+ static Type* New(Type* outer, Zone* zone) {
+ return AsType(new (zone->New(sizeof(ContextType))) ContextType(outer));
}
- static bool Is(bitset bits1, bitset bits2) {
- return (bits1 | bits2) == bits2;
+ static ContextType* cast(Type* type) {
+ DCHECK(IsKind(type, kContext));
+ return static_cast<ContextType*>(FromType(type));
}
- static double Min(bitset);
- static double Max(bitset);
+ explicit ContextType(Type* outer) : TypeBase(kContext), outer_(outer) {}
- static bitset Glb(TypeImpl* type); // greatest lower bound that's a bitset
- static bitset Glb(double min, double max);
- static bitset Lub(TypeImpl* type); // least upper bound that's a bitset
- static bitset Lub(i::Map* map);
- static bitset Lub(i::Object* value);
- static bitset Lub(double value);
- static bitset Lub(double min, double max);
- static bitset ExpandInternals(bitset bits);
+ Type* outer_;
+};
- static const char* Name(bitset);
- static void Print(std::ostream& os, bitset); // NOLINT
-#ifdef DEBUG
- static void Print(bitset);
-#endif
+// -----------------------------------------------------------------------------
+// Array types.
- static bitset NumberBits(bitset bits);
+class ArrayType : public TypeBase {
+ public:
+ Type* Element() { return element_; }
private:
- struct Boundary {
- bitset internal;
- bitset external;
- double min;
- };
- static const Boundary BoundariesArray[];
- static inline const Boundary* Boundaries();
- static inline size_t BoundariesSize();
-};
+ friend class Type;
+
+ explicit ArrayType(Type* element) : TypeBase(kArray), element_(element) {}
+
+ static Type* New(Type* element, Zone* zone) {
+ return AsType(new (zone->New(sizeof(ArrayType))) ArrayType(element));
+ }
+
+ static ArrayType* cast(Type* type) {
+ DCHECK(IsKind(type, kArray));
+ return static_cast<ArrayType*>(FromType(type));
+ }
+ Type* element_;
+};
// -----------------------------------------------------------------------------
-// Superclass for non-bitset types (internal).
-// Contains a tag and a variable number of type or value fields.
+// Superclass for types with variable number of type fields.
+class StructuralType : public TypeBase {
+ public:
+ int LengthForTesting() { return Length(); }
-template<class Config>
-class TypeImpl<Config>::StructuralType : public TypeImpl<Config> {
protected:
- template<class> friend class TypeImpl;
- friend struct ZoneTypeConfig; // For tags.
- friend struct HeapTypeConfig;
-
- enum Tag {
- kClassTag,
- kConstantTag,
- kContextTag,
- kArrayTag,
- kFunctionTag,
- kUnionTag
- };
+ friend class Type;
- int Length() {
- return Config::struct_length(Config::as_struct(this));
- }
- TypeHandle Get(int i) {
+ int Length() { return length_; }
+
+ Type* Get(int i) {
DCHECK(0 <= i && i < this->Length());
- return Config::struct_get(Config::as_struct(this), i);
+ return elements_[i];
}
- void Set(int i, TypeHandle type) {
+
+ void Set(int i, Type* type) {
DCHECK(0 <= i && i < this->Length());
- Config::struct_set(Config::as_struct(this), i, type);
+ elements_[i] = type;
}
+
void Shrink(int length) {
DCHECK(2 <= length && length <= this->Length());
- Config::struct_shrink(Config::as_struct(this), length);
+ length_ = length;
}
- template<class V> i::Handle<V> GetValue(int i) {
- DCHECK(0 <= i && i < this->Length());
- return Config::template struct_get_value<V>(Config::as_struct(this), i);
+
+ StructuralType(Kind kind, int length, i::Zone* zone)
+ : TypeBase(kind), length_(length) {
+ elements_ = reinterpret_cast<Type**>(zone->New(sizeof(Type*) * length));
}
- template<class V> void SetValue(int i, i::Handle<V> x) {
- DCHECK(0 <= i && i < this->Length());
- Config::struct_set_value(Config::as_struct(this), i, x);
+
+ private:
+ int length_;
+ Type** elements_;
+};
+
+// -----------------------------------------------------------------------------
+// Function types.
+
+class FunctionType : public StructuralType {
+ public:
+ int Arity() { return this->Length() - 2; }
+ Type* Result() { return this->Get(0); }
+ Type* Receiver() { return this->Get(1); }
+ Type* Parameter(int i) { return this->Get(2 + i); }
+
+ void InitParameter(int i, Type* type) { this->Set(2 + i, type); }
+
+ private:
+ friend class Type;
+
+ FunctionType(Type* result, Type* receiver, int arity, Zone* zone)
+ : StructuralType(kFunction, 2 + arity, zone) {
+ Set(0, result);
+ Set(1, receiver);
+ }
+
+ static Type* New(Type* result, Type* receiver, int arity, Zone* zone) {
+ return AsType(new (zone->New(sizeof(FunctionType)))
+ FunctionType(result, receiver, arity, zone));
}
- static TypeHandle New(Tag tag, int length, Region* region) {
- DCHECK(1 <= length);
- return Config::from_struct(Config::struct_create(tag, length, region));
+ static FunctionType* cast(Type* type) {
+ DCHECK(IsKind(type, kFunction));
+ return static_cast<FunctionType*>(FromType(type));
}
};
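
Putting FunctionType together through the public helpers defined later in this header (hypothetical caller and parameter types; the zone comes from outside):

    Type* BuildSignature(Zone* zone) {
      Type* sig = Type::Function(Type::Number(), Type::Any(), 2, zone);
      sig->AsFunction()->InitParameter(0, Type::Number());  // slot 2
      sig->AsFunction()->InitParameter(1, Type::String());  // slot 3
      // Arity() == Length() - 2 == 2; Result() and Receiver() occupy
      // slots 0 and 1.
      return sig;
    }
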
+// -----------------------------------------------------------------------------
+// Tuple types.
+
+class TupleType : public StructuralType {
+ public:
+ int Arity() { return this->Length(); }
+ Type* Element(int i) { return this->Get(i); }
+
+ void InitElement(int i, Type* type) { this->Set(i, type); }
+
+ private:
+ friend class Type;
+
+ TupleType(int length, Zone* zone) : StructuralType(kTuple, length, zone) {}
+
+ static Type* New(int length, Zone* zone) {
+ return AsType(new (zone->New(sizeof(TupleType))) TupleType(length, zone));
+ }
+
+ static TupleType* cast(Type* type) {
+ DCHECK(IsKind(type, kTuple));
+ return static_cast<TupleType*>(FromType(type));
+ }
+};
// -----------------------------------------------------------------------------
// Union types (internal).
@@ -765,420 +642,329 @@ class TypeImpl<Config>::StructuralType : public TypeImpl<Config> {
// - at most one field is a bitset, and it must go into index 0
// - no field is a union
// - no field is a subtype of any other field
-template<class Config>
-class TypeImpl<Config>::UnionType : public StructuralType {
- public:
- static UnionHandle New(int length, Region* region) {
- return Config::template cast<UnionType>(
- StructuralType::New(StructuralType::kUnionTag, length, region));
+class UnionType : public StructuralType {
+ private:
+ friend Type;
+ friend BitsetType;
+
+ UnionType(int length, Zone* zone) : StructuralType(kUnion, length, zone) {}
+
+ static Type* New(int length, Zone* zone) {
+ return AsType(new (zone->New(sizeof(UnionType))) UnionType(length, zone));
}
- static UnionType* cast(TypeImpl* type) {
- DCHECK(type->IsUnion());
- return static_cast<UnionType*>(type);
+ static UnionType* cast(Type* type) {
+ DCHECK(IsKind(type, kUnion));
+ return static_cast<UnionType*>(FromType(type));
}
bool Wellformed();
};
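
For concreteness, a union satisfying all three invariants above (hypothetical contents): slot 0 holds the only bitset, slot 1 holds Range(0, 10), slot 2 holds a Class type, and no slot is semantically contained in another; this is what SLOW_DCHECK(unioned->Wellformed()) verifies in NormalizeUnion.
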
+class Type {
+ public:
+ typedef BitsetType::bitset bitset; // Internal
-// -----------------------------------------------------------------------------
-// Class types.
+// Constructors.
+#define DEFINE_TYPE_CONSTRUCTOR(type, value) \
+ static Type* type() { return BitsetType::New(BitsetType::k##type); }
+ PROPER_BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
+#undef DEFINE_TYPE_CONSTRUCTOR
-template<class Config>
-class TypeImpl<Config>::ClassType : public StructuralType {
- public:
- i::Handle<i::Map> Map() {
- return Config::is_class(this) ? Config::as_class(this) :
- this->template GetValue<i::Map>(1);
+ static Type* SignedSmall() {
+ return BitsetType::New(BitsetType::SignedSmall());
}
-
- static ClassHandle New(i::Handle<i::Map> map, Region* region) {
- ClassHandle type =
- Config::template cast<ClassType>(Config::from_class(map, region));
- if (!type->IsClass()) {
- type = Config::template cast<ClassType>(
- StructuralType::New(StructuralType::kClassTag, 2, region));
- type->Set(0, BitsetType::New(BitsetType::Lub(*map), region));
- type->SetValue(1, map);
- }
- return type;
+ static Type* UnsignedSmall() {
+ return BitsetType::New(BitsetType::UnsignedSmall());
}
- static ClassType* cast(TypeImpl* type) {
- DCHECK(type->IsClass());
- return static_cast<ClassType*>(type);
+ static Type* Class(i::Handle<i::Map> map, Zone* zone) {
+ return ClassType::New(map, zone);
}
-
- private:
- template<class> friend class TypeImpl;
- bitset Lub() {
- return Config::is_class(this) ?
- BitsetType::Lub(*Config::as_class(this)) :
- this->Get(0)->AsBitset();
+ static Type* Constant(i::Handle<i::Object> value, Zone* zone) {
+ return ConstantType::New(value, zone);
+ }
+ static Type* Range(double min, double max, Zone* zone) {
+ return RangeType::New(min, max, REPRESENTATION(BitsetType::kTagged |
+ BitsetType::kUntaggedNumber),
+ zone);
+ }
+ static Type* Context(Type* outer, Zone* zone) {
+ return ContextType::New(outer, zone);
+ }
+ static Type* Array(Type* element, Zone* zone) {
+ return ArrayType::New(element, zone);
+ }
+ static Type* Function(Type* result, Type* receiver, int arity, Zone* zone) {
+ return FunctionType::New(result, receiver, arity, zone);
+ }
+ static Type* Function(Type* result, Zone* zone) {
+ return Function(result, Any(), 0, zone);
+ }
+ static Type* Function(Type* result, Type* param0, Zone* zone) {
+ Type* function = Function(result, Any(), 1, zone);
+ function->AsFunction()->InitParameter(0, param0);
+ return function;
+ }
+ static Type* Function(Type* result, Type* param0, Type* param1, Zone* zone) {
+ Type* function = Function(result, Any(), 2, zone);
+ function->AsFunction()->InitParameter(0, param0);
+ function->AsFunction()->InitParameter(1, param1);
+ return function;
+ }
+ static Type* Function(Type* result, Type* param0, Type* param1, Type* param2,
+ Zone* zone) {
+ Type* function = Function(result, Any(), 3, zone);
+ function->AsFunction()->InitParameter(0, param0);
+ function->AsFunction()->InitParameter(1, param1);
+ function->AsFunction()->InitParameter(2, param2);
+ return function;
+ }
+ static Type* Function(Type* result, int arity, Type** params, Zone* zone) {
+ Type* function = Function(result, Any(), arity, zone);
+ for (int i = 0; i < arity; ++i) {
+ function->AsFunction()->InitParameter(i, params[i]);
+ }
+ return function;
+ }
+ static Type* Tuple(Type* first, Type* second, Type* third, Zone* zone) {
+ Type* tuple = TupleType::New(3, zone);
+ tuple->AsTuple()->InitElement(0, first);
+ tuple->AsTuple()->InitElement(1, second);
+ tuple->AsTuple()->InitElement(2, third);
+ return tuple;
}
-};
+#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
+ static Type* Name(Isolate* isolate, Zone* zone);
+ SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
+#undef CONSTRUCT_SIMD_TYPE
-// -----------------------------------------------------------------------------
-// Constant types.
+  static Type* Union(Type* type1, Type* type2, Zone* zone);
+  static Type* Intersect(Type* type1, Type* type2, Zone* zone);
-template<class Config>
-class TypeImpl<Config>::ConstantType : public StructuralType {
- public:
- i::Handle<i::Object> Value() { return this->template GetValue<i::Object>(1); }
-
- static ConstantHandle New(i::Handle<i::Object> value, Region* region) {
- ConstantHandle type = Config::template cast<ConstantType>(
- StructuralType::New(StructuralType::kConstantTag, 2, region));
- type->Set(0, BitsetType::New(BitsetType::Lub(*value), region));
- type->SetValue(1, value);
- return type;
+ static Type* Of(double value, Zone* zone) {
+ return BitsetType::New(BitsetType::ExpandInternals(BitsetType::Lub(value)));
}
-
- static ConstantType* cast(TypeImpl* type) {
- DCHECK(type->IsConstant());
- return static_cast<ConstantType*>(type);
+ static Type* Of(i::Object* value, Zone* zone) {
+ return BitsetType::New(BitsetType::ExpandInternals(BitsetType::Lub(value)));
+ }
+ static Type* Of(i::Handle<i::Object> value, Zone* zone) {
+ return Of(*value, zone);
}
- private:
- template<class> friend class TypeImpl;
- bitset Lub() { return this->Get(0)->AsBitset(); }
-};
-// TODO(neis): Also cache value if numerical.
-// TODO(neis): Allow restricting the representation.
+ // Extraction of components.
+ static Type* Representation(Type* t, Zone* zone);
+ static Type* Semantic(Type* t, Zone* zone);
+ // Predicates.
+ bool IsInhabited() { return BitsetType::IsInhabited(this->BitsetLub()); }
-// -----------------------------------------------------------------------------
-// Range types.
+ bool Is(Type* that) { return this == that || this->SlowIs(that); }
+ bool Maybe(Type* that);
+ bool Equals(Type* that) { return this->Is(that) && that->Is(this); }
-template <class Config>
-class TypeImpl<Config>::RangeType : public TypeImpl<Config> {
- public:
- double Min() { return Config::range_get_double(Config::as_range(this), 0); }
- double Max() { return Config::range_get_double(Config::as_range(this), 1); }
-
- static RangeHandle New(double min, double max, TypeHandle representation,
- Region* region) {
- DCHECK(IsInteger(min) && IsInteger(max));
- DCHECK(min <= max);
- bitset representation_bits = representation->AsBitset();
- DCHECK(REPRESENTATION(representation_bits) == representation_bits);
-
- typename Config::template Handle<typename Config::Range>::type range =
- Config::range_create(region);
-
- bitset bits = SEMANTIC(BitsetType::Lub(min, max)) | representation_bits;
- Config::range_set_bitset(range, bits);
- Config::range_set_double(range, 0, min, region);
- Config::range_set_double(range, 1, max, region);
- return Config::template cast<RangeType>(Config::from_range(range));
- }
+ // Equivalent to Constant(val)->Is(this), but avoiding allocation.
+ bool Contains(i::Object* val);
+ bool Contains(i::Handle<i::Object> val) { return this->Contains(*val); }
- static RangeHandle New(Limits lim, bitset representation, Region* region) {
- return New(lim.min, lim.max, BitsetType::New(representation, region),
- region);
+ // State-dependent versions of the above that consider subtyping between
+ // a constant and its map class.
+ static Type* NowOf(i::Object* value, Zone* zone);
+ static Type* NowOf(i::Handle<i::Object> value, Zone* zone) {
+ return NowOf(*value, zone);
}
+ bool NowIs(Type* that);
+ bool NowContains(i::Object* val);
+ bool NowContains(i::Handle<i::Object> val) { return this->NowContains(*val); }
- static RangeType* cast(TypeImpl* type) {
- DCHECK(type->IsRange());
- return static_cast<RangeType*>(type);
- }
+ bool NowStable();
- private:
- template<class> friend class TypeImpl;
- bitset Lub() {
- return Config::range_get_bitset(Config::as_range(this));
- }
-};
+ // Inspection.
+ bool IsRange() { return IsKind(TypeBase::kRange); }
+ bool IsClass() { return IsKind(TypeBase::kClass); }
+ bool IsConstant() { return IsKind(TypeBase::kConstant); }
+ bool IsContext() { return IsKind(TypeBase::kContext); }
+ bool IsArray() { return IsKind(TypeBase::kArray); }
+ bool IsFunction() { return IsKind(TypeBase::kFunction); }
+ bool IsTuple() { return IsKind(TypeBase::kTuple); }
+ ClassType* AsClass() { return ClassType::cast(this); }
+ ConstantType* AsConstant() { return ConstantType::cast(this); }
+ RangeType* AsRange() { return RangeType::cast(this); }
+ ContextType* AsContext() { return ContextType::cast(this); }
+ ArrayType* AsArray() { return ArrayType::cast(this); }
+ FunctionType* AsFunction() { return FunctionType::cast(this); }
+ TupleType* AsTuple() { return TupleType::cast(this); }
-// -----------------------------------------------------------------------------
-// Context types.
+ // Minimum and maximum of a numeric type.
+ // These functions do not distinguish between -0 and +0. If the type equals
+ // kNaN, they return NaN; otherwise kNaN is ignored. Only call these
+ // functions on subtypes of Number.
+ double Min();
+ double Max();
-template<class Config>
-class TypeImpl<Config>::ContextType : public StructuralType {
- public:
- TypeHandle Outer() { return this->Get(0); }
+ // Extracts a range from the type: if the type is a range or a union
+ // containing a range, that range is returned; otherwise, NULL is returned.
+ Type* GetRange();
- static ContextHandle New(TypeHandle outer, Region* region) {
- ContextHandle type = Config::template cast<ContextType>(
- StructuralType::New(StructuralType::kContextTag, 1, region));
- type->Set(0, outer);
- return type;
+ static bool IsInteger(i::Object* x);
+ static bool IsInteger(double x) {
+ return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
}
- static ContextType* cast(TypeImpl* type) {
- DCHECK(type->IsContext());
- return static_cast<ContextType*>(type);
- }
-};
+ int NumClasses();
+ int NumConstants();
+ template <class T>
+ class Iterator {
+ public:
+ bool Done() const { return index_ < 0; }
+ i::Handle<T> Current();
+ void Advance();
-// -----------------------------------------------------------------------------
-// Array types.
+ private:
+ friend class Type;
-template<class Config>
-class TypeImpl<Config>::ArrayType : public StructuralType {
- public:
- TypeHandle Element() { return this->Get(0); }
+ Iterator() : index_(-1) {}
+ explicit Iterator(Type* type) : type_(type), index_(-1) { Advance(); }
- static ArrayHandle New(TypeHandle element, Region* region) {
- ArrayHandle type = Config::template cast<ArrayType>(
- StructuralType::New(StructuralType::kArrayTag, 1, region));
- type->Set(0, element);
- return type;
- }
+ inline bool matches(Type* type);
+ inline Type* get_type();
+
+ Type* type_;
+ int index_;
+ };
- static ArrayType* cast(TypeImpl* type) {
- DCHECK(type->IsArray());
- return static_cast<ArrayType*>(type);
+ Iterator<i::Map> Classes() {
+ if (this->IsBitset()) return Iterator<i::Map>();
+ return Iterator<i::Map>(this);
+ }
+ Iterator<i::Object> Constants() {
+ if (this->IsBitset()) return Iterator<i::Object>();
+ return Iterator<i::Object>(this);
}
-};
+ // Printing.
-// -----------------------------------------------------------------------------
-// Function types.
+ enum PrintDimension { BOTH_DIMS, SEMANTIC_DIM, REPRESENTATION_DIM };
-template<class Config>
-class TypeImpl<Config>::FunctionType : public StructuralType {
- public:
- int Arity() { return this->Length() - 2; }
- TypeHandle Result() { return this->Get(0); }
- TypeHandle Receiver() { return this->Get(1); }
- TypeHandle Parameter(int i) { return this->Get(2 + i); }
-
- void InitParameter(int i, TypeHandle type) { this->Set(2 + i, type); }
-
- static FunctionHandle New(
- TypeHandle result, TypeHandle receiver, int arity, Region* region) {
- FunctionHandle type = Config::template cast<FunctionType>(
- StructuralType::New(StructuralType::kFunctionTag, 2 + arity, region));
- type->Set(0, result);
- type->Set(1, receiver);
- return type;
- }
+ void PrintTo(std::ostream& os, PrintDimension dim = BOTH_DIMS); // NOLINT
- static FunctionType* cast(TypeImpl* type) {
- DCHECK(type->IsFunction());
- return static_cast<FunctionType*>(type);
- }
-};
+#ifdef DEBUG
+ void Print();
+#endif
+ // Helpers for testing.
+ bool IsBitsetForTesting() { return IsBitset(); }
+ bool IsUnionForTesting() { return IsUnion(); }
+ bitset AsBitsetForTesting() { return AsBitset(); }
+ UnionType* AsUnionForTesting() { return AsUnion(); }
-// -----------------------------------------------------------------------------
-// Type iterators.
+ private:
+ // Friends.
+ template <class>
+ friend class Iterator;
+ friend BitsetType;
+ friend UnionType;
-template<class Config> template<class T>
-class TypeImpl<Config>::Iterator {
- public:
- bool Done() const { return index_ < 0; }
- i::Handle<T> Current();
- void Advance();
+ // Internal inspection.
+ bool IsKind(TypeBase::Kind kind) { return TypeBase::IsKind(this, kind); }
- private:
- template<class> friend class TypeImpl;
+ bool IsNone() { return this == None(); }
+ bool IsAny() { return this == Any(); }
+ bool IsBitset() { return BitsetType::IsBitset(this); }
+ bool IsUnion() { return IsKind(TypeBase::kUnion); }
- Iterator() : index_(-1) {}
- explicit Iterator(TypeHandle type) : type_(type), index_(-1) {
- Advance();
+ bitset AsBitset() {
+ DCHECK(this->IsBitset());
+ return reinterpret_cast<BitsetType*>(this)->Bitset();
}
+ UnionType* AsUnion() { return UnionType::cast(this); }
- inline bool matches(TypeHandle type);
- inline TypeHandle get_type();
-
- TypeHandle type_;
- int index_;
-};
+ bitset Representation();
+ // Auxiliary functions.
+ bool SemanticMaybe(Type* that);
-// -----------------------------------------------------------------------------
-// Zone-allocated types; they are either (odd) integers to represent bitsets, or
-// (even) pointers to structures for everything else.
-
-struct ZoneTypeConfig {
- typedef TypeImpl<ZoneTypeConfig> Type;
- class Base {};
- typedef void* Struct;
- // Hack: the Struct and Range types can be aliased in memory, the first
- // pointer word of each both must be the tag (kRangeStructTag for Range,
- // anything else for Struct) so that we can differentiate them.
- struct Range {
- void* tag;
- int bitset;
- double limits[2];
- };
- typedef i::Zone Region;
- template<class T> struct Handle { typedef T* type; };
-
- static const int kRangeStructTag = 0x1000;
-
- template<class T> static inline T* null_handle() { return nullptr; }
- template<class T> static inline T* handle(T* type);
- template<class T> static inline T* cast(Type* type);
-
- static inline bool is_bitset(Type* type);
- static inline bool is_class(Type* type);
- static inline bool is_struct(Type* type, int tag);
- static inline bool is_range(Type* type);
-
- static inline Type::bitset as_bitset(Type* type);
- static inline i::Handle<i::Map> as_class(Type* type);
- static inline Struct* as_struct(Type* type);
- static inline Range* as_range(Type* type);
-
- static inline Type* from_bitset(Type::bitset);
- static inline Type* from_bitset(Type::bitset, Zone* zone);
- static inline Type* from_class(i::Handle<i::Map> map, Zone* zone);
- static inline Type* from_struct(Struct* structured);
- static inline Type* from_range(Range* range);
-
- static inline Struct* struct_create(int tag, int length, Zone* zone);
- static inline void struct_shrink(Struct* structure, int length);
- static inline int struct_tag(Struct* structure);
- static inline int struct_length(Struct* structure);
- static inline Type* struct_get(Struct* structure, int i);
- static inline void struct_set(Struct* structure, int i, Type* type);
- template<class V>
- static inline i::Handle<V> struct_get_value(Struct* structure, int i);
- template<class V> static inline void struct_set_value(
- Struct* structure, int i, i::Handle<V> x);
-
- static inline Range* range_create(Zone* zone);
- static inline int range_get_bitset(Range* range);
- static inline void range_set_bitset(Range* range, int);
- static inline double range_get_double(Range*, int index);
- static inline void range_set_double(Range*, int index, double value, Zone*);
-};
+ bitset BitsetGlb() { return BitsetType::Glb(this); }
+ bitset BitsetLub() { return BitsetType::Lub(this); }
-typedef TypeImpl<ZoneTypeConfig> Type;
+ bool SlowIs(Type* that);
+ bool SemanticIs(Type* that);
+ static bool Overlap(RangeType* lhs, RangeType* rhs);
+ static bool Contains(RangeType* lhs, RangeType* rhs);
+ static bool Contains(RangeType* range, ConstantType* constant);
+ static bool Contains(RangeType* range, i::Object* val);
-// -----------------------------------------------------------------------------
-// Heap-allocated types; either smis for bitsets, maps for classes, boxes for
-// constants, or fixed arrays for unions.
+ static int UpdateRange(Type* type, UnionType* result, int size, Zone* zone);
-struct HeapTypeConfig {
- typedef TypeImpl<HeapTypeConfig> Type;
- typedef i::Object Base;
- typedef i::FixedArray Struct;
- typedef i::FixedArray Range;
- typedef i::Isolate Region;
- template<class T> struct Handle { typedef i::Handle<T> type; };
+ static RangeType::Limits IntersectRangeAndBitset(Type* range, Type* bits,
+ Zone* zone);
+ static RangeType::Limits ToLimits(bitset bits, Zone* zone);
- static const int kRangeStructTag = 0xffff;
+ bool SimplyEquals(Type* that);
- template<class T> static inline i::Handle<T> null_handle() {
- return i::Handle<T>();
- }
- template<class T> static inline i::Handle<T> handle(T* type);
- template<class T> static inline i::Handle<T> cast(i::Handle<Type> type);
-
- static inline bool is_bitset(Type* type);
- static inline bool is_class(Type* type);
- static inline bool is_struct(Type* type, int tag);
- static inline bool is_range(Type* type);
-
- static inline Type::bitset as_bitset(Type* type);
- static inline i::Handle<i::Map> as_class(Type* type);
- static inline i::Handle<Struct> as_struct(Type* type);
- static inline i::Handle<Range> as_range(Type* type);
-
- static inline Type* from_bitset(Type::bitset);
- static inline i::Handle<Type> from_bitset(Type::bitset, Isolate* isolate);
- static inline i::Handle<Type> from_class(
- i::Handle<i::Map> map, Isolate* isolate);
- static inline i::Handle<Type> from_struct(i::Handle<Struct> structure);
- static inline i::Handle<Type> from_range(i::Handle<Range> range);
-
- static inline i::Handle<Struct> struct_create(
- int tag, int length, Isolate* isolate);
- static inline void struct_shrink(i::Handle<Struct> structure, int length);
- static inline int struct_tag(i::Handle<Struct> structure);
- static inline int struct_length(i::Handle<Struct> structure);
- static inline i::Handle<Type> struct_get(i::Handle<Struct> structure, int i);
- static inline void struct_set(
- i::Handle<Struct> structure, int i, i::Handle<Type> type);
- template<class V>
- static inline i::Handle<V> struct_get_value(
- i::Handle<Struct> structure, int i);
- template<class V>
- static inline void struct_set_value(
- i::Handle<Struct> structure, int i, i::Handle<V> x);
-
- static inline i::Handle<Range> range_create(Isolate* isolate);
- static inline int range_get_bitset(i::Handle<Range> range);
- static inline void range_set_bitset(i::Handle<Range> range, int value);
- static inline double range_get_double(i::Handle<Range> range, int index);
- static inline void range_set_double(i::Handle<Range> range, int index,
- double value, Isolate* isolate);
+ static int AddToUnion(Type* type, UnionType* result, int size, Zone* zone);
+ static int IntersectAux(Type* type, Type* other, UnionType* result, int size,
+ RangeType::Limits* limits, Zone* zone);
+ static Type* NormalizeUnion(Type* unioned, int size, Zone* zone);
+ static Type* NormalizeRangeAndBitset(Type* range, bitset* bits, Zone* zone);
};
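
For reference, the IsInteger predicate above treats infinities as integers (nearbyint(inf) == inf) while rejecting NaN and -0. A minimal standalone sketch of the same test, approximating i::IsMinusZero with std::signbit:

    #include <cmath>

    // Mirrors Type::IsInteger(double): true for finite integers and
    // infinities, false for NaN, -0, and fractional values.
    static bool IsIntegerValue(double x) {
      return std::nearbyint(x) == x && !(x == 0 && std::signbit(x));
    }
    // IsIntegerValue(3.0) && IsIntegerValue(INFINITY)
    // !IsIntegerValue(-0.0) && !IsIntegerValue(0.5) && !IsIntegerValue(NAN)
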
-typedef TypeImpl<HeapTypeConfig> HeapType;
-
-
// -----------------------------------------------------------------------------
// Type bounds. A simple struct to represent a pair of lower/upper types.
-template<class Config>
-struct BoundsImpl {
- typedef TypeImpl<Config> Type;
- typedef typename Type::TypeHandle TypeHandle;
- typedef typename Type::Region Region;
-
- TypeHandle lower;
- TypeHandle upper;
-
- BoundsImpl() : // Make sure accessing uninitialized bounds crashes big-time.
- lower(Config::template null_handle<Type>()),
- upper(Config::template null_handle<Type>()) {}
- explicit BoundsImpl(TypeHandle t) : lower(t), upper(t) {}
- BoundsImpl(TypeHandle l, TypeHandle u) : lower(l), upper(u) {
- DCHECK(lower->Is(upper));
- }
+struct Bounds {
+ Type* lower;
+ Type* upper;
+
+ Bounds()
+ : // Make sure accessing uninitialized bounds crashes big-time.
+ lower(nullptr),
+ upper(nullptr) {}
+ explicit Bounds(Type* t) : lower(t), upper(t) {}
+ Bounds(Type* l, Type* u) : lower(l), upper(u) { DCHECK(lower->Is(upper)); }
// Unrestricted bounds.
- static BoundsImpl Unbounded() {
- return BoundsImpl(Type::None(), Type::Any());
- }
+ static Bounds Unbounded() { return Bounds(Type::None(), Type::Any()); }
// Meet: both b1 and b2 are known to hold.
- static BoundsImpl Both(BoundsImpl b1, BoundsImpl b2, Region* region) {
- TypeHandle lower = Type::Union(b1.lower, b2.lower, region);
- TypeHandle upper = Type::Intersect(b1.upper, b2.upper, region);
+ static Bounds Both(Bounds b1, Bounds b2, Zone* zone) {
+ Type* lower = Type::Union(b1.lower, b2.lower, zone);
+ Type* upper = Type::Intersect(b1.upper, b2.upper, zone);
// Lower bounds are considered approximate, correct as necessary.
if (!lower->Is(upper)) lower = upper;
- return BoundsImpl(lower, upper);
+ return Bounds(lower, upper);
}
// Join: either b1 or b2 is known to hold.
- static BoundsImpl Either(BoundsImpl b1, BoundsImpl b2, Region* region) {
- TypeHandle lower = Type::Intersect(b1.lower, b2.lower, region);
- TypeHandle upper = Type::Union(b1.upper, b2.upper, region);
- return BoundsImpl(lower, upper);
+ static Bounds Either(Bounds b1, Bounds b2, Zone* zone) {
+ Type* lower = Type::Intersect(b1.lower, b2.lower, zone);
+ Type* upper = Type::Union(b1.upper, b2.upper, zone);
+ return Bounds(lower, upper);
}
- static BoundsImpl NarrowLower(BoundsImpl b, TypeHandle t, Region* region) {
- TypeHandle lower = Type::Union(b.lower, t, region);
+ static Bounds NarrowLower(Bounds b, Type* t, Zone* zone) {
+ Type* lower = Type::Union(b.lower, t, zone);
// Lower bounds are considered approximate, correct as necessary.
if (!lower->Is(b.upper)) lower = b.upper;
- return BoundsImpl(lower, b.upper);
+ return Bounds(lower, b.upper);
}
- static BoundsImpl NarrowUpper(BoundsImpl b, TypeHandle t, Region* region) {
- TypeHandle lower = b.lower;
- TypeHandle upper = Type::Intersect(b.upper, t, region);
+ static Bounds NarrowUpper(Bounds b, Type* t, Zone* zone) {
+ Type* lower = b.lower;
+ Type* upper = Type::Intersect(b.upper, t, zone);
// Lower bounds are considered approximate, correct as necessary.
if (!lower->Is(upper)) lower = upper;
- return BoundsImpl(lower, upper);
+ return Bounds(lower, upper);
}
- bool Narrows(BoundsImpl that) {
+ bool Narrows(Bounds that) {
return that.lower->Is(this->lower) && this->upper->Is(that.upper);
}
};
-typedef BoundsImpl<ZoneTypeConfig> Bounds;
-
} // namespace internal
} // namespace v8
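
The Bounds struct above is a small interval lattice over types: Both computes a meet (intersect the uppers, unite the lowers), Either a join, and the Narrow* helpers clamp one side, correcting the approximate lower bound whenever it escapes the upper. A minimal sketch of the same discipline over plain integer intervals, a totally ordered stand-in for Type::Union/Type::Intersect:

    #include <algorithm>

    // Integer-interval analogue of Bounds: same meet/join discipline,
    // with the lower bound corrected when it overshoots the upper.
    struct IntBounds {
      int lower, upper;
      static IntBounds Both(IntBounds a, IntBounds b) {  // meet
        IntBounds r{std::max(a.lower, b.lower), std::min(a.upper, b.upper)};
        if (r.lower > r.upper) r.lower = r.upper;  // lower is approximate
        return r;
      }
      static IntBounds Either(IntBounds a, IntBounds b) {  // join
        return {std::min(a.lower, b.lower), std::max(a.upper, b.upper)};
      }
      bool Narrows(IntBounds that) const {
        return that.lower <= lower && upper <= that.upper;
      }
    };
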
diff --git a/deps/v8/src/typing-asm.cc b/deps/v8/src/typing-asm.cc
index 509ba7b125..ddb608fc2c 100644
--- a/deps/v8/src/typing-asm.cc
+++ b/deps/v8/src/typing-asm.cc
@@ -36,7 +36,6 @@ namespace internal {
if (!valid_) return; \
} while (false)
-
AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
FunctionLiteral* root)
: zone_(zone),
@@ -62,6 +61,7 @@ AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
ZoneAllocationPolicy(zone)),
in_function_(false),
building_function_tables_(false),
+ visiting_exports_(false),
cache_(TypeCache::Get()) {
InitializeAstVisitor(isolate);
InitializeStdlib();
@@ -89,7 +89,7 @@ void AsmTyper::VisitAsmModule(FunctionLiteral* fun) {
for (int i = 0; i < scope->num_parameters(); ++i) {
Variable* param = scope->parameter(i);
DCHECK(GetType(param) == NULL);
- SetType(param, Type::None(zone()));
+ SetType(param, Type::None());
}
ZoneList<Declaration*>* decls = scope->declarations();
@@ -126,8 +126,7 @@ void AsmTyper::VisitAsmModule(FunctionLiteral* fun) {
for (int i = 0; i < decls->length(); ++i) {
FunctionDeclaration* decl = decls->at(i)->AsFunctionDeclaration();
if (decl != NULL) {
- RECURSE(
- VisitWithExpectation(decl->fun(), Type::Any(zone()), "UNREACHABLE"));
+ RECURSE(VisitWithExpectation(decl->fun(), Type::Any(), "UNREACHABLE"));
if (!computed_type_->IsFunction()) {
FAIL(decl->fun(), "function literal expected to be a function");
}
@@ -135,6 +134,7 @@ void AsmTyper::VisitAsmModule(FunctionLiteral* fun) {
}
// Validate exports.
+ visiting_exports_ = true;
ReturnStatement* stmt = fun->body()->last()->AsReturnStatement();
if (stmt == nullptr) {
FAIL(fun->body()->last(), "last statement in module is not a return");
@@ -148,7 +148,7 @@ void AsmTyper::VisitVariableDeclaration(VariableDeclaration* decl) {
Variable* var = decl->proxy()->var();
if (var->location() != VariableLocation::PARAMETER) {
if (GetType(var) == NULL) {
- SetType(var, Type::Any(zone()));
+ SetType(var, Type::Any());
} else {
DCHECK(!GetType(var)->IsFunction());
}
@@ -165,14 +165,14 @@ void AsmTyper::VisitFunctionDeclaration(FunctionDeclaration* decl) {
// Set function type so global references to functions have some type
// (so they can give a more useful error).
Variable* var = decl->proxy()->var();
- SetType(var, Type::Function(zone()));
+ SetType(var, Type::Function());
}
void AsmTyper::VisitFunctionAnnotation(FunctionLiteral* fun) {
// Extract result type.
ZoneList<Statement*>* body = fun->body();
- Type* result_type = Type::Undefined(zone());
+ Type* result_type = Type::Undefined();
if (body->length() > 0) {
ReturnStatement* stmt = body->last()->AsReturnStatement();
if (stmt != NULL) {
@@ -188,9 +188,8 @@ void AsmTyper::VisitFunctionAnnotation(FunctionLiteral* fun) {
result_type = computed_type_;
}
}
- Type::FunctionType* type =
- Type::Function(result_type, Type::Any(), fun->parameter_count(), zone())
- ->AsFunction();
+ Type* type =
+ Type::Function(result_type, Type::Any(), fun->parameter_count(), zone());
// Extract parameter types.
bool good = true;
@@ -212,7 +211,7 @@ void AsmTyper::VisitFunctionAnnotation(FunctionLiteral* fun) {
property_info_ = NULL;
}
SetType(var, computed_type_);
- type->InitParameter(i, computed_type_);
+ type->AsFunction()->InitParameter(i, computed_type_);
good = true;
}
if (!good) FAIL(fun, "missing parameter type annotations");
@@ -489,19 +488,19 @@ void AsmTyper::VisitDebuggerStatement(DebuggerStatement* stmt) {
void AsmTyper::VisitFunctionLiteral(FunctionLiteral* expr) {
- Scope* scope = expr->scope();
- DCHECK(scope->is_function_scope());
if (in_function_) {
FAIL(expr, "invalid nested function");
}
+ Scope* scope = expr->scope();
+ DCHECK(scope->is_function_scope());
if (!expr->bounds().upper->IsFunction()) {
FAIL(expr, "invalid function literal");
}
- Type::FunctionType* type = expr->bounds().upper->AsFunction();
+ Type* type = expr->bounds().upper;
Type* save_return_type = return_type_;
- return_type_ = type->Result();
+ return_type_ = type->AsFunction()->Result();
in_function_ = true;
local_variable_type_.Clear();
RECURSE(VisitDeclarations(scope->declarations()));
@@ -523,6 +522,9 @@ void AsmTyper::VisitDoExpression(DoExpression* expr) {
void AsmTyper::VisitConditional(Conditional* expr) {
+ if (!in_function_) {
+ FAIL(expr, "ternary operator inside module body");
+ }
RECURSE(VisitWithExpectation(expr->condition(), Type::Number(),
"condition expected to be integer"));
if (!computed_type_->Is(cache_.kAsmInt)) {
@@ -554,11 +556,21 @@ void AsmTyper::VisitConditional(Conditional* expr) {
void AsmTyper::VisitVariableProxy(VariableProxy* expr) {
+ VisitVariableProxy(expr, false);
+}
+
+void AsmTyper::VisitVariableProxy(VariableProxy* expr, bool assignment) {
Variable* var = expr->var();
VariableInfo* info = GetVariableInfo(var, false);
+ if (!assignment && !in_function_ && !building_function_tables_ &&
+ !visiting_exports_) {
+ if (var->location() != VariableLocation::PARAMETER || var->index() >= 3) {
+ FAIL(expr, "illegal variable reference in module body");
+ }
+ }
if (info == NULL || info->type == NULL) {
if (var->mode() == TEMPORARY) {
- SetType(var, Type::Any(zone()));
+ SetType(var, Type::Any());
info = GetVariableInfo(var, false);
} else {
FAIL(expr, "unbound variable");
@@ -623,13 +635,13 @@ void AsmTyper::VisitObjectLiteral(ObjectLiteral* expr) {
ZoneList<ObjectLiteralProperty*>* props = expr->properties();
for (int i = 0; i < props->length(); ++i) {
ObjectLiteralProperty* prop = props->at(i);
- RECURSE(VisitWithExpectation(prop->value(), Type::Any(zone()),
+ RECURSE(VisitWithExpectation(prop->value(), Type::Any(),
"object property expected to be a function"));
if (!computed_type_->IsFunction()) {
FAIL(prop->value(), "non-function in function table");
}
}
- IntersectResult(expr, Type::Object(zone()));
+ IntersectResult(expr, Type::Object());
}
@@ -639,7 +651,7 @@ void AsmTyper::VisitArrayLiteral(ArrayLiteral* expr) {
}
// Allowed for function tables.
ZoneList<Expression*>* values = expr->values();
- Type* elem_type = Type::None(zone());
+ Type* elem_type = Type::None();
for (int i = 0; i < values->length(); ++i) {
Expression* value = values->at(i);
RECURSE(VisitWithExpectation(value, Type::Any(), "UNREACHABLE"));
@@ -671,19 +683,23 @@ void AsmTyper::VisitAssignment(Assignment* expr) {
RECURSE(VisitWithExpectation(
expr->value(), type, "assignment value expected to match surrounding"));
Type* target_type = StorageType(computed_type_);
- if (intish_ != 0) {
- FAIL(expr, "intish or floatish assignment");
- }
if (expr->target()->IsVariableProxy()) {
- RECURSE(VisitWithExpectation(expr->target(), target_type,
- "assignment target expected to match value"));
+ if (intish_ != 0) {
+ FAIL(expr, "intish or floatish assignment");
+ }
+ expected_type_ = target_type;
+ VisitVariableProxy(expr->target()->AsVariableProxy(), true);
} else if (expr->target()->IsProperty()) {
+ int value_intish = intish_;
Property* property = expr->target()->AsProperty();
RECURSE(VisitWithExpectation(property->obj(), Type::Any(),
"bad propety object"));
if (!computed_type_->IsArray()) {
FAIL(property->obj(), "array expected");
}
+ if (value_intish != 0 && computed_type_->Is(cache_.kFloat64Array)) {
+ FAIL(expr, "floatish assignment to double array");
+ }
VisitHeapAccess(property, true, target_type);
}
IntersectResult(expr, target_type);
@@ -720,29 +736,32 @@ Type* AsmTyper::StorageType(Type* type) {
void AsmTyper::VisitHeapAccess(Property* expr, bool assigning,
Type* assignment_type) {
- Type::ArrayType* array_type = computed_type_->AsArray();
- size_t size = array_size_;
- Type* type = array_type->AsArray()->Element();
+ ArrayType* array_type = computed_type_->AsArray();
+ // size_t size = array_size_;
+ Type* type = array_type->Element();
if (type->IsFunction()) {
if (assigning) {
FAIL(expr, "assigning to function table is illegal");
}
- BinaryOperation* bin = expr->key()->AsBinaryOperation();
- if (bin == NULL || bin->op() != Token::BIT_AND) {
- FAIL(expr->key(), "expected & in call");
- }
- RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
- "array index expected to be integer"));
- Literal* right = bin->right()->AsLiteral();
- if (right == NULL || right->raw_value()->ContainsDot()) {
- FAIL(right, "call mask must be integer");
- }
- RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
- "call mask expected to be integer"));
- if (static_cast<size_t>(right->raw_value()->AsNumber()) != size - 1) {
- FAIL(right, "call mask must match function table");
- }
- bin->set_bounds(Bounds(cache_.kAsmSigned));
+ // TODO(bradnelson): Fix the parser and then un-comment this part
+ // BinaryOperation* bin = expr->key()->AsBinaryOperation();
+ // if (bin == NULL || bin->op() != Token::BIT_AND) {
+ // FAIL(expr->key(), "expected & in call");
+ // }
+ // RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
+ // "array index expected to be integer"));
+ // Literal* right = bin->right()->AsLiteral();
+ // if (right == NULL || right->raw_value()->ContainsDot()) {
+ // FAIL(right, "call mask must be integer");
+ // }
+ // RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
+ // "call mask expected to be integer"));
+ // if (static_cast<size_t>(right->raw_value()->AsNumber()) != size - 1) {
+ // FAIL(right, "call mask must match function table");
+ // }
+ // bin->set_bounds(Bounds(cache_.kAsmSigned));
+ RECURSE(VisitWithExpectation(expr->key(), cache_.kAsmSigned,
+ "must be integer"));
IntersectResult(expr, type);
} else {
Literal* literal = expr->key()->AsLiteral();
@@ -750,24 +769,28 @@ void AsmTyper::VisitHeapAccess(Property* expr, bool assigning,
RECURSE(VisitWithExpectation(literal, cache_.kAsmSigned,
"array index expected to be integer"));
} else {
- BinaryOperation* bin = expr->key()->AsBinaryOperation();
- if (bin == NULL || bin->op() != Token::SAR) {
- FAIL(expr->key(), "expected >> in heap access");
- }
- RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
- "array index expected to be integer"));
- Literal* right = bin->right()->AsLiteral();
- if (right == NULL || right->raw_value()->ContainsDot()) {
- FAIL(right, "heap access shift must be integer");
- }
- RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
- "array shift expected to be integer"));
- int n = static_cast<int>(right->raw_value()->AsNumber());
int expected_shift = ElementShiftSize(type);
- if (expected_shift < 0 || n != expected_shift) {
- FAIL(right, "heap access shift must match element size");
+ if (expected_shift == 0) {
+ RECURSE(Visit(expr->key()));
+ } else {
+ BinaryOperation* bin = expr->key()->AsBinaryOperation();
+ if (bin == NULL || bin->op() != Token::SAR) {
+ FAIL(expr->key(), "expected >> in heap access");
+ }
+ RECURSE(VisitWithExpectation(bin->left(), cache_.kAsmSigned,
+ "array index expected to be integer"));
+ Literal* right = bin->right()->AsLiteral();
+ if (right == NULL || right->raw_value()->ContainsDot()) {
+ FAIL(right, "heap access shift must be integer");
+ }
+ RECURSE(VisitWithExpectation(bin->right(), cache_.kAsmSigned,
+ "array shift expected to be integer"));
+ int n = static_cast<int>(right->raw_value()->AsNumber());
+ if (expected_shift < 0 || n != expected_shift) {
+ FAIL(right, "heap access shift must match element size");
+ }
}
- bin->set_bounds(Bounds(cache_.kAsmSigned));
+ expr->key()->set_bounds(Bounds(cache_.kAsmSigned));
}
Type* result_type;
if (type->Is(cache_.kAsmIntArrayElement)) {
@@ -885,7 +908,8 @@ void AsmTyper::VisitProperty(Property* expr) {
// Only recurse at this point so that we avoid needing
// stdlib.Math to have a real type.
- RECURSE(VisitWithExpectation(expr->obj(), Type::Any(), "bad propety object"));
+ RECURSE(
+ VisitWithExpectation(expr->obj(), Type::Any(), "bad property object"));
// For heap view or function table access.
if (computed_type_->IsArray()) {
@@ -893,13 +917,16 @@ void AsmTyper::VisitProperty(Property* expr) {
return;
}
- // stdlib.x or foreign.x
VariableProxy* proxy = expr->obj()->AsVariableProxy();
if (proxy != NULL) {
Variable* var = proxy->var();
if (var->location() == VariableLocation::PARAMETER && var->index() == 1) {
- // foreign.x is ok.
- SetResult(expr, expected_type_);
+ // foreign.x - Functions are represented as () -> Any
+ if (Type::Any()->Is(expected_type_)) {
+ SetResult(expr, Type::Function(Type::Any(), zone()));
+ } else {
+ SetResult(expr, expected_type_);
+ }
return;
}
}
@@ -909,6 +936,7 @@ void AsmTyper::VisitProperty(Property* expr) {
void AsmTyper::VisitCall(Call* expr) {
+ Type* expected_type = expected_type_;
RECURSE(VisitWithExpectation(expr->expression(), Type::Any(),
"callee expected to be any"));
StandardMember standard_member = kNone;
@@ -923,57 +951,69 @@ void AsmTyper::VisitCall(Call* expr) {
FAIL(expr, "calls must be to bound variables or function tables");
}
if (computed_type_->IsFunction()) {
- Type::FunctionType* fun_type = computed_type_->AsFunction();
+ FunctionType* fun_type = computed_type_->AsFunction();
Type* result_type = fun_type->Result();
ZoneList<Expression*>* args = expr->arguments();
- if (fun_type->Arity() != args->length()) {
- FAIL(expr, "call with wrong arity");
- }
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE(VisitWithExpectation(
- arg, fun_type->Parameter(i),
- "call argument expected to match callee parameter"));
- if (standard_member != kNone && standard_member != kMathFround &&
- i == 0) {
- result_type = computed_type_;
+ if (Type::Any()->Is(result_type)) {
+ // For foreign calls.
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE(VisitWithExpectation(
+ arg, Type::Any(), "foreign call argument expected to be any"));
+ // Checking for asm extern types explicitly, as the type system
+ // doesn't correctly check their inheritance relationship.
+ if (!computed_type_->Is(cache_.kAsmSigned) &&
+ !computed_type_->Is(cache_.kAsmFixnum) &&
+ !computed_type_->Is(cache_.kAsmDouble)) {
+ FAIL(arg,
+ "foreign call argument expected to be int, double, or fixnum");
+ }
}
- }
- // Handle polymorphic stdlib functions specially.
- if (standard_member == kMathCeil || standard_member == kMathFloor ||
- standard_member == kMathSqrt) {
- if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
- !args->at(0)->bounds().upper->Is(cache_.kAsmDouble)) {
- FAIL(expr, "illegal function argument type");
+ intish_ = 0;
+ expr->expression()->set_bounds(
+ Bounds(Type::Function(Type::Any(), zone())));
+ IntersectResult(expr, expected_type);
+ } else {
+ if (fun_type->Arity() != args->length()) {
+ FAIL(expr, "call with wrong arity");
}
- } else if (standard_member == kMathAbs || standard_member == kMathMin ||
- standard_member == kMathMax) {
- if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
- !args->at(0)->bounds().upper->Is(cache_.kAsmDouble) &&
- !args->at(0)->bounds().upper->Is(cache_.kAsmSigned)) {
- FAIL(expr, "illegal function argument type");
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE(VisitWithExpectation(
+ arg, fun_type->Parameter(i),
+ "call argument expected to match callee parameter"));
+ if (standard_member != kNone && standard_member != kMathFround &&
+ i == 0) {
+ result_type = computed_type_;
+ }
}
- if (args->length() > 1) {
- Type* other = Type::Intersect(args->at(0)->bounds().upper,
- args->at(1)->bounds().upper, zone());
- if (!other->Is(cache_.kAsmFloat) && !other->Is(cache_.kAsmDouble) &&
- !other->Is(cache_.kAsmSigned)) {
- FAIL(expr, "function arguments types don't match");
+ // Handle polymorphic stdlib functions specially.
+ if (standard_member == kMathCeil || standard_member == kMathFloor ||
+ standard_member == kMathSqrt) {
+ if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
+ !args->at(0)->bounds().upper->Is(cache_.kAsmDouble)) {
+ FAIL(expr, "illegal function argument type");
+ }
+ } else if (standard_member == kMathAbs || standard_member == kMathMin ||
+ standard_member == kMathMax) {
+ if (!args->at(0)->bounds().upper->Is(cache_.kAsmFloat) &&
+ !args->at(0)->bounds().upper->Is(cache_.kAsmDouble) &&
+ !args->at(0)->bounds().upper->Is(cache_.kAsmSigned)) {
+ FAIL(expr, "illegal function argument type");
+ }
+ if (args->length() > 1) {
+ Type* other = Type::Intersect(args->at(0)->bounds().upper,
+ args->at(1)->bounds().upper, zone());
+ if (!other->Is(cache_.kAsmFloat) && !other->Is(cache_.kAsmDouble) &&
+ !other->Is(cache_.kAsmSigned)) {
+ FAIL(expr, "function arguments types don't match");
+ }
}
}
+ intish_ = 0;
+ IntersectResult(expr, result_type);
}
- intish_ = 0;
- IntersectResult(expr, result_type);
- } else if (computed_type_->Is(Type::Any())) {
- // For foreign calls.
- ZoneList<Expression*>* args = expr->arguments();
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE(VisitWithExpectation(arg, Type::Any(),
- "foreign call argument expected to be any"));
- }
- intish_ = kMaxUncombinedAdditiveSteps;
- IntersectResult(expr, Type::Number());
} else {
FAIL(expr, "invalid callee");
}
@@ -987,7 +1027,7 @@ void AsmTyper::VisitCallNew(CallNew* expr) {
RECURSE(VisitWithExpectation(expr->expression(), Type::Any(),
"expected stdlib function"));
if (computed_type_->IsFunction()) {
- Type::FunctionType* fun_type = computed_type_->AsFunction();
+ FunctionType* fun_type = computed_type_->AsFunction();
ZoneList<Expression*>* args = expr->arguments();
if (fun_type->Arity() != args->length())
FAIL(expr, "call with wrong arity");
@@ -1011,6 +1051,9 @@ void AsmTyper::VisitCallRuntime(CallRuntime* expr) {
void AsmTyper::VisitUnaryOperation(UnaryOperation* expr) {
+ if (!in_function_) {
+ FAIL(expr, "unary operator inside module body");
+ }
switch (expr->op()) {
case Token::NOT: // Used to encode != and !==
RECURSE(VisitWithExpectation(expr->expression(), cache_.kAsmInt,
@@ -1079,6 +1122,27 @@ void AsmTyper::VisitIntegerBitwiseOperator(BinaryOperation* expr,
void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
+ if (!in_function_) {
+ if (expr->op() != Token::BIT_OR && expr->op() != Token::MUL) {
+ FAIL(expr, "illegal binary operator inside module body");
+ }
+ if (!(expr->left()->IsProperty() || expr->left()->IsVariableProxy()) ||
+ !expr->right()->IsLiteral()) {
+ FAIL(expr, "illegal computation inside module body");
+ }
+ DCHECK(expr->right()->AsLiteral() != nullptr);
+ const AstValue* right_value = expr->right()->AsLiteral()->raw_value();
+ if (expr->op() == Token::BIT_OR) {
+ if (right_value->AsNumber() != 0.0 || right_value->ContainsDot()) {
+ FAIL(expr, "illegal integer annotation value");
+ }
+ }
+ if (expr->op() == Token::MUL) {
+ if (right_value->AsNumber() != 1.0 && right_value->ContainsDot()) {
+ FAIL(expr, "illegal double annotation value");
+ }
+ }
+ }
switch (expr->op()) {
case Token::COMMA: {
RECURSE(VisitWithExpectation(expr->left(), Type::Any(),
@@ -1095,6 +1159,9 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
// BIT_OR allows Any since it is used as a type coercion.
VisitIntegerBitwiseOperator(expr, Type::Any(), cache_.kAsmInt,
cache_.kAsmSigned, true);
+ if (expr->left()->IsCall() && expr->op() == Token::BIT_OR) {
+ expr->left()->set_bounds(Bounds(cache_.kAsmSigned));
+ }
return;
}
case Token::BIT_XOR: {
@@ -1181,6 +1248,9 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
} else if (expr->op() == Token::MUL && expr->right()->IsLiteral() &&
right_type->Is(cache_.kAsmDouble)) {
// For unary +, expressed as x * 1.0
+ if (expr->left()->IsCall() && expr->op() == Token::MUL) {
+ expr->left()->set_bounds(Bounds(cache_.kAsmDouble));
+ }
IntersectResult(expr, cache_.kAsmDouble);
return;
} else if (type->Is(cache_.kAsmFloat) && expr->op() != Token::MOD) {
@@ -1204,6 +1274,9 @@ void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
void AsmTyper::VisitCompareOperation(CompareOperation* expr) {
+ if (!in_function_) {
+ FAIL(expr, "comparison inside module body");
+ }
Token::Value op = expr->op();
if (op != Token::EQ && op != Token::NE && op != Token::LT &&
op != Token::LTE && op != Token::GT && op != Token::GTE) {
@@ -1295,7 +1368,7 @@ void AsmTyper::InitializeStdlib() {
if (allow_simd_) {
InitializeStdlibSIMD();
}
- Type* number_type = Type::Number(zone());
+ Type* number_type = Type::Number();
Type* double_type = cache_.kAsmDouble;
Type* double_fn1_type = Type::Function(double_type, double_type, zone());
Type* double_fn2_type =
@@ -1352,7 +1425,7 @@ void AsmTyper::InitializeStdlib() {
stdlib_types_["Infinity"]->standard_member = kInfinity;
stdlib_types_["NaN"] = new (zone()) VariableInfo(double_type);
stdlib_types_["NaN"]->standard_member = kNaN;
- Type* buffer_type = Type::Any(zone());
+ Type* buffer_type = Type::Any();
#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
stdlib_types_[#TypeName "Array"] = new (zone()) VariableInfo( \
Type::Function(cache_.k##TypeName##Array, buffer_type, zone()));
@@ -1469,7 +1542,7 @@ void AsmTyper::VisitWithExpectation(Expression* expr, Type* expected_type,
expected_type_ = expected_type;
RECURSE(Visit(expr));
Type* bounded_type = Type::Intersect(computed_type_, expected_type_, zone());
- if (bounded_type->Is(Type::None(zone()))) {
+ if (bounded_type->Is(Type::None())) {
#ifdef DEBUG
PrintF("Computed type: ");
computed_type_->Print();
@@ -1482,8 +1555,7 @@ void AsmTyper::VisitWithExpectation(Expression* expr, Type* expected_type,
}
-void AsmTyper::VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* expr) {
+void AsmTyper::VisitRewritableExpression(RewritableExpression* expr) {
RECURSE(Visit(expr->expression()));
}
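
The heap-access changes above hinge on the shift in an access like HEAP32[i >> 2] matching the element size of the typed-array view: 4-byte elements need >> 2, 2-byte elements >> 1, and 1-byte views need no shift at all, which is the newly tolerated expected_shift == 0 case. A minimal sketch of that correspondence (a hypothetical stand-in for ElementShiftSize):

    // HEAPk[i >> s] is well-typed only when (1 << s) equals the element
    // size in bytes; returns -1 for sizes that are not powers of two.
    static int ElementShift(int element_size_bytes) {
      int shift = 0;
      while ((1 << shift) < element_size_bytes) shift++;
      return ((1 << shift) == element_size_bytes) ? shift : -1;
    }
    // ElementShift(4) == 2 (HEAP32), ElementShift(2) == 1 (HEAP16),
    // ElementShift(1) == 0 (HEAP8, which now needs no shift expression)
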
diff --git a/deps/v8/src/typing-asm.h b/deps/v8/src/typing-asm.h
index b7f53831e6..54796ed4dd 100644
--- a/deps/v8/src/typing-asm.h
+++ b/deps/v8/src/typing-asm.h
@@ -22,7 +22,7 @@ class AsmTyper : public AstVisitor {
explicit AsmTyper(Isolate* isolate, Zone* zone, Script* script,
FunctionLiteral* root);
bool Validate();
- void set_allow_simd(bool simd);
+ void set_allow_simd(bool simd) { allow_simd_ = simd; }
const char* error_message() { return error_message_; }
enum StandardMember {
@@ -113,6 +113,7 @@ class AsmTyper : public AstVisitor {
bool in_function_; // In module function?
bool building_function_tables_;
+ bool visiting_exports_;
TypeCache const& cache_;
@@ -161,6 +162,8 @@ class AsmTyper : public AstVisitor {
void VisitLiteral(Literal* expr, bool is_return);
+ void VisitVariableProxy(VariableProxy* expr, bool assignment);
+
void VisitIntegerBitwiseOperator(BinaryOperation* expr, Type* left_expected,
Type* right_expected, Type* result_type,
bool conversion);
diff --git a/deps/v8/src/utils-inl.h b/deps/v8/src/utils-inl.h
new file mode 100644
index 0000000000..617d7fc151
--- /dev/null
+++ b/deps/v8/src/utils-inl.h
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UTILS_INL_H_
+#define V8_UTILS_INL_H_
+
+#include "src/utils.h"
+
+#include "include/v8-platform.h"
+#include "src/base/platform/time.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class TimedScope {
+ public:
+ explicit TimedScope(double* result)
+ : start_(TimestampMs()), result_(result) {}
+
+ ~TimedScope() { *result_ = TimestampMs() - start_; }
+
+ private:
+ static inline double TimestampMs() {
+ return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
+ static_cast<double>(base::Time::kMillisecondsPerSecond);
+ }
+
+ double start_;
+ double* result_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UTILS_INL_H_
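
TimedScope is a plain RAII timer: construction samples the platform's monotonic clock and destruction writes the elapsed milliseconds through the stored pointer, so every exit path from the scope is measured. A minimal usage sketch (DoParse is a hypothetical workload):

    double parse_ms = 0;
    {
      TimedScope timer(&parse_ms);
      DoParse();  // timed region
    }             // destructor fires here, on any exit path
    // parse_ms now holds the wall-clock duration of the block in ms.
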
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 1ea2d56fbf..d779979a61 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -1125,6 +1125,19 @@ class BailoutId {
int id_;
};
+class TokenDispenserForFinally {
+ public:
+ int GetBreakContinueToken() { return next_token_++; }
+ static const int kFallThroughToken = 0;
+ static const int kThrowToken = 1;
+ static const int kReturnToken = 2;
+
+ static const int kFirstBreakContinueToken = 3;
+ static const int kInvalidToken = -1;
+
+ private:
+ int next_token_ = kFirstBreakContinueToken;
+};
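
The dispenser hands out distinct integer tokens so a finally block, reachable from several abrupt-completion paths (fall-through, throw, return, plus any number of breaks and continues), can record which path to resume afterwards. A minimal sketch, assuming the rewriter stores the token before jumping into the finally body:

    TokenDispenserForFinally dispenser;
    int break_token = dispenser.GetBreakContinueToken();     // == 3
    int continue_token = dispenser.GetBreakContinueToken();  // == 4
    // kFallThroughToken (0), kThrowToken (1), and kReturnToken (2) label
    // the fixed exits; the finally epilogue switches on the stored token.
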
// ----------------------------------------------------------------------------
// I/O support.
@@ -1715,75 +1728,47 @@ inline uintptr_t GetCurrentStackPosition() {
return limit;
}
-static inline double ReadDoubleValue(const void* p) {
-#ifndef V8_TARGET_ARCH_MIPS
- return *reinterpret_cast<const double*>(p);
+template <typename V>
+static inline V ReadUnalignedValue(const void* p) {
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
+ return *reinterpret_cast<const V*>(p);
+#else
+ V r;
+ memmove(&r, p, sizeof(V));
+ return r;
+#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+}
+
+template <typename V>
+static inline void WriteUnalignedValue(void* p, V value) {
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
+ *(reinterpret_cast<V*>(p)) = value;
#else // V8_TARGET_ARCH_MIPS
- // Prevent compiler from using load-double (mips ldc1) on (possibly)
- // non-64-bit aligned address.
- union conversion {
- double d;
- uint32_t u[2];
- } c;
- const uint32_t* ptr = reinterpret_cast<const uint32_t*>(p);
- c.u[0] = *ptr;
- c.u[1] = *(ptr + 1);
- return c.d;
-#endif // V8_TARGET_ARCH_MIPS
+ memmove(p, &value, sizeof(V));
+#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+}
+
+static inline double ReadDoubleValue(const void* p) {
+ return ReadUnalignedValue<double>(p);
}
static inline void WriteDoubleValue(void* p, double value) {
-#ifndef V8_TARGET_ARCH_MIPS
- *(reinterpret_cast<double*>(p)) = value;
-#else // V8_TARGET_ARCH_MIPS
- // Prevent compiler from using load-double (mips sdc1) on (possibly)
- // non-64-bit aligned address.
- union conversion {
- double d;
- uint32_t u[2];
- } c;
- c.d = value;
- uint32_t* ptr = reinterpret_cast<uint32_t*>(p);
- *ptr = c.u[0];
- *(ptr + 1) = c.u[1];
-#endif // V8_TARGET_ARCH_MIPS
+ WriteUnalignedValue(p, value);
}
static inline uint16_t ReadUnalignedUInt16(const void* p) {
-#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
- return *reinterpret_cast<const uint16_t*>(p);
-#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
- // Prevent compiler from using load-half (mips lh) on (possibly)
- // non-16-bit aligned address.
- union conversion {
- uint16_t h;
- uint8_t b[2];
- } c;
- const uint8_t* ptr = reinterpret_cast<const uint8_t*>(p);
- c.b[0] = *ptr;
- c.b[1] = *(ptr + 1);
- return c.h;
-#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ return ReadUnalignedValue<uint16_t>(p);
}
static inline void WriteUnalignedUInt16(void* p, uint16_t value) {
-#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
- *(reinterpret_cast<uint16_t*>(p)) = value;
-#else // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
- // Prevent compiler from using store-half (mips sh) on (possibly)
- // non-16-bit aligned address.
- union conversion {
- uint16_t h;
- uint8_t b[2];
- } c;
- c.h = value;
- uint8_t* ptr = reinterpret_cast<uint8_t*>(p);
- *ptr = c.b[0];
- *(ptr + 1) = c.b[1];
-#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+ WriteUnalignedValue(p, value);
+}
+
+static inline void WriteUnalignedUInt32(void* p, uint32_t value) {
+ WriteUnalignedValue(p, value);
}
} // namespace internal
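
The rewrite above collapses the per-type union juggling into one memmove-based template: on MIPS the compiler may otherwise emit load/store instructions that trap on unaligned addresses, while a bytewise copy is always safe and is lowered to a single plain load on architectures that permit unaligned access. A standalone sketch of the same portable pattern:

    #include <cstdint>
    #include <cstring>

    // Portable unaligned read: copying through memcpy avoids undefined
    // behavior from dereferencing a misaligned pointer and costs nothing
    // extra where unaligned loads are legal.
    template <typename V>
    V ReadUnaligned(const void* p) {
      V result;
      std::memcpy(&result, p, sizeof(V));
      return result;
    }
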
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index d60548d27d..6533aa1817 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -8,6 +8,7 @@
#include "src/vm-state.h"
#include "src/log.h"
#include "src/simulator.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -39,8 +40,11 @@ inline const char* StateToString(StateTag state) {
template <StateTag Tag>
VMState<Tag>::VMState(Isolate* isolate)
: isolate_(isolate), previous_tag_(isolate->current_vm_state()) {
- if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
- LOG(isolate_, TimerEvent(Logger::START, TimerEventExternal::name()));
+ if (previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
+ if (FLAG_log_timer_events) {
+ LOG(isolate_, TimerEvent(Logger::START, TimerEventExternal::name()));
+ }
+ TRACE_EVENT_BEGIN0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
}
isolate_->set_current_vm_state(Tag);
}
@@ -48,24 +52,34 @@ VMState<Tag>::VMState(Isolate* isolate)
template <StateTag Tag>
VMState<Tag>::~VMState() {
- if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
- LOG(isolate_, TimerEvent(Logger::END, TimerEventExternal::name()));
+ if (previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
+ if (FLAG_log_timer_events) {
+ LOG(isolate_, TimerEvent(Logger::END, TimerEventExternal::name()));
+ }
+ TRACE_EVENT_END0(TRACE_DISABLED_BY_DEFAULT("v8"), "V8.External");
}
isolate_->set_current_vm_state(previous_tag_);
}
-
ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback)
: isolate_(isolate),
callback_(callback),
- previous_scope_(isolate->external_callback_scope()) {
+ previous_scope_(isolate->external_callback_scope()),
+ timer_(&isolate->counters()->runtime_call_stats()->ExternalCallback,
+ isolate->counters()->runtime_call_stats()->current_timer()) {
#ifdef USE_SIMULATOR
scope_address_ = Simulator::current(isolate)->get_sp();
#endif
isolate_->set_external_callback_scope(this);
+ if (FLAG_runtime_call_stats) {
+ isolate_->counters()->runtime_call_stats()->Enter(&timer_);
+ }
}
ExternalCallbackScope::~ExternalCallbackScope() {
+ if (FLAG_runtime_call_stats) {
+ isolate_->counters()->runtime_call_stats()->Leave(&timer_);
+ }
isolate_->set_external_callback_scope(previous_scope_);
}
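
Both additions follow the same bracketing discipline: the constructor emits the "begin" side (TRACE_EVENT_BEGIN0, runtime-call-stats Enter) and the destructor the matching "end", so every exit from the scope stays balanced. A minimal self-contained sketch of that pattern (printf stands in for the hypothetical tracing backend):

    #include <cstdio>

    class ScopedEvent {
     public:
      explicit ScopedEvent(const char* name) : name_(name) {
        std::printf("BEGIN %s\n", name_);  // cf. TRACE_EVENT_BEGIN0
      }
      ~ScopedEvent() { std::printf("END %s\n", name_); }  // TRACE_EVENT_END0
     private:
      const char* name_;
    };
    // { ScopedEvent e("V8.External"); Callback(); }  // always balanced
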
diff --git a/deps/v8/src/vm-state.h b/deps/v8/src/vm-state.h
index 7e723a5282..3f8d3811b3 100644
--- a/deps/v8/src/vm-state.h
+++ b/deps/v8/src/vm-state.h
@@ -6,6 +6,7 @@
#define V8_VM_STATE_H_
#include "src/allocation.h"
+#include "src/counters.h"
#include "src/isolate.h"
namespace v8 {
@@ -48,6 +49,7 @@ class ExternalCallbackScope BASE_EMBEDDED {
Isolate* isolate_;
Address callback_;
ExternalCallbackScope* previous_scope_;
+ RuntimeCallTimer timer_;
#ifdef USE_SIMULATOR
Address scope_address_;
#endif
diff --git a/deps/v8/src/wasm/asm-wasm-builder.cc b/deps/v8/src/wasm/asm-wasm-builder.cc
index 30f84642f8..ee5427b174 100644
--- a/deps/v8/src/wasm/asm-wasm-builder.cc
+++ b/deps/v8/src/wasm/asm-wasm-builder.cc
@@ -27,7 +27,8 @@ namespace wasm {
class AsmWasmBuilderImpl : public AstVisitor {
public:
- AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal)
+ AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal,
+ Handle<Object> foreign)
: local_variables_(HashMap::PointersMatch,
ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
@@ -44,17 +45,23 @@ class AsmWasmBuilderImpl : public AstVisitor {
literal_(literal),
isolate_(isolate),
zone_(zone),
+ foreign_(foreign),
cache_(TypeCache::Get()),
breakable_blocks_(zone),
block_size_(0),
- init_function_index(0) {
+ init_function_index_(0),
+ next_table_index_(0),
+ function_tables_(HashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
+ imported_function_table_(this) {
InitializeAstVisitor(isolate);
}
void InitializeInitFunction() {
unsigned char init[] = "__init__";
- init_function_index = builder_->AddFunction();
- current_function_builder_ = builder_->FunctionAt(init_function_index);
+ init_function_index_ = builder_->AddFunction();
+ current_function_builder_ = builder_->FunctionAt(init_function_index_);
current_function_builder_->SetName(init, 8);
current_function_builder_->ReturnType(kAstStmt);
current_function_builder_->Exported(1);
@@ -70,7 +77,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
void VisitFunctionDeclaration(FunctionDeclaration* decl) {
DCHECK(!in_function_);
- DCHECK(current_function_builder_ == nullptr);
+ DCHECK_NULL(current_function_builder_);
uint16_t index = LookupOrInsertFunction(decl->proxy()->var());
current_function_builder_ = builder_->FunctionAt(index);
in_function_ = true;
@@ -103,11 +110,15 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
}
}
- DCHECK(in_function_);
- BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false,
- static_cast<byte>(stmt->statements()->length()));
- RECURSE(VisitStatements(stmt->statements()));
- DCHECK(block_size_ >= 0);
+ if (in_function_) {
+ BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock,
+ false,
+ static_cast<byte>(stmt->statements()->length()));
+ RECURSE(VisitStatements(stmt->statements()));
+ DCHECK(block_size_ >= 0);
+ } else {
+ RECURSE(VisitStatements(stmt->statements()));
+ }
}
class BlockVisitor {
@@ -162,7 +173,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
void VisitContinueStatement(ContinueStatement* stmt) {
DCHECK(in_function_);
- DCHECK(stmt->target() != NULL);
+ DCHECK_NOT_NULL(stmt->target());
int i = static_cast<int>(breakable_blocks_.size()) - 1;
int block_distance = 0;
for (; i >= 0; i--) {
@@ -183,7 +194,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
void VisitBreakStatement(BreakStatement* stmt) {
DCHECK(in_function_);
- DCHECK(stmt->target() != NULL);
+ DCHECK_NOT_NULL(stmt->target());
int i = static_cast<int>(breakable_blocks_.size()) - 1;
int block_distance = 0;
for (; i >= 0; i--) {
@@ -229,7 +240,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
void CompileCase(CaseClause* clause, uint16_t fall_through,
VariableProxy* tag) {
Literal* label = clause->label()->AsLiteral();
- DCHECK(label != nullptr);
+ DCHECK_NOT_NULL(label);
block_size_++;
current_function_builder_->Emit(kExprIf);
current_function_builder_->Emit(kExprI32Ior);
@@ -247,7 +258,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
void VisitSwitchStatement(SwitchStatement* stmt) {
VariableProxy* tag = stmt->tag()->AsVariableProxy();
- DCHECK(tag != NULL);
+ DCHECK_NOT_NULL(tag);
BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock, false,
0);
uint16_t fall_through = current_function_builder_->AddLocal(kAstI32);
@@ -332,20 +343,20 @@ class AsmWasmBuilderImpl : public AstVisitor {
Scope* scope = expr->scope();
if (in_function_) {
if (expr->bounds().lower->IsFunction()) {
- Type::FunctionType* func_type = expr->bounds().lower->AsFunction();
+ FunctionType* func_type = expr->bounds().lower->AsFunction();
LocalType return_type = TypeFrom(func_type->Result());
current_function_builder_->ReturnType(return_type);
for (int i = 0; i < expr->parameter_count(); i++) {
LocalType type = TypeFrom(func_type->Parameter(i));
- DCHECK(type != kAstStmt);
+ DCHECK_NE(kAstStmt, type);
LookupOrInsertLocal(scope->parameter(i), type);
}
} else {
UNREACHABLE();
}
}
- RECURSE(VisitDeclarations(scope->declarations()));
RECURSE(VisitStatements(expr->body()));
+ RECURSE(VisitDeclarations(scope->declarations()));
}
void VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
@@ -363,35 +374,27 @@ class AsmWasmBuilderImpl : public AstVisitor {
void VisitVariableProxy(VariableProxy* expr) {
if (in_function_) {
Variable* var = expr->var();
- if (var->is_function()) {
- DCHECK(!is_set_op_);
- std::vector<uint8_t> index =
- UnsignedLEB128From(LookupOrInsertFunction(var));
- current_function_builder_->EmitCode(
- &index[0], static_cast<uint32_t>(index.size()));
- } else {
- if (is_set_op_) {
- if (var->IsContextSlot()) {
- current_function_builder_->Emit(kExprStoreGlobal);
- } else {
- current_function_builder_->Emit(kExprSetLocal);
- }
- is_set_op_ = false;
+ if (is_set_op_) {
+ if (var->IsContextSlot()) {
+ current_function_builder_->Emit(kExprStoreGlobal);
} else {
- if (var->IsContextSlot()) {
- current_function_builder_->Emit(kExprLoadGlobal);
- } else {
- current_function_builder_->Emit(kExprGetLocal);
- }
+ current_function_builder_->Emit(kExprSetLocal);
}
- LocalType var_type = TypeOf(expr);
- DCHECK(var_type != kAstStmt);
+ is_set_op_ = false;
+ } else {
if (var->IsContextSlot()) {
- AddLeb128(LookupOrInsertGlobal(var, var_type), false);
+ current_function_builder_->Emit(kExprLoadGlobal);
} else {
- AddLeb128(LookupOrInsertLocal(var, var_type), true);
+ current_function_builder_->Emit(kExprGetLocal);
}
}
+ LocalType var_type = TypeOf(expr);
+ DCHECK_NE(kAstStmt, var_type);
+ if (var->IsContextSlot()) {
+ AddLeb128(LookupOrInsertGlobal(var, var_type), false);
+ } else {
+ AddLeb128(LookupOrInsertLocal(var, var_type), true);
+ }
}
}
@@ -433,10 +436,10 @@ class AsmWasmBuilderImpl : public AstVisitor {
ObjectLiteralProperty* prop = props->at(i);
DCHECK(marking_exported);
VariableProxy* expr = prop->value()->AsVariableProxy();
- DCHECK(expr != nullptr);
+ DCHECK_NOT_NULL(expr);
Variable* var = expr->var();
Literal* name = prop->key()->AsLiteral();
- DCHECK(name != nullptr);
+ DCHECK_NOT_NULL(name);
DCHECK(name->IsPropertyName());
const AstRawString* raw_name = name->AsRawPropertyName();
if (var->is_function()) {
@@ -451,7 +454,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
void VisitArrayLiteral(ArrayLiteral* expr) { UNREACHABLE(); }
void LoadInitFunction() {
- current_function_builder_ = builder_->FunctionAt(init_function_index);
+ current_function_builder_ = builder_->FunctionAt(init_function_index_);
in_function_ = true;
}
@@ -460,11 +463,155 @@ class AsmWasmBuilderImpl : public AstVisitor {
current_function_builder_ = nullptr;
}
+ void AddFunctionTable(VariableProxy* table, ArrayLiteral* funcs) {
+ FunctionType* func_type =
+ funcs->bounds().lower->AsArray()->Element()->AsFunction();
+ LocalType return_type = TypeFrom(func_type->Result());
+ FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
+ func_type->Arity());
+ if (return_type != kAstStmt) {
+ sig.AddReturn(static_cast<LocalType>(return_type));
+ }
+ for (int i = 0; i < func_type->Arity(); i++) {
+ sig.AddParam(TypeFrom(func_type->Parameter(i)));
+ }
+ uint16_t signature_index = builder_->AddSignature(sig.Build());
+ InsertFunctionTable(table->var(), next_table_index_, signature_index);
+ next_table_index_ += funcs->values()->length();
+ for (int i = 0; i < funcs->values()->length(); i++) {
+ VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
+ DCHECK_NOT_NULL(func);
+ builder_->AddIndirectFunction(LookupOrInsertFunction(func->var()));
+ }
+ }
+
+ struct FunctionTableIndices : public ZoneObject {
+ uint32_t start_index;
+ uint16_t signature_index;
+ };
+
+ void InsertFunctionTable(Variable* v, uint32_t start_index,
+ uint16_t signature_index) {
+ FunctionTableIndices* container = new (zone()) FunctionTableIndices();
+ container->start_index = start_index;
+ container->signature_index = signature_index;
+ ZoneHashMap::Entry* entry = function_tables_.LookupOrInsert(
+ v, ComputePointerHash(v), ZoneAllocationPolicy(zone()));
+ entry->value = container;
+ }
+
+ FunctionTableIndices* LookupFunctionTable(Variable* v) {
+ ZoneHashMap::Entry* entry =
+ function_tables_.Lookup(v, ComputePointerHash(v));
+ DCHECK_NOT_NULL(entry);
+ return reinterpret_cast<FunctionTableIndices*>(entry->value);
+ }
+
+ class ImportedFunctionTable {
+ private:
+ class ImportedFunctionIndices : public ZoneObject {
+ public:
+ const unsigned char* name_;
+ int name_length_;
+ WasmModuleBuilder::SignatureMap signature_to_index_;
+
+ ImportedFunctionIndices(const unsigned char* name, int name_length,
+ Zone* zone)
+ : name_(name), name_length_(name_length), signature_to_index_(zone) {}
+ };
+ ZoneHashMap table_;
+ AsmWasmBuilderImpl* builder_;
+
+ public:
+ explicit ImportedFunctionTable(AsmWasmBuilderImpl* builder)
+ : table_(HashMap::PointersMatch, ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(builder->zone())),
+ builder_(builder) {}
+
+ void AddImport(Variable* v, const unsigned char* name, int name_length) {
+ ImportedFunctionIndices* indices = new (builder_->zone())
+ ImportedFunctionIndices(name, name_length, builder_->zone());
+ ZoneHashMap::Entry* entry = table_.LookupOrInsert(
+ v, ComputePointerHash(v), ZoneAllocationPolicy(builder_->zone()));
+ entry->value = indices;
+ }
+
+ uint16_t GetFunctionIndex(Variable* v, FunctionSig* sig) {
+ ZoneHashMap::Entry* entry = table_.Lookup(v, ComputePointerHash(v));
+ DCHECK_NOT_NULL(entry);
+ ImportedFunctionIndices* indices =
+ reinterpret_cast<ImportedFunctionIndices*>(entry->value);
+ WasmModuleBuilder::SignatureMap::iterator pos =
+ indices->signature_to_index_.find(sig);
+ if (pos != indices->signature_to_index_.end()) {
+ return pos->second;
+ } else {
+ uint16_t index = builder_->builder_->AddFunction();
+ indices->signature_to_index_[sig] = index;
+ WasmFunctionBuilder* function = builder_->builder_->FunctionAt(index);
+ function->External(1);
+ function->SetName(indices->name_, indices->name_length_);
+ if (sig->return_count() > 0) {
+ function->ReturnType(sig->GetReturn());
+ }
+ for (size_t i = 0; i < sig->parameter_count(); i++) {
+ function->AddParam(sig->GetParam(i));
+ }
+ return index;
+ }
+ }
+ };
+
void VisitAssignment(Assignment* expr) {
bool in_init = false;
if (!in_function_) {
+ BinaryOperation* binop = expr->value()->AsBinaryOperation();
+ if (binop != nullptr) {
+ Property* prop = binop->left()->AsProperty();
+ DCHECK_NOT_NULL(prop);
+ LoadInitFunction();
+ is_set_op_ = true;
+ RECURSE(Visit(expr->target()));
+ DCHECK(!is_set_op_);
+ if (binop->op() == Token::MUL) {
+ DCHECK(binop->right()->IsLiteral());
+ DCHECK_EQ(1.0, binop->right()->AsLiteral()->raw_value()->AsNumber());
+ DCHECK(binop->right()->AsLiteral()->raw_value()->ContainsDot());
+ VisitForeignVariable(true, prop);
+ } else if (binop->op() == Token::BIT_OR) {
+ DCHECK(binop->right()->IsLiteral());
+ DCHECK_EQ(0.0, binop->right()->AsLiteral()->raw_value()->AsNumber());
+ DCHECK(!binop->right()->AsLiteral()->raw_value()->ContainsDot());
+ VisitForeignVariable(false, prop);
+ } else {
+ UNREACHABLE();
+ }
+ UnLoadInitFunction();
+ return;
+ }
// TODO(bradnelson): Get rid of this.
if (TypeOf(expr->value()) == kAstStmt) {
+ Property* prop = expr->value()->AsProperty();
+ if (prop != nullptr) {
+ VariableProxy* vp = prop->obj()->AsVariableProxy();
+ if (vp != nullptr && vp->var()->IsParameter() &&
+ vp->var()->index() == 1) {
+ VariableProxy* target = expr->target()->AsVariableProxy();
+ if (target->bounds().lower->Is(Type::Function())) {
+ const AstRawString* name =
+ prop->key()->AsLiteral()->AsRawPropertyName();
+ imported_function_table_.AddImport(
+ target->var(), name->raw_data(), name->length());
+ }
+ }
+ }
+ ArrayLiteral* funcs = expr->value()->AsArrayLiteral();
+ if (funcs != nullptr &&
+ funcs->bounds().lower->AsArray()->Element()->IsFunction()) {
+ VariableProxy* target = expr->target()->AsVariableProxy();
+ DCHECK_NOT_NULL(target);
+ AddFunctionTable(target, funcs);
+ }
return;
}
in_init = true;
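
AddFunctionTable/InsertFunctionTable above flatten every asm.js function table into one module-wide indirect-function space: each table variable maps to a (start_index, signature_index) pair, and a call through table[i & mask] becomes an indirect call at start_index plus the masked index. A minimal sketch of that bookkeeping, with hypothetical names:

    #include <cstdint>

    // Per-table record: where the table's entries start in the global
    // indirect-function list, plus the signature they were checked against.
    struct TableIndices { uint32_t start_index; uint16_t signature_index; };

    // table[i & mask](...) lowers to an indirect call at base + index.
    uint32_t IndirectIndex(const TableIndices& t, uint32_t masked_index) {
      return t.start_index + masked_index;
    }
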
@@ -493,10 +640,59 @@ class AsmWasmBuilderImpl : public AstVisitor {
void VisitThrow(Throw* expr) { UNREACHABLE(); }
+ void VisitForeignVariable(bool is_float, Property* expr) {
+ DCHECK(expr->obj()->AsVariableProxy());
+ DCHECK(VariableLocation::PARAMETER ==
+ expr->obj()->AsVariableProxy()->var()->location());
+ DCHECK_EQ(1, expr->obj()->AsVariableProxy()->var()->index());
+ Literal* key_literal = expr->key()->AsLiteral();
+ DCHECK_NOT_NULL(key_literal);
+ if (!key_literal->value().is_null() && !foreign_.is_null() &&
+ foreign_->IsObject()) {
+ Handle<Name> name =
+ i::Object::ToName(isolate_, key_literal->value()).ToHandleChecked();
+ MaybeHandle<Object> maybe_value = i::Object::GetProperty(foreign_, name);
+ if (!maybe_value.is_null()) {
+ Handle<Object> value = maybe_value.ToHandleChecked();
+ if (is_float) {
+ MaybeHandle<Object> maybe_nvalue = i::Object::ToNumber(value);
+ if (!maybe_nvalue.is_null()) {
+ Handle<Object> nvalue = maybe_nvalue.ToHandleChecked();
+ if (nvalue->IsNumber()) {
+ double val = nvalue->Number();
+ byte code[] = {WASM_F64(val)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ return;
+ }
+ }
+ } else {
+ MaybeHandle<Object> maybe_nvalue =
+ i::Object::ToInt32(isolate_, value);
+ if (!maybe_nvalue.is_null()) {
+ Handle<Object> nvalue = maybe_nvalue.ToHandleChecked();
+ if (nvalue->IsNumber()) {
+ int32_t val = static_cast<int32_t>(nvalue->Number());
+ byte code[] = {WASM_I32(val)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ return;
+ }
+ }
+ }
+ }
+ }
+ if (is_float) {
+ byte code[] = {WASM_F64(std::numeric_limits<double>::quiet_NaN())};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ } else {
+ byte code[] = {WASM_I32(0)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ }
+ }
+
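
VisitForeignVariable bakes module-level imports into constants at build time, using the annotation to choose the coercion: var d = +foreign.x goes through ToNumber and is emitted as an f64 literal (NaN when the import is absent), while var i = foreign.y|0 goes through ToInt32 and is emitted as an i32 literal (0 when absent). A minimal sketch of the numeric half of that rule, with the JS property lookup abstracted away:

    #include <cstdint>
    #include <limits>

    // Double-annotated import (+foreign.x): ToNumber, defaulting to NaN.
    double ImportAsDouble(bool present, double v) {
      return present ? v : std::numeric_limits<double>::quiet_NaN();
    }
    // Int-annotated import (foreign.y|0): simplified ToInt32, default 0.
    int32_t ImportAsInt32(bool present, double v) {
      return present ? static_cast<int32_t>(v) : 0;
    }
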
void VisitProperty(Property* expr) {
Expression* obj = expr->obj();
- DCHECK(obj->bounds().lower == obj->bounds().upper);
- TypeImpl<ZoneTypeConfig>* type = obj->bounds().lower;
+ DCHECK_EQ(obj->bounds().lower, obj->bounds().upper);
+ Type* type = obj->bounds().lower;
MachineType mtype;
int size;
if (type->Is(cache_.kUint8Array)) {
@@ -533,29 +729,38 @@ class AsmWasmBuilderImpl : public AstVisitor {
WasmOpcodes::LoadStoreOpcodeOf(mtype, is_set_op_),
WasmOpcodes::LoadStoreAccessOf(false));
is_set_op_ = false;
- Literal* value = expr->key()->AsLiteral();
- if (value) {
- DCHECK(value->raw_value()->IsNumber());
- DCHECK(kAstI32 == TypeOf(value));
- int val = static_cast<int>(value->raw_value()->AsNumber());
- byte code[] = {WASM_I32(val * size)};
- current_function_builder_->EmitCode(code, sizeof(code));
- return;
- }
- BinaryOperation* binop = expr->key()->AsBinaryOperation();
- if (binop) {
- DCHECK(Token::SAR == binop->op());
- DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
- DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral()));
- DCHECK(size ==
- 1 << static_cast<int>(
- binop->right()->AsLiteral()->raw_value()->AsNumber()));
- // Mask bottom bits to match asm.js behavior.
- current_function_builder_->Emit(kExprI32And);
- byte code[] = {WASM_I8(~(size - 1))};
- current_function_builder_->EmitCode(code, sizeof(code));
- RECURSE(Visit(binop->left()));
+ if (size == 1) {
+      // Allow more general expressions in byte arrays than the spec
+      // strictly permits.
+      // Early versions of Emscripten emitted HEAP8[HEAP32[..]|0] in
+      // places that strictly should be HEAP8[HEAP32[..]>>0].
+ RECURSE(Visit(expr->key()));
return;
+ } else {
+ Literal* value = expr->key()->AsLiteral();
+ if (value) {
+ DCHECK(value->raw_value()->IsNumber());
+ DCHECK_EQ(kAstI32, TypeOf(value));
+ int val = static_cast<int>(value->raw_value()->AsNumber());
+ byte code[] = {WASM_I32(val * size)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ return;
+ }
+ BinaryOperation* binop = expr->key()->AsBinaryOperation();
+ if (binop) {
+ DCHECK_EQ(Token::SAR, binop->op());
+ DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
+ DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral()));
+ DCHECK_EQ(size,
+ 1 << static_cast<int>(
+ binop->right()->AsLiteral()->raw_value()->AsNumber()));
+ // Mask bottom bits to match asm.js behavior.
+ current_function_builder_->Emit(kExprI32And);
+ byte code[] = {WASM_I8(~(size - 1))};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ RECURSE(Visit(binop->left()));
+ return;
+ }
}
UNREACHABLE();
}
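  // Editorial sketch of the index lowering above, assuming a 4-byte
  // element array:
  //   HEAP32[x >> 2]  ~~>  load(x & ~3)
  // i.e. the asm.js shift by log2(size) plus the implicit element scaling
  // collapse into a single mask of the low address bits.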
@@ -565,18 +770,54 @@ class AsmWasmBuilderImpl : public AstVisitor {
switch (call_type) {
case Call::OTHER_CALL: {
DCHECK(in_function_);
- current_function_builder_->Emit(kExprCallFunction);
- RECURSE(Visit(expr->expression()));
- ZoneList<Expression*>* args = expr->arguments();
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE(Visit(arg));
+ uint16_t index;
+ VariableProxy* vp = expr->expression()->AsVariableProxy();
+ if (vp != nullptr &&
+ Type::Any()->Is(vp->bounds().lower->AsFunction()->Result())) {
+ LocalType return_type = TypeOf(expr);
+ ZoneList<Expression*>* args = expr->arguments();
+ FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
+ args->length());
+ if (return_type != kAstStmt) {
+ sig.AddReturn(return_type);
+ }
+ for (int i = 0; i < args->length(); i++) {
+ sig.AddParam(TypeOf(args->at(i)));
+ }
+ index =
+ imported_function_table_.GetFunctionIndex(vp->var(), sig.Build());
+ } else {
+ index = LookupOrInsertFunction(vp->var());
}
+ current_function_builder_->Emit(kExprCallFunction);
+ std::vector<uint8_t> index_arr = UnsignedLEB128From(index);
+ current_function_builder_->EmitCode(
+ &index_arr[0], static_cast<uint32_t>(index_arr.size()));
+ break;
+ }
+ case Call::KEYED_PROPERTY_CALL: {
+ DCHECK(in_function_);
+ Property* p = expr->expression()->AsProperty();
+ DCHECK_NOT_NULL(p);
+ VariableProxy* var = p->obj()->AsVariableProxy();
+ DCHECK_NOT_NULL(var);
+ FunctionTableIndices* indices = LookupFunctionTable(var->var());
+ current_function_builder_->EmitWithU8(kExprCallIndirect,
+ indices->signature_index);
+ current_function_builder_->Emit(kExprI32Add);
+ byte code[] = {WASM_I32(indices->start_index)};
+ current_function_builder_->EmitCode(code, sizeof(code));
+ RECURSE(Visit(p->key()));
break;
}
default:
UNREACHABLE();
}
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE(Visit(arg));
+ }
}
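  // Editorial note: in the OTHER_CALL path the callee index is emitted as
  // an unsigned LEB128 immediately after kExprCallFunction, while
  // KEYED_PROPERTY_CALL lowers table[expr](...) to kExprCallIndirect with
  // the table's signature index and (start_index + key) as the target. The
  // non-import branch assumes the callee is a VariableProxy (vp != nullptr).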
void VisitCallNew(CallNew* expr) { UNREACHABLE(); }
@@ -586,7 +827,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
void VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::NOT: {
- DCHECK(TypeOf(expr->expression()) == kAstI32);
+ DCHECK_EQ(kAstI32, TypeOf(expr->expression()));
current_function_builder_->Emit(kExprBoolNot);
break;
}
@@ -600,7 +841,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
bool MatchIntBinaryOperation(BinaryOperation* expr, Token::Value op,
int32_t val) {
- DCHECK(expr->right() != nullptr);
+ DCHECK_NOT_NULL(expr->right());
if (expr->op() == op && expr->right()->IsLiteral() &&
TypeOf(expr) == kAstI32) {
Literal* right = expr->right()->AsLiteral();
@@ -614,7 +855,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
bool MatchDoubleBinaryOperation(BinaryOperation* expr, Token::Value op,
double val) {
- DCHECK(expr->right() != nullptr);
+ DCHECK_NOT_NULL(expr->right());
if (expr->op() == op && expr->right()->IsLiteral() &&
TypeOf(expr) == kAstF64) {
Literal* right = expr->right()->AsLiteral();
@@ -629,8 +870,9 @@ class AsmWasmBuilderImpl : public AstVisitor {
enum ConvertOperation { kNone, kAsIs, kToInt, kToDouble };
ConvertOperation MatchOr(BinaryOperation* expr) {
- if (MatchIntBinaryOperation(expr, Token::BIT_OR, 0)) {
- return (TypeOf(expr->left()) == kAstI32) ? kAsIs : kToInt;
+ if (MatchIntBinaryOperation(expr, Token::BIT_OR, 0) &&
+ (TypeOf(expr->left()) == kAstI32)) {
+ return kAsIs;
} else {
return kNone;
}
@@ -647,12 +889,12 @@ class AsmWasmBuilderImpl : public AstVisitor {
ConvertOperation MatchXor(BinaryOperation* expr) {
if (MatchIntBinaryOperation(expr, Token::BIT_XOR, 0xffffffff)) {
- DCHECK(TypeOf(expr->left()) == kAstI32);
- DCHECK(TypeOf(expr->right()) == kAstI32);
+ DCHECK_EQ(kAstI32, TypeOf(expr->left()));
+ DCHECK_EQ(kAstI32, TypeOf(expr->right()));
BinaryOperation* op = expr->left()->AsBinaryOperation();
if (op != nullptr) {
if (MatchIntBinaryOperation(op, Token::BIT_XOR, 0xffffffff)) {
- DCHECK(TypeOf(op->right()) == kAstI32);
+ DCHECK_EQ(kAstI32, TypeOf(op->right()));
if (TypeOf(op->left()) != kAstI32) {
return kToInt;
} else {
@@ -666,7 +908,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
ConvertOperation MatchMul(BinaryOperation* expr) {
if (MatchDoubleBinaryOperation(expr, Token::MUL, 1.0)) {
- DCHECK(TypeOf(expr->right()) == kAstF64);
+ DCHECK_EQ(kAstF64, TypeOf(expr->right()));
if (TypeOf(expr->left()) != kAstF64) {
return kToDouble;
} else {
@@ -768,6 +1010,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
BINOP_CASE(Token::MUL, Mul, NON_SIGNED_BINOP, true);
BINOP_CASE(Token::DIV, Div, SIGNED_BINOP, false);
BINOP_CASE(Token::BIT_OR, Ior, NON_SIGNED_INT_BINOP, true);
+ BINOP_CASE(Token::BIT_AND, And, NON_SIGNED_INT_BINOP, true);
BINOP_CASE(Token::BIT_XOR, Xor, NON_SIGNED_INT_BINOP, true);
BINOP_CASE(Token::SHL, Shl, NON_SIGNED_INT_BINOP, true);
BINOP_CASE(Token::SAR, ShrS, NON_SIGNED_INT_BINOP, true);
@@ -786,6 +1029,10 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
break;
}
+ case Token::COMMA: {
+ current_function_builder_->EmitWithU8(kExprBlock, 2);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -879,8 +1126,8 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
TypeIndex TypeIndexOf(Expression* expr) {
- DCHECK(expr->bounds().lower == expr->bounds().upper);
- TypeImpl<ZoneTypeConfig>* type = expr->bounds().lower;
+ DCHECK_EQ(expr->bounds().lower, expr->bounds().upper);
+ Type* type = expr->bounds().lower;
if (type->Is(cache_.kAsmFixnum)) {
return kFixnum;
} else if (type->Is(cache_.kAsmSigned)) {
@@ -929,17 +1176,14 @@ class AsmWasmBuilderImpl : public AstVisitor {
void VisitDoExpression(DoExpression* expr) { UNREACHABLE(); }
- void VisitRewritableAssignmentExpression(
- RewritableAssignmentExpression* expr) {
- UNREACHABLE();
- }
+ void VisitRewritableExpression(RewritableExpression* expr) { UNREACHABLE(); }
struct IndexContainer : public ZoneObject {
uint16_t index;
};
uint16_t LookupOrInsertLocal(Variable* v, LocalType type) {
- DCHECK(current_function_builder_ != nullptr);
+ DCHECK_NOT_NULL(current_function_builder_);
ZoneHashMap::Entry* entry =
local_variables_.Lookup(v, ComputePointerHash(v));
if (entry == nullptr) {
@@ -974,7 +1218,7 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
uint16_t LookupOrInsertFunction(Variable* v) {
- DCHECK(builder_ != nullptr);
+ DCHECK_NOT_NULL(builder_);
ZoneHashMap::Entry* entry = functions_.Lookup(v, ComputePointerHash(v));
if (entry == nullptr) {
uint16_t index = builder_->AddFunction();
@@ -988,11 +1232,11 @@ class AsmWasmBuilderImpl : public AstVisitor {
}
LocalType TypeOf(Expression* expr) {
- DCHECK(expr->bounds().lower == expr->bounds().upper);
+ DCHECK_EQ(expr->bounds().lower, expr->bounds().upper);
return TypeFrom(expr->bounds().lower);
}
- LocalType TypeFrom(TypeImpl<ZoneTypeConfig>* type) {
+ LocalType TypeFrom(Type* type) {
if (type->Is(cache_.kAsmInt)) {
return kAstI32;
} else if (type->Is(cache_.kAsmFloat)) {
@@ -1017,10 +1261,14 @@ class AsmWasmBuilderImpl : public AstVisitor {
FunctionLiteral* literal_;
Isolate* isolate_;
Zone* zone_;
+ Handle<Object> foreign_;
TypeCache const& cache_;
ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
int block_size_;
- uint16_t init_function_index;
+ uint16_t init_function_index_;
+ uint32_t next_table_index_;
+ ZoneHashMap function_tables_;
+ ImportedFunctionTable imported_function_table_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -1029,13 +1277,13 @@ class AsmWasmBuilderImpl : public AstVisitor {
};
AsmWasmBuilder::AsmWasmBuilder(Isolate* isolate, Zone* zone,
- FunctionLiteral* literal)
- : isolate_(isolate), zone_(zone), literal_(literal) {}
+ FunctionLiteral* literal, Handle<Object> foreign)
+ : isolate_(isolate), zone_(zone), literal_(literal), foreign_(foreign) {}
// TODO(aseemgarg): probably should take the zone (to write wasm to) as input
// so that the zone in the constructor may be thrown away once the wasm module
// is written.
WasmModuleIndex* AsmWasmBuilder::Run() {
- AsmWasmBuilderImpl impl(isolate_, zone_, literal_);
+ AsmWasmBuilderImpl impl(isolate_, zone_, literal_, foreign_);
impl.Compile();
WasmModuleWriter* writer = impl.builder_->Build(zone_);
return writer->WriteTo(zone_);
diff --git a/deps/v8/src/wasm/asm-wasm-builder.h b/deps/v8/src/wasm/asm-wasm-builder.h
index cb568db77c..9b761f9040 100644
--- a/deps/v8/src/wasm/asm-wasm-builder.h
+++ b/deps/v8/src/wasm/asm-wasm-builder.h
@@ -6,6 +6,7 @@
#define V8_WASM_ASM_WASM_BUILDER_H_
#include "src/allocation.h"
+#include "src/objects.h"
#include "src/wasm/encoder.h"
#include "src/zone.h"
@@ -18,13 +19,15 @@ namespace wasm {
class AsmWasmBuilder {
public:
- explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root);
+ explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root,
+ Handle<Object> foreign);
WasmModuleIndex* Run();
private:
Isolate* isolate_;
Zone* zone_;
FunctionLiteral* literal_;
+ Handle<Object> foreign_;
};
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/ast-decoder.cc b/deps/v8/src/wasm/ast-decoder.cc
index ffb815771a..c97c781c12 100644
--- a/deps/v8/src/wasm/ast-decoder.cc
+++ b/deps/v8/src/wasm/ast-decoder.cc
@@ -5,6 +5,7 @@
#include "src/base/platform/elapsed-timer.h"
#include "src/signature.h"
+#include "src/bit-vector.h"
#include "src/flags.h"
#include "src/handles.h"
#include "src/zone-containers.h"
@@ -40,7 +41,6 @@ struct Tree {
WasmOpcode opcode() const { return static_cast<WasmOpcode>(*pc); }
};
-
// A production represents an incomplete decoded tree in the LR decoder.
struct Production {
Tree* tree; // the root of the syntax tree.
@@ -97,13 +97,278 @@ struct IfEnv {
#define BUILD0(func) (build() ? builder_->func() : nullptr)
+// Generic Wasm bytecode decoder with utilities for decoding operands,
+// lengths, etc.
+class WasmDecoder : public Decoder {
+ public:
+ WasmDecoder() : Decoder(nullptr, nullptr), function_env_(nullptr) {}
+ WasmDecoder(FunctionEnv* env, const byte* start, const byte* end)
+ : Decoder(start, end), function_env_(env) {}
+ FunctionEnv* function_env_;
+
+ void Reset(FunctionEnv* function_env, const byte* start, const byte* end) {
+ Decoder::Reset(start, end);
+ function_env_ = function_env;
+ }
+
+ byte ByteOperand(const byte* pc, const char* msg = "missing 1-byte operand") {
+ if ((pc + sizeof(byte)) >= limit_) {
+ error(pc, msg);
+ return 0;
+ }
+ return pc[1];
+ }
+
+ uint32_t Uint32Operand(const byte* pc) {
+ if ((pc + sizeof(uint32_t)) >= limit_) {
+ error(pc, "missing 4-byte operand");
+ return 0;
+ }
+ return read_u32(pc + 1);
+ }
+
+ uint64_t Uint64Operand(const byte* pc) {
+ if ((pc + sizeof(uint64_t)) >= limit_) {
+ error(pc, "missing 8-byte operand");
+ return 0;
+ }
+ return read_u64(pc + 1);
+ }
+
+ inline bool Validate(const byte* pc, LocalIndexOperand& operand) {
+ if (operand.index < function_env_->total_locals) {
+ operand.type = function_env_->GetLocalType(operand.index);
+ return true;
+ }
+ error(pc, pc + 1, "invalid local index");
+ return false;
+ }
+
+ inline bool Validate(const byte* pc, GlobalIndexOperand& operand) {
+ ModuleEnv* m = function_env_->module;
+ if (m && m->module && operand.index < m->module->globals->size()) {
+ operand.machine_type = m->module->globals->at(operand.index).type;
+ operand.type = WasmOpcodes::LocalTypeFor(operand.machine_type);
+ return true;
+ }
+ error(pc, pc + 1, "invalid global index");
+ return false;
+ }
+
+ inline bool Validate(const byte* pc, FunctionIndexOperand& operand) {
+ ModuleEnv* m = function_env_->module;
+ if (m && m->module && operand.index < m->module->functions->size()) {
+ operand.sig = m->module->functions->at(operand.index).sig;
+ return true;
+ }
+ error(pc, pc + 1, "invalid function index");
+ return false;
+ }
+
+ inline bool Validate(const byte* pc, SignatureIndexOperand& operand) {
+ ModuleEnv* m = function_env_->module;
+ if (m && m->module && operand.index < m->module->signatures->size()) {
+ operand.sig = m->module->signatures->at(operand.index);
+ return true;
+ }
+ error(pc, pc + 1, "invalid signature index");
+ return false;
+ }
+
+ inline bool Validate(const byte* pc, ImportIndexOperand& operand) {
+ ModuleEnv* m = function_env_->module;
+ if (m && m->module && operand.index < m->module->import_table->size()) {
+ operand.sig = m->module->import_table->at(operand.index).sig;
+ return true;
+ }
+    error(pc, pc + 1, "invalid import index");
+ return false;
+ }
+
+ inline bool Validate(const byte* pc, BreakDepthOperand& operand,
+ ZoneVector<Block>& blocks) {
+ if (operand.depth < blocks.size()) {
+ operand.target = &blocks[blocks.size() - operand.depth - 1];
+ return true;
+ }
+ error(pc, pc + 1, "invalid break depth");
+ return false;
+ }
+
+ bool Validate(const byte* pc, TableSwitchOperand& operand,
+ size_t block_depth) {
+ if (operand.table_count == 0) {
+ error(pc, "tableswitch with 0 entries");
+ return false;
+ }
+ // Verify table.
+ for (uint32_t i = 0; i < operand.table_count; i++) {
+ uint16_t target = operand.read_entry(this, i);
+ if (target >= 0x8000) {
+ size_t depth = target - 0x8000;
+ if (depth > block_depth) {
+ error(operand.table + i * 2, "improper branch in tableswitch");
+ return false;
+ }
+ } else {
+ if (target >= operand.case_count) {
+ error(operand.table + i * 2, "invalid case target in tableswitch");
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ int OpcodeArity(const byte* pc) {
+#define DECLARE_ARITY(name, ...) \
+ static const LocalType kTypes_##name[] = {__VA_ARGS__}; \
+ static const int kArity_##name = \
+ static_cast<int>(arraysize(kTypes_##name) - 1);
+
+ FOREACH_SIGNATURE(DECLARE_ARITY);
+#undef DECLARE_ARITY
+
+ switch (static_cast<WasmOpcode>(*pc)) {
+ case kExprI8Const:
+ case kExprI32Const:
+ case kExprI64Const:
+ case kExprF64Const:
+ case kExprF32Const:
+ case kExprGetLocal:
+ case kExprLoadGlobal:
+ case kExprNop:
+ case kExprUnreachable:
+ return 0;
+
+ case kExprBr:
+ case kExprStoreGlobal:
+ case kExprSetLocal:
+ return 1;
+
+ case kExprIf:
+ case kExprBrIf:
+ return 2;
+ case kExprIfElse:
+ case kExprSelect:
+ return 3;
+
+ case kExprBlock:
+ case kExprLoop: {
+ BlockCountOperand operand(this, pc);
+ return operand.count;
+ }
+
+ case kExprCallFunction: {
+ FunctionIndexOperand operand(this, pc);
+ return static_cast<int>(
+ function_env_->module->GetFunctionSignature(operand.index)
+ ->parameter_count());
+ }
+ case kExprCallIndirect: {
+ SignatureIndexOperand operand(this, pc);
+ return 1 + static_cast<int>(
+ function_env_->module->GetSignature(operand.index)
+ ->parameter_count());
+ }
+ case kExprCallImport: {
+ ImportIndexOperand operand(this, pc);
+ return static_cast<int>(
+ function_env_->module->GetImportSignature(operand.index)
+ ->parameter_count());
+ }
+ case kExprReturn: {
+ return static_cast<int>(function_env_->sig->return_count());
+ }
+ case kExprTableSwitch: {
+ TableSwitchOperand operand(this, pc);
+ return 1 + operand.case_count;
+ }
+
+#define DECLARE_OPCODE_CASE(name, opcode, sig) \
+ case kExpr##name: \
+ return kArity_##sig;
+
+ FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_MISC_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+ }
+ UNREACHABLE();
+ return 0;
+ }
+
+ int OpcodeLength(const byte* pc) {
+ switch (static_cast<WasmOpcode>(*pc)) {
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+ FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
+#undef DECLARE_OPCODE_CASE
+ {
+ MemoryAccessOperand operand(this, pc);
+ return 1 + operand.length;
+ }
+ case kExprBlock:
+ case kExprLoop: {
+ BlockCountOperand operand(this, pc);
+ return 1 + operand.length;
+ }
+ case kExprBr:
+ case kExprBrIf: {
+ BreakDepthOperand operand(this, pc);
+ return 1 + operand.length;
+ }
+ case kExprStoreGlobal:
+ case kExprLoadGlobal: {
+ GlobalIndexOperand operand(this, pc);
+ return 1 + operand.length;
+ }
+
+ case kExprCallFunction: {
+ FunctionIndexOperand operand(this, pc);
+ return 1 + operand.length;
+ }
+ case kExprCallIndirect: {
+ SignatureIndexOperand operand(this, pc);
+ return 1 + operand.length;
+ }
+ case kExprCallImport: {
+ ImportIndexOperand operand(this, pc);
+ return 1 + operand.length;
+ }
+
+ case kExprSetLocal:
+ case kExprGetLocal: {
+ LocalIndexOperand operand(this, pc);
+ return 1 + operand.length;
+ }
+ case kExprTableSwitch: {
+ TableSwitchOperand operand(this, pc);
+ return 1 + operand.length;
+ }
+ case kExprI8Const:
+ return 2;
+ case kExprI32Const:
+ case kExprF32Const:
+ return 5;
+ case kExprI64Const:
+ case kExprF64Const:
+ return 9;
+
+ default:
+ return 1;
+ }
+ }
+};
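+// Usage sketch (editorial, assuming valid code bounds and a populated
+// FunctionEnv):
+//   WasmDecoder decoder(env, start, end);
+//   for (const byte* pc = start; decoder.ok() && pc < end;
+//        pc += decoder.OpcodeLength(pc)) {
+//     int arity = decoder.OpcodeArity(pc);  // sub-nodes of this AST node
+//   }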
+
+
// A shift-reduce parser for decoding Wasm code that uses an explicit
// shift-reduce strategy with multiple internal stacks.
-class LR_WasmDecoder : public Decoder {
+class LR_WasmDecoder : public WasmDecoder {
public:
LR_WasmDecoder(Zone* zone, TFBuilder* builder)
- : Decoder(nullptr, nullptr),
- zone_(zone),
+ : zone_(zone),
builder_(builder),
trees_(zone),
stack_(zone),
@@ -127,8 +392,7 @@ class LR_WasmDecoder : public Decoder {
}
base_ = base;
- Reset(pc, end);
- function_env_ = function_env;
+ Reset(function_env, pc, end);
InitSsaEnv();
DecodeFunctionBody();
@@ -151,15 +415,20 @@ class LR_WasmDecoder : public Decoder {
}
if (ok()) {
+ if (FLAG_trace_wasm_ast) {
+ PrintAst(function_env, pc, end);
+ }
if (FLAG_trace_wasm_decode_time) {
double ms = decode_timer.Elapsed().InMillisecondsF();
- PrintF(" - decoding took %0.3f ms\n", ms);
+ PrintF("wasm-decode ok (%0.3f ms)\n\n", ms);
+ } else {
+ TRACE("wasm-decode ok\n\n");
}
- TRACE("wasm-decode ok\n\n");
} else {
TRACE("wasm-error module+%-6d func+%d: %s\n\n", baserel(error_pc_),
startrel(error_pc_), error_msg_.get());
}
+
return toResult(tree);
}
@@ -172,7 +441,6 @@ class LR_WasmDecoder : public Decoder {
TreeResult result_;
SsaEnv* ssa_env_;
- FunctionEnv* function_env_;
ZoneVector<Tree*> trees_;
ZoneVector<Production> stack_;
@@ -199,30 +467,30 @@ class LR_WasmDecoder : public Decoder {
ssa_env->locals[pos++] = builder_->Param(i, sig->GetParam(i));
}
// Initialize int32 locals.
- if (function_env_->local_int32_count > 0) {
+ if (function_env_->local_i32_count > 0) {
TFNode* zero = builder_->Int32Constant(0);
- for (uint32_t i = 0; i < function_env_->local_int32_count; i++) {
+ for (uint32_t i = 0; i < function_env_->local_i32_count; i++) {
ssa_env->locals[pos++] = zero;
}
}
// Initialize int64 locals.
- if (function_env_->local_int64_count > 0) {
+ if (function_env_->local_i64_count > 0) {
TFNode* zero = builder_->Int64Constant(0);
- for (uint32_t i = 0; i < function_env_->local_int64_count; i++) {
+ for (uint32_t i = 0; i < function_env_->local_i64_count; i++) {
ssa_env->locals[pos++] = zero;
}
}
// Initialize float32 locals.
- if (function_env_->local_float32_count > 0) {
+ if (function_env_->local_f32_count > 0) {
TFNode* zero = builder_->Float32Constant(0);
- for (uint32_t i = 0; i < function_env_->local_float32_count; i++) {
+ for (uint32_t i = 0; i < function_env_->local_f32_count; i++) {
ssa_env->locals[pos++] = zero;
}
}
// Initialize float64 locals.
- if (function_env_->local_float64_count > 0) {
+ if (function_env_->local_f64_count > 0) {
TFNode* zero = builder_->Float64Constant(0);
- for (uint32_t i = 0; i < function_env_->local_float64_count; i++) {
+ for (uint32_t i = 0; i < function_env_->local_f64_count; i++) {
ssa_env->locals[pos++] = zero;
}
}
@@ -329,25 +597,25 @@ class LR_WasmDecoder : public Decoder {
Leaf(kAstStmt);
break;
case kExprBlock: {
- int length = Operand<uint8_t>(pc_);
- if (length < 1) {
+ BlockCountOperand operand(this, pc_);
+ if (operand.count < 1) {
Leaf(kAstStmt);
} else {
- Shift(kAstEnd, length);
+ Shift(kAstEnd, operand.count);
// The break environment is the outer environment.
SsaEnv* break_env = ssa_env_;
PushBlock(break_env);
SetEnv("block:start", Steal(break_env));
}
- len = 2;
+ len = 1 + operand.length;
break;
}
case kExprLoop: {
- int length = Operand<uint8_t>(pc_);
- if (length < 1) {
+ BlockCountOperand operand(this, pc_);
+ if (operand.count < 1) {
Leaf(kAstStmt);
} else {
- Shift(kAstEnd, length);
+ Shift(kAstEnd, operand.count);
// The break environment is the outer environment.
SsaEnv* break_env = ssa_env_;
PushBlock(break_env);
@@ -359,7 +627,7 @@ class LR_WasmDecoder : public Decoder {
PushBlock(cont_env);
blocks_.back().stack_depth = -1; // no production for inner block.
}
- len = 2;
+ len = 1 + operand.length;
break;
}
case kExprIf:
@@ -372,59 +640,27 @@ class LR_WasmDecoder : public Decoder {
Shift(kAstStmt, 3); // Result type is typeof(x) in {c ? x : y}.
break;
case kExprBr: {
- uint32_t depth = Operand<uint8_t>(pc_);
- Shift(kAstEnd, 1);
- if (depth >= blocks_.size()) {
- error("improperly nested branch");
+ BreakDepthOperand operand(this, pc_);
+ if (Validate(pc_, operand, blocks_)) {
+ Shift(kAstEnd, 1);
}
- len = 2;
+ len = 1 + operand.length;
break;
}
case kExprBrIf: {
- uint32_t depth = Operand<uint8_t>(pc_);
- Shift(kAstStmt, 2);
- if (depth >= blocks_.size()) {
- error("improperly nested conditional branch");
+ BreakDepthOperand operand(this, pc_);
+ if (Validate(pc_, operand, blocks_)) {
+ Shift(kAstStmt, 2);
}
- len = 2;
+ len = 1 + operand.length;
break;
}
case kExprTableSwitch: {
- if (!checkAvailable(5)) {
- error("expected #tableswitch <cases> <table>, fell off end");
- break;
- }
- uint16_t case_count = *reinterpret_cast<const uint16_t*>(pc_ + 1);
- uint16_t table_count = *reinterpret_cast<const uint16_t*>(pc_ + 3);
- len = 5 + table_count * 2;
-
- if (table_count == 0) {
- error("tableswitch with 0 entries");
- break;
- }
-
- if (!checkAvailable(len)) {
- error("expected #tableswitch <cases> <table>, fell off end");
- break;
- }
-
- Shift(kAstEnd, 1 + case_count);
-
- // Verify table.
- for (int i = 0; i < table_count; i++) {
- uint16_t target =
- *reinterpret_cast<const uint16_t*>(pc_ + 5 + i * 2);
- if (target >= 0x8000) {
- size_t depth = target - 0x8000;
- if (depth > blocks_.size()) {
- error(pc_ + 5 + i * 2, "improper branch in tableswitch");
- }
- } else {
- if (target >= case_count) {
- error(pc_ + 5 + i * 2, "invalid case target in tableswitch");
- }
- }
+ TableSwitchOperand operand(this, pc_);
+ if (Validate(pc_, operand, blocks_.size())) {
+ Shift(kAstEnd, 1 + operand.case_count);
}
+ len = 1 + operand.length;
break;
}
case kExprReturn: {
@@ -445,59 +681,66 @@ class LR_WasmDecoder : public Decoder {
break;
}
case kExprI8Const: {
- int32_t value = Operand<int8_t>(pc_);
- Leaf(kAstI32, BUILD(Int32Constant, value));
- len = 2;
+ ImmI8Operand operand(this, pc_);
+ Leaf(kAstI32, BUILD(Int32Constant, operand.value));
+ len = 1 + operand.length;
break;
}
case kExprI32Const: {
- int32_t value = Operand<int32_t>(pc_);
- Leaf(kAstI32, BUILD(Int32Constant, value));
- len = 5;
+ ImmI32Operand operand(this, pc_);
+ Leaf(kAstI32, BUILD(Int32Constant, operand.value));
+ len = 1 + operand.length;
break;
}
case kExprI64Const: {
- int64_t value = Operand<int64_t>(pc_);
- Leaf(kAstI64, BUILD(Int64Constant, value));
- len = 9;
+ ImmI64Operand operand(this, pc_);
+ Leaf(kAstI64, BUILD(Int64Constant, operand.value));
+ len = 1 + operand.length;
break;
}
case kExprF32Const: {
- float value = Operand<float>(pc_);
- Leaf(kAstF32, BUILD(Float32Constant, value));
- len = 5;
+ ImmF32Operand operand(this, pc_);
+ Leaf(kAstF32, BUILD(Float32Constant, operand.value));
+ len = 1 + operand.length;
break;
}
case kExprF64Const: {
- double value = Operand<double>(pc_);
- Leaf(kAstF64, BUILD(Float64Constant, value));
- len = 9;
+ ImmF64Operand operand(this, pc_);
+ Leaf(kAstF64, BUILD(Float64Constant, operand.value));
+ len = 1 + operand.length;
break;
}
case kExprGetLocal: {
- uint32_t index;
- LocalType type = LocalOperand(pc_, &index, &len);
- TFNode* val =
- build() && type != kAstStmt ? ssa_env_->locals[index] : nullptr;
- Leaf(type, val);
+ LocalIndexOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ TFNode* val = build() ? ssa_env_->locals[operand.index] : nullptr;
+ Leaf(operand.type, val);
+ }
+ len = 1 + operand.length;
break;
}
case kExprSetLocal: {
- uint32_t index;
- LocalType type = LocalOperand(pc_, &index, &len);
- Shift(type, 1);
+ LocalIndexOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ Shift(operand.type, 1);
+ }
+ len = 1 + operand.length;
break;
}
case kExprLoadGlobal: {
- uint32_t index;
- LocalType type = GlobalOperand(pc_, &index, &len);
- Leaf(type, BUILD(LoadGlobal, index));
+ GlobalIndexOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ Leaf(operand.type, BUILD(LoadGlobal, operand.index));
+ }
+ len = 1 + operand.length;
break;
}
case kExprStoreGlobal: {
- uint32_t index;
- LocalType type = GlobalOperand(pc_, &index, &len);
- Shift(type, 1);
+ GlobalIndexOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ Shift(operand.type, 1);
+ }
+ len = 1 + operand.length;
break;
}
case kExprI32LoadMem8S:
@@ -546,27 +789,36 @@ class LR_WasmDecoder : public Decoder {
Shift(kAstI32, 1);
break;
case kExprCallFunction: {
- uint32_t unused;
- FunctionSig* sig = FunctionSigOperand(pc_, &unused, &len);
- if (sig) {
- LocalType type =
- sig->return_count() == 0 ? kAstStmt : sig->GetReturn();
- Shift(type, static_cast<int>(sig->parameter_count()));
- } else {
- Leaf(kAstI32); // error
+ FunctionIndexOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ LocalType type = operand.sig->return_count() == 0
+ ? kAstStmt
+ : operand.sig->GetReturn();
+ Shift(type, static_cast<int>(operand.sig->parameter_count()));
}
+ len = 1 + operand.length;
break;
}
case kExprCallIndirect: {
- uint32_t unused;
- FunctionSig* sig = SigOperand(pc_, &unused, &len);
- if (sig) {
- LocalType type =
- sig->return_count() == 0 ? kAstStmt : sig->GetReturn();
- Shift(type, static_cast<int>(1 + sig->parameter_count()));
- } else {
- Leaf(kAstI32); // error
+ SignatureIndexOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ LocalType type = operand.sig->return_count() == 0
+ ? kAstStmt
+ : operand.sig->GetReturn();
+ Shift(type, static_cast<int>(1 + operand.sig->parameter_count()));
}
+ len = 1 + operand.length;
+ break;
+ }
+ case kExprCallImport: {
+ ImportIndexOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ LocalType type = operand.sig->return_count() == 0
+ ? kAstStmt
+ : operand.sig->GetReturn();
+ Shift(type, static_cast<int>(operand.sig->parameter_count()));
+ }
+ len = 1 + operand.length;
break;
}
default:
@@ -589,19 +841,15 @@ class LR_WasmDecoder : public Decoder {
}
int DecodeLoadMem(const byte* pc, LocalType type) {
- int length = 2;
- uint32_t offset;
- MemoryAccessOperand(pc, &length, &offset);
+ MemoryAccessOperand operand(this, pc);
Shift(type, 1);
- return length;
+ return 1 + operand.length;
}
int DecodeStoreMem(const byte* pc, LocalType type) {
- int length = 2;
- uint32_t offset;
- MemoryAccessOperand(pc, &length, &offset);
+ MemoryAccessOperand operand(this, pc);
Shift(type, 2);
- return length;
+ return 1 + operand.length;
}
void AddImplicitReturnAtEnd() {
@@ -747,26 +995,26 @@ class LR_WasmDecoder : public Decoder {
}
case kExprSelect: {
if (p->index == 1) {
- // Condition done.
- TypeCheckLast(p, kAstI32);
- } else if (p->index == 2) {
// True expression done.
p->tree->type = p->last()->type;
if (p->tree->type == kAstStmt) {
error(p->pc(), p->tree->children[1]->pc,
"select operand should be expression");
}
- } else {
+ } else if (p->index == 2) {
// False expression done.
- DCHECK(p->done());
TypeCheckLast(p, p->tree->type);
+ } else {
+ // Condition done.
+ DCHECK(p->done());
+ TypeCheckLast(p, kAstI32);
if (build()) {
TFNode* controls[2];
- builder_->Branch(p->tree->children[0]->node, &controls[0],
+ builder_->Branch(p->tree->children[2]->node, &controls[0],
&controls[1]);
TFNode* merge = builder_->Merge(2, controls);
- TFNode* vals[2] = {p->tree->children[1]->node,
- p->tree->children[2]->node};
+ TFNode* vals[2] = {p->tree->children[0]->node,
+ p->tree->children[1]->node};
TFNode* phi = builder_->Phi(p->tree->type, 2, vals, merge);
p->tree->node = phi;
ssa_env_->control = merge;
@@ -775,64 +1023,44 @@ class LR_WasmDecoder : public Decoder {
break;
}
case kExprBr: {
- uint32_t depth = Operand<uint8_t>(p->pc());
- if (depth >= blocks_.size()) {
- error("improperly nested branch");
- break;
- }
- Block* block = &blocks_[blocks_.size() - depth - 1];
- ReduceBreakToExprBlock(p, block);
+ BreakDepthOperand operand(this, p->pc());
+ CHECK(Validate(p->pc(), operand, blocks_));
+ ReduceBreakToExprBlock(p, operand.target);
break;
}
case kExprBrIf: {
- if (p->index == 1) {
+ if (p->done()) {
TypeCheckLast(p, kAstI32);
- } else if (p->done()) {
- uint32_t depth = Operand<uint8_t>(p->pc());
- if (depth >= blocks_.size()) {
- error("improperly nested branch");
- break;
- }
- Block* block = &blocks_[blocks_.size() - depth - 1];
+ BreakDepthOperand operand(this, p->pc());
+ CHECK(Validate(p->pc(), operand, blocks_));
SsaEnv* fenv = ssa_env_;
SsaEnv* tenv = Split(fenv);
- BUILD(Branch, p->tree->children[0]->node, &tenv->control,
+ BUILD(Branch, p->tree->children[1]->node, &tenv->control,
&fenv->control);
ssa_env_ = tenv;
- ReduceBreakToExprBlock(p, block);
+ ReduceBreakToExprBlock(p, operand.target, p->tree->children[0]);
ssa_env_ = fenv;
}
break;
}
case kExprTableSwitch: {
- uint16_t table_count = *reinterpret_cast<const uint16_t*>(p->pc() + 3);
- if (table_count == 1) {
- // Degenerate switch with only a default target.
- if (p->index == 1) {
- SsaEnv* break_env = ssa_env_;
- PushBlock(break_env);
- SetEnv("switch:default", Steal(break_env));
- }
- if (p->done()) {
- Block* block = &blocks_.back();
- // fall through to the end.
- ReduceBreakToExprBlock(p, block);
- SetEnv("switch:end", block->ssa_env);
- blocks_.pop_back();
- }
- break;
- }
-
if (p->index == 1) {
// Switch key finished.
TypeCheckLast(p, kAstI32);
+ if (failed()) break;
+
+ TableSwitchOperand operand(this, p->pc());
+ DCHECK(Validate(p->pc(), operand, blocks_.size()));
- TFNode* sw = BUILD(Switch, table_count, p->last()->node);
+ // Build the switch only if it has more than just a default target.
+ bool build_switch = operand.table_count > 1;
+ TFNode* sw = nullptr;
+ if (build_switch)
+ sw = BUILD(Switch, operand.table_count, p->last()->node);
// Allocate environments for each case.
- uint16_t case_count = *reinterpret_cast<const uint16_t*>(p->pc() + 1);
- SsaEnv** case_envs = zone_->NewArray<SsaEnv*>(case_count);
- for (int i = 0; i < case_count; i++) {
+ SsaEnv** case_envs = zone_->NewArray<SsaEnv*>(operand.case_count);
+ for (uint32_t i = 0; i < operand.case_count; i++) {
case_envs[i] = UnreachableEnv();
}
@@ -843,13 +1071,15 @@ class LR_WasmDecoder : public Decoder {
ssa_env_ = copy;
// Build the environments for each case based on the table.
- const uint16_t* table =
- reinterpret_cast<const uint16_t*>(p->pc() + 5);
- for (int i = 0; i < table_count; i++) {
- uint16_t target = table[i];
- SsaEnv* env = Split(copy);
- env->control = (i == table_count - 1) ? BUILD(IfDefault, sw)
- : BUILD(IfValue, i, sw);
+ for (uint32_t i = 0; i < operand.table_count; i++) {
+ uint16_t target = operand.read_entry(this, i);
+ SsaEnv* env = copy;
+ if (build_switch) {
+ env = Split(env);
+ env->control = (i == operand.table_count - 1)
+ ? BUILD(IfDefault, sw)
+ : BUILD(IfValue, i, sw);
+ }
if (target >= 0x8000) {
// Targets an outer block.
int depth = target - 0x8000;
@@ -860,25 +1090,21 @@ class LR_WasmDecoder : public Decoder {
Goto(env, case_envs[target]);
}
}
+ }
- // Switch to the environment for the first case.
- SetEnv("switch:case", case_envs[0]);
+ if (p->done()) {
+ // Last case. Fall through to the end.
+ Block* block = &blocks_.back();
+ if (p->index > 1) ReduceBreakToExprBlock(p, block);
+ SsaEnv* next = block->ssa_env;
+ blocks_.pop_back();
+ ifs_.pop_back();
+ SetEnv("switch:end", next);
} else {
- // Switch case finished.
- if (p->done()) {
- // Last case. Fall through to the end.
- Block* block = &blocks_.back();
- ReduceBreakToExprBlock(p, block);
- SsaEnv* next = block->ssa_env;
- blocks_.pop_back();
- ifs_.pop_back();
- SetEnv("switch:end", next);
- } else {
- // Interior case. Maybe fall through to the next case.
- SsaEnv* next = ifs_.back().case_envs[p->index - 1];
- if (ssa_env_->go()) Goto(ssa_env_, next);
- SetEnv("switch:case", next);
- }
+ // Interior case. Maybe fall through to the next case.
+ SsaEnv* next = ifs_.back().case_envs[p->index - 1];
+ if (p->index > 1 && ssa_env_->go()) Goto(ssa_env_, next);
+ SetEnv("switch:case", next);
}
break;
}
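        // Editorial note: after this restructuring a tableswitch with only
        // a default target never builds a TF Switch node, and fall-through
        // between cases is driven by the production index (p->index) rather
        // than a separate degenerate code path.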
@@ -898,12 +1124,11 @@ class LR_WasmDecoder : public Decoder {
break;
}
case kExprSetLocal: {
- int unused = 0;
- uint32_t index;
- LocalType type = LocalOperand(p->pc(), &index, &unused);
+ LocalIndexOperand operand(this, p->pc());
+ CHECK(Validate(p->pc(), operand));
Tree* val = p->last();
- if (type == val->type) {
- if (build()) ssa_env_->locals[index] = val->node;
+ if (operand.type == val->type) {
+ if (build()) ssa_env_->locals[operand.index] = val->node;
p->tree->node = val->node;
} else {
error(p->pc(), val->pc, "Typecheck failed in SetLocal");
@@ -911,12 +1136,11 @@ class LR_WasmDecoder : public Decoder {
break;
}
case kExprStoreGlobal: {
- int unused = 0;
- uint32_t index;
- LocalType type = GlobalOperand(p->pc(), &index, &unused);
+ GlobalIndexOperand operand(this, p->pc());
+ CHECK(Validate(p->pc(), operand));
Tree* val = p->last();
- if (type == val->type) {
- BUILD(StoreGlobal, index, val->node);
+ if (operand.type == val->type) {
+ BUILD(StoreGlobal, operand.index, val->node);
p->tree->node = val->node;
} else {
error(p->pc(), val->pc, "Typecheck failed in StoreGlobal");
@@ -985,34 +1209,29 @@ class LR_WasmDecoder : public Decoder {
return;
case kExprCallFunction: {
- int len;
- uint32_t index;
- FunctionSig* sig = FunctionSigOperand(p->pc(), &index, &len);
- if (!sig) break;
+ FunctionIndexOperand operand(this, p->pc());
+ CHECK(Validate(p->pc(), operand));
if (p->index > 0) {
- TypeCheckLast(p, sig->GetParam(p->index - 1));
+ TypeCheckLast(p, operand.sig->GetParam(p->index - 1));
}
if (p->done() && build()) {
uint32_t count = p->tree->count + 1;
TFNode** buffer = builder_->Buffer(count);
- FunctionSig* sig = FunctionSigOperand(p->pc(), &index, &len);
- USE(sig);
buffer[0] = nullptr; // reserved for code object.
for (uint32_t i = 1; i < count; i++) {
buffer[i] = p->tree->children[i - 1]->node;
}
- p->tree->node = builder_->CallDirect(index, buffer);
+ p->tree->node = builder_->CallDirect(operand.index, buffer);
}
break;
}
case kExprCallIndirect: {
- int len;
- uint32_t index;
- FunctionSig* sig = SigOperand(p->pc(), &index, &len);
+ SignatureIndexOperand operand(this, p->pc());
+ CHECK(Validate(p->pc(), operand));
if (p->index == 1) {
TypeCheckLast(p, kAstI32);
} else {
- TypeCheckLast(p, sig->GetParam(p->index - 2));
+ TypeCheckLast(p, operand.sig->GetParam(p->index - 2));
}
if (p->done() && build()) {
uint32_t count = p->tree->count;
@@ -1020,7 +1239,24 @@ class LR_WasmDecoder : public Decoder {
for (uint32_t i = 0; i < count; i++) {
buffer[i] = p->tree->children[i]->node;
}
- p->tree->node = builder_->CallIndirect(index, buffer);
+ p->tree->node = builder_->CallIndirect(operand.index, buffer);
+ }
+ break;
+ }
+ case kExprCallImport: {
+ ImportIndexOperand operand(this, p->pc());
+ CHECK(Validate(p->pc(), operand));
+ if (p->index > 0) {
+ TypeCheckLast(p, operand.sig->GetParam(p->index - 1));
+ }
+ if (p->done() && build()) {
+ uint32_t count = p->tree->count + 1;
+ TFNode** buffer = builder_->Buffer(count);
+ buffer[0] = nullptr; // reserved for code object.
+ for (uint32_t i = 1; i < count; i++) {
+ buffer[i] = p->tree->children[i - 1]->node;
+ }
+ p->tree->node = builder_->CallImport(operand.index, buffer);
}
break;
}
@@ -1030,13 +1266,17 @@ class LR_WasmDecoder : public Decoder {
}
void ReduceBreakToExprBlock(Production* p, Block* block) {
+ ReduceBreakToExprBlock(p, block, p->tree->count > 0 ? p->last() : nullptr);
+ }
+
+ void ReduceBreakToExprBlock(Production* p, Block* block, Tree* val) {
if (block->stack_depth < 0) {
// This is the inner loop block, which does not have a value.
Goto(ssa_env_, block->ssa_env);
} else {
// Merge the value into the production for the block.
Production* bp = &stack_[block->stack_depth];
- MergeIntoProduction(bp, block->ssa_env, p->last());
+ MergeIntoProduction(bp, block->ssa_env, val);
}
}
@@ -1045,7 +1285,7 @@ class LR_WasmDecoder : public Decoder {
bool first = target->state == SsaEnv::kUnreachable;
Goto(ssa_env_, target);
- if (expr->type == kAstEnd) return;
+ if (expr == nullptr || expr->type == kAstEnd) return;
if (first) {
// first merge to this environment; set the type and the node.
@@ -1069,11 +1309,9 @@ class LR_WasmDecoder : public Decoder {
DCHECK_EQ(1, p->index);
TypeCheckLast(p, kAstI32); // index
if (build()) {
- int length = 0;
- uint32_t offset = 0;
- MemoryAccessOperand(p->pc(), &length, &offset);
+ MemoryAccessOperand operand(this, p->pc());
p->tree->node =
- builder_->LoadMem(type, mem_type, p->last()->node, offset);
+ builder_->LoadMem(type, mem_type, p->last()->node, operand.offset);
}
}
@@ -1084,11 +1322,10 @@ class LR_WasmDecoder : public Decoder {
DCHECK_EQ(2, p->index);
TypeCheckLast(p, type);
if (build()) {
- int length = 0;
- uint32_t offset = 0;
- MemoryAccessOperand(p->pc(), &length, &offset);
+ MemoryAccessOperand operand(this, p->pc());
TFNode* val = p->tree->children[1]->node;
- builder_->StoreMem(mem_type, p->tree->children[0]->node, offset, val);
+ builder_->StoreMem(mem_type, p->tree->children[0]->node, operand.offset,
+ val);
p->tree->node = val;
}
}
@@ -1111,7 +1348,7 @@ class LR_WasmDecoder : public Decoder {
void SetEnv(const char* reason, SsaEnv* env) {
TRACE(" env = %p, block depth = %d, reason = %s", static_cast<void*>(env),
static_cast<int>(blocks_.size()), reason);
- if (env->control != nullptr && FLAG_trace_wasm_decoder) {
+ if (FLAG_trace_wasm_decoder && env && env->control) {
TRACE(", control = ");
compiler::WasmGraphBuilder::PrintDebugName(env->control);
}
@@ -1286,94 +1523,11 @@ class LR_WasmDecoder : public Decoder {
return result;
}
- // Load an operand at [pc + 1].
- template <typename V>
- V Operand(const byte* pc) {
- if ((limit_ - pc) < static_cast<int>(1 + sizeof(V))) {
- const char* msg = "Expected operand following opcode";
- switch (sizeof(V)) {
- case 1:
- msg = "Expected 1-byte operand following opcode";
- break;
- case 2:
- msg = "Expected 2-byte operand following opcode";
- break;
- case 4:
- msg = "Expected 4-byte operand following opcode";
- break;
- default:
- break;
- }
- error(pc, msg);
- return -1;
- }
- return *reinterpret_cast<const V*>(pc + 1);
- }
-
int EnvironmentCount() {
if (builder_) return static_cast<int>(function_env_->GetLocalCount());
return 0; // if we aren't building a graph, don't bother with SSA renaming.
}
- LocalType LocalOperand(const byte* pc, uint32_t* index, int* length) {
- *index = UnsignedLEB128Operand(pc, length);
- if (function_env_->IsValidLocal(*index)) {
- return function_env_->GetLocalType(*index);
- }
- error(pc, "invalid local variable index");
- return kAstStmt;
- }
-
- LocalType GlobalOperand(const byte* pc, uint32_t* index, int* length) {
- *index = UnsignedLEB128Operand(pc, length);
- if (function_env_->module->IsValidGlobal(*index)) {
- return WasmOpcodes::LocalTypeFor(
- function_env_->module->GetGlobalType(*index));
- }
- error(pc, "invalid global variable index");
- return kAstStmt;
- }
-
- FunctionSig* FunctionSigOperand(const byte* pc, uint32_t* index,
- int* length) {
- *index = UnsignedLEB128Operand(pc, length);
- if (function_env_->module->IsValidFunction(*index)) {
- return function_env_->module->GetFunctionSignature(*index);
- }
- error(pc, "invalid function index");
- return nullptr;
- }
-
- FunctionSig* SigOperand(const byte* pc, uint32_t* index, int* length) {
- *index = UnsignedLEB128Operand(pc, length);
- if (function_env_->module->IsValidSignature(*index)) {
- return function_env_->module->GetSignature(*index);
- }
- error(pc, "invalid signature index");
- return nullptr;
- }
-
- uint32_t UnsignedLEB128Operand(const byte* pc, int* length) {
- uint32_t result = 0;
- ReadUnsignedLEB128ErrorCode error_code =
- ReadUnsignedLEB128Operand(pc + 1, limit_, length, &result);
- if (error_code == kInvalidLEB128) error(pc, "invalid LEB128 varint");
- if (error_code == kMissingLEB128) error(pc, "expected LEB128 varint");
- (*length)++;
- return result;
- }
-
- void MemoryAccessOperand(const byte* pc, int* length, uint32_t* offset) {
- byte bitfield = Operand<uint8_t>(pc);
- if (MemoryAccess::OffsetField::decode(bitfield)) {
- *offset = UnsignedLEB128Operand(pc + 1, length);
- (*length)++; // to account for the memory access byte
- } else {
- *offset = 0;
- *length = 2;
- }
- }
-
virtual void onFirstError() {
limit_ = start_; // Terminate decoding loop.
builder_ = nullptr; // Don't build any more nodes.
@@ -1447,137 +1601,114 @@ ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte* pc,
const byte* limit,
int* length,
uint32_t* result) {
- *result = 0;
- const byte* ptr = pc;
- const byte* end = pc + 5; // maximum 5 bytes.
- if (end > limit) end = limit;
- int shift = 0;
- byte b = 0;
- while (ptr < end) {
- b = *ptr++;
- *result = *result | ((b & 0x7F) << shift);
- if ((b & 0x80) == 0) break;
- shift += 7;
- }
- DCHECK_LE(ptr - pc, 5);
- *length = static_cast<int>(ptr - pc);
- if (ptr == end && (b & 0x80)) {
- return kInvalidLEB128;
- } else if (*length == 0) {
- return kMissingLEB128;
- } else {
- return kNoError;
- }
+ Decoder decoder(pc, limit);
+ *result = decoder.checked_read_u32v(pc, 0, length);
+ if (decoder.ok()) return kNoError;
+ return (limit - pc) > 1 ? kInvalidLEB128 : kMissingLEB128;
}
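+// Usage sketch (editorial): callers pass the varint start and buffer end:
+//   int len;
+//   uint32_t value;
+//   if (ReadUnsignedLEB128Operand(pc, limit, &len, &value) == kNoError) {
+//     pc += len;  // advance past the varint
+//   }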
+int OpcodeLength(const byte* pc, const byte* end) {
+ WasmDecoder decoder(nullptr, pc, end);
+ return decoder.OpcodeLength(pc);
+}
-int OpcodeLength(const byte* pc) {
- switch (static_cast<WasmOpcode>(*pc)) {
-#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
- FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
-#undef DECLARE_OPCODE_CASE
+int OpcodeArity(FunctionEnv* env, const byte* pc, const byte* end) {
+ WasmDecoder decoder(env, pc, end);
+ return decoder.OpcodeArity(pc);
+}
- case kExprI8Const:
- case kExprBlock:
- case kExprLoop:
- case kExprBr:
- case kExprBrIf:
- return 2;
- case kExprI32Const:
- case kExprF32Const:
- return 5;
- case kExprI64Const:
- case kExprF64Const:
- return 9;
- case kExprStoreGlobal:
- case kExprSetLocal:
- case kExprLoadGlobal:
- case kExprCallFunction:
- case kExprCallIndirect:
- case kExprGetLocal: {
- int length;
- uint32_t result = 0;
- ReadUnsignedLEB128Operand(pc + 1, pc + 6, &length, &result);
- return 1 + length;
- }
- case kExprTableSwitch: {
- uint16_t table_count = *reinterpret_cast<const uint16_t*>(pc + 3);
- return 5 + table_count * 2;
+void PrintAst(FunctionEnv* env, const byte* start, const byte* end) {
+ WasmDecoder decoder(env, start, end);
+ const byte* pc = start;
+ std::vector<int> arity_stack;
+ while (pc < end) {
+ int arity = decoder.OpcodeArity(pc);
+ size_t length = decoder.OpcodeLength(pc);
+
+ for (auto arity : arity_stack) {
+ printf(" ");
+ USE(arity);
}
- default:
- return 1;
+ WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
+ printf("k%s,", WasmOpcodes::OpcodeName(opcode));
+
+ for (size_t i = 1; i < length; i++) {
+ printf(" 0x%02x,", pc[i]);
+ }
+ pc += length;
+ printf("\n");
+
+ arity_stack.push_back(arity);
+ while (arity_stack.back() == 0) {
+ arity_stack.pop_back();
+ if (arity_stack.empty()) break;
+ arity_stack.back()--;
+ }
}
}
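+// Editorial note on the traversal above: the arity stack models expression
+// nesting. Each opcode pushes its arity; trailing zero entries are popped
+// while the parent's remaining-child count is decremented, so the stack
+// depth at any point equals the current nesting depth (used here only for
+// indentation). LoopAssignmentAnalyzer::Analyze below walks the tree the
+// same way.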
+// Analyzes loop bodies for static assignments to locals, which helps in
+// reducing the number of phis introduced at loop headers.
+class LoopAssignmentAnalyzer : public WasmDecoder {
+ public:
+ LoopAssignmentAnalyzer(Zone* zone, FunctionEnv* function_env) : zone_(zone) {
+ function_env_ = function_env;
+ }
-int OpcodeArity(FunctionEnv* env, const byte* pc) {
-#define DECLARE_ARITY(name, ...) \
- static const LocalType kTypes_##name[] = {__VA_ARGS__}; \
- static const int kArity_##name = \
- static_cast<int>(arraysize(kTypes_##name) - 1);
+ BitVector* Analyze(const byte* pc, const byte* limit) {
+ Decoder::Reset(pc, limit);
+ if (pc_ >= limit_) return nullptr;
+ if (*pc_ != kExprLoop) return nullptr;
- FOREACH_SIGNATURE(DECLARE_ARITY);
-#undef DECLARE_ARITY
+ BitVector* assigned =
+ new (zone_) BitVector(function_env_->total_locals, zone_);
+ // Keep a stack to model the nesting of expressions.
+ std::vector<int> arity_stack;
+ arity_stack.push_back(OpcodeArity(pc_));
+ pc_ += OpcodeLength(pc_);
- switch (static_cast<WasmOpcode>(*pc)) {
- case kExprI8Const:
- case kExprI32Const:
- case kExprI64Const:
- case kExprF64Const:
- case kExprF32Const:
- case kExprGetLocal:
- case kExprLoadGlobal:
- case kExprNop:
- case kExprUnreachable:
- return 0;
+ // Iteratively process all AST nodes nested inside the loop.
+ while (pc_ < limit_) {
+ WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
+ int arity = 0;
+ int length = 1;
+ if (opcode == kExprSetLocal) {
+ LocalIndexOperand operand(this, pc_);
+ if (assigned->length() > 0 &&
+ static_cast<int>(operand.index) < assigned->length()) {
+ // Unverified code might have an out-of-bounds index.
+ assigned->Add(operand.index);
+ }
+ arity = 1;
+ length = 1 + operand.length;
+ } else {
+ arity = OpcodeArity(pc_);
+ length = OpcodeLength(pc_);
+ }
- case kExprBr:
- case kExprStoreGlobal:
- case kExprSetLocal:
- return 1;
-
- case kExprIf:
- case kExprBrIf:
- return 2;
- case kExprIfElse:
- case kExprSelect:
- return 3;
- case kExprBlock:
- case kExprLoop:
- return *(pc + 1);
-
- case kExprCallFunction: {
- int index = *(pc + 1);
- return static_cast<int>(
- env->module->GetFunctionSignature(index)->parameter_count());
- }
- case kExprCallIndirect: {
- int index = *(pc + 1);
- return 1 + static_cast<int>(
- env->module->GetSignature(index)->parameter_count());
- }
- case kExprReturn:
- return static_cast<int>(env->sig->return_count());
- case kExprTableSwitch: {
- uint16_t case_count = *reinterpret_cast<const uint16_t*>(pc + 1);
- return 1 + case_count;
+ pc_ += length;
+ arity_stack.push_back(arity);
+ while (arity_stack.back() == 0) {
+ arity_stack.pop_back();
+ if (arity_stack.empty()) return assigned; // reached end of loop
+ arity_stack.back()--;
+ }
}
+ return assigned;
+ }
-#define DECLARE_OPCODE_CASE(name, opcode, sig) \
- case kExpr##name: \
- return kArity_##sig;
+ private:
+ Zone* zone_;
+};
- FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_MISC_MEM_OPCODE(DECLARE_OPCODE_CASE)
- FOREACH_SIMPLE_OPCODE(DECLARE_OPCODE_CASE)
-#undef DECLARE_OPCODE_CASE
- }
- UNREACHABLE();
- return 0;
+
+BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, FunctionEnv* env,
+ const byte* start, const byte* end) {
+ LoopAssignmentAnalyzer analyzer(zone, env);
+ return analyzer.Analyze(start, end);
}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/ast-decoder.h b/deps/v8/src/wasm/ast-decoder.h
index 5b95ad9f87..465bacaab8 100644
--- a/deps/v8/src/wasm/ast-decoder.h
+++ b/deps/v8/src/wasm/ast-decoder.h
@@ -6,18 +6,181 @@
#define V8_WASM_AST_DECODER_H_
#include "src/signature.h"
+#include "src/wasm/decoder.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
namespace v8 {
namespace internal {
+class BitVector; // forward declaration
+
namespace compiler { // external declarations from compiler.
class WasmGraphBuilder;
}
namespace wasm {
+// Helpers for decoding different kinds of operands which follow bytecodes.
+struct LocalIndexOperand {
+ uint32_t index;
+ LocalType type;
+ int length;
+
+ inline LocalIndexOperand(Decoder* decoder, const byte* pc) {
+ index = decoder->checked_read_u32v(pc, 1, &length, "local index");
+ type = kAstStmt;
+ }
+};
+
+struct ImmI8Operand {
+ int8_t value;
+ int length;
+ inline ImmI8Operand(Decoder* decoder, const byte* pc) {
+ value = bit_cast<int8_t>(decoder->checked_read_u8(pc, 1, "immi8"));
+ length = 1;
+ }
+};
+
+struct ImmI32Operand {
+ int32_t value;
+ int length;
+ inline ImmI32Operand(Decoder* decoder, const byte* pc) {
+ value = bit_cast<int32_t>(decoder->checked_read_u32(pc, 1, "immi32"));
+ length = 4;
+ }
+};
+
+struct ImmI64Operand {
+ int64_t value;
+ int length;
+ inline ImmI64Operand(Decoder* decoder, const byte* pc) {
+ value = bit_cast<int64_t>(decoder->checked_read_u64(pc, 1, "immi64"));
+ length = 8;
+ }
+};
+
+struct ImmF32Operand {
+ float value;
+ int length;
+ inline ImmF32Operand(Decoder* decoder, const byte* pc) {
+ value = bit_cast<float>(decoder->checked_read_u32(pc, 1, "immf32"));
+ length = 4;
+ }
+};
+
+struct ImmF64Operand {
+ double value;
+ int length;
+ inline ImmF64Operand(Decoder* decoder, const byte* pc) {
+ value = bit_cast<double>(decoder->checked_read_u64(pc, 1, "immf64"));
+ length = 8;
+ }
+};
+
+struct GlobalIndexOperand {
+ uint32_t index;
+ LocalType type;
+ MachineType machine_type;
+ int length;
+
+ inline GlobalIndexOperand(Decoder* decoder, const byte* pc) {
+ index = decoder->checked_read_u32v(pc, 1, &length, "global index");
+ type = kAstStmt;
+ machine_type = MachineType::None();
+ }
+};
+
+struct Block;
+struct BreakDepthOperand {
+ uint32_t depth;
+ Block* target;
+ int length;
+ inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
+ depth = decoder->checked_read_u8(pc, 1, "break depth");
+ length = 1;
+ target = nullptr;
+ }
+};
+
+struct BlockCountOperand {
+ uint32_t count;
+ int length;
+ inline BlockCountOperand(Decoder* decoder, const byte* pc) {
+ count = decoder->checked_read_u8(pc, 1, "block count");
+ length = 1;
+ }
+};
+
+struct SignatureIndexOperand {
+ uint32_t index;
+ FunctionSig* sig;
+ int length;
+ inline SignatureIndexOperand(Decoder* decoder, const byte* pc) {
+ index = decoder->checked_read_u32v(pc, 1, &length, "signature index");
+ sig = nullptr;
+ }
+};
+
+struct FunctionIndexOperand {
+ uint32_t index;
+ FunctionSig* sig;
+ int length;
+ inline FunctionIndexOperand(Decoder* decoder, const byte* pc) {
+ index = decoder->checked_read_u32v(pc, 1, &length, "function index");
+ sig = nullptr;
+ }
+};
+
+struct ImportIndexOperand {
+ uint32_t index;
+ FunctionSig* sig;
+ int length;
+ inline ImportIndexOperand(Decoder* decoder, const byte* pc) {
+ index = decoder->checked_read_u32v(pc, 1, &length, "import index");
+ sig = nullptr;
+ }
+};
+
+struct TableSwitchOperand {
+ uint32_t case_count;
+ uint32_t table_count;
+ const byte* table;
+ int length;
+ inline TableSwitchOperand(Decoder* decoder, const byte* pc) {
+ case_count = decoder->checked_read_u16(pc, 1, "expected #cases");
+ table_count = decoder->checked_read_u16(pc, 3, "expected #entries");
+ length = 4 + table_count * 2;
+
+ if (decoder->check(pc, 5, table_count * 2, "expected <table entries>")) {
+ table = pc + 5;
+ } else {
+ table = nullptr;
+ }
+ }
+ inline uint16_t read_entry(Decoder* decoder, int i) {
+ DCHECK(i >= 0 && static_cast<uint32_t>(i) < table_count);
+ return table ? decoder->read_u16(table + i * sizeof(uint16_t)) : 0;
+ }
+};
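+// Encoding sketch (editorial) for the operand above:
+//   opcode, u16 case_count, u16 table_count, u16 table[table_count]
+// where an entry < 0x8000 selects case `entry`, and an entry >= 0x8000
+// branches out of (entry - 0x8000) enclosing blocks.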
+
+struct MemoryAccessOperand {
+ bool aligned;
+ uint32_t offset;
+ int length;
+ inline MemoryAccessOperand(Decoder* decoder, const byte* pc) {
+ byte bitfield = decoder->checked_read_u8(pc, 1, "memory access byte");
+ aligned = MemoryAccess::AlignmentField::decode(bitfield);
+ if (MemoryAccess::OffsetField::decode(bitfield)) {
+ offset = decoder->checked_read_u32v(pc, 2, &length, "memory offset");
+ length++;
+ } else {
+ offset = 0;
+ length = 1;
+ }
+ }
+};
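+// Layout note (editorial): the single access byte at pc + 1 carries
+// MemoryAccess::AlignmentField and MemoryAccess::OffsetField; only when
+// the offset field is set does a LEB128 offset follow, so the common
+// offset-less access decodes with length == 1.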
+
typedef compiler::WasmGraphBuilder TFBuilder;
struct ModuleEnv; // forward declaration of module interface.
@@ -26,56 +189,55 @@ struct ModuleEnv; // forward declaration of module interface.
struct FunctionEnv {
ModuleEnv* module; // module environment
FunctionSig* sig; // signature of this function
- uint32_t local_int32_count; // number of int32 locals
- uint32_t local_int64_count; // number of int64 locals
- uint32_t local_float32_count; // number of float32 locals
- uint32_t local_float64_count; // number of float64 locals
+ uint32_t local_i32_count; // number of int32 locals
+ uint32_t local_i64_count; // number of int64 locals
+ uint32_t local_f32_count; // number of float32 locals
+ uint32_t local_f64_count; // number of float64 locals
uint32_t total_locals; // sum of parameters and all locals
- bool IsValidLocal(uint32_t index) { return index < total_locals; }
uint32_t GetLocalCount() { return total_locals; }
LocalType GetLocalType(uint32_t index) {
if (index < static_cast<uint32_t>(sig->parameter_count())) {
return sig->GetParam(index);
}
index -= static_cast<uint32_t>(sig->parameter_count());
- if (index < local_int32_count) return kAstI32;
- index -= local_int32_count;
- if (index < local_int64_count) return kAstI64;
- index -= local_int64_count;
- if (index < local_float32_count) return kAstF32;
- index -= local_float32_count;
- if (index < local_float64_count) return kAstF64;
+ if (index < local_i32_count) return kAstI32;
+ index -= local_i32_count;
+ if (index < local_i64_count) return kAstI64;
+ index -= local_i64_count;
+ if (index < local_f32_count) return kAstF32;
+ index -= local_f32_count;
+ if (index < local_f64_count) return kAstF64;
return kAstStmt;
}
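+  // Worked example (editorial): with 2 parameters, local_i32_count == 3 and
+  // local_f64_count == 1, indices 0-1 map to the parameter types, 2-4 to
+  // kAstI32, 5 to kAstF64, and anything >= total_locals yields kAstStmt.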
void AddLocals(LocalType type, uint32_t count) {
switch (type) {
case kAstI32:
- local_int32_count += count;
+ local_i32_count += count;
break;
case kAstI64:
- local_int64_count += count;
+ local_i64_count += count;
break;
case kAstF32:
- local_float32_count += count;
+ local_f32_count += count;
break;
case kAstF64:
- local_float64_count += count;
+ local_f64_count += count;
break;
default:
UNREACHABLE();
}
total_locals += count;
- DCHECK(total_locals ==
- (sig->parameter_count() + local_int32_count + local_int64_count +
- local_float32_count + local_float64_count));
+ DCHECK_EQ(total_locals,
+ (sig->parameter_count() + local_i32_count + local_i64_count +
+ local_f32_count + local_f64_count));
}
void SumLocals() {
total_locals = static_cast<uint32_t>(sig->parameter_count()) +
- local_int32_count + local_int64_count + local_float32_count +
- local_float64_count;
+ local_i32_count + local_i64_count + local_f32_count +
+ local_f64_count;
}
};
@@ -89,6 +251,8 @@ TreeResult VerifyWasmCode(FunctionEnv* env, const byte* base, const byte* start,
TreeResult BuildTFGraph(TFBuilder* builder, FunctionEnv* env, const byte* base,
const byte* start, const byte* end);
+void PrintAst(FunctionEnv* env, const byte* start, const byte* end);
+
inline TreeResult VerifyWasmCode(FunctionEnv* env, const byte* start,
const byte* end) {
return VerifyWasmCode(env, nullptr, start, end);
@@ -104,11 +268,14 @@ enum ReadUnsignedLEB128ErrorCode { kNoError, kInvalidLEB128, kMissingLEB128 };
ReadUnsignedLEB128ErrorCode ReadUnsignedLEB128Operand(const byte*, const byte*,
int*, uint32_t*);
+BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, FunctionEnv* env,
+ const byte* start, const byte* end);
+
// Computes the length of the opcode at the given address.
-int OpcodeLength(const byte* pc);
+int OpcodeLength(const byte* pc, const byte* end);
// Computes the arity (number of sub-nodes) of the opcode at the given address.
-int OpcodeArity(FunctionEnv* env, const byte* pc);
+int OpcodeArity(FunctionEnv* env, const byte* pc, const byte* end);
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index 698919d6a0..0e88eda022 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -24,6 +24,12 @@ namespace wasm {
#define TRACE(...)
#endif
+#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM)
+#define UNALIGNED_ACCESS_OK 1
+#else
+#define UNALIGNED_ACCESS_OK 0
+#endif
+
// A helper utility to decode bytes, integers, fields, varints, etc, from
// a buffer of bytes.
class Decoder {
@@ -32,107 +38,188 @@ class Decoder {
: start_(start),
pc_(start),
limit_(end),
+ end_(end),
error_pc_(nullptr),
error_pt_(nullptr) {}
virtual ~Decoder() {}
+ inline bool check(const byte* base, int offset, int length, const char* msg) {
+ DCHECK_GE(base, start_);
+ if ((base + offset + length) > limit_) {
+ error(base, base + offset, msg);
+ return false;
+ }
+ return true;
+ }
+
+ // Reads a single 8-bit byte, reporting an error if out of bounds.
+ inline uint8_t checked_read_u8(const byte* base, int offset,
+ const char* msg = "expected 1 byte") {
+ return check(base, offset, 1, msg) ? base[offset] : 0;
+ }
+
+ // Reads 16-bit word, reporting an error if out of bounds.
+ inline uint16_t checked_read_u16(const byte* base, int offset,
+ const char* msg = "expected 2 bytes") {
+ return check(base, offset, 2, msg) ? read_u16(base + offset) : 0;
+ }
+
+ // Reads 32-bit word, reporting an error if out of bounds.
+ inline uint32_t checked_read_u32(const byte* base, int offset,
+ const char* msg = "expected 4 bytes") {
+ return check(base, offset, 4, msg) ? read_u32(base + offset) : 0;
+ }
+
+ // Reads 64-bit word, reporting an error if out of bounds.
+ inline uint64_t checked_read_u64(const byte* base, int offset,
+ const char* msg = "expected 8 bytes") {
+ return check(base, offset, 8, msg) ? read_u64(base + offset) : 0;
+ }
+
+ uint32_t checked_read_u32v(const byte* base, int offset, int* length,
+ const char* msg = "expected LEB128") {
+ if (!check(base, offset, 1, msg)) {
+ *length = 0;
+ return 0;
+ }
+
+ const ptrdiff_t kMaxDiff = 5; // maximum 5 bytes.
+ const byte* ptr = base + offset;
+ const byte* end = ptr + kMaxDiff;
+ if (end > limit_) end = limit_;
+ int shift = 0;
+ byte b = 0;
+ uint32_t result = 0;
+ while (ptr < end) {
+ b = *ptr++;
+ result = result | ((b & 0x7F) << shift);
+ if ((b & 0x80) == 0) break;
+ shift += 7;
+ }
+ DCHECK_LE(ptr - (base + offset), kMaxDiff);
+ *length = static_cast<int>(ptr - (base + offset));
+ if (ptr == end && (b & 0x80)) {
+ error(base, ptr, msg);
+ return 0;
+ }
+ return result;
+ }
+
+ // Reads a single 16-bit unsigned integer (little endian).
+ inline uint16_t read_u16(const byte* ptr) {
+ DCHECK(ptr >= start_ && (ptr + 2) <= end_);
+#if V8_TARGET_LITTLE_ENDIAN && UNALIGNED_ACCESS_OK
+ return *reinterpret_cast<const uint16_t*>(ptr);
+#else
+ uint16_t b0 = ptr[0];
+ uint16_t b1 = ptr[1];
+ return (b1 << 8) | b0;
+#endif
+ }
+
+ // Reads a single 32-bit unsigned integer (little endian).
+ inline uint32_t read_u32(const byte* ptr) {
+ DCHECK(ptr >= start_ && (ptr + 4) <= end_);
+#if V8_TARGET_LITTLE_ENDIAN && UNALIGNED_ACCESS_OK
+ return *reinterpret_cast<const uint32_t*>(ptr);
+#else
+ uint32_t b0 = ptr[0];
+ uint32_t b1 = ptr[1];
+ uint32_t b2 = ptr[2];
+ uint32_t b3 = ptr[3];
+ return (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+#endif
+ }
+
+ // Reads a single 64-bit unsigned integer (little endian).
+ inline uint64_t read_u64(const byte* ptr) {
+ DCHECK(ptr >= start_ && (ptr + 8) <= end_);
+#if V8_TARGET_LITTLE_ENDIAN && UNALIGNED_ACCESS_OK
+ return *reinterpret_cast<const uint64_t*>(ptr);
+#else
+ uint32_t b0 = ptr[0];
+ uint32_t b1 = ptr[1];
+ uint32_t b2 = ptr[2];
+ uint32_t b3 = ptr[3];
+ uint32_t low = (b3 << 24) | (b2 << 16) | (b1 << 8) | b0;
+ uint32_t b4 = ptr[4];
+ uint32_t b5 = ptr[5];
+ uint32_t b6 = ptr[6];
+ uint32_t b7 = ptr[7];
+ uint64_t high = (b7 << 24) | (b6 << 16) | (b5 << 8) | b4;
+ return (high << 32) | low;
+#endif
+ }
+
  // Reads an 8-bit unsigned integer (byte) and advances {pc_}.
- uint8_t u8(const char* name = nullptr) {
+ uint8_t consume_u8(const char* name = nullptr) {
TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
name ? name : "uint8_t");
if (checkAvailable(1)) {
byte val = *(pc_++);
TRACE("%02x = %d\n", val, val);
return val;
- } else {
- error("expected 1 byte, but fell off end");
- return traceOffEnd<uint8_t>();
}
+ return traceOffEnd<uint8_t>();
}
// Reads a 16-bit unsigned integer (little endian) and advances {pc_}.
- uint16_t u16(const char* name = nullptr) {
+ uint16_t consume_u16(const char* name = nullptr) {
TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
name ? name : "uint16_t");
if (checkAvailable(2)) {
-#ifdef V8_TARGET_LITTLE_ENDIAN
- byte b0 = pc_[0];
- byte b1 = pc_[1];
-#else
- byte b1 = pc_[0];
- byte b0 = pc_[1];
-#endif
- uint16_t val = static_cast<uint16_t>(b1 << 8) | b0;
+ uint16_t val = read_u16(pc_);
TRACE("%02x %02x = %d\n", pc_[0], pc_[1], val);
pc_ += 2;
return val;
- } else {
- error("expected 2 bytes, but fell off end");
- return traceOffEnd<uint16_t>();
}
+ return traceOffEnd<uint16_t>();
}
// Reads a single 32-bit unsigned integer (little endian) and advances {pc_}.
- uint32_t u32(const char* name = nullptr) {
+ uint32_t consume_u32(const char* name = nullptr) {
TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
name ? name : "uint32_t");
if (checkAvailable(4)) {
-#ifdef V8_TARGET_LITTLE_ENDIAN
- byte b0 = pc_[0];
- byte b1 = pc_[1];
- byte b2 = pc_[2];
- byte b3 = pc_[3];
-#else
- byte b3 = pc_[0];
- byte b2 = pc_[1];
- byte b1 = pc_[2];
- byte b0 = pc_[3];
-#endif
- uint32_t val = static_cast<uint32_t>(b3 << 24) |
- static_cast<uint32_t>(b2 << 16) |
- static_cast<uint32_t>(b1 << 8) | b0;
+ uint32_t val = read_u32(pc_);
TRACE("%02x %02x %02x %02x = %u\n", pc_[0], pc_[1], pc_[2], pc_[3], val);
pc_ += 4;
return val;
- } else {
- error("expected 4 bytes, but fell off end");
- return traceOffEnd<uint32_t>();
}
+ return traceOffEnd<uint32_t>();
}
// Reads a LEB128 variable-length 32-bit integer and advances {pc_}.
- uint32_t u32v(int* length, const char* name = nullptr) {
+ uint32_t consume_u32v(int* length, const char* name = nullptr) {
TRACE(" +%d %-20s: ", static_cast<int>(pc_ - start_),
name ? name : "varint");
- if (!checkAvailable(1)) {
- error("expected at least 1 byte, but fell off end");
- return traceOffEnd<uint32_t>();
- }
-
- const byte* pos = pc_;
- const byte* end = pc_ + 5;
- if (end > limit_) end = limit_;
+ if (checkAvailable(1)) {
+ const byte* pos = pc_;
+ const byte* end = pc_ + 5;
+ if (end > limit_) end = limit_;
- uint32_t result = 0;
- int shift = 0;
- byte b = 0;
- while (pc_ < end) {
- b = *pc_++;
- TRACE("%02x ", b);
- result = result | ((b & 0x7F) << shift);
- if ((b & 0x80) == 0) break;
- shift += 7;
- }
+ uint32_t result = 0;
+ int shift = 0;
+ byte b = 0;
+ while (pc_ < end) {
+ b = *pc_++;
+ TRACE("%02x ", b);
+ result = result | ((b & 0x7F) << shift);
+ if ((b & 0x80) == 0) break;
+ shift += 7;
+ }
- *length = static_cast<int>(pc_ - pos);
- if (pc_ == end && (b & 0x80)) {
- error(pc_ - 1, "varint too large");
- } else {
- TRACE("= %u\n", result);
+ *length = static_cast<int>(pc_ - pos);
+ if (pc_ == end && (b & 0x80)) {
+ error(pc_ - 1, "varint too large");
+ } else {
+ TRACE("= %u\n", result);
+ }
+ return result;
}
- return result;
+ return traceOffEnd<uint32_t>();
}
// Check that at least {size} bytes exist between {pc_} and {limit_}.
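
A worked example of the unsigned LEB128 decode that checked_read_u32v and consume_u32v implement above: each byte contributes 7 payload bits, least-significant group first, and the high bit marks continuation, so a 32-bit value needs at most 5 bytes. A standalone sketch (bounds and error reporting elided, unlike the checked variants above):

    #include <cstddef>
    #include <cstdint>

    // Decodes an unsigned LEB128 value from at most 5 bytes.
    // E.g. bytes {0xE5, 0x8E, 0x26} decode to
    // 0x65 | (0x0E << 7) | (0x26 << 14) = 624485.
    uint32_t DecodeLEB128(const uint8_t* p, size_t* length) {
      uint32_t result = 0;
      int shift = 0;
      size_t i = 0;
      for (; i < 5; i++) {
        uint8_t b = p[i];
        result |= static_cast<uint32_t>(b & 0x7F) << shift;
        if ((b & 0x80) == 0) { i++; break; }  // no continuation bit: done
        shift += 7;
      }
      *length = i;  // if all 5 bytes had the high bit set, the varint is
      return result;  // malformed; the real decoder reports an error here
    }
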
@@ -145,6 +232,12 @@ class Decoder {
}
}
+ bool RangeOk(const byte* pc, int length) {
+    if (pc < start_ || pc >= limit_) return false;
+ if ((pc + length) >= limit_) return false;
+ return true;
+ }
+
void error(const char* msg) { error(pc_, nullptr, msg); }
void error(const byte* pc, const char* msg) { error(pc, nullptr, msg); }
@@ -208,6 +301,7 @@ class Decoder {
start_ = start;
pc_ = start;
limit_ = end;
+ end_ = end;
error_pc_ = nullptr;
error_pt_ = nullptr;
error_msg_.Reset(nullptr);
@@ -220,6 +314,7 @@ class Decoder {
const byte* start_;
const byte* pc_;
const byte* limit_;
+ const byte* end_;
const byte* error_pc_;
const byte* error_pt_;
base::SmartArrayPointer<char> error_msg_;
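
One note on the read_u16/u32/u64 helpers added above: the fast path reinterpret_casts the pointer, which is only valid when the target is little-endian and tolerates unaligned loads (hence the UNALIGNED_ACCESS_OK guard). The portable path assembles the value byte by byte, and the 64-bit read must widen each half before shifting the upper one into place. A standalone equivalent:

    #include <cstdint>

    // Portable little-endian reads: correct on any host byte order and
    // alignment, at the cost of a few extra instructions.
    uint32_t ReadU32LE(const uint8_t* p) {
      return static_cast<uint32_t>(p[0]) | (static_cast<uint32_t>(p[1]) << 8) |
             (static_cast<uint32_t>(p[2]) << 16) |
             (static_cast<uint32_t>(p[3]) << 24);
    }

    uint64_t ReadU64LE(const uint8_t* p) {
      // Assemble each 32-bit half, then widen before the 32-bit shift;
      // shifting a uint32_t by 32 bits would be undefined behavior.
      uint64_t low = ReadU32LE(p);
      uint64_t high = ReadU32LE(p + 4);
      return (high << 32) | low;
    }
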
diff --git a/deps/v8/src/wasm/encoder.cc b/deps/v8/src/wasm/encoder.cc
index d8d36338b1..d80a275338 100644
--- a/deps/v8/src/wasm/encoder.cc
+++ b/deps/v8/src/wasm/encoder.cc
@@ -30,13 +30,13 @@ void EmitUint8(byte** b, uint8_t x) {
void EmitUint16(byte** b, uint16_t x) {
- Memory::uint16_at(*b) = x;
+ WriteUnalignedUInt16(*b, x);
*b += 2;
}
void EmitUint32(byte** b, uint32_t x) {
- Memory::uint32_at(*b) = x;
+ WriteUnalignedUInt32(*b, x);
*b += 4;
}
@@ -121,12 +121,6 @@ void WasmFunctionBuilder::EmitWithU8(WasmOpcode opcode, const byte immediate) {
}
-void WasmFunctionBuilder::EmitWithLocal(WasmOpcode opcode) {
- body_.push_back(static_cast<byte>(opcode));
- local_indices_.push_back(static_cast<uint32_t>(body_.size()) - 1);
-}
-
-
uint32_t WasmFunctionBuilder::EmitEditableImmediate(const byte immediate) {
body_.push_back(immediate);
return static_cast<uint32_t>(body_.size()) - 1;
@@ -202,44 +196,44 @@ WasmFunctionEncoder* WasmFunctionBuilder::Build(Zone* zone,
void WasmFunctionBuilder::IndexVars(WasmFunctionEncoder* e,
uint16_t* var_index) const {
uint16_t param = 0;
- uint16_t int32 = 0;
- uint16_t int64 = 0;
- uint16_t float32 = 0;
- uint16_t float64 = 0;
+ uint16_t i32 = 0;
+ uint16_t i64 = 0;
+ uint16_t f32 = 0;
+ uint16_t f64 = 0;
for (size_t i = 0; i < locals_.size(); i++) {
if (locals_.at(i).param_) {
param++;
} else if (locals_.at(i).type_ == kAstI32) {
- int32++;
+ i32++;
} else if (locals_.at(i).type_ == kAstI64) {
- int64++;
+ i64++;
} else if (locals_.at(i).type_ == kAstF32) {
- float32++;
+ f32++;
} else if (locals_.at(i).type_ == kAstF64) {
- float64++;
+ f64++;
}
}
- e->local_int32_count_ = int32;
- e->local_int64_count_ = int64;
- e->local_float32_count_ = float32;
- e->local_float64_count_ = float64;
- float64 = param + int32 + int64 + float32;
- float32 = param + int32 + int64;
- int64 = param + int32;
- int32 = param;
+ e->local_i32_count_ = i32;
+ e->local_i64_count_ = i64;
+ e->local_f32_count_ = f32;
+ e->local_f64_count_ = f64;
+ f64 = param + i32 + i64 + f32;
+ f32 = param + i32 + i64;
+ i64 = param + i32;
+ i32 = param;
param = 0;
for (size_t i = 0; i < locals_.size(); i++) {
if (locals_.at(i).param_) {
e->params_.push_back(locals_.at(i).type_);
var_index[i] = param++;
} else if (locals_.at(i).type_ == kAstI32) {
- var_index[i] = int32++;
+ var_index[i] = i32++;
} else if (locals_.at(i).type_ == kAstI64) {
- var_index[i] = int64++;
+ var_index[i] = i64++;
} else if (locals_.at(i).type_ == kAstF32) {
- var_index[i] = float32++;
+ var_index[i] = f32++;
} else if (locals_.at(i).type_ == kAstF64) {
- var_index[i] = float64++;
+ var_index[i] = f64++;
}
}
}
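
The renumbering in IndexVars above is a two-pass scheme: the first pass counts locals per type, the counters are then reused as running base indices (params first, then i32s, i64s, f32s, f64s), and the second pass hands out indices from each base. The base computation, as a standalone sketch:

    #include <cstdint>

    struct LocalBases {
      uint16_t param, i32, i64, f32, f64;
    };

    // First index of each block when locals are laid out as
    // [params][i32s][i64s][f32s][f64s]; the f64 base needs every count
    // except the f64 count itself, mirroring IndexVars above.
    LocalBases ComputeBases(uint16_t params, uint16_t i32s, uint16_t i64s,
                            uint16_t f32s) {
      LocalBases b;
      b.param = 0;
      b.i32 = params;
      b.i64 = static_cast<uint16_t>(b.i32 + i32s);
      b.f32 = static_cast<uint16_t>(b.i64 + i64s);
      b.f64 = static_cast<uint16_t>(b.f32 + f32s);
      return b;
    }
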
@@ -269,7 +263,7 @@ uint32_t WasmFunctionEncoder::BodySize(void) const {
uint32_t WasmFunctionEncoder::NameSize() const {
- return exported_ ? static_cast<uint32_t>(name_.size()) : 0;
+ return HasName() ? static_cast<uint32_t>(name_.size()) : 0;
}
@@ -291,10 +285,10 @@ void WasmFunctionEncoder::Serialize(byte* buffer, byte** header,
}
if (HasLocals()) {
- EmitUint16(header, local_int32_count_);
- EmitUint16(header, local_int64_count_);
- EmitUint16(header, local_float32_count_);
- EmitUint16(header, local_float64_count_);
+ EmitUint16(header, local_i32_count_);
+ EmitUint16(header, local_i64_count_);
+ EmitUint16(header, local_f32_count_);
+ EmitUint16(header, local_f64_count_);
}
if (!external_) {
@@ -370,21 +364,21 @@ void WasmModuleBuilder::AddDataSegment(WasmDataSegmentEncoder* data) {
}
-int WasmModuleBuilder::CompareFunctionSigs::operator()(FunctionSig* a,
- FunctionSig* b) const {
- if (a->return_count() < b->return_count()) return -1;
- if (a->return_count() > b->return_count()) return 1;
- if (a->parameter_count() < b->parameter_count()) return -1;
- if (a->parameter_count() > b->parameter_count()) return 1;
+bool WasmModuleBuilder::CompareFunctionSigs::operator()(FunctionSig* a,
+ FunctionSig* b) const {
+ if (a->return_count() < b->return_count()) return true;
+ if (a->return_count() > b->return_count()) return false;
+ if (a->parameter_count() < b->parameter_count()) return true;
+ if (a->parameter_count() > b->parameter_count()) return false;
for (size_t r = 0; r < a->return_count(); r++) {
- if (a->GetReturn(r) < b->GetReturn(r)) return -1;
- if (a->GetReturn(r) > b->GetReturn(r)) return 1;
+ if (a->GetReturn(r) < b->GetReturn(r)) return true;
+ if (a->GetReturn(r) > b->GetReturn(r)) return false;
}
for (size_t p = 0; p < a->parameter_count(); p++) {
- if (a->GetParam(p) < b->GetParam(p)) return -1;
- if (a->GetParam(p) > b->GetParam(p)) return 1;
+ if (a->GetParam(p) < b->GetParam(p)) return true;
+ if (a->GetParam(p) > b->GetParam(p)) return false;
}
- return 0;
+ return false;
}
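
The CompareFunctionSigs change above is dictated by the container it feeds: a three-way -1/0/1 comparator suits qsort-style APIs, but ZoneMap (a std::map over zone memory) expects a bool less-than predicate forming a strict weak ordering, which must return false for equal keys. The same shape with standard types, as an illustrative sketch:

    #include <cstdint>
    #include <map>
    #include <vector>

    // Lexicographic less-than over int sequences, ordered first by length.
    // Returning false for equal keys is what lets the map treat two equal
    // signatures as one entry, which is how SignatureMap deduplicates.
    struct SeqLess {
      bool operator()(const std::vector<int>& a,
                      const std::vector<int>& b) const {
        if (a.size() != b.size()) return a.size() < b.size();
        for (size_t i = 0; i < a.size(); i++) {
          if (a[i] != b[i]) return a[i] < b[i];
        }
        return false;  // equal
      }
    };

    using SigMap = std::map<std::vector<int>, uint16_t, SeqLess>;
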
diff --git a/deps/v8/src/wasm/encoder.h b/deps/v8/src/wasm/encoder.h
index f0fabe998a..7b651bf95e 100644
--- a/deps/v8/src/wasm/encoder.h
+++ b/deps/v8/src/wasm/encoder.h
@@ -33,21 +33,21 @@ class WasmFunctionEncoder : public ZoneObject {
friend class WasmFunctionBuilder;
uint16_t signature_index_;
ZoneVector<LocalType> params_;
- uint16_t local_int32_count_;
- uint16_t local_int64_count_;
- uint16_t local_float32_count_;
- uint16_t local_float64_count_;
+ uint16_t local_i32_count_;
+ uint16_t local_i64_count_;
+ uint16_t local_f32_count_;
+ uint16_t local_f64_count_;
bool exported_;
bool external_;
ZoneVector<uint8_t> body_;
ZoneVector<char> name_;
bool HasLocals() const {
- return (local_int32_count_ + local_int64_count_ + local_float32_count_ +
- local_float64_count_) > 0;
+ return (local_i32_count_ + local_i64_count_ + local_f32_count_ +
+ local_f64_count_) > 0;
}
- bool HasName() const { return exported_ && name_.size() > 0; }
+ bool HasName() const { return (exported_ || external_) && name_.size() > 0; }
};
class WasmFunctionBuilder : public ZoneObject {
@@ -60,7 +60,6 @@ class WasmFunctionBuilder : public ZoneObject {
const uint32_t* local_indices, uint32_t indices_size);
void Emit(WasmOpcode opcode);
void EmitWithU8(WasmOpcode opcode, const byte immediate);
- void EmitWithLocal(WasmOpcode opcode);
uint32_t EmitEditableImmediate(const byte immediate);
void EditImmediate(uint32_t offset, const byte immediate);
void Exported(uint8_t flag);
@@ -134,12 +133,12 @@ class WasmModuleBuilder : public ZoneObject {
void AddIndirectFunction(uint16_t index);
WasmModuleWriter* Build(Zone* zone);
- private:
struct CompareFunctionSigs {
- int operator()(FunctionSig* a, FunctionSig* b) const;
+ bool operator()(FunctionSig* a, FunctionSig* b) const;
};
typedef ZoneMap<FunctionSig*, uint16_t, CompareFunctionSigs> SignatureMap;
+ private:
Zone* zone_;
ZoneVector<FunctionSig*> signatures_;
ZoneVector<WasmFunctionBuilder*> functions_;
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index 24f39822f9..62b000da2b 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -54,6 +54,7 @@ class ModuleDecoder : public Decoder {
module->functions = new std::vector<WasmFunction>();
module->data_segments = new std::vector<WasmDataSegment>();
module->function_table = new std::vector<uint16_t>();
+ module->import_table = new std::vector<WasmImport>();
bool sections[kMaxModuleSectionCode];
memset(sections, 0, sizeof(sections));
@@ -62,7 +63,7 @@ class ModuleDecoder : public Decoder {
while (pc_ < limit_) {
TRACE("DecodeSection\n");
WasmSectionDeclCode section =
- static_cast<WasmSectionDeclCode>(u8("section"));
+ static_cast<WasmSectionDeclCode>(consume_u8("section"));
// Each section should appear at most once.
if (section < kMaxModuleSectionCode) {
CheckForPreviousSection(sections, section, false);
@@ -75,20 +76,20 @@ class ModuleDecoder : public Decoder {
limit_ = pc_;
break;
case kDeclMemory:
- module->min_mem_size_log2 = u8("min memory");
- module->max_mem_size_log2 = u8("max memory");
- module->mem_export = u8("export memory") != 0;
+ module->min_mem_size_log2 = consume_u8("min memory");
+ module->max_mem_size_log2 = consume_u8("max memory");
+ module->mem_export = consume_u8("export memory") != 0;
break;
case kDeclSignatures: {
int length;
- uint32_t signatures_count = u32v(&length, "signatures count");
+ uint32_t signatures_count = consume_u32v(&length, "signatures count");
module->signatures->reserve(SafeReserve(signatures_count));
// Decode signatures.
for (uint32_t i = 0; i < signatures_count; i++) {
if (failed()) break;
TRACE("DecodeSignature[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- FunctionSig* s = sig(); // read function sig.
+ FunctionSig* s = consume_sig(); // read function sig.
module->signatures->push_back(s);
}
break;
@@ -97,15 +98,12 @@ class ModuleDecoder : public Decoder {
// Functions require a signature table first.
CheckForPreviousSection(sections, kDeclSignatures, true);
int length;
- uint32_t functions_count = u32v(&length, "functions count");
+ uint32_t functions_count = consume_u32v(&length, "functions count");
module->functions->reserve(SafeReserve(functions_count));
// Set up module environment for verification.
ModuleEnv menv;
menv.module = module;
- menv.globals_area = 0;
- menv.mem_start = 0;
- menv.mem_end = 0;
- menv.function_code = nullptr;
+ menv.instance = nullptr;
menv.asm_js = asm_js_;
// Decode functions.
for (uint32_t i = 0; i < functions_count; i++) {
@@ -114,7 +112,7 @@ class ModuleDecoder : public Decoder {
static_cast<int>(pc_ - start_));
module->functions->push_back(
- {nullptr, 0, 0, 0, 0, 0, 0, false, false});
+ {nullptr, i, 0, 0, 0, 0, 0, 0, false, false});
WasmFunction* function = &module->functions->back();
DecodeFunctionInModule(module, function, false);
}
@@ -133,7 +131,7 @@ class ModuleDecoder : public Decoder {
}
case kDeclGlobals: {
int length;
- uint32_t globals_count = u32v(&length, "globals count");
+ uint32_t globals_count = consume_u32v(&length, "globals count");
module->globals->reserve(SafeReserve(globals_count));
// Decode globals.
for (uint32_t i = 0; i < globals_count; i++) {
@@ -148,7 +146,8 @@ class ModuleDecoder : public Decoder {
}
case kDeclDataSegments: {
int length;
- uint32_t data_segments_count = u32v(&length, "data segments count");
+ uint32_t data_segments_count =
+ consume_u32v(&length, "data segments count");
module->data_segments->reserve(SafeReserve(data_segments_count));
// Decode data segments.
for (uint32_t i = 0; i < data_segments_count; i++) {
@@ -157,7 +156,7 @@ class ModuleDecoder : public Decoder {
static_cast<int>(pc_ - start_));
module->data_segments->push_back({0, 0, 0});
WasmDataSegment* segment = &module->data_segments->back();
- DecodeDataSegmentInModule(segment);
+ DecodeDataSegmentInModule(module, segment);
}
break;
}
@@ -165,14 +164,15 @@ class ModuleDecoder : public Decoder {
// An indirect function table requires functions first.
CheckForPreviousSection(sections, kDeclFunctions, true);
int length;
- uint32_t function_table_count = u32v(&length, "function table count");
+ uint32_t function_table_count =
+ consume_u32v(&length, "function table count");
module->function_table->reserve(SafeReserve(function_table_count));
// Decode function table.
for (uint32_t i = 0; i < function_table_count; i++) {
if (failed()) break;
TRACE("DecodeFunctionTable[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
- uint16_t index = u16();
+ uint16_t index = consume_u16();
if (index >= module->functions->size()) {
error(pc_ - 2, "invalid function index");
break;
@@ -181,13 +181,66 @@ class ModuleDecoder : public Decoder {
}
break;
}
+ case kDeclStartFunction: {
+ // Declares a start function for a module.
+ CheckForPreviousSection(sections, kDeclFunctions, true);
+ if (module->start_function_index >= 0) {
+ error("start function already declared");
+ break;
+ }
+ int length;
+ const byte* before = pc_;
+ uint32_t index = consume_u32v(&length, "start function index");
+ if (index >= module->functions->size()) {
+ error(before, "invalid start function index");
+ break;
+ }
+ module->start_function_index = static_cast<int>(index);
+ FunctionSig* sig =
+ module->signatures->at(module->functions->at(index).sig_index);
+ if (sig->parameter_count() > 0) {
+ error(before, "invalid start function: non-zero parameter count");
+ break;
+ }
+ break;
+ }
+ case kDeclImportTable: {
+ // Declares an import table.
+ CheckForPreviousSection(sections, kDeclSignatures, true);
+ int length;
+ uint32_t import_table_count =
+ consume_u32v(&length, "import table count");
+ module->import_table->reserve(SafeReserve(import_table_count));
+ // Decode import table.
+ for (uint32_t i = 0; i < import_table_count; i++) {
+ if (failed()) break;
+ TRACE("DecodeImportTable[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+
+ module->import_table->push_back({nullptr, 0, 0});
+ WasmImport* import = &module->import_table->back();
+
+ const byte* sigpos = pc_;
+ import->sig_index = consume_u16("signature index");
+
+ if (import->sig_index >= module->signatures->size()) {
+ error(sigpos, "invalid signature index");
+ } else {
+ import->sig = module->signatures->at(import->sig_index);
+ }
+ import->module_name_offset = consume_string("import module name");
+ import->function_name_offset =
+ consume_string("import function name");
+ }
+ break;
+ }
case kDeclWLL: {
// Reserved for experimentation by the Web Low-level Language project
// which is augmenting the binary encoding with source code meta
// information. This section does not affect the semantics of the code
// and can be ignored by the runtime. https://github.com/JSStats/wll
int length = 0;
- uint32_t section_size = u32v(&length, "section size");
+ uint32_t section_size = consume_u32v(&length, "section size");
if (pc_ + section_size > limit_ || pc_ + section_size < pc_) {
error(pc_ - length, "invalid section size");
break;
@@ -249,14 +302,14 @@ class ModuleDecoder : public Decoder {
FunctionResult DecodeSingleFunction(ModuleEnv* module_env,
WasmFunction* function) {
pc_ = start_;
- function->sig = sig(); // read signature
+ function->sig = consume_sig(); // read signature
function->name_offset = 0; // ---- name
function->code_start_offset = off(pc_ + 8); // ---- code start
function->code_end_offset = off(limit_); // ---- code end
- function->local_int32_count = u16(); // read u16
- function->local_int64_count = u16(); // read u16
- function->local_float32_count = u16(); // read u16
- function->local_float64_count = u16(); // read u16
+ function->local_i32_count = consume_u16(); // read u16
+ function->local_i64_count = consume_u16(); // read u16
+ function->local_f32_count = consume_u16(); // read u16
+ function->local_f64_count = consume_u16(); // read u16
function->exported = false; // ---- exported
function->external = false; // ---- external
@@ -271,7 +324,7 @@ class ModuleDecoder : public Decoder {
// Decodes a single function signature at {start}.
FunctionSig* DecodeFunctionSignature(const byte* start) {
pc_ = start;
- FunctionSig* result = sig();
+ FunctionSig* result = consume_sig();
return ok() ? result : nullptr;
}
@@ -284,19 +337,19 @@ class ModuleDecoder : public Decoder {
// Decodes a single global entry inside a module starting at {pc_}.
void DecodeGlobalInModule(WasmGlobal* global) {
- global->name_offset = string("global name");
+ global->name_offset = consume_string("global name");
global->type = mem_type();
global->offset = 0;
- global->exported = u8("exported") != 0;
+ global->exported = consume_u8("exported") != 0;
}
// Decodes a single function entry inside a module starting at {pc_}.
void DecodeFunctionInModule(WasmModule* module, WasmFunction* function,
bool verify_body = true) {
- byte decl_bits = u8("function decl");
+ byte decl_bits = consume_u8("function decl");
const byte* sigpos = pc_;
- function->sig_index = u16("signature index");
+ function->sig_index = consume_u16("signature index");
if (function->sig_index >= module->signatures->size()) {
return error(sigpos, "invalid signature index");
@@ -313,7 +366,7 @@ class ModuleDecoder : public Decoder {
(decl_bits & kDeclFunctionImport) == 0 ? " body" : "");
if (decl_bits & kDeclFunctionName) {
- function->name_offset = string("function name");
+ function->name_offset = consume_string("function name");
}
function->exported = decl_bits & kDeclFunctionExport;
@@ -325,13 +378,13 @@ class ModuleDecoder : public Decoder {
}
if (decl_bits & kDeclFunctionLocals) {
- function->local_int32_count = u16("int32 count");
- function->local_int64_count = u16("int64 count");
- function->local_float32_count = u16("float32 count");
- function->local_float64_count = u16("float64 count");
+ function->local_i32_count = consume_u16("i32 count");
+ function->local_i64_count = consume_u16("i64 count");
+ function->local_f32_count = consume_u16("f32 count");
+ function->local_f64_count = consume_u16("f64 count");
}
- uint16_t size = u16("body size");
+ uint16_t size = consume_u16("body size");
if (ok()) {
if ((pc_ + size) > limit_) {
return error(pc_, limit_,
@@ -345,35 +398,51 @@ class ModuleDecoder : public Decoder {
}
}
+ bool IsWithinLimit(uint32_t limit, uint32_t offset, uint32_t size) {
+ if (offset > limit) return false;
+ if ((offset + size) < offset) return false; // overflow
+ return (offset + size) <= limit;
+ }
+
// Decodes a single data segment entry inside a module starting at {pc_}.
- void DecodeDataSegmentInModule(WasmDataSegment* segment) {
- segment->dest_addr =
- u32("destination"); // TODO(titzer): check it's within the memory size.
- segment->source_offset = offset("source offset");
- segment->source_size =
- u32("source size"); // TODO(titzer): check the size is reasonable.
- segment->init = u8("init");
+ void DecodeDataSegmentInModule(WasmModule* module, WasmDataSegment* segment) {
+ segment->dest_addr = consume_u32("destination");
+ segment->source_offset = consume_offset("source offset");
+ segment->source_size = consume_u32("source size");
+ segment->init = consume_u8("init");
+
+ // Validate the data is in the module.
+ uint32_t module_limit = static_cast<uint32_t>(limit_ - start_);
+ if (!IsWithinLimit(module_limit, segment->source_offset,
+ segment->source_size)) {
+ error(pc_ - sizeof(uint32_t), "segment out of bounds of module");
+ }
+
+ // Validate that the segment will fit into the (minimum) memory.
+ uint32_t memory_limit =
+ 1 << (module ? module->min_mem_size_log2 : WasmModule::kMaxMemSize);
+ if (!IsWithinLimit(memory_limit, segment->dest_addr,
+ segment->source_size)) {
+ error(pc_ - sizeof(uint32_t), "segment out of bounds of memory");
+ }
}
// Verifies the body (code) of a given function.
void VerifyFunctionBody(uint32_t func_num, ModuleEnv* menv,
WasmFunction* function) {
if (FLAG_trace_wasm_decode_time) {
- // TODO(titzer): clean me up a bit.
OFStream os(stdout);
- os << "Verifying WASM function:";
- if (function->name_offset > 0) {
- os << menv->module->GetName(function->name_offset);
- }
+ os << "Verifying WASM function " << WasmFunctionName(function, menv)
+ << std::endl;
os << std::endl;
}
FunctionEnv fenv;
fenv.module = menv;
fenv.sig = function->sig;
- fenv.local_int32_count = function->local_int32_count;
- fenv.local_int64_count = function->local_int64_count;
- fenv.local_float32_count = function->local_float32_count;
- fenv.local_float64_count = function->local_float64_count;
+ fenv.local_i32_count = function->local_i32_count;
+ fenv.local_i64_count = function->local_i64_count;
+ fenv.local_f32_count = function->local_f32_count;
+ fenv.local_f64_count = function->local_f64_count;
fenv.SumLocals();
TreeResult result =
@@ -382,8 +451,7 @@ class ModuleDecoder : public Decoder {
if (result.failed()) {
// Wrap the error message from the function decoder.
std::ostringstream str;
- str << "in function #" << func_num << ": ";
- // TODO(titzer): add function name for the user?
+ str << "in function " << WasmFunctionName(function, menv) << ": ";
str << result;
std::string strval = str.str();
const char* raw = strval.c_str();
@@ -400,8 +468,8 @@ class ModuleDecoder : public Decoder {
// Reads a single 32-bit unsigned integer interpreted as an offset, checking
// the offset is within bounds and advances.
- uint32_t offset(const char* name = nullptr) {
- uint32_t offset = u32(name ? name : "offset");
+ uint32_t consume_offset(const char* name = nullptr) {
+ uint32_t offset = consume_u32(name ? name : "offset");
if (offset > static_cast<uint32_t>(limit_ - start_)) {
error(pc_ - sizeof(uint32_t), "offset out of bounds of module");
}
@@ -410,13 +478,14 @@ class ModuleDecoder : public Decoder {
// Reads a single 32-bit unsigned integer interpreted as an offset into the
// data and validating the string there and advances.
- uint32_t string(const char* name = nullptr) {
- return offset(name ? name : "string"); // TODO(titzer): validate string
+ uint32_t consume_string(const char* name = nullptr) {
+ // TODO(titzer): validate string
+ return consume_offset(name ? name : "string");
}
// Reads a single 8-bit integer, interpreting it as a local type.
- LocalType local_type() {
- byte val = u8("local type");
+ LocalType consume_local_type() {
+ byte val = consume_u8("local type");
LocalTypeCode t = static_cast<LocalTypeCode>(val);
switch (t) {
case kLocalVoid:
@@ -437,7 +506,7 @@ class ModuleDecoder : public Decoder {
// Reads a single 8-bit integer, interpreting it as a memory type.
MachineType mem_type() {
- byte val = u8("memory type");
+ byte val = consume_u8("memory type");
MemTypeCode t = static_cast<MemTypeCode>(val);
switch (t) {
case kMemI8:
@@ -467,14 +536,14 @@ class ModuleDecoder : public Decoder {
}
// Parses an inline function signature.
- FunctionSig* sig() {
- byte count = u8("param count");
- LocalType ret = local_type();
+ FunctionSig* consume_sig() {
+ byte count = consume_u8("param count");
+ LocalType ret = consume_local_type();
FunctionSig::Builder builder(module_zone, ret == kAstStmt ? 0 : 1, count);
if (ret != kAstStmt) builder.AddReturn(ret);
for (int i = 0; i < count; i++) {
- LocalType param = local_type();
+ LocalType param = consume_local_type();
if (param == kAstStmt) error(pc_ - 1, "invalid void parameter type");
builder.AddParam(param);
}
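
The IsWithinLimit helper added above is the standard overflow-safe form of "offset + size <= limit" for 32-bit quantities: the middle check rejects the wraparound case that a naive comparison would accept, which matters here because source_size comes straight from untrusted module bytes. Standalone, with a concrete failure case:

    #include <cassert>
    #include <cstdint>

    bool IsWithinLimit(uint32_t limit, uint32_t offset, uint32_t size) {
      if (offset > limit) return false;
      if (offset + size < offset) return false;  // uint32_t wraparound
      return offset + size <= limit;
    }

    int main() {
      // 0x10 + 0xFFFFFFFF wraps to 0x0F, so a naive
      // "offset + size <= limit" would wrongly pass for any limit >= 0x0F.
      assert(!IsWithinLimit(0x1000, 0x10, 0xFFFFFFFFu));
      assert(IsWithinLimit(0x1000, 0x0FF0, 0x10));
      return 0;
    }
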
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 80d8bdb236..62a2676032 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -37,6 +37,7 @@ struct RawBuffer {
RawBuffer GetRawBufferArgument(
ErrorThrower& thrower, const v8::FunctionCallbackInfo<v8::Value>& args) {
+ // TODO(titzer): allow typed array views.
if (args.Length() < 1 || !args[0]->IsArrayBuffer()) {
thrower.Error("Argument 0 must be an array buffer");
return {nullptr, nullptr};
@@ -44,8 +45,6 @@ RawBuffer GetRawBufferArgument(
Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(args[0]);
ArrayBuffer::Contents contents = buffer->GetContents();
- // TODO(titzer): allow offsets into buffers, views, etc.
-
const byte* start = reinterpret_cast<const byte*>(contents.Data());
const byte* end = start + contents.ByteLength();
@@ -100,33 +99,8 @@ void VerifyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (result.val) delete result.val;
}
-
-void CompileRun(const v8::FunctionCallbackInfo<v8::Value>& args) {
- HandleScope scope(args.GetIsolate());
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- ErrorThrower thrower(isolate, "WASM.compileRun()");
-
- RawBuffer buffer = GetRawBufferArgument(thrower, args);
- if (thrower.error()) return;
-
- // Decode and pre-verify the functions before compiling and running.
- i::Zone zone;
- internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
- isolate, &zone, buffer.start, buffer.end, true, false);
-
- if (result.failed()) {
- thrower.Failed("", result);
- } else {
- // Success. Compile and run!
- int32_t retval = i::wasm::CompileAndRunWasmModule(isolate, result.val);
- args.GetReturnValue().Set(retval);
- }
-
- if (result.val) delete result.val;
-}
-
-
-v8::internal::wasm::WasmModuleIndex* TranslateAsmModule(i::ParseInfo* info) {
+v8::internal::wasm::WasmModuleIndex* TranslateAsmModule(
+ i::ParseInfo* info, i::Handle<i::Object> foreign, ErrorThrower* thrower) {
info->set_global();
info->set_lazy(false);
info->set_allow_lazy_parsing(false);
@@ -141,61 +115,79 @@ v8::internal::wasm::WasmModuleIndex* TranslateAsmModule(i::ParseInfo* info) {
v8::internal::AsmTyper typer(info->isolate(), info->zone(), *(info->script()),
info->literal());
+ if (i::FLAG_enable_simd_asmjs) {
+ typer.set_allow_simd(true);
+ }
if (!typer.Validate()) {
+ thrower->Error("Asm.js validation failed: %s", typer.error_message());
return nullptr;
}
auto module = v8::internal::wasm::AsmWasmBuilder(
- info->isolate(), info->zone(), info->literal())
+ info->isolate(), info->zone(), info->literal(), foreign)
.Run();
+
+ if (i::FLAG_dump_asmjs_wasm) {
+ FILE* wasm_file = fopen(i::FLAG_asmjs_wasm_dumpfile, "wb");
+ if (wasm_file) {
+ fwrite(module->Begin(), module->End() - module->Begin(), 1, wasm_file);
+ fclose(wasm_file);
+ }
+ }
+
return module;
}
-void AsmCompileRun(const v8::FunctionCallbackInfo<v8::Value>& args) {
- HandleScope scope(args.GetIsolate());
+void InstantiateModuleCommon(const v8::FunctionCallbackInfo<v8::Value>& args,
+ const byte* start, const byte* end,
+ ErrorThrower* thrower, bool must_decode) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- ErrorThrower thrower(isolate, "WASM.asmCompileRun()");
- if (args.Length() != 1) {
- thrower.Error("Invalid argument count");
- return;
- }
- if (!args[0]->IsString()) {
- thrower.Error("Invalid argument count");
- return;
+ i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
+ if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
+ Local<Object> obj = Local<Object>::Cast(args[2]);
+ i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
+ memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
}
- i::Factory* factory = isolate->factory();
+ // Decode but avoid a redundant pass over function bodies for verification.
+ // Verification will happen during compilation.
i::Zone zone;
- Local<String> source = Local<String>::Cast(args[0]);
- i::Handle<i::Script> script = factory->NewScript(Utils::OpenHandle(*source));
- i::ParseInfo info(&zone, script);
+ internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
+ isolate, &zone, start, end, false, false);
- auto module = TranslateAsmModule(&info);
- if (module == nullptr) {
- thrower.Error("Asm.js validation failed");
- return;
+ if (result.failed() && must_decode) {
+ thrower->Error("Asm.js converted module failed to decode");
+ } else if (result.failed()) {
+ thrower->Failed("", result);
+ } else {
+ // Success. Instantiate the module and return the object.
+ i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
+ if (args.Length() > 1 && args[1]->IsObject()) {
+ Local<Object> obj = Local<Object>::Cast(args[1]);
+ ffi = i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
+ }
+
+ i::MaybeHandle<i::JSObject> object =
+ result.val->Instantiate(isolate, ffi, memory);
+
+ if (!object.is_null()) {
+ args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
+ }
}
- int32_t result = v8::internal::wasm::CompileAndRunWasmModule(
- isolate, module->Begin(), module->End(), true);
- args.GetReturnValue().Set(result);
+ if (result.val) delete result.val;
}
-// TODO(aseemgarg): deal with arraybuffer and foreign functions
void InstantiateModuleFromAsm(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope scope(args.GetIsolate());
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
ErrorThrower thrower(isolate, "WASM.instantiateModuleFromAsm()");
- if (args.Length() != 1) {
- thrower.Error("Invalid argument count");
- return;
- }
if (!args[0]->IsString()) {
- thrower.Error("Invalid argument count");
+ thrower.Error("Asm module text should be a string");
return;
}
@@ -205,31 +197,18 @@ void InstantiateModuleFromAsm(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::Script> script = factory->NewScript(Utils::OpenHandle(*source));
i::ParseInfo info(&zone, script);
- auto module = TranslateAsmModule(&info);
- if (module == nullptr) {
- thrower.Error("Asm.js validation failed");
- return;
+ i::Handle<i::Object> foreign;
+ if (args.Length() > 1 && args[1]->IsObject()) {
+ Local<Object> local_foreign = Local<Object>::Cast(args[1]);
+ foreign = v8::Utils::OpenHandle(*local_foreign);
}
- i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
- internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
- isolate, &zone, module->Begin(), module->End(), false, false);
-
- if (result.failed()) {
- thrower.Failed("", result);
- } else {
- // Success. Instantiate the module and return the object.
- i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
-
- i::MaybeHandle<i::JSObject> object =
- result.val->Instantiate(isolate, ffi, memory);
-
- if (!object.is_null()) {
- args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
- }
+ auto module = TranslateAsmModule(&info, foreign, &thrower);
+ if (module == nullptr) {
+ return;
}
- if (result.val) delete result.val;
+ InstantiateModuleCommon(args, module->Begin(), module->End(), &thrower, true);
}
@@ -241,38 +220,7 @@ void InstantiateModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
RawBuffer buffer = GetRawBufferArgument(thrower, args);
if (buffer.start == nullptr) return;
- i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
- if (args.Length() > 2 && args[2]->IsArrayBuffer()) {
- Local<Object> obj = Local<Object>::Cast(args[2]);
- i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
- memory = i::Handle<i::JSArrayBuffer>(i::JSArrayBuffer::cast(*mem_obj));
- }
-
- // Decode but avoid a redundant pass over function bodies for verification.
- // Verification will happen during compilation.
- i::Zone zone;
- internal::wasm::ModuleResult result = internal::wasm::DecodeWasmModule(
- isolate, &zone, buffer.start, buffer.end, false, false);
-
- if (result.failed()) {
- thrower.Failed("", result);
- } else {
- // Success. Instantiate the module and return the object.
- i::Handle<i::JSObject> ffi = i::Handle<i::JSObject>::null();
- if (args.Length() > 1 && args[1]->IsObject()) {
- Local<Object> obj = Local<Object>::Cast(args[1]);
- ffi = i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
- }
-
- i::MaybeHandle<i::JSObject> object =
- result.val->Instantiate(isolate, ffi, memory);
-
- if (!object.is_null()) {
- args.GetReturnValue().Set(v8::Utils::ToLocal(object.ToHandleChecked()));
- }
- }
-
- if (result.val) delete result.val;
+ InstantiateModuleCommon(args, buffer.start, buffer.end, &thrower, false);
}
} // namespace
@@ -322,11 +270,9 @@ void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
JSObject::AddProperty(global, name, wasm_object, attributes);
// Install functions on the WASM object.
- InstallFunc(isolate, wasm_object, "instantiateModule", InstantiateModule);
InstallFunc(isolate, wasm_object, "verifyModule", VerifyModule);
InstallFunc(isolate, wasm_object, "verifyFunction", VerifyFunction);
- InstallFunc(isolate, wasm_object, "compileRun", CompileRun);
- InstallFunc(isolate, wasm_object, "asmCompileRun", AsmCompileRun);
+ InstallFunc(isolate, wasm_object, "instantiateModule", InstantiateModule);
InstallFunc(isolate, wasm_object, "instantiateModuleFromAsm",
InstantiateModuleFromAsm);
}
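
The FLAG_dump_asmjs_wasm handling added in TranslateAsmModule above follows a best-effort dump pattern: open the dump file, write the raw [Begin, End) byte range, close, and silently skip the dump if the file cannot be opened. Reduced to a standalone helper (the name is illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Writes [begin, end) to a binary file; dumping is best-effort, so an
    // open failure is ignored rather than reported.
    void DumpBytes(const char* path, const uint8_t* begin, const uint8_t* end) {
      FILE* file = fopen(path, "wb");
      if (file == nullptr) return;
      fwrite(begin, 1, static_cast<size_t>(end - begin), file);
      fclose(file);
    }
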
diff --git a/deps/v8/src/wasm/wasm-macro-gen.h b/deps/v8/src/wasm/wasm-macro-gen.h
index 470804a73d..dd653c1740 100644
--- a/deps/v8/src/wasm/wasm-macro-gen.h
+++ b/deps/v8/src/wasm/wasm-macro-gen.h
@@ -22,10 +22,10 @@
#define WASM_SELECT(cond, tval, fval) kExprSelect, cond, tval, fval
#define WASM_BR(depth) kExprBr, static_cast<byte>(depth), kExprNop
#define WASM_BR_IF(depth, cond) \
- kExprBrIf, static_cast<byte>(depth), cond, kExprNop
+ kExprBrIf, static_cast<byte>(depth), kExprNop, cond
#define WASM_BRV(depth, val) kExprBr, static_cast<byte>(depth), val
-#define WASM_BRV_IF(depth, cond, val) \
- kExprBrIf, static_cast<byte>(depth), cond, val
+#define WASM_BRV_IF(depth, val, cond) \
+ kExprBrIf, static_cast<byte>(depth), val, cond
#define WASM_BREAK(depth) kExprBr, static_cast<byte>(depth + 1), kExprNop
#define WASM_CONTINUE(depth) kExprBr, static_cast<byte>(depth), kExprNop
#define WASM_BREAKV(depth, val) kExprBr, static_cast<byte>(depth + 1), val
@@ -104,9 +104,12 @@
static_cast<byte>(offset), index, val
#define WASM_CALL_FUNCTION(index, ...) \
kExprCallFunction, static_cast<byte>(index), __VA_ARGS__
+#define WASM_CALL_IMPORT(index, ...) \
+ kExprCallImport, static_cast<byte>(index), __VA_ARGS__
#define WASM_CALL_INDIRECT(index, func, ...) \
kExprCallIndirect, static_cast<byte>(index), func, __VA_ARGS__
#define WASM_CALL_FUNCTION0(index) kExprCallFunction, static_cast<byte>(index)
+#define WASM_CALL_IMPORT0(index) kExprCallImport, static_cast<byte>(index)
#define WASM_CALL_INDIRECT0(index, func) \
kExprCallIndirect, static_cast<byte>(index), func
#define WASM_NOT(x) kExprBoolNot, x
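
The WASM_BRV_IF change above swaps the macro's operand order to (value, condition), matching the operand order the decoder now expects for kExprBrIf. These macros are comma-expanding byte lists, so reordering arguments directly reorders the emitted bytecode. A self-contained illustration using placeholder opcode values (the real values are defined in V8's wasm-opcodes.h):

    #include <cstdint>

    // Placeholder opcode bytes, for illustration only.
    const uint8_t kNop = 0x00, kBrIf = 0x07, kI8Const = 0x09;

    #define DEMO_I8(v) kI8Const, static_cast<uint8_t>(v)
    #define DEMO_BRV_IF(depth, val, cond) \
      kBrIf, static_cast<uint8_t>(depth), val, cond

    // Expands to {0x07, 0x00, 0x09, 0x2A, 0x09, 0x01}: the branch value
    // (42) now precedes the condition (1) in the byte stream.
    const uint8_t kDemoCode[] = {DEMO_BRV_IF(0, DEMO_I8(42), DEMO_I8(1))};
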
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index fd2428080b..02d197c547 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -31,33 +31,32 @@ std::ostream& operator<<(std::ostream& os, const WasmModule& module) {
std::ostream& operator<<(std::ostream& os, const WasmFunction& function) {
- os << "WASM function with signature ";
+ os << "WASM function with signature " << *function.sig;
- // TODO(titzer): factor out rendering of signatures.
- if (function.sig->return_count() == 0) os << "v";
- for (size_t i = 0; i < function.sig->return_count(); i++) {
- os << WasmOpcodes::ShortNameOf(function.sig->GetReturn(i));
- }
- os << "_";
- if (function.sig->parameter_count() == 0) os << "v";
- for (size_t i = 0; i < function.sig->parameter_count(); i++) {
- os << WasmOpcodes::ShortNameOf(function.sig->GetParam(i));
- }
os << " locals: ";
- if (function.local_int32_count)
- os << function.local_int32_count << " int32s ";
- if (function.local_int64_count)
- os << function.local_int64_count << " int64s ";
- if (function.local_float32_count)
- os << function.local_float32_count << " float32s ";
- if (function.local_float64_count)
- os << function.local_float64_count << " float64s ";
+ if (function.local_i32_count) os << function.local_i32_count << " i32s ";
+ if (function.local_i64_count) os << function.local_i64_count << " i64s ";
+ if (function.local_f32_count) os << function.local_f32_count << " f32s ";
+ if (function.local_f64_count) os << function.local_f64_count << " f64s ";
os << " code bytes: "
<< (function.code_end_offset - function.code_start_offset);
return os;
}
+std::ostream& operator<<(std::ostream& os, const WasmFunctionName& pair) {
+ os << "#" << pair.function_->func_index << ":";
+ if (pair.function_->name_offset > 0) {
+ if (pair.module_) {
+ os << pair.module_->GetName(pair.function_->name_offset);
+ } else {
+ os << "+" << pair.function_->func_index;
+ }
+ } else {
+ os << "?";
+ }
+ return os;
+}
// A helper class for compiling multiple wasm functions that offers
// placeholder code objects for calling functions that are not yet compiled.
@@ -193,35 +192,98 @@ Handle<FixedArray> BuildFunctionTable(Isolate* isolate, WasmModule* module) {
return fixed;
}
-
-Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, int size,
+Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
byte** backing_store) {
- void* memory = isolate->array_buffer_allocator()->Allocate(size);
- if (!memory) return Handle<JSArrayBuffer>::null();
+ if (size > (1 << WasmModule::kMaxMemSize)) {
+ // TODO(titzer): lift restriction on maximum memory allocated here.
+ *backing_store = nullptr;
+ return Handle<JSArrayBuffer>::null();
+ }
+ void* memory =
+ isolate->array_buffer_allocator()->Allocate(static_cast<int>(size));
+ if (!memory) {
+ *backing_store = nullptr;
+ return Handle<JSArrayBuffer>::null();
+ }
+
*backing_store = reinterpret_cast<byte*>(memory);
#if DEBUG
// Double check the API allocator actually zero-initialized the memory.
- for (int i = 0; i < size; i++) {
- DCHECK_EQ(0, (*backing_store)[i]);
+ byte* bytes = reinterpret_cast<byte*>(*backing_store);
+ for (size_t i = 0; i < size; i++) {
+ DCHECK_EQ(0, bytes[i]);
}
#endif
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
- JSArrayBuffer::Setup(buffer, isolate, false, memory, size);
+ JSArrayBuffer::Setup(buffer, isolate, false, memory, static_cast<int>(size));
buffer->set_is_neuterable(false);
return buffer;
}
-} // namespace
+// Set the memory for a module instance to be the {memory} array buffer.
+void SetMemory(WasmModuleInstance* instance, Handle<JSArrayBuffer> memory) {
+ memory->set_is_neuterable(false);
+ instance->mem_start = reinterpret_cast<byte*>(memory->backing_store());
+ instance->mem_size = memory->byte_length()->Number();
+ instance->mem_buffer = memory;
+}
+
+// Allocate memory for a module instance as a new JSArrayBuffer.
+bool AllocateMemory(ErrorThrower* thrower, Isolate* isolate,
+ WasmModuleInstance* instance) {
+ DCHECK(instance->module);
+ DCHECK(instance->mem_buffer.is_null());
+
+ if (instance->module->min_mem_size_log2 > WasmModule::kMaxMemSize) {
+ thrower->Error("Out of memory: wasm memory too large");
+ return false;
+ }
+ instance->mem_size = static_cast<size_t>(1)
+ << instance->module->min_mem_size_log2;
+ instance->mem_buffer =
+ NewArrayBuffer(isolate, instance->mem_size, &instance->mem_start);
+ if (!instance->mem_start) {
+ thrower->Error("Out of memory: wasm memory");
+ instance->mem_size = 0;
+ return false;
+ }
+ return true;
+}
+
+bool AllocateGlobals(ErrorThrower* thrower, Isolate* isolate,
+ WasmModuleInstance* instance) {
+ instance->globals_size = AllocateGlobalsOffsets(instance->module->globals);
+
+ if (instance->globals_size > 0) {
+ instance->globals_buffer = NewArrayBuffer(isolate, instance->globals_size,
+ &instance->globals_start);
+ if (!instance->globals_start) {
+ // Not enough space for backing store of globals.
+ thrower->Error("Out of memory: wasm globals");
+ return false;
+ }
+ }
+ return true;
+}
+} // namespace
WasmModule::WasmModule()
- : globals(nullptr),
+ : shared_isolate(nullptr),
+ module_start(nullptr),
+ module_end(nullptr),
+ min_mem_size_log2(0),
+ max_mem_size_log2(0),
+ mem_export(false),
+ mem_external(false),
+ start_function_index(-1),
+ globals(nullptr),
signatures(nullptr),
functions(nullptr),
data_segments(nullptr),
- function_table(nullptr) {}
-
+ function_table(nullptr),
+ import_table(nullptr) {}
WasmModule::~WasmModule() {
if (globals) delete globals;
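
One detail worth noting in AllocateMemory above: the instance memory size is computed as static_cast<size_t>(1) << min_mem_size_log2 rather than 1 << min_mem_size_log2, because the latter is evaluated in int and overflows for shifts of 31 or more before the assignment could widen it. Minimal sketch:

    #include <cstddef>
    #include <cstdint>

    // Computes 2^log2 bytes without overflowing 32-bit int arithmetic:
    // widen to size_t first, then shift. Callers cap log2 (kMaxMemSize).
    size_t MemSizeBytes(uint32_t log2) {
      return static_cast<size_t>(1) << log2;
    }
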
@@ -229,8 +291,33 @@ WasmModule::~WasmModule() {
if (functions) delete functions;
if (data_segments) delete data_segments;
if (function_table) delete function_table;
+ if (import_table) delete import_table;
}
+static MaybeHandle<JSFunction> LookupFunction(ErrorThrower& thrower,
+ Handle<JSObject> ffi,
+ uint32_t index,
+ Handle<String> name,
+ const char* cstr) {
+ if (!ffi.is_null()) {
+ MaybeHandle<Object> result = Object::GetProperty(ffi, name);
+ if (!result.is_null()) {
+ Handle<Object> obj = result.ToHandleChecked();
+ if (obj->IsJSFunction()) {
+ return Handle<JSFunction>::cast(obj);
+ } else {
+ thrower.Error("FFI function #%d:%s is not a JSFunction.", index, cstr);
+ return MaybeHandle<JSFunction>();
+ }
+ } else {
+ thrower.Error("FFI function #%d:%s not found.", index, cstr);
+ return MaybeHandle<JSFunction>();
+ }
+ } else {
+ thrower.Error("FFI table is not an object.");
+ return MaybeHandle<JSFunction>();
+ }
+}
// Instantiates a wasm module as a JSObject.
// * allocates a backing store of {mem_size} bytes.
@@ -242,95 +329,91 @@ MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
Handle<JSArrayBuffer> memory) {
this->shared_isolate = isolate; // TODO(titzer): have a real shared isolate.
ErrorThrower thrower(isolate, "WasmModule::Instantiate()");
-
Factory* factory = isolate->factory();
- // Memory is bigger than maximum supported size.
- if (memory.is_null() && min_mem_size_log2 > kMaxMemSize) {
- thrower.Error("Out of memory: wasm memory too large");
- return MaybeHandle<JSObject>();
- }
+ //-------------------------------------------------------------------------
+ // Allocate the instance and its JS counterpart.
+ //-------------------------------------------------------------------------
Handle<Map> map = factory->NewMap(
JS_OBJECT_TYPE,
JSObject::kHeaderSize + kWasmModuleInternalFieldCount * kPointerSize);
-
- //-------------------------------------------------------------------------
- // Allocate the module object.
- //-------------------------------------------------------------------------
- Handle<JSObject> module = factory->NewJSObjectFromMap(map, TENURED);
+ WasmModuleInstance instance(this);
+ std::vector<Handle<Code>> import_code;
+ instance.context = isolate->native_context();
+ instance.js_object = factory->NewJSObjectFromMap(map, TENURED);
Handle<FixedArray> code_table =
factory->NewFixedArray(static_cast<int>(functions->size()), TENURED);
+ instance.js_object->SetInternalField(kWasmModuleCodeTable, *code_table);
//-------------------------------------------------------------------------
- // Allocate the linear memory.
+ // Allocate and initialize the linear memory.
//-------------------------------------------------------------------------
- uint32_t mem_size = 1 << min_mem_size_log2;
- byte* mem_addr = nullptr;
- Handle<JSArrayBuffer> mem_buffer;
- if (!memory.is_null()) {
- memory->set_is_neuterable(false);
- mem_addr = reinterpret_cast<byte*>(memory->backing_store());
- mem_size = memory->byte_length()->Number();
- mem_buffer = memory;
- } else {
- mem_buffer = NewArrayBuffer(isolate, mem_size, &mem_addr);
- if (!mem_addr) {
- // Not enough space for backing store of memory
- thrower.Error("Out of memory: wasm memory");
+ if (memory.is_null()) {
+ if (!AllocateMemory(&thrower, isolate, &instance)) {
return MaybeHandle<JSObject>();
}
+ } else {
+ SetMemory(&instance, memory);
}
-
- // Load initialized data segments.
- LoadDataSegments(this, mem_addr, mem_size);
-
- module->SetInternalField(kWasmMemArrayBuffer, *mem_buffer);
+ instance.js_object->SetInternalField(kWasmMemArrayBuffer,
+ *instance.mem_buffer);
+ LoadDataSegments(this, instance.mem_start, instance.mem_size);
if (mem_export) {
// Export the memory as a named property.
Handle<String> name = factory->InternalizeUtf8String("memory");
- JSObject::AddProperty(module, name, mem_buffer, READ_ONLY);
+ JSObject::AddProperty(instance.js_object, name, instance.mem_buffer,
+ READ_ONLY);
}
//-------------------------------------------------------------------------
// Allocate the globals area if necessary.
//-------------------------------------------------------------------------
- size_t globals_size = AllocateGlobalsOffsets(globals);
- byte* globals_addr = nullptr;
- if (globals_size > 0) {
- Handle<JSArrayBuffer> globals_buffer =
- NewArrayBuffer(isolate, mem_size, &globals_addr);
- if (!globals_addr) {
- // Not enough space for backing store of globals.
- thrower.Error("Out of memory: wasm globals");
- return MaybeHandle<JSObject>();
- }
-
- module->SetInternalField(kWasmGlobalsArrayBuffer, *globals_buffer);
- } else {
- module->SetInternalField(kWasmGlobalsArrayBuffer, Smi::FromInt(0));
+ if (!AllocateGlobals(&thrower, isolate, &instance)) {
+ return MaybeHandle<JSObject>();
+ }
+ if (!instance.globals_buffer.is_null()) {
+ instance.js_object->SetInternalField(kWasmGlobalsArrayBuffer,
+ *instance.globals_buffer);
}
//-------------------------------------------------------------------------
- // Compile all functions in the module.
+ // Compile wrappers to imported functions.
//-------------------------------------------------------------------------
- int index = 0;
+ uint32_t index = 0;
+ instance.function_table = BuildFunctionTable(isolate, this);
WasmLinker linker(isolate, functions->size());
ModuleEnv module_env;
module_env.module = this;
- module_env.mem_start = reinterpret_cast<uintptr_t>(mem_addr);
- module_env.mem_end = reinterpret_cast<uintptr_t>(mem_addr) + mem_size;
- module_env.globals_area = reinterpret_cast<uintptr_t>(globals_addr);
+ module_env.instance = &instance;
module_env.linker = &linker;
- module_env.function_code = nullptr;
- module_env.function_table = BuildFunctionTable(isolate, this);
- module_env.memory = memory;
- module_env.context = isolate->native_context();
module_env.asm_js = false;
+ if (import_table->size() > 0) {
+ instance.import_code = &import_code;
+ instance.import_code->reserve(import_table->size());
+ for (const WasmImport& import : *import_table) {
+ const char* cstr = GetName(import.function_name_offset);
+ Handle<String> name = factory->InternalizeUtf8String(cstr);
+ MaybeHandle<JSFunction> function =
+ LookupFunction(thrower, ffi, index, name, cstr);
+ if (function.is_null()) return MaybeHandle<JSObject>();
+ Handle<Code> code = compiler::CompileWasmToJSWrapper(
+ isolate, &module_env, function.ToHandleChecked(), import.sig, cstr);
+ instance.import_code->push_back(code);
+ index++;
+ }
+ }
+
+ //-------------------------------------------------------------------------
+ // Compile all functions in the module.
+ //-------------------------------------------------------------------------
+
// First pass: compile each function and initialize the code table.
+ index = 0;
for (const WasmFunction& func : *functions) {
if (thrower.error()) break;
+ DCHECK_EQ(index, func.func_index);
const char* cstr = GetName(func.name_offset);
Handle<String> name = factory->InternalizeUtf8String(cstr);
@@ -338,38 +421,21 @@ MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
Handle<JSFunction> function = Handle<JSFunction>::null();
if (func.external) {
// Lookup external function in FFI object.
- if (!ffi.is_null()) {
- MaybeHandle<Object> result = Object::GetProperty(ffi, name);
- if (!result.is_null()) {
- Handle<Object> obj = result.ToHandleChecked();
- if (obj->IsJSFunction()) {
- function = Handle<JSFunction>::cast(obj);
- code = compiler::CompileWasmToJSWrapper(isolate, &module_env,
- function, index);
- } else {
- thrower.Error("FFI function #%d:%s is not a JSFunction.", index,
- cstr);
- return MaybeHandle<JSObject>();
- }
- } else {
- thrower.Error("FFI function #%d:%s not found.", index, cstr);
- return MaybeHandle<JSObject>();
- }
- } else {
- thrower.Error("FFI table is not an object.");
- return MaybeHandle<JSObject>();
- }
+ MaybeHandle<JSFunction> function =
+ LookupFunction(thrower, ffi, index, name, cstr);
+ if (function.is_null()) return MaybeHandle<JSObject>();
+ code = compiler::CompileWasmToJSWrapper(
+ isolate, &module_env, function.ToHandleChecked(), func.sig, cstr);
} else {
// Compile the function.
- code = compiler::CompileWasmFunction(thrower, isolate, &module_env, func,
- index);
+ code = compiler::CompileWasmFunction(thrower, isolate, &module_env, func);
if (code.is_null()) {
thrower.Error("Compilation of #%d:%s failed.", index, cstr);
return MaybeHandle<JSObject>();
}
if (func.exported) {
- function = compiler::CompileJSToWasmWrapper(isolate, &module_env, name,
- code, module, index);
+ function = compiler::CompileJSToWasmWrapper(
+ isolate, &module_env, name, code, instance.js_object, index);
}
}
if (!code.is_null()) {
@@ -379,27 +445,54 @@ MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
}
if (func.exported) {
// Exported functions are installed as read-only properties on the module.
- JSObject::AddProperty(module, name, function, READ_ONLY);
+ JSObject::AddProperty(instance.js_object, name, function, READ_ONLY);
}
index++;
}
// Second pass: patch all direct call sites.
- linker.Link(module_env.function_table, this->function_table);
-
- module->SetInternalField(kWasmModuleFunctionTable, Smi::FromInt(0));
- module->SetInternalField(kWasmModuleCodeTable, *code_table);
- return module;
+ linker.Link(instance.function_table, this->function_table);
+ instance.js_object->SetInternalField(kWasmModuleFunctionTable,
+ Smi::FromInt(0));
+
+ // Run the start function if one was specified.
+ if (this->start_function_index >= 0) {
+ HandleScope scope(isolate);
+ uint32_t index = static_cast<uint32_t>(this->start_function_index);
+ Handle<String> name = isolate->factory()->NewStringFromStaticChars("start");
+ Handle<Code> code = linker.GetFunctionCode(index);
+ Handle<JSFunction> jsfunc = compiler::CompileJSToWasmWrapper(
+ isolate, &module_env, name, code, instance.js_object, index);
+
+ // Call the JS function.
+ Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate, jsfunc, undefined, 0, nullptr);
+
+ if (retval.is_null()) {
+ thrower.Error("WASM.instantiateModule(): start function failed");
+ }
+ }
+ return instance.js_object;
}
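For context, the LookupFunction helper called above centralizes the FFI lookup that the deleted inline code used to perform. A sketch of its likely shape, reconstructed from the removed lines (the signature is inferred from the call site; the real helper is defined earlier in wasm-module.cc and may differ in detail):

    // Sketch only, reconstructed from the deleted inline FFI-lookup logic.
    MaybeHandle<JSFunction> LookupFunction(ErrorThrower& thrower,
                                           Handle<JSObject> ffi, uint32_t index,
                                           Handle<String> name,
                                           const char* cstr) {
      if (ffi.is_null()) {
        thrower.Error("FFI table is not an object.");
        return MaybeHandle<JSFunction>();
      }
      MaybeHandle<Object> result = Object::GetProperty(ffi, name);
      if (result.is_null()) {
        thrower.Error("FFI function #%d:%s not found.", index, cstr);
        return MaybeHandle<JSFunction>();
      }
      Handle<Object> obj = result.ToHandleChecked();
      if (!obj->IsJSFunction()) {
        thrower.Error("FFI function #%d:%s is not a JSFunction.", index, cstr);
        return MaybeHandle<JSFunction>();
      }
      return Handle<JSFunction>::cast(obj);
    }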
Handle<Code> ModuleEnv::GetFunctionCode(uint32_t index) {
DCHECK(IsValidFunction(index));
if (linker) return linker->GetFunctionCode(index);
- if (function_code) return function_code->at(index);
+ if (instance && instance->function_code) {
+ return instance->function_code->at(index);
+ }
return Handle<Code>::null();
}
+Handle<Code> ModuleEnv::GetImportCode(uint32_t index) {
+ DCHECK(IsValidImport(index));
+ if (instance && instance->import_code) {
+ return instance->import_code->at(index);
+ }
+ return Handle<Code>::null();
+}
compiler::CallDescriptor* ModuleEnv::GetCallDescriptor(Zone* zone,
uint32_t index) {
@@ -436,43 +529,45 @@ int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module) {
ErrorThrower thrower(isolate, "CompileAndRunWasmModule");
+ WasmModuleInstance instance(module);
- // Allocate temporary linear memory and globals.
- size_t mem_size = 1 << module->min_mem_size_log2;
- size_t globals_size = AllocateGlobalsOffsets(module->globals);
+ // Allocate and initialize the linear memory.
+ if (!AllocateMemory(&thrower, isolate, &instance)) {
+ return -1;
+ }
+ LoadDataSegments(module, instance.mem_start, instance.mem_size);
- base::SmartArrayPointer<byte> mem_addr(new byte[mem_size]);
- base::SmartArrayPointer<byte> globals_addr(new byte[globals_size]);
+ // Allocate the globals area if necessary.
+ if (!AllocateGlobals(&thrower, isolate, &instance)) {
+ return -1;
+ }
- memset(mem_addr.get(), 0, mem_size);
- memset(globals_addr.get(), 0, globals_size);
+ // Build the function table.
+ instance.function_table = BuildFunctionTable(isolate, module);
// Create module environment.
WasmLinker linker(isolate, module->functions->size());
ModuleEnv module_env;
module_env.module = module;
- module_env.mem_start = reinterpret_cast<uintptr_t>(mem_addr.get());
- module_env.mem_end = reinterpret_cast<uintptr_t>(mem_addr.get()) + mem_size;
- module_env.globals_area = reinterpret_cast<uintptr_t>(globals_addr.get());
+ module_env.instance = &instance;
module_env.linker = &linker;
- module_env.function_code = nullptr;
- module_env.function_table = BuildFunctionTable(isolate, module);
module_env.asm_js = false;
- // Load data segments.
- // TODO(titzer): throw instead of crashing if segments don't fit in memory?
- LoadDataSegments(module, mem_addr.get(), mem_size);
-
// Compile all functions.
Handle<Code> main_code = Handle<Code>::null(); // record last code.
- int index = 0;
+ uint32_t index = 0;
+ int main_index = 0;
for (const WasmFunction& func : *module->functions) {
+ DCHECK_EQ(index, func.func_index);
if (!func.external) {
// Compile the function and install it in the code table.
- Handle<Code> code = compiler::CompileWasmFunction(
- thrower, isolate, &module_env, func, index);
+ Handle<Code> code =
+ compiler::CompileWasmFunction(thrower, isolate, &module_env, func);
if (!code.is_null()) {
- if (func.exported) main_code = code;
+ if (func.exported) {
+ main_code = code;
+ main_index = index;
+ }
linker.Finish(index, code);
}
if (thrower.error()) return -1;
@@ -480,30 +575,37 @@ int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module) {
index++;
}
- if (!main_code.is_null()) {
- linker.Link(module_env.function_table, module->function_table);
-#if USE_SIMULATOR && V8_TARGET_ARCH_ARM64
- // Run the main code on arm64 simulator.
- Simulator* simulator = Simulator::current(isolate);
- Simulator::CallArgument args[] = {Simulator::CallArgument(0),
- Simulator::CallArgument::End()};
- return static_cast<int32_t>(simulator->CallInt64(main_code->entry(), args));
-#elif USE_SIMULATOR
- // Run the main code on simulator.
- Simulator* simulator = Simulator::current(isolate);
- return static_cast<int32_t>(
- simulator->Call(main_code->entry(), 4, 0, 0, 0, 0));
-#else
- // Run the main code as raw machine code.
- int32_t (*raw_func)() = reinterpret_cast<int32_t (*)()>(
- reinterpret_cast<uintptr_t>(main_code->entry()));
- return raw_func();
-#endif
- } else {
- // No main code was found.
- isolate->Throw(*isolate->factory()->NewStringFromStaticChars(
- "WASM.compileRun() failed: no valid main code produced."));
+ if (main_code.is_null()) {
+ thrower.Error("WASM.compileRun() failed: no main code found");
+ return -1;
+ }
+
+ linker.Link(instance.function_table, instance.module->function_table);
+
+ // Wrap the main code so it can be called as a JS function.
+ Handle<String> name = isolate->factory()->NewStringFromStaticChars("main");
+ Handle<JSObject> module_object = Handle<JSObject>(0, isolate);
+ Handle<JSFunction> jsfunc = compiler::CompileJSToWasmWrapper(
+ isolate, &module_env, name, main_code, module_object, main_index);
+
+ // Call the JS function.
+ Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
+ MaybeHandle<Object> retval =
+ Execution::Call(isolate, jsfunc, undefined, 0, nullptr);
+
+ // The result should be a number.
+ if (retval.is_null()) {
+ thrower.Error("WASM.compileRun() failed: Invocation was null");
+ return -1;
+ }
+ Handle<Object> result = retval.ToHandleChecked();
+ if (result->IsSmi()) {
+ return Smi::cast(*result)->value();
+ }
+ if (result->IsHeapNumber()) {
+ return static_cast<int32_t>(HeapNumber::cast(*result)->value());
}
+ thrower.Error("WASM.compileRun() failed: Return value should be number");
return -1;
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 5e2ba58a44..5f5777cebe 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -30,11 +30,13 @@ enum WasmSectionDeclCode {
kDeclGlobals = 0x03,
kDeclDataSegments = 0x04,
kDeclFunctionTable = 0x05,
- kDeclWLL = 0x11,
kDeclEnd = 0x06,
+ kDeclStartFunction = 0x07,
+ kDeclImportTable = 0x08,
+ kDeclWLL = 0x11,
};
-static const int kMaxModuleSectionCode = 6;
+static const int kMaxModuleSectionCode = 0x11;
enum WasmFunctionDeclBit {
kDeclFunctionName = 0x01,
@@ -48,22 +50,29 @@ static const size_t kDeclMemorySize = 3;
static const size_t kDeclGlobalSize = 6;
static const size_t kDeclDataSegmentSize = 13;
-// Static representation of a wasm function.
+// Static representation of a WASM function.
struct WasmFunction {
FunctionSig* sig; // signature of the function.
+ uint32_t func_index; // index into the function table.
uint16_t sig_index; // index into the signature table.
uint32_t name_offset; // offset in the module bytes of the name, if any.
uint32_t code_start_offset; // offset in the module bytes of code start.
uint32_t code_end_offset; // offset in the module bytes of code end.
- uint16_t local_int32_count; // number of int32 local variables.
- uint16_t local_int64_count; // number of int64 local variables.
- uint16_t local_float32_count; // number of float32 local variables.
- uint16_t local_float64_count; // number of float64 local variables.
+ uint16_t local_i32_count; // number of i32 local variables.
+ uint16_t local_i64_count; // number of i64 local variables.
+ uint16_t local_f32_count; // number of f32 local variables.
+ uint16_t local_f64_count; // number of f64 local variables.
bool exported; // true if this function is exported.
bool external; // true if this function is externally supplied.
};
-struct ModuleEnv; // forward declaration of decoder interface.
+// Static representation of an imported WASM function.
+struct WasmImport {
+ FunctionSig* sig; // signature of the function.
+ uint16_t sig_index; // index into the signature table.
+ uint32_t module_name_offset; // offset in module bytes of the module name.
+ uint32_t function_name_offset; // offset in module bytes of the import name.
+};
// Static representation of a wasm global variable.
struct WasmGlobal {
@@ -93,25 +102,27 @@ struct WasmModule {
uint8_t max_mem_size_log2; // maximum size of the memory (log base 2).
bool mem_export; // true if the memory is exported.
bool mem_external; // true if the memory is external.
+ int start_function_index; // start function, if any.
std::vector<WasmGlobal>* globals; // globals in this module.
std::vector<FunctionSig*>* signatures; // signatures in this module.
std::vector<WasmFunction>* functions; // functions in this module.
std::vector<WasmDataSegment>* data_segments; // data segments in this module.
std::vector<uint16_t>* function_table; // function table.
+ std::vector<WasmImport>* import_table; // import table.
WasmModule();
~WasmModule();
// Get a pointer to a string stored in the module bytes representing a name.
- const char* GetName(uint32_t offset) {
- CHECK(BoundsCheck(offset, offset + 1));
+ const char* GetName(uint32_t offset) const {
if (offset == 0) return "<?>"; // no name.
+ CHECK(BoundsCheck(offset, offset + 1));
return reinterpret_cast<const char*>(module_start + offset);
}
// Checks the given offset range is contained within the module bytes.
- bool BoundsCheck(uint32_t start, uint32_t end) {
+ bool BoundsCheck(uint32_t start, uint32_t end) const {
size_t size = module_end - module_start;
return start < size && end < size;
}
@@ -121,22 +132,42 @@ struct WasmModule {
Handle<JSArrayBuffer> memory);
};
+// An instantiated WASM module, including memory, function table, etc.
+struct WasmModuleInstance {
+ WasmModule* module; // static representation of the module.
+ // -- Heap allocated --------------------------------------------------------
+ Handle<JSObject> js_object; // JavaScript module object.
+ Handle<Context> context; // JavaScript native context.
+ Handle<JSArrayBuffer> mem_buffer; // Handle to array buffer of memory.
+ Handle<JSArrayBuffer> globals_buffer; // Handle to array buffer of globals.
+ Handle<FixedArray> function_table; // indirect function table.
+ std::vector<Handle<Code>>* function_code; // code objects for each function.
+ std::vector<Handle<Code>>* import_code; // code objects for each import.
+ // -- raw memory ------------------------------------------------------------
+ byte* mem_start; // start of linear memory.
+ size_t mem_size; // size of the linear memory.
+ // -- raw globals -----------------------------------------------------------
+ byte* globals_start; // start of the globals area.
+ size_t globals_size; // size of the globals area.
+
+ explicit WasmModuleInstance(WasmModule* m)
+ : module(m),
+ function_code(nullptr),
+ mem_start(nullptr),
+ mem_size(0),
+ globals_start(nullptr),
+ globals_size(0) {}
+};
+
// forward declaration.
class WasmLinker;
// Interface provided to the decoder/graph builder which contains only
// minimal information about the globals, functions, and function tables.
struct ModuleEnv {
- uintptr_t globals_area; // address of the globals area.
- uintptr_t mem_start; // address of the start of linear memory.
- uintptr_t mem_end; // address of the end of linear memory.
-
WasmModule* module;
+ WasmModuleInstance* instance;
WasmLinker* linker;
- std::vector<Handle<Code>>* function_code;
- Handle<FixedArray> function_table;
- Handle<JSArrayBuffer> memory;
- Handle<Context> context;
bool asm_js; // true if the module originated from asm.js.
bool IsValidGlobal(uint32_t index) {
@@ -148,6 +179,9 @@ struct ModuleEnv {
bool IsValidSignature(uint32_t index) {
return module && index < module->signatures->size();
}
+ bool IsValidImport(uint32_t index) {
+ return module && index < module->import_table->size();
+ }
MachineType GetGlobalType(uint32_t index) {
DCHECK(IsValidGlobal(index));
return module->globals->at(index).type;
@@ -156,23 +190,41 @@ struct ModuleEnv {
DCHECK(IsValidFunction(index));
return module->functions->at(index).sig;
}
+ FunctionSig* GetImportSignature(uint32_t index) {
+ DCHECK(IsValidImport(index));
+ return module->import_table->at(index).sig;
+ }
FunctionSig* GetSignature(uint32_t index) {
DCHECK(IsValidSignature(index));
return module->signatures->at(index);
}
size_t FunctionTableSize() {
- return module ? module->function_table->size() : 0;
+ return module && module->function_table ? module->function_table->size()
+ : 0;
}
Handle<Code> GetFunctionCode(uint32_t index);
+ Handle<Code> GetImportCode(uint32_t index);
Handle<FixedArray> GetFunctionTable();
- compiler::CallDescriptor* GetWasmCallDescriptor(Zone* zone, FunctionSig* sig);
+ static compiler::CallDescriptor* GetWasmCallDescriptor(Zone* zone,
+ FunctionSig* sig);
+ static compiler::CallDescriptor* GetI32WasmCallDescriptor(
+ Zone* zone, compiler::CallDescriptor* descriptor);
compiler::CallDescriptor* GetCallDescriptor(Zone* zone, uint32_t index);
};
+// A helper for printing out the names of functions.
+struct WasmFunctionName {
+ const WasmFunction* function_;
+ const WasmModule* module_;
+ WasmFunctionName(const WasmFunction* function, const ModuleEnv* menv)
+ : function_(function), module_(menv ? menv->module : nullptr) {}
+};
+
std::ostream& operator<<(std::ostream& os, const WasmModule& module);
std::ostream& operator<<(std::ostream& os, const WasmFunction& function);
+std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
typedef Result<WasmModule*> ModuleResult;
typedef Result<WasmFunction*> FunctionResult;
@@ -185,6 +237,7 @@ int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start,
// For testing. Decode, verify, and run the last exported function in the
// given decoded module.
int32_t CompileAndRunWasmModule(Isolate* isolate, WasmModule* module);
+
} // namespace wasm
} // namespace internal
} // namespace v8
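A sketch of how the WasmFunctionName helper above is meant to be used: construct it inline in a stream expression so the overloaded operator<< can print a function's name with module context (hypothetical call site, not from the patch):

    // Hypothetical call site for the operator<< overload declared above.
    void ReportCompileFailure(std::ostream& os, const WasmFunction* func,
                              const ModuleEnv* env) {
      os << "compilation of " << WasmFunctionName(func, env) << " failed";
    }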
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 25eef034d7..a609e03261 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -25,6 +25,20 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
}
+std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
+ if (sig.return_count() == 0) os << "v";
+ for (size_t i = 0; i < sig.return_count(); i++) {
+ os << WasmOpcodes::ShortNameOf(sig.GetReturn(i));
+ }
+ os << "_";
+ if (sig.parameter_count() == 0) os << "v";
+ for (size_t i = 0; i < sig.parameter_count(); i++) {
+ os << WasmOpcodes::ShortNameOf(sig.GetParam(i));
+ }
+ return os;
+}
+
+
#define DECLARE_SIG_ENUM(name, ...) kSigEnum_##name,
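The operator<< just added prints a signature as returns, an underscore, then parameters, with "v" standing in for an empty list. A standalone sketch of the same formatting rule (plain C++, with hypothetical short names such as 'i' for i32 and 'd' for f64):

    #include <iostream>
    #include <string>
    #include <vector>

    // Mirrors the FunctionSig printing rule: returns, '_', params, "v" if empty.
    std::string FormatSig(const std::vector<char>& rets,
                          const std::vector<char>& params) {
      std::string out;
      if (rets.empty()) out += 'v';
      for (char r : rets) out += r;
      out += '_';
      if (params.empty()) out += 'v';
      for (char p : params) out += p;
      return out;
    }

    int main() {
      std::cout << FormatSig({'i'}, {'i', 'd'}) << "\n";  // prints "i_id"
      std::cout << FormatSig({}, {}) << "\n";             // prints "v_v"
    }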
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index ae2843a6c1..7cb9c00449 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -66,6 +66,9 @@ struct MemoryAccess {
};
typedef Signature<LocalType> FunctionSig;
+std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
+
+// TODO(titzer): Renumber all the opcodes to fill in holes.
// Control expressions and blocks.
#define FOREACH_CONTROL_OPCODE(V) \
@@ -80,7 +83,6 @@ typedef Signature<LocalType> FunctionSig;
V(TableSwitch, 0x08, _) \
V(Return, 0x14, _) \
V(Unreachable, 0x15, _)
-// TODO(titzer): numbering
// Constants, locals, globals, and calls.
#define FOREACH_MISC_OPCODE(V) \
@@ -94,7 +96,8 @@ typedef Signature<LocalType> FunctionSig;
V(LoadGlobal, 0x10, _) \
V(StoreGlobal, 0x11, _) \
V(CallFunction, 0x12, _) \
- V(CallIndirect, 0x13, _)
+ V(CallIndirect, 0x13, _) \
+ V(CallImport, 0x1F, _)
// Load memory expressions.
#define FOREACH_LOAD_MEM_OPCODE(V) \
@@ -398,7 +401,6 @@ class WasmOpcodes {
}
}
- // TODO(titzer): remove this method
static WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
if (type == MachineType::Int8()) {
return store ? kExprI32StoreMem8 : kExprI32LoadMem8S;
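The FOREACH_*_OPCODE lists above follow V8's X-macro pattern: each list invokes V(Name, opcode, signature) once per entry, so a single table drives enum declarations, name lookup, and dispatch switches. A minimal standalone sketch of the pattern (demo names only, not the real tables):

    #include <cstdint>
    #include <cstdio>

    // One list, many expansions: mirrors FOREACH_MISC_OPCODE(V) above.
    #define FOREACH_DEMO_OPCODE(V) \
      V(CallFunction, 0x12)        \
      V(CallIndirect, 0x13)        \
      V(CallImport, 0x1F)

    enum DemoOpcode : uint8_t {
    #define DECL_ENUM(name, opcode) kExpr##name = opcode,
      FOREACH_DEMO_OPCODE(DECL_ENUM)
    #undef DECL_ENUM
    };

    const char* DemoOpcodeName(uint8_t opcode) {
      switch (opcode) {
    #define DECL_CASE(name, code) \
      case code:                  \
        return #name;
        FOREACH_DEMO_OPCODE(DECL_CASE)
    #undef DECL_CASE
        default:
          return "<unknown>";
      }
    }

    int main() { std::printf("%s\n", DemoOpcodeName(0x1F)); }  // CallImport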
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index bfec51c462..9a0fc7c8f4 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -456,10 +456,8 @@ void RelocInfo::set_target_cell(Cell* cell,
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ cell);
}
}
@@ -478,23 +476,6 @@ void RelocInfo::WipeOut() {
}
-bool RelocInfo::IsPatchedReturnSequence() {
- // The recognized call sequence is:
- // movq(kScratchRegister, address); call(kScratchRegister);
- // It only needs to be distinguished from a return sequence
- // movq(rsp, rbp); pop(rbp); ret(n); int3 *6
- // The 11th byte is int3 (0xCC) in the return sequence and
- // REX.WB (0x48+register bit) for the call sequence.
- return pc_[Assembler::kMoveAddressIntoScratchRegisterInstructionLength] !=
- 0xCC;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- return !Assembler::IsNop(pc());
-}
-
-
Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
DCHECK(*pc_ == kCallOpcode);
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 9626efc4a7..3cf3398e87 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -3179,6 +3179,17 @@ void Assembler::cvtlsi2sd(XMMRegister dst, Register src) {
}
+void Assembler::cvtlsi2ss(XMMRegister dst, const Operand& src) {
+ DCHECK(!IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x2A);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cvtlsi2ss(XMMRegister dst, Register src) {
EnsureSpace ensure_space(this);
emit(0xF3);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 799fa6fe9d..2847ff2569 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -248,6 +248,8 @@ const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
typedef DoubleRegister XMMRegister;
+typedef DoubleRegister Simd128Register;
+
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
@@ -1025,6 +1027,7 @@ class Assembler : public AssemblerBase {
void cvttss2si(Register dst, const Operand& src);
void cvttss2si(Register dst, XMMRegister src);
+ void cvtlsi2ss(XMMRegister dst, const Operand& src);
void cvtlsi2ss(XMMRegister dst, Register src);
void andps(XMMRegister dst, XMMRegister src);
@@ -1370,6 +1373,13 @@ class Assembler : public AssemblerBase {
void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vsd(0x2a, dst, src1, src2, kF2, k0F, kW0);
}
+ void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
+ XMMRegister isrc2 = {src2.code()};
+ vsd(0x2a, dst, src1, isrc2, kF3, k0F, kW0);
+ }
+ void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x2a, dst, src1, src2, kF3, k0F, kW0);
+ }
void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
XMMRegister isrc2 = {src2.code()};
vsd(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
@@ -1384,6 +1394,14 @@ class Assembler : public AssemblerBase {
void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vsd(0x2a, dst, src1, src2, kF2, k0F, kW1);
}
+ void vcvttss2si(Register dst, XMMRegister src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2c, idst, xmm0, src, kF3, k0F, kW0);
+ }
+ void vcvttss2si(Register dst, const Operand& src) {
+ XMMRegister idst = {dst.code()};
+ vsd(0x2c, idst, xmm0, src, kF3, k0F, kW0);
+ }
void vcvttsd2si(Register dst, XMMRegister src) {
XMMRegister idst = {dst.code()};
vsd(0x2c, idst, xmm0, src, kF2, k0F, kW0);
@@ -1660,7 +1678,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const SourcePosition position);
+ void RecordDeoptReason(const int reason, int raw_position);
void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
ConstantPoolEntry::Access access,
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index cb092f2f2d..6c4419e084 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -60,27 +60,6 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
}
-static void CallRuntimePassFunction(
- MacroAssembler* masm, Runtime::FunctionId function_id) {
- // ----------- S t a t e -------------
- // -- rdx : new target (preserved for callee)
- // -- rdi : target function (preserved for callee)
- // -----------------------------------
-
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the target function and the new target.
- __ Push(rdi);
- __ Push(rdx);
- // Function is also the parameter to the runtime call.
- __ Push(rdi);
-
- __ CallRuntime(function_id, 1);
- // Restore target function and new target.
- __ Pop(rdx);
- __ Pop(rdi);
-}
-
-
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ movp(kScratchRegister,
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -90,10 +69,35 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ jmp(kScratchRegister);
}
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- rax : argument count (preserved for callee)
+ // -- rdx : new target (preserved for callee)
+ // -- rdi : target function (preserved for callee)
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push the number of arguments to the callee.
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax);
+ // Push a copy of the target function and the new target.
+ __ Push(rdi);
+ __ Push(rdx);
+ // Function is also the parameter to the runtime call.
+ __ Push(rdi);
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
- __ leap(rax, FieldOperand(rax, Code::kHeaderSize));
- __ jmp(rax);
+ __ CallRuntime(function_id, 1);
+ __ movp(rbx, rax);
+
+ // Restore target function and new target.
+ __ Pop(rdx);
+ __ Pop(rdi);
+ __ Pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
+ __ leap(rbx, FieldOperand(rbx, Code::kHeaderSize));
+ __ jmp(rbx);
}
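The argument count is Smi-tagged before being pushed above because every slot in an INTERNAL frame must hold a tagged value the GC can safely scan; a raw int32 in a stack slot could be mistaken for a heap pointer. On x64, V8 stores the 32-bit Smi payload in the upper half of the word, as the tag/untag round-trip below sketches:

    #include <cstdint>

    // x64 Smi scheme sketch: payload in the upper 32 bits; a low tag bit of 0
    // marks the word as a Smi rather than a heap pointer.
    inline uint64_t ToSmi(int32_t value) {
      return static_cast<uint64_t>(static_cast<int64_t>(value)) << 32;
    }
    inline int32_t FromSmi(uint64_t smi) {
      // Arithmetic shift, as V8 itself relies on for Smi untagging.
      return static_cast<int32_t>(static_cast<int64_t>(smi) >> 32);
    }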
@@ -107,8 +111,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
@@ -117,7 +120,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool create_implicit_receiver) {
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- rax: number of arguments
// -- rdi: constructor function
@@ -136,152 +140,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ Push(rcx);
if (create_implicit_receiver) {
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- // Verify that the new target is a JSFunction.
- __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- // rdx: new target
- __ movp(rax,
- FieldOperand(rdx, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- DCHECK(kSmiTag == 0);
- __ JumpIfSmi(rax, &rt_call);
- // rdi: constructor
- // rax: initial map (if proven valid below)
- __ CmpObjectType(rax, MAP_TYPE, rbx);
- __ j(not_equal, &rt_call);
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ cmpp(rdi, FieldOperand(rax, Map::kConstructorOrBackPointerOffset));
- __ j(not_equal, &rt_call);
-
- // Now allocate the JSObject on the heap.
- __ movzxbp(r9, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ shlp(r9, Immediate(kPointerSizeLog2));
- // r9: size of new object
- __ Allocate(r9, rbx, r9, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
- // Allocated the JSObject, now initialize the fields.
- // rdi: constructor
- // rdx: new target
- // rax: initial map
- // rbx: JSObject (not HeapObject tagged - the actual address).
- // r9: start of next object
- __ movp(Operand(rbx, JSObject::kMapOffset), rax);
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
- __ movp(Operand(rbx, JSObject::kElementsOffset), rcx);
- __ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ orp(rbx, Immediate(kHeapObjectTag));
-
- // Fill all the in-object properties with the appropriate filler.
- // rbx: JSObject (tagged)
- // rcx: First in-object property of JSObject (not tagged)
- __ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // The code below relies on these assumptions.
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- // Check if slack tracking is enabled.
- __ movl(rsi, FieldOperand(rax, Map::kBitField3Offset));
- __ shrl(rsi, Immediate(Map::ConstructionCounter::kShift));
- __ j(zero, &no_inobject_slack_tracking); // Map::kNoSlackTracking
- __ Push(rsi); // Save allocation count value.
- // Decrease generous allocation count.
- __ subl(FieldOperand(rax, Map::kBitField3Offset),
- Immediate(1 << Map::ConstructionCounter::kShift));
-
- // Allocate object with a slack.
- __ movzxbp(rsi, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ negp(rsi);
- __ leap(rsi, Operand(r9, rsi, times_pointer_size, 0));
- // rsi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmpp(rcx, rsi);
- __ Assert(less_equal,
- kUnexpectedNumberOfPreAllocatedPropertyFields);
- }
- __ InitializeFieldsWithFiller(rcx, rsi, r11);
-
- // To allow truncation fill the remaining fields with one pointer
- // filler map.
- __ LoadRoot(r11, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(rcx, r9, r11);
-
- __ Pop(rsi); // Restore allocation count value before decreasing.
- __ cmpl(rsi, Immediate(Map::kSlackTrackingCounterEnd));
- __ j(not_equal, &allocated);
-
- // Push the constructor, new_target and the object to the stack,
- // and then the initial map as an argument to the runtime call.
- __ Push(rdi);
- __ Push(rdx);
- __ Push(rbx);
-
- __ Push(rax); // initial map
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
-
- __ Pop(rbx);
- __ Pop(rdx);
- __ Pop(rdi);
-
- // Continue with JSObject being successfully allocated.
- // rdi: constructor
- // rdx: new target
- // rbx: JSObject (tagged)
- __ jmp(&allocated);
-
- __ bind(&no_inobject_slack_tracking);
- }
-
- __ InitializeFieldsWithFiller(rcx, r9, r11);
-
- // Continue with JSObject being successfully allocated
- // rdi: constructor
- // rdx: new target
- // rbx: JSObject (tagged)
- __ jmp(&allocated);
- }
-
- // Allocate the new receiver object using the runtime call.
- // rdi: constructor
- // rdx: new target
- __ bind(&rt_call);
-
- // Must restore rsi (context) before calling runtime.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-
- // Push the constructor and new_target twice, second pair as arguments
- // to the runtime call.
+ // Allocate the new receiver object.
__ Push(rdi);
__ Push(rdx);
- __ Push(rdi); // constructor function
- __ Push(rdx); // new target
- __ CallRuntime(Runtime::kNewObject);
- __ movp(rbx, rax); // store result in rbx
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ movp(rbx, rax);
__ Pop(rdx);
__ Pop(rdi);
- // Receiver for constructor call allocated.
- // rdi: constructor
- // rdx: new target
- // rbx: newly allocated object
- __ bind(&allocated);
+ // ----------- S t a t e -------------
+ // -- rdi: constructor function
+ // -- rbx: newly allocated object
+ // -- rdx: new target
+ // -----------------------------------
// Retrieve smi-tagged arguments count from the stack.
- __ movp(rax, Operand(rsp, 0));
- __ SmiToInteger32(rax, rax);
+ __ SmiToInteger32(rax, Operand(rsp, 0 * kPointerSize));
}
if (create_implicit_receiver) {
@@ -357,6 +232,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Leave construct frame.
}
+ // ES6 9.2.2. Step 13+
+  // Throw if the result is a Smi, since a Smi can be neither undefined nor
+  // an Object.
+ if (check_derived_construct) {
+ Label dont_throw;
+ __ JumpIfNotSmi(rax, &dont_throw);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+ }
+ __ bind(&dont_throw);
+ }
+
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
@@ -371,17 +259,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, true);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, true);
}
@@ -584,10 +478,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// o rbp: the caller's frame pointer
// o rsp: stack pointer (pointing to return address)
//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-x64.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -599,14 +491,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(rdi); // Callee's JS function.
__ Push(rdx); // Callee's new target.
- // Push zero for bytecode array offset.
- __ Push(Immediate(0));
-
// Get the bytecode array from the function object and load the pointer to the
// first entry into edi (InterpreterBytecodeRegister).
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+
+ Label load_debug_bytecode_array, bytecode_array_loaded;
+ DCHECK_EQ(Smi::FromInt(0), DebugInfo::uninitialized());
+ __ cmpp(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
+ Immediate(0));
+ __ j(not_equal, &load_debug_bytecode_array);
__ movp(kInterpreterBytecodeArrayRegister,
FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
+ __ bind(&bytecode_array_loaded);
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -616,6 +512,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Push bytecode array.
+ __ Push(kInterpreterBytecodeArrayRegister);
+ // Push zero for bytecode array offset.
+ __ Push(Immediate(0));
+
// Allocate the local and temporary register file on the stack.
{
// Load frame size from the BytecodeArray object.
@@ -647,22 +548,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
- // - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Allow simulator stop operations if FLAG_stop_at is set.
// - Code aging of the BytecodeArray object.
- // Perform stack guard check.
- {
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
- __ Push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard);
- __ Pop(kInterpreterBytecodeArrayRegister);
- __ bind(&ok);
- }
-
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
@@ -671,10 +559,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ movp(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ addp(kInterpreterDispatchTableRegister,
- Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Move(
+ kInterpreterDispatchTableRegister,
+ ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
// Dispatch to the first bytecode handler for the function.
__ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
@@ -685,6 +572,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// and header removal.
__ addp(rbx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(rbx);
+
+ // Even though the first bytecode handler was called, we will never return.
+ __ Abort(kUnexpectedReturnFromBytecodeHandler);
+
+ // Load debug copy of the bytecode array.
+ __ bind(&load_debug_bytecode_array);
+ Register debug_info = kInterpreterBytecodeArrayRegister;
+ __ movp(debug_info, FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset));
+ __ movp(kInterpreterBytecodeArrayRegister,
+ FieldOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ __ jmp(&bytecode_array_loaded);
}
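The trampoline above now prefers a debug copy of the bytecode array when a DebugInfo object is attached to the SharedFunctionInfo. In C++ terms the selection amounts to the sketch below; the accessor names are assumptions mirroring the field offsets the assembly reads (kDebugInfoOffset, kFunctionDataOffset, kAbstractCodeIndex), not verified API:

    // Sketch only; accessor names are assumed to match the fields the
    // assembly loads above.
    BytecodeArray* SelectBytecodeArray(SharedFunctionInfo* shared) {
      Object* debug_info = shared->debug_info();  // kDebugInfoOffset
      if (debug_info == Smi::FromInt(0)) {        // DebugInfo::uninitialized()
        return BytecodeArray::cast(shared->function_data());
      }
      return BytecodeArray::cast(DebugInfo::cast(debug_info)->abstract_code());
    }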
@@ -742,7 +640,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rbx : the address of the first argument to be pushed. Subsequent
@@ -758,7 +657,9 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
// Call the target.
__ PushReturnAddressFrom(kScratchRegister); // Re-push return address.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
}
@@ -790,52 +691,25 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
}
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
-
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
-
- __ CallRuntime(Runtime::kNotifyDeoptimized);
-
- __ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
- // Tear down internal frame.
- }
-
- // Drop state (we don't use these for interpreter deopts) and push PC at top
- // of stack (to simulate initial call to bytecode handler in interpreter entry
- // trampoline).
- __ Pop(rbx);
- __ Drop(1);
- __ Push(rbx);
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Initialize register file register and dispatch table register.
__ movp(kInterpreterRegisterFileRegister, rbp);
__ addp(kInterpreterRegisterFileRegister,
Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
- __ LoadRoot(kInterpreterDispatchTableRegister,
- Heap::kInterpreterTableRootIndex);
- __ addp(kInterpreterDispatchTableRegister,
- Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Move(
+ kInterpreterDispatchTableRegister,
+ ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
// Get the context from the frame.
- // TODO(rmcilroy): Update interpreter frame to expect current context at the
- // context slot instead of the function context.
__ movp(kContextRegister,
Operand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
// Get the bytecode array pointer from the frame.
- __ movp(rbx,
- Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
- __ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
- __ movp(kInterpreterBytecodeArrayRegister,
- FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
+ __ movp(
+ kInterpreterBytecodeArrayRegister,
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -863,6 +737,32 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
}
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ // Tear down internal frame.
+ }
+
+  // Drop state (we don't use these for interpreter deopts), pop the
+  // accumulator value into the accumulator register, and push the PC at the
+  // top of the stack (to simulate the initial call to the bytecode handler
+  // in the interpreter entry trampoline).
+ __ Pop(rbx);
+ __ Drop(1);
+ __ Pop(kInterpreterAccumulatorRegister);
+ __ Push(rbx);
+
+ // Enter the bytecode dispatch.
+ Generate_EnterBytecodeDispatch(masm);
+}
+
+
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
@@ -877,22 +777,30 @@ void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the address of the interpreter entry trampoline as a return address.
+ // This simulates the initial call to bytecode handlers in interpreter entry
+ // trampoline. The return will never actually be taken, but our stack walker
+ // uses this address to determine whether a frame is interpreted.
+ __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline());
+
+ Generate_EnterBytecodeDispatch(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
@@ -1166,7 +1074,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
- // arguments to the receiver.
+ // arguments to the receiver. Since we did not create a frame for
+ // Function.prototype.apply() yet, we use a normal Call builtin here.
__ bind(&no_arguments);
{
__ Set(rax, 0);
@@ -1230,6 +1139,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
}
// 4. Call the callable.
+ // Since we did not create a frame for Function.prototype.call() yet,
+ // we use a normal Call builtin here.
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
@@ -1443,6 +1354,118 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+ // ----------- S t a t e -------------
+ // -- rax : number of arguments
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ Condition const cc = (kind == MathMaxMinKind::kMin) ? below : above;
+ Heap::RootListIndex const root_index =
+ (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+ : Heap::kMinusInfinityValueRootIndex;
+ XMMRegister const reg = (kind == MathMaxMinKind::kMin) ? xmm1 : xmm0;
+
+ // Load the accumulator with the default return value (either -Infinity or
+ // +Infinity), with the tagged value in rdx and the double value in xmm0.
+ __ LoadRoot(rdx, root_index);
+ __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ Move(rcx, rax);
+
+ Label done_loop, loop;
+ __ bind(&loop);
+ {
+ // Check if all parameters done.
+ __ testp(rcx, rcx);
+ __ j(zero, &done_loop);
+
+ // Load the next parameter tagged value into rbx.
+ __ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
+
+ // Load the double value of the parameter into xmm1, maybe converting the
+ // parameter to a number first using the ToNumberStub if necessary.
+ Label convert, convert_smi, convert_number, done_convert;
+ __ bind(&convert);
+ __ JumpIfSmi(rbx, &convert_smi);
+ __ JumpIfRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex, &convert_number);
+ {
+ // Parameter is not a Number, use the ToNumberStub to convert it.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Integer32ToSmi(rax, rax);
+ __ Integer32ToSmi(rcx, rcx);
+ __ Push(rax);
+ __ Push(rcx);
+ __ Push(rdx);
+ __ movp(rax, rbx);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ movp(rbx, rax);
+ __ Pop(rdx);
+ __ Pop(rcx);
+ __ Pop(rax);
+ {
+ // Restore the double accumulator value (xmm0).
+ Label restore_smi, done_restore;
+ __ JumpIfSmi(rdx, &restore_smi, Label::kNear);
+ __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ jmp(&done_restore, Label::kNear);
+ __ bind(&restore_smi);
+ __ SmiToDouble(xmm0, rdx);
+ __ bind(&done_restore);
+ }
+ __ SmiToInteger32(rcx, rcx);
+ __ SmiToInteger32(rax, rax);
+ }
+ __ jmp(&convert);
+ __ bind(&convert_number);
+ __ Movsd(xmm1, FieldOperand(rbx, HeapNumber::kValueOffset));
+ __ jmp(&done_convert, Label::kNear);
+ __ bind(&convert_smi);
+ __ SmiToDouble(xmm1, rbx);
+ __ bind(&done_convert);
+
+ // Perform the actual comparison with the accumulator value on the left hand
+ // side (xmm0) and the next parameter value on the right hand side (xmm1).
+ Label compare_equal, compare_nan, compare_swap, done_compare;
+ __ Ucomisd(xmm0, xmm1);
+ __ j(parity_even, &compare_nan, Label::kNear);
+ __ j(cc, &done_compare, Label::kNear);
+ __ j(equal, &compare_equal, Label::kNear);
+
+ // Result is on the right hand side.
+ __ bind(&compare_swap);
+ __ Movaps(xmm0, xmm1);
+ __ Move(rdx, rbx);
+ __ jmp(&done_compare, Label::kNear);
+
+ // At least one side is NaN, which means that the result will be NaN too.
+ __ bind(&compare_nan);
+ __ LoadRoot(rdx, Heap::kNanValueRootIndex);
+ __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ jmp(&done_compare, Label::kNear);
+
+ // Left and right hand side are equal, check for -0 vs. +0.
+ __ bind(&compare_equal);
+ __ Movmskpd(kScratchRegister, reg);
+ __ testl(kScratchRegister, Immediate(1));
+ __ j(not_zero, &compare_swap);
+
+ __ bind(&done_compare);
+ __ decp(rcx);
+ __ jmp(&loop);
+ }
+
+ __ bind(&done_loop);
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(rcx);
+ __ movp(rax, rdx);
+ __ Ret();
+}
+
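A note on the compare_equal path above: IEEE-754 comparison treats -0 and +0 as equal, yet Math.min must return -0 for min(+0, -0) (and Math.max must return +0), which is why the stub inspects the sign bit via Movmskpd. A standalone sketch of the same tie-breaking rule:

    #include <cmath>
    #include <cstdio>

    // Mirrors the stub's NaN and -0/+0 handling for Math.min.
    double MinWithZeroRule(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return NAN;  // compare_nan path
      if (a == b) return std::signbit(a) ? a : b;      // -0/+0 tie: prefer -0
      return a < b ? a : b;
    }

    int main() {
      std::printf("%g\n", MinWithZeroRule(0.0, -0.0));  // prints -0
    }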
+// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : number of arguments
@@ -1542,9 +1565,8 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(rbx); // the first argument
- __ Push(rdi); // constructor function
- __ Push(rdx); // new target
- __ CallRuntime(Runtime::kNewObject);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(FieldOperand(rax, JSValue::kValueOffset));
}
__ Ret();
@@ -1678,9 +1700,8 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(rbx); // the first argument
- __ Push(rdi); // constructor function
- __ Push(rdx); // new target
- __ CallRuntime(Runtime::kNewObject);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(FieldOperand(rax, JSValue::kValueOffset));
}
__ Ret();
@@ -1931,9 +1952,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Try to create the list from an arguments object.
__ bind(&create_arguments);
- __ movp(rbx,
- FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ movp(rbx, FieldOperand(rax, JSArgumentsObject::kLengthOffset));
__ movp(rcx, FieldOperand(rax, JSObject::kElementsOffset));
__ cmpp(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(not_equal, &create_runtime);
@@ -2010,10 +2029,136 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+namespace {
+
+// Drops the top JavaScript frame and any arguments adaptor frame below it
+// (if present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// | ...
+// | g()'s arg M
+// | ...
+// | g()'s arg 1
+// | g()'s receiver arg
+// | g()'s caller pc
+// ------- g()'s frame: -------
+// | g()'s caller fp <- fp
+// | g()'s context
+// | function pointer: g
+// | -------------------------
+// | ...
+// | ...
+// | f()'s arg N
+// | ...
+// | f()'s arg 1
+// | f()'s receiver arg
+// | f()'s caller pc <- sp
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Comment cmnt(masm, "[ PrepareForTailCall");
+
+ // Prepare for tail call only if the debugger is not active.
+ Label done;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(masm->isolate());
+ __ Move(kScratchRegister, debug_is_active);
+ __ cmpb(Operand(kScratchRegister, 0), Immediate(0));
+ __ j(not_equal, &done);
+
+ // Drop possible interpreter handler/stub frame.
+ {
+ Label no_interpreter_frame;
+ __ Cmp(Operand(rbp, StandardFrameConstants::kMarkerOffset),
+ Smi::FromInt(StackFrame::STUB));
+ __ j(not_equal, &no_interpreter_frame, Label::kNear);
+ __ movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&no_interpreter_frame);
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ Cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+
+ // Drop arguments adaptor frame and load arguments count.
+ __ movp(rbp, scratch2);
+ __ SmiToInteger32(
+ scratch1, Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ jmp(&formal_parameter_count_loaded, Label::kNear);
+
+ __ bind(&no_arguments_adaptor);
+  // Load the caller's formal parameter count.
+ __ movp(scratch1, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(scratch1,
+ FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(
+ scratch1, scratch1, SharedFunctionInfo::kFormalParameterCountOffset);
+
+ __ bind(&formal_parameter_count_loaded);
+
+ // Calculate the destination address where we will put the return address
+ // after we drop current frame.
+ Register new_sp_reg = scratch2;
+ __ subp(scratch1, args_reg);
+ __ leap(new_sp_reg, Operand(rbp, scratch1, times_pointer_size,
+ StandardFrameConstants::kCallerPCOffset));
+
+ if (FLAG_debug_code) {
+ __ cmpp(rsp, new_sp_reg);
+ __ Check(below, kStackAccessBelowStackPointer);
+ }
+
+ // Copy receiver and return address as well.
+ Register count_reg = scratch1;
+ __ leap(count_reg, Operand(args_reg, 2));
+
+  // Copy the return address from the caller's frame into the current frame's
+  // return-address slot so it is not clobbered, and let the following loop
+  // move it to the right place.
+ Register tmp_reg = scratch3;
+ __ movp(tmp_reg, Operand(rbp, StandardFrameConstants::kCallerPCOffset));
+ __ movp(Operand(rsp, 0), tmp_reg);
+
+ // Restore caller's frame pointer now as it could be overwritten by
+ // the copying loop.
+ __ movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+ Operand src(rsp, count_reg, times_pointer_size, 0);
+ Operand dst(new_sp_reg, count_reg, times_pointer_size, 0);
+
+  // Now copy the callee arguments to the caller frame, going backwards to
+  // avoid corrupting them (the source and destination areas could overlap).
+ Label loop, entry;
+ __ jmp(&entry, Label::kNear);
+ __ bind(&loop);
+ __ decp(count_reg);
+ __ movp(tmp_reg, src);
+ __ movp(dst, tmp_reg);
+ __ bind(&entry);
+ __ cmpp(count_reg, Immediate(0));
+ __ j(not_equal, &loop, Label::kNear);
+
+ // Leave current frame.
+ __ movp(rsp, new_sp_reg);
+
+ __ bind(&done);
+}
+} // namespace
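The copy loop above runs from the highest index down to zero because, once the current (and possibly adaptor) frame is dropped, the destination area in the caller frame can overlap the source arguments. A standalone sketch of the overlap-safe backward copy:

    #include <cstddef>
    #include <cstdint>

    // Backward copy as in PrepareForTailCall: count covers the arguments
    // plus receiver and return address; dst may overlap src from above.
    void CopyArgsBackward(intptr_t* dst, const intptr_t* src, size_t count) {
      for (size_t i = count; i-- > 0;) {
        dst[i] = src[i];
      }
    }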
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode) {
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the function to call (checked to be a JSFunction)
@@ -2109,6 +2254,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- rsi : the function context.
// -----------------------------------
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, rax, rbx, rcx, r8);
+ }
+
__ LoadSharedFunctionInfoSpecialField(
rbx, rdx, SharedFunctionInfo::kFormalParameterCountOffset);
ParameterCount actual(rax);
@@ -2213,13 +2362,18 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(rdi);
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, rax, rbx, rcx, r8);
+ }
+
// Patch the receiver to [[BoundThis]].
StackArgumentsAccessor args(rsp, rax);
__ movp(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
@@ -2238,7 +2392,8 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- rax : the number of arguments (not including the receiver)
// -- rdi : the target to call (can be any Object)
@@ -2249,14 +2404,25 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ JumpIfSmi(rdi, &non_callable);
__ bind(&non_smi);
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
+ __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
RelocInfo::CODE_TARGET);
__ CmpInstanceType(rcx, JS_BOUND_FUNCTION_TYPE);
- __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
+ __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
RelocInfo::CODE_TARGET);
+
+ // Check if target has a [[Call]] internal method.
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsCallable));
+ __ j(zero, &non_callable);
+
__ CmpInstanceType(rcx, JS_PROXY_TYPE);
__ j(not_equal, &non_function);
+ // 0. Prepare for tail call if necessary.
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, rax, rbx, rcx, r8);
+ }
+
// 1. Runtime fallback for Proxy [[Call]].
__ PopReturnAddressTo(kScratchRegister);
__ Push(rdi);
@@ -2271,16 +2437,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
__ bind(&non_function);
- // Check if target has a [[Call]] internal method.
- __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsCallable));
- __ j(zero, &non_callable, Label::kNear);
// Overwrite the original receiver with the (original) target.
__ movp(args.GetReceiverOperand(), rdi);
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, rdi);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2473,14 +2635,11 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
// Load the next prototype.
__ bind(&next_prototype);
- __ movp(receiver, FieldOperand(map, Map::kPrototypeOffset));
- // End if the prototype is null or not hidden.
- __ CompareRoot(receiver, Heap::kNullValueRootIndex);
- __ j(equal, receiver_check_failed);
- __ movp(map, FieldOperand(receiver, HeapObject::kMapOffset));
__ testq(FieldOperand(map, Map::kBitField3Offset),
- Immediate(Map::IsHiddenPrototype::kMask));
+ Immediate(Map::HasHiddenPrototype::kMask));
__ j(zero, receiver_check_failed);
+ __ movp(receiver, FieldOperand(map, Map::kPrototypeOffset));
+ __ movp(map, FieldOperand(receiver, HeapObject::kMapOffset));
// Iterate.
__ jmp(&prototype_loop_start, Label::kNear);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 1e14f83d9b..4b3d02841b 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -487,7 +487,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Cvtlsi2sd(double_exponent, exponent);
// Returning or bailing out.
- Counters* counters = isolate()->counters();
if (exponent_type() == ON_STACK) {
// The arguments are still on the stack.
__ bind(&call_runtime);
@@ -498,7 +497,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&done);
__ AllocateHeapNumber(rax, rcx, &call_runtime);
__ Movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
- __ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
} else {
__ bind(&call_runtime);
@@ -515,7 +513,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Movsd(double_result, xmm0);
__ bind(&done);
- __ IncrementCounter(counters->math_pow(), 1);
__ ret(0);
}
}
@@ -537,340 +534,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in rdx and the parameter count is in rax.
- DCHECK(rdx.is(ArgumentsAccessReadDescriptor::index()));
- DCHECK(rax.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(rdx, &slow);
-
- // Check if the calling frame is an arguments adaptor frame. We look at the
- // context offset, and if the frame is not a regular one, then we find a
- // Smi instead of the context. We can't use SmiCompare here, because that
- // only works for comparing two smis.
- Label adaptor;
- __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor);
-
- // Check index against formal parameters count limit passed in
- // through register rax. Use unsigned comparison to get negative
- // check for free.
- __ cmpp(rdx, rax);
- __ j(above_equal, &slow);
-
- // Read the argument from the stack and return it.
- __ SmiSub(rax, rax, rdx);
- __ SmiToInteger32(rax, rax);
- StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(rax, args.GetArgumentOperand(0));
- __ Ret();
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpp(rdx, rcx);
- __ j(above_equal, &slow);
-
- // Read the argument from the stack and return it.
- __ SmiSub(rcx, rcx, rdx);
- __ SmiToInteger32(rcx, rcx);
- StackArgumentsAccessor adaptor_args(rbx, rcx,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(rax, adaptor_args.GetArgumentOperand(0));
- __ Ret();
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ PopReturnAddressTo(rbx);
- __ Push(rdx);
- __ PushReturnAddressFrom(rbx);
- __ TailCallRuntime(Runtime::kArguments);
-}
-
-
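The removed reader above leans on the trick its comments call out twice: comparing the index against the count as unsigned values catches both a negative index and an out-of-range index with a single branch, because a negative two's-complement value reinterprets as a very large unsigned one. A standalone C++ illustration (not V8 code):

#include <cassert>
#include <cstdint>

// One unsigned comparison covers both "index < 0" and "index >= count".
bool InBounds(int64_t index, int64_t count) {
  return static_cast<uint64_t>(index) < static_cast<uint64_t>(count);
}

int main() {
  assert(InBounds(0, 3));
  assert(InBounds(2, 3));
  assert(!InBounds(3, 3));   // out of range high
  assert(!InBounds(-1, 3));  // negative caught "for free"
  return 0;
}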
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // rcx : number of parameters (tagged)
- // rdx : parameters pointer
- // rdi : function
- // rsp[0] : return address
- // Registers used over the whole function:
- // rbx: the mapped parameter count (untagged)
- // rax: the allocated object (tagged).
- Factory* factory = isolate()->factory();
-
- DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- __ SmiToInteger64(rbx, rcx);
- // rbx = parameter count (untagged)
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movp(r8, Operand(rax, StandardFrameConstants::kContextOffset));
- __ Cmp(r8, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
-
- // No adaptor, parameter count = argument count.
- __ movp(r11, rbx);
- __ jmp(&try_allocate, Label::kNear);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ SmiToInteger64(
- r11, Operand(rax, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ leap(rdx, Operand(rax, r11, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
-
- // rbx = parameter count (untagged)
- // r11 = argument count (untagged)
- // Compute the mapped parameter count = min(rbx, r11) in rbx.
- __ cmpp(rbx, r11);
- __ j(less_equal, &try_allocate, Label::kNear);
- __ movp(rbx, r11);
-
- __ bind(&try_allocate);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- Label no_parameter_map;
- __ xorp(r8, r8);
- __ testp(rbx, rbx);
- __ j(zero, &no_parameter_map, Label::kNear);
- __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
- __ bind(&no_parameter_map);
-
- // 2. Backing store.
- __ leap(r8, Operand(r8, r11, times_pointer_size, FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(r8, rax, r9, no_reg, &runtime, TAG_OBJECT);
-
- // rax = address of new object(s) (tagged)
- // r11 = argument count (untagged)
- // Get the arguments map from the current native context into r9.
- Label has_mapped_parameters, instantiate;
- __ movp(r9, NativeContextOperand());
- __ testp(rbx, rbx);
- __ j(not_zero, &has_mapped_parameters, Label::kNear);
-
- const int kIndex = Context::SLOPPY_ARGUMENTS_MAP_INDEX;
- __ movp(r9, Operand(r9, Context::SlotOffset(kIndex)));
- __ jmp(&instantiate, Label::kNear);
-
- const int kAliasedIndex = Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX;
- __ bind(&has_mapped_parameters);
- __ movp(r9, Operand(r9, Context::SlotOffset(kAliasedIndex)));
- __ bind(&instantiate);
-
- // rax = address of new object (tagged)
- // rbx = mapped parameter count (untagged)
- // r11 = argument count (untagged)
- // r9 = address of arguments map (tagged)
- __ movp(FieldOperand(rax, JSObject::kMapOffset), r9);
- __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
- __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
- __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ AssertNotSmi(rdi);
- __ movp(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- rdi);
-
- // Use the length (smi tagged) and set that as an in-object property too.
- // Note: r11 is tagged from here on.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ Integer32ToSmi(r11, r11);
- __ movp(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- r11);
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, rdi will point there, otherwise to the
- // backing store.
- __ leap(rdi, Operand(rax, Heap::kSloppyArgumentsObjectSize));
- __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
-
- // rax = address of new object (tagged)
- // rbx = mapped parameter count (untagged)
- // r11 = argument count (tagged)
- // rdi = address of parameter map or backing store (tagged)
-
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ testp(rbx, rbx);
- __ j(zero, &skip_parameter_map);
-
- __ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
- // rbx contains the untagged argument count. Add 2 and tag to write.
- __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ Integer64PlusConstantToSmi(r9, rbx, 2);
- __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
- __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
- __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
- __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameter thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
-
- // Load tagged parameter count into r9.
- __ Integer32ToSmi(r9, rbx);
- __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addp(r8, rcx);
- __ subp(r8, r9);
- __ movp(rcx, rdi);
- __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
- __ SmiToInteger64(r9, r9);
- // r9 = loop variable (untagged)
- // r8 = mapping index (tagged)
- // rcx = address of parameter map (tagged)
- // rdi = address of backing store (tagged)
- __ jmp(&parameters_test, Label::kNear);
-
- __ bind(&parameters_loop);
- __ subp(r9, Immediate(1));
- __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movp(FieldOperand(rcx, r9, times_pointer_size, kParameterMapHeaderSize),
- r8);
- __ movp(FieldOperand(rdi, r9, times_pointer_size, FixedArray::kHeaderSize),
- kScratchRegister);
- __ SmiAddConstant(r8, r8, Smi::FromInt(1));
- __ bind(&parameters_test);
- __ testp(r9, r9);
- __ j(not_zero, &parameters_loop, Label::kNear);
-
- __ bind(&skip_parameter_map);
-
- // r11 = argument count (tagged)
- // rdi = address of backing store (tagged)
- // Copy arguments header and remaining slots (if there are any).
- __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
- factory->fixed_array_map());
- __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r11);
-
- Label arguments_loop, arguments_test;
- __ movp(r8, rbx);
- // Untag r11 for the loop below.
- __ SmiToInteger64(r11, r11);
- __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
- __ subp(rdx, kScratchRegister);
- __ jmp(&arguments_test, Label::kNear);
-
- __ bind(&arguments_loop);
- __ subp(rdx, Immediate(kPointerSize));
- __ movp(r9, Operand(rdx, 0));
- __ movp(FieldOperand(rdi, r8,
- times_pointer_size,
- FixedArray::kHeaderSize),
- r9);
- __ addp(r8, Immediate(1));
-
- __ bind(&arguments_test);
- __ cmpp(r8, r11);
- __ j(less, &arguments_loop, Label::kNear);
-
- // Return.
- __ ret(0);
-
- // Do the runtime call to allocate the arguments object.
- // r11 = argument count (untagged)
- __ bind(&runtime);
- __ Integer32ToSmi(r11, r11);
- __ PopReturnAddressTo(rax);
- __ Push(rdi); // Push function.
- __ Push(rdx); // Push parameters pointer.
- __ Push(r11); // Push parameter count.
- __ PushReturnAddressFrom(rax);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // rcx : number of parameters (tagged)
- // rdx : parameters pointer
- // rdi : function
- // rsp[0] : return address
-
- DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movp(rax, Operand(rbx, StandardFrameConstants::kContextOffset));
- __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &runtime);
-
- // Patch the arguments.length and the parameters pointer.
- StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger64(rax, rcx);
- __ leap(rdx, Operand(rbx, rax, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
-
- __ bind(&runtime);
- __ PopReturnAddressTo(rax);
- __ Push(rdi); // Push function.
- __ Push(rdx); // Push parameters pointer.
- __ Push(rcx); // Push parameter count.
- __ PushReturnAddressFrom(rax);
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // rcx : number of parameters (tagged)
- // rdx : parameters pointer
- // rbx : rest parameter index (tagged)
- // rsp[0] : return address
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ movp(r8, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movp(rax, Operand(r8, StandardFrameConstants::kContextOffset));
- __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &runtime);
-
- // Patch the arguments.length and the parameters pointer.
- StackArgumentsAccessor args(rsp, 4, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(rcx, Operand(r8, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger64(rax, rcx);
- __ leap(rdx, Operand(r8, rax, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
-
- __ bind(&runtime);
- __ PopReturnAddressTo(rax);
- __ Push(rcx); // Push number of parameters.
- __ Push(rdx); // Push parameters pointer.
- __ Push(rbx); // Push rest parameter index.
- __ PushReturnAddressFrom(rax);
- __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
// Return address is on the stack.
Label slow;
@@ -932,103 +595,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // rcx : number of parameters (tagged)
- // rdx : parameters pointer
- // rdi : function
- // rsp[0] : return address
-
- DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movp(rax, Operand(rbx, StandardFrameConstants::kContextOffset));
- __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor_frame);
-
- // Get the length from the frame.
- __ SmiToInteger64(rax, rcx);
- __ jmp(&try_allocate);
-
- // Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger64(rax, rcx);
- __ leap(rdx, Operand(rbx, rax, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
-
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ testp(rax, rax);
- __ j(zero, &add_arguments_object, Label::kNear);
- __ leap(rax, Operand(rax, times_pointer_size, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ addp(rax, Immediate(Heap::kStrictArgumentsObjectSize));
-
- // Do the allocation of both objects in one go.
- __ Allocate(rax, rax, rbx, no_reg, &runtime, TAG_OBJECT);
-
- // Get the arguments map from the current native context.
- __ movp(rdi, NativeContextOperand());
- __ movp(rdi, ContextOperand(rdi, Context::STRICT_ARGUMENTS_MAP_INDEX));
-
- __ movp(FieldOperand(rax, JSObject::kMapOffset), rdi);
- __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
- __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
- __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ movp(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- rcx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ testp(rcx, rcx);
- __ j(zero, &done);
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ leap(rdi, Operand(rax, Heap::kStrictArgumentsObjectSize));
- __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
- __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
- __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
-
- // Untag the length for the loop below.
- __ SmiToInteger64(rcx, rcx);
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ movp(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
- __ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
- __ addp(rdi, Immediate(kPointerSize));
- __ subp(rdx, Immediate(kPointerSize));
- __ decp(rcx);
- __ j(not_zero, &loop);
-
- // Return.
- __ bind(&done);
- __ ret(0);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ PopReturnAddressTo(rax);
- __ Push(rdi); // Push function.
- __ Push(rdx); // Push parameters pointer.
- __ Push(rcx); // Push parameter count.
- __ PushReturnAddressFrom(rax);
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -1545,16 +1111,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
- if (is_strong(strength())) {
- // In strong mode, this comparison must throw, so call the runtime.
- __ j(equal, &runtime_call, Label::kFar);
- } else {
- Label check_for_nan;
- __ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(rax, NegativeComparisonResult(cc));
- __ ret(0);
- __ bind(&check_for_nan);
- }
+ Label check_for_nan;
+ __ j(not_equal, &check_for_nan, Label::kNear);
+ __ Set(rax, NegativeComparisonResult(cc));
+ __ ret(0);
+ __ bind(&check_for_nan);
}
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
@@ -1576,12 +1137,6 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call runtime on identical SIMD values since we must throw a TypeError.
__ cmpb(rcx, Immediate(static_cast<uint8_t>(SIMD128_VALUE_TYPE)));
__ j(equal, &runtime_call, Label::kFar);
- if (is_strong(strength())) {
- // We have already tested for smis and heap numbers, so if both
- // arguments are not strings we must proceed to the slow case.
- __ testb(rcx, Immediate(kIsNotStringMask));
- __ j(not_zero, &runtime_call, Label::kFar);
- }
}
__ Set(rax, EQUAL);
__ ret(0);
@@ -1728,7 +1283,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Not strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
- Label return_unequal;
+ Label return_unequal, undetectable;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
@@ -1737,22 +1292,30 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ leap(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
__ j(not_zero, &runtime_call, Label::kNear);
- __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rbx);
- __ j(below, &runtime_call, Label::kNear);
- __ CmpObjectType(rdx, FIRST_JS_RECEIVER_TYPE, rcx);
- __ j(below, &runtime_call, Label::kNear);
+
+ __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal, Label::kNear);
+ __ j(not_zero, &undetectable);
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
- __ j(zero, &return_unequal, Label::kNear);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
- __ Set(rax, EQUAL);
+ __ j(not_zero, &return_unequal);
+
+ __ CmpInstanceType(rbx, FIRST_JS_RECEIVER_TYPE);
+ __ j(below, &runtime_call, Label::kNear);
+ __ CmpInstanceType(rcx, FIRST_JS_RECEIVER_TYPE);
+ __ j(below, &runtime_call, Label::kNear);
+
__ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in rax,
- // or return equal if we fell through to here.
+ // Return non-equal by returning the non-zero object pointer in rax.
+ __ ret(0);
+
+ __ bind(&undetectable);
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(zero, &return_unequal);
+ __ Set(rax, EQUAL);
__ ret(0);
}
__ bind(&runtime_call);
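The restructured block above changes the decision order for loose equality on two heap objects: undetectable maps are now tested before the JS-receiver range check, and two undetectable objects compare equal, since both behave like undefined. A hedged C++ sketch of the resulting decision table, with illustrative names (pointer identity has already been ruled out earlier in the stub):

enum class Cmp { kEqual, kUnequal, kRuntime };

struct MapBits {
  bool undetectable;  // 1 << Map::kIsUndetectable in the bit field
  bool js_receiver;   // instance type >= FIRST_JS_RECEIVER_TYPE
};

Cmp LooseEqualDistinctHeapObjects(MapBits a, MapBits b) {
  if (a.undetectable)
    return b.undetectable ? Cmp::kEqual : Cmp::kUnequal;
  if (b.undetectable) return Cmp::kUnequal;
  if (!a.js_receiver || !b.js_receiver) return Cmp::kRuntime;
  return Cmp::kUnequal;  // distinct, non-undetectable receivers
}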
@@ -1769,8 +1332,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
} else {
__ Push(Smi::FromInt(NegativeComparisonResult(cc)));
__ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
- : Runtime::kCompare);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -2002,7 +1564,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&call_function);
__ Set(rax, argc);
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+ tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
@@ -2040,7 +1603,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&call);
__ Set(rax, argc);
- __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2158,12 +1721,34 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
- // Enter the exit frame that transitions from JavaScript to C++.
#ifdef _WIN64
- int arg_stack_space = (result_size() < 2 ? 2 : 4);
-#else // _WIN64
- int arg_stack_space = 0;
+ // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9. It requires the
+ // stack to be aligned to 16 bytes. It only allows a single word to be
+ // returned in register rax. Larger return sizes must be written to an address
+ // passed as a hidden first argument.
+ const Register kCCallArg0 = rcx;
+ const Register kCCallArg1 = rdx;
+ const Register kCCallArg2 = r8;
+ const Register kCCallArg3 = r9;
+ const int kArgExtraStackSpace = 2;
+ const int kMaxRegisterResultSize = 1;
+#else
+ // GCC / Clang passes arguments in rdi, rsi, rdx, rcx, r8, r9. Simple results
+ // are returned in rax, and a struct of two pointers is returned in rax+rdx.
+ // Larger return sizes must be written to an address passed as a hidden first
+ // argument.
+ const Register kCCallArg0 = rdi;
+ const Register kCCallArg1 = rsi;
+ const Register kCCallArg2 = rdx;
+ const Register kCCallArg3 = rcx;
+ const int kArgExtraStackSpace = 0;
+ const int kMaxRegisterResultSize = 2;
#endif // _WIN64
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ int arg_stack_space =
+ kArgExtraStackSpace +
+ (result_size() <= kMaxRegisterResultSize ? 0 : result_size());
if (argv_in_register()) {
DCHECK(!save_doubles());
__ EnterApiExitFrame(arg_stack_space);
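The rewritten prologue folds the previous per-ABI branches into one set of constants. As a hedged restatement in plain C++, the reserved exit-frame space is the ABI's fixed extra slots plus room for the whole result when it cannot come back in registers; for Win64 with result_size() == 2 this reproduces the old "result_size() < 2 ? 2 : 4" value:

// Mirrors the arg_stack_space computation above (sizes in pointer words).
int ArgStackSpace(int result_size, bool win64) {
  const int kArgExtraStackSpace = win64 ? 2 : 0;
  const int kMaxRegisterResultSize = win64 ? 1 : 2;
  return kArgExtraStackSpace +
         (result_size <= kMaxRegisterResultSize ? 0 : result_size);
}
// ArgStackSpace(1, true) == 2, ArgStackSpace(2, true) == 4,
// ArgStackSpace(2, false) == 0, ArgStackSpace(3, false) == 3.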
@@ -2179,56 +1764,41 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// r14: number of arguments including receiver (C callee-saved).
// r15: argv pointer (C callee-saved).
- // Simple results returned in rax (both AMD64 and Win64 calling conventions).
- // Complex results must be written to address passed as first argument.
- // AMD64 calling convention: a struct of two pointers in rax+rdx
-
// Check stack alignment.
if (FLAG_debug_code) {
__ CheckStackAlignment();
}
- // Call C function.
-#ifdef _WIN64
- // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
- // Pass argv and argc as two parameters. The arguments object will
- // be created by stubs declared by DECLARE_RUNTIME_FUNCTION().
- if (result_size() < 2) {
+ // Call C function. The arguments object will be created by stubs declared by
+ // DECLARE_RUNTIME_FUNCTION().
+ if (result_size() <= kMaxRegisterResultSize) {
// Pass a pointer to the Arguments object as the first argument.
- // Return result in single register (rax).
- __ movp(rcx, r14); // argc.
- __ movp(rdx, r15); // argv.
- __ Move(r8, ExternalReference::isolate_address(isolate()));
+ // Return result in single register (rax), or a register pair (rax, rdx).
+ __ movp(kCCallArg0, r14); // argc.
+ __ movp(kCCallArg1, r15); // argv.
+ __ Move(kCCallArg2, ExternalReference::isolate_address(isolate()));
} else {
- DCHECK_EQ(2, result_size());
+ DCHECK_LE(result_size(), 3);
// Pass a pointer to the result location as the first argument.
- __ leap(rcx, StackSpaceOperand(2));
+ __ leap(kCCallArg0, StackSpaceOperand(kArgExtraStackSpace));
// Pass a pointer to the Arguments object as the second argument.
- __ movp(rdx, r14); // argc.
- __ movp(r8, r15); // argv.
- __ Move(r9, ExternalReference::isolate_address(isolate()));
+ __ movp(kCCallArg1, r14); // argc.
+ __ movp(kCCallArg2, r15); // argv.
+ __ Move(kCCallArg3, ExternalReference::isolate_address(isolate()));
}
-
-#else // _WIN64
- // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
- __ movp(rdi, r14); // argc.
- __ movp(rsi, r15); // argv.
- __ Move(rdx, ExternalReference::isolate_address(isolate()));
-#endif // _WIN64
__ call(rbx);
- // Result is in rax - do not destroy this register!
-#ifdef _WIN64
- // If return value is on the stack, pop it to registers.
- if (result_size() > 1) {
- DCHECK_EQ(2, result_size());
+ if (result_size() > kMaxRegisterResultSize) {
// Read result values stored on stack. Result is stored
- // above the four argument mirror slots and the two
- // Arguments object slots.
- __ movq(rax, Operand(rsp, 6 * kRegisterSize));
- __ movq(rdx, Operand(rsp, 7 * kRegisterSize));
+ // above the two Arguments object slots on Win64.
+ DCHECK_LE(result_size(), 3);
+ __ movq(kReturnRegister0, StackSpaceOperand(kArgExtraStackSpace + 0));
+ __ movq(kReturnRegister1, StackSpaceOperand(kArgExtraStackSpace + 1));
+ if (result_size() > 2) {
+ __ movq(kReturnRegister2, StackSpaceOperand(kArgExtraStackSpace + 2));
+ }
}
-#endif // _WIN64
+ // Result is in rax, rdx:rax or r8:rdx:rax - do not destroy these registers!
// Check result for exception sentinel.
Label exception_returned;
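The comment on the result registers is worth unpacking: under the System V ABI a two-word struct comes back in rax:rdx, while anything larger is returned through a hidden pointer argument, which is exactly the case the stub handles by reserving stack slots and reloading kReturnRegister0..2 (the r8:rdx:rax triple is V8's own convention, not an ABI register return). A small, hedged C++ demonstration of the two return styles, where the compiler does the register and hidden-pointer plumbing:

#include <cstdio>

struct TwoWords { void* a; void* b; };            // returned in rax:rdx
struct ThreeWords { void* a; void* b; void* c; }; // hidden-pointer return

TwoWords MakeTwo() { return {nullptr, nullptr}; }
ThreeWords MakeThree() { return {nullptr, nullptr, nullptr}; }

int main() {
  TwoWords t = MakeTwo();      // compiler reads rax/rdx
  ThreeWords u = MakeThree();  // compiler passes &u behind the scenes
  std::printf("%p %p %p\n", t.a, u.a, u.c);
  return 0;
}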
@@ -3068,6 +2638,42 @@ void ToStringStub::Generate(MacroAssembler* masm) {
}
+void ToNameStub::Generate(MacroAssembler* masm) {
+ // The ToName stub takes one argument in rax.
+ Label is_number;
+ __ JumpIfSmi(rax, &is_number, Label::kNear);
+
+ Label not_name;
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ CmpObjectType(rax, LAST_NAME_TYPE, rdi);
+ // rax: receiver
+ // rdi: receiver map
+ __ j(above, &not_name, Label::kNear);
+ __ Ret();
+ __ bind(&not_name);
+
+ Label not_heap_number;
+ __ CompareRoot(rdi, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &not_heap_number, Label::kNear);
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ CmpInstanceType(rdi, ODDBALL_TYPE);
+ __ j(not_equal, &not_oddball, Label::kNear);
+ __ movp(rax, FieldOperand(rax, Oddball::kToStringOffset));
+ __ Ret();
+ __ bind(&not_oddball);
+
+ __ PopReturnAddressTo(rcx); // Pop return address.
+ __ Push(rax); // Push argument.
+ __ PushReturnAddressFrom(rcx); // Push return address.
+ __ TailCallRuntime(Runtime::kToName);
+}
+
+
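The new stub's dispatch order is: names pass through untouched, smis and heap numbers go to NumberToStringStub, oddballs return their cached to_string, and everything else falls back to Runtime::kToName. A hedged C++ sketch of that order with an illustrative value model:

#include <string>

enum class Kind { kSmi, kHeapNumber, kName, kOddball, kOther };

struct Value {
  Kind kind;
  std::string text;   // payload for names / oddball to_string
  double number = 0;  // payload for smis / heap numbers
};

// Illustrative only: real number-to-string follows JS semantics, not
// std::to_string formatting.
std::string ToName(const Value& v) {
  switch (v.kind) {
    case Kind::kName:       return v.text;  // fast path, returns receiver
    case Kind::kSmi:
    case Kind::kHeapNumber: return std::to_string(v.number);  // NumberToString
    case Kind::kOddball:    return v.text;  // cached Oddball::to_string
    default:                return "<Runtime::kToName>";
  }
}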
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -3283,21 +2889,17 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
__ JumpIfNotRoot(rcx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
__ JumpIfNotRoot(rbx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
- if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
- } else {
- if (!Token::IsEqualityOp(op())) {
- __ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
- __ AssertSmi(rax);
- __ movp(rdx, FieldOperand(rdx, Oddball::kToNumberOffset));
- __ AssertSmi(rdx);
- __ pushq(rax);
- __ movq(rax, rdx);
- __ popq(rdx);
- }
- __ subp(rax, rdx);
- __ Ret();
+ if (!Token::IsEqualityOp(op())) {
+ __ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
+ __ AssertSmi(rax);
+ __ movp(rdx, FieldOperand(rdx, Oddball::kToNumberOffset));
+ __ AssertSmi(rdx);
+ __ pushq(rax);
+ __ movq(rax, rdx);
+ __ popq(rdx);
}
+ __ subp(rax, rdx);
+ __ Ret();
__ bind(&miss);
GenerateMiss(masm);
@@ -3380,7 +2982,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -3614,8 +3216,6 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
if (Token::IsEqualityOp(op())) {
__ subp(rax, rdx);
__ ret(0);
- } else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
__ PopReturnAddressTo(rcx);
__ Push(rdx);
@@ -3913,11 +3513,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.scratch0(),
&dont_need_remembered_set);
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- not_zero,
- &dont_need_remembered_set);
+ __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+ &dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
@@ -4867,6 +4464,626 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rdi : target
+ // -- rdx : new target
+ // -- rsi : context
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ AssertFunction(rdi);
+ __ AssertReceiver(rdx);
+
+ // Verify that the new target is a JSFunction.
+ Label new_object;
+ __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rbx);
+ __ j(not_equal, &new_object);
+
+ // Load the initial map and verify that it's in fact a map.
+ __ movp(rcx, FieldOperand(rdx, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(rcx, &new_object);
+ __ CmpObjectType(rcx, MAP_TYPE, rbx);
+ __ j(not_equal, &new_object);
+
+ // Fall back to runtime if the target differs from the new target's
+ // initial map constructor.
+ __ cmpp(rdi, FieldOperand(rcx, Map::kConstructorOrBackPointerOffset));
+ __ j(not_equal, &new_object);
+
+ // Allocate the JSObject on the heap.
+ Label allocate, done_allocate;
+ __ movzxbl(rbx, FieldOperand(rcx, Map::kInstanceSizeOffset));
+ __ leal(rbx, Operand(rbx, times_pointer_size, 0));
+ __ Allocate(rbx, rax, rdi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
+ __ bind(&done_allocate);
+
+ // Initialize the JSObject fields.
+ __ movp(Operand(rax, JSObject::kMapOffset), rcx);
+ __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
+ __ movp(Operand(rax, JSObject::kPropertiesOffset), rbx);
+ __ movp(Operand(rax, JSObject::kElementsOffset), rbx);
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ leap(rbx, Operand(rax, JSObject::kHeaderSize));
+
+ // ----------- S t a t e -------------
+ // -- rax : result (untagged)
+ // -- rbx : result fields (untagged)
+ // -- rdi : result end (untagged)
+ // -- rcx : initial map
+ // -- rsi : context
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ // Perform in-object slack tracking if requested.
+ Label slack_tracking;
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ __ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
+ __ testl(FieldOperand(rcx, Map::kBitField3Offset),
+ Immediate(Map::ConstructionCounter::kMask));
+ __ j(not_zero, &slack_tracking, Label::kNear);
+ {
+ // Initialize all in-object fields with undefined.
+ __ InitializeFieldsWithFiller(rbx, rdi, r11);
+
+ // Add the object tag to make the JSObject real.
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ incp(rax);
+ __ Ret();
+ }
+ __ bind(&slack_tracking);
+ {
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ __ subl(FieldOperand(rcx, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCounter::kShift));
+
+ // Initialize the in-object fields with undefined.
+ __ movzxbl(rdx, FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset));
+ __ negp(rdx);
+ __ leap(rdx, Operand(rdi, rdx, times_pointer_size, 0));
+ __ InitializeFieldsWithFiller(rbx, rdx, r11);
+
+ // Initialize the remaining (reserved) fields with one pointer filler map.
+ __ LoadRoot(r11, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(rdx, rdi, r11);
+
+ // Add the object tag to make the JSObject real.
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ incp(rax);
+
+ // Check if we can finalize the instance size.
+ Label finalize;
+ STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+ __ testl(FieldOperand(rcx, Map::kBitField3Offset),
+ Immediate(Map::ConstructionCounter::kMask));
+ __ j(zero, &finalize, Label::kNear);
+ __ Ret();
+
+ // Finalize the instance size.
+ __ bind(&finalize);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rax);
+ __ Push(rcx);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(rax);
+ }
+ __ Ret();
+ }
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Integer32ToSmi(rbx, rbx);
+ __ Push(rcx);
+ __ Push(rbx);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(rcx);
+ }
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ decp(rax);
+ __ movzxbl(rbx, FieldOperand(rcx, Map::kInstanceSizeOffset));
+ __ leap(rdi, Operand(rax, rbx, times_pointer_size, 0));
+ __ jmp(&done_allocate);
+
+ // Fall back to %NewObject.
+ __ bind(&new_object);
+ __ PopReturnAddressTo(rcx);
+ __ Push(rdi);
+ __ Push(rdx);
+ __ PushReturnAddressFrom(rcx);
+ __ TailCallRuntime(Runtime::kNewObject);
+}
+
+
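The slack_tracking branch above implements in-object slack tracking: while the map's ConstructionCounter is non-zero it is decremented once per allocation, the trailing unused fields are filled with the one-pointer filler map so the heap stays iterable, and once the counter runs out Runtime::kFinalizeInstanceSize shrinks the instance size. A hedged C++ model of that countdown:

struct MapState {
  int construction_counter;    // Map::ConstructionCounter bits
  int unused_property_fields;  // Map::kUnusedPropertyFieldsOffset
};

enum class Fill { kUndefined, kOnePointerFiller };

// Decides the fill value for each in-object field of a fresh instance and
// reports whether the instance size should now be finalized.
bool InitializeFields(MapState& map, int total_fields, Fill* fill) {
  if (map.construction_counter == 0) {  // kNoSlackTracking
    for (int i = 0; i < total_fields; ++i) fill[i] = Fill::kUndefined;
    return false;
  }
  --map.construction_counter;
  const int used = total_fields - map.unused_property_fields;
  for (int i = 0; i < total_fields; ++i)
    fill[i] = i < used ? Fill::kUndefined : Fill::kOnePointerFiller;
  return map.construction_counter == 0;  // -> Runtime::kFinalizeInstanceSize
}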
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rdi : function
+ // -- rsi : context
+ // -- rbp : frame pointer
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ AssertFunction(rdi);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make rdx point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ movp(rdx, rbp);
+ __ jmp(&loop_entry, Label::kNear);
+ __ bind(&loop);
+ __ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kMarkerOffset));
+ __ j(not_equal, &loop);
+ }
+
+ // Check if we have rest parameters (only possible if we have an
+ // arguments adaptor frame below the function frame).
+ Label no_rest_parameters;
+ __ movp(rbx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
+ __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(not_equal, &no_rest_parameters, Label::kNear);
+
+ // Check if the arguments adaptor frame contains more arguments than
+ // specified by the function's internal formal parameter count.
+ Label rest_parameters;
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(
+ rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ SmiToInteger32(
+ rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ subl(rax, rcx);
+ __ j(greater, &rest_parameters);
+
+ // Return an empty rest parameter array.
+ __ bind(&no_rest_parameters);
+ {
+ // ----------- S t a t e -------------
+ // -- rsi : context
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ // Allocate an empty rest parameter array.
+ Label allocate, done_allocate;
+ __ Allocate(JSArray::kSize, rax, rdx, rcx, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Set up the rest parameter array in rax.
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, rcx);
+ __ movp(FieldOperand(rax, JSArray::kMapOffset), rcx);
+ __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+ __ movp(FieldOperand(rax, JSArray::kPropertiesOffset), rcx);
+ __ movp(FieldOperand(rax, JSArray::kElementsOffset), rcx);
+ __ movp(FieldOperand(rax, JSArray::kLengthOffset), Immediate(0));
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(Smi::FromInt(JSArray::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ }
+ __ jmp(&done_allocate);
+ }
+
+ __ bind(&rest_parameters);
+ {
+ // Compute the pointer to the first rest parameter (skipping the receiver).
+ __ leap(rbx, Operand(rbx, rax, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+
+ // ----------- S t a t e -------------
+ // -- rsi : context
+ // -- rax : number of rest parameters
+ // -- rbx : pointer to first rest parameters
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ // Allocate space for the rest parameter array plus the backing store.
+ Label allocate, done_allocate;
+ __ leal(rcx, Operand(rax, times_pointer_size,
+ JSArray::kSize + FixedArray::kHeaderSize));
+ __ Allocate(rcx, rdx, rdi, no_reg, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Compute the arguments.length in rdi.
+ __ Integer32ToSmi(rdi, rax);
+
+ // Set up the elements array in rdx.
+ __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
+ __ movp(FieldOperand(rdx, FixedArray::kMapOffset), rcx);
+ __ movp(FieldOperand(rdx, FixedArray::kLengthOffset), rdi);
+ {
+ Label loop, done_loop;
+ __ Set(rcx, 0);
+ __ bind(&loop);
+ __ cmpl(rcx, rax);
+ __ j(equal, &done_loop, Label::kNear);
+ __ movp(kScratchRegister, Operand(rbx, 0 * kPointerSize));
+ __ movp(
+ FieldOperand(rdx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ kScratchRegister);
+ __ subp(rbx, Immediate(1 * kPointerSize));
+ __ addl(rcx, Immediate(1));
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Set up the rest parameter array in rax.
+ __ leap(rax,
+ Operand(rdx, rax, times_pointer_size, FixedArray::kHeaderSize));
+ __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, rcx);
+ __ movp(FieldOperand(rax, JSArray::kMapOffset), rcx);
+ __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+ __ movp(FieldOperand(rax, JSArray::kPropertiesOffset), rcx);
+ __ movp(FieldOperand(rax, JSArray::kElementsOffset), rdx);
+ __ movp(FieldOperand(rax, JSArray::kLengthOffset), rdi);
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Integer32ToSmi(rax, rax);
+ __ Integer32ToSmi(rcx, rcx);
+ __ Push(rax);
+ __ Push(rbx);
+ __ Push(rcx);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ movp(rdx, rax);
+ __ Pop(rbx);
+ __ Pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
+ __ jmp(&done_allocate);
+ }
+}
+
+
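The subtraction above is the heart of the stub: the rest array receives whatever the arguments adaptor frame supplied beyond the function's formal parameter count, and an empty JSArray is returned when nothing is left over. In plain C++:

#include <algorithm>

// Mirrors the stub's arithmetic: rest elements are the arguments beyond the
// formal parameter count, clamped at zero.
int RestParameterCount(int actual_argument_count, int formal_parameter_count) {
  return std::max(0, actual_argument_count - formal_parameter_count);
}
// e.g. for function f(a, ...r) called as f(1, 2, 3):
// RestParameterCount(3, 1) == 2, so r is [2, 3].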
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rdi : function
+ // -- rsi : context
+ // -- rbp : frame pointer
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ AssertFunction(rdi);
+
+ // TODO(bmeurer): Clean up to match the FastNewStrictArgumentsStub.
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(
+ rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ leap(rdx, Operand(rbp, rcx, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+ __ Integer32ToSmi(rcx, rcx);
+
+ // rcx : number of parameters (tagged)
+ // rdx : parameters pointer
+ // rdi : function
+ // rsp[0] : return address
+ // Registers used over the whole function:
+ // rbx: the mapped parameter count (untagged)
+ // rax: the allocated object (tagged).
+ Factory* factory = isolate()->factory();
+
+ __ SmiToInteger64(rbx, rcx);
+ // rbx = parameter count (untagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(r8, Operand(rax, StandardFrameConstants::kContextOffset));
+ __ Cmp(r8, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+ __ movp(r11, rbx);
+ __ jmp(&try_allocate, Label::kNear);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ SmiToInteger64(
+ r11, Operand(rax, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ leap(rdx, Operand(rax, r11, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+
+ // rbx = parameter count (untagged)
+ // r11 = argument count (untagged)
+ // Compute the mapped parameter count = min(rbx, r11) in rbx.
+ __ cmpp(rbx, r11);
+ __ j(less_equal, &try_allocate, Label::kNear);
+ __ movp(rbx, r11);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map, has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ Label no_parameter_map;
+ __ xorp(r8, r8);
+ __ testp(rbx, rbx);
+ __ j(zero, &no_parameter_map, Label::kNear);
+ __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ bind(&no_parameter_map);
+
+ // 2. Backing store.
+ __ leap(r8, Operand(r8, r11, times_pointer_size, FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ addp(r8, Immediate(JSSloppyArgumentsObject::kSize));
+
+ // Do the allocation of all three objects in one go.
+ __ Allocate(r8, rax, r9, no_reg, &runtime, TAG_OBJECT);
+
+ // rax = address of new object(s) (tagged)
+ // r11 = argument count (untagged)
+ // Get the arguments map from the current native context into r9.
+ Label has_mapped_parameters, instantiate;
+ __ movp(r9, NativeContextOperand());
+ __ testp(rbx, rbx);
+ __ j(not_zero, &has_mapped_parameters, Label::kNear);
+
+ const int kIndex = Context::SLOPPY_ARGUMENTS_MAP_INDEX;
+ __ movp(r9, Operand(r9, Context::SlotOffset(kIndex)));
+ __ jmp(&instantiate, Label::kNear);
+
+ const int kAliasedIndex = Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX;
+ __ bind(&has_mapped_parameters);
+ __ movp(r9, Operand(r9, Context::SlotOffset(kAliasedIndex)));
+ __ bind(&instantiate);
+
+ // rax = address of new object (tagged)
+ // rbx = mapped parameter count (untagged)
+ // r11 = argument count (untagged)
+ // r9 = address of arguments map (tagged)
+ __ movp(FieldOperand(rax, JSObject::kMapOffset), r9);
+ __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
+ __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);
+
+ // Set up the callee in-object property.
+ __ AssertNotSmi(rdi);
+ __ movp(FieldOperand(rax, JSSloppyArgumentsObject::kCalleeOffset), rdi);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ // Note: r11 is tagged from here on.
+ __ Integer32ToSmi(r11, r11);
+ __ movp(FieldOperand(rax, JSSloppyArgumentsObject::kLengthOffset), r11);
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, rdi will point there, otherwise to the
+ // backing store.
+ __ leap(rdi, Operand(rax, JSSloppyArgumentsObject::kSize));
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
+
+ // rax = address of new object (tagged)
+ // rbx = mapped parameter count (untagged)
+ // r11 = argument count (tagged)
+ // rdi = address of parameter map or backing store (tagged)
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ testp(rbx, rbx);
+ __ j(zero, &skip_parameter_map);
+
+ __ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
+ // rbx contains the untagged argument count. Add 2 and tag to write.
+ __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
+ __ Integer64PlusConstantToSmi(r9, rbx, 2);
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
+ __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
+ __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+
+ // Load tagged parameter count into r9.
+ __ Integer32ToSmi(r9, rbx);
+ __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
+ __ addp(r8, rcx);
+ __ subp(r8, r9);
+ __ movp(rcx, rdi);
+ __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ SmiToInteger64(r9, r9);
+ // r9 = loop variable (untagged)
+ // r8 = mapping index (tagged)
+ // rcx = address of parameter map (tagged)
+ // rdi = address of backing store (tagged)
+ __ jmp(&parameters_test, Label::kNear);
+
+ __ bind(&parameters_loop);
+ __ subp(r9, Immediate(1));
+ __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ __ movp(FieldOperand(rcx, r9, times_pointer_size, kParameterMapHeaderSize),
+ r8);
+ __ movp(FieldOperand(rdi, r9, times_pointer_size, FixedArray::kHeaderSize),
+ kScratchRegister);
+ __ SmiAddConstant(r8, r8, Smi::FromInt(1));
+ __ bind(&parameters_test);
+ __ testp(r9, r9);
+ __ j(not_zero, &parameters_loop, Label::kNear);
+
+ __ bind(&skip_parameter_map);
+
+ // r11 = argument count (tagged)
+ // rdi = address of backing store (tagged)
+ // Copy arguments header and remaining slots (if there are any).
+ __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
+ factory->fixed_array_map());
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r11);
+
+ Label arguments_loop, arguments_test;
+ __ movp(r8, rbx);
+ // Untag r11 for the loop below.
+ __ SmiToInteger64(r11, r11);
+ __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
+ __ subp(rdx, kScratchRegister);
+ __ jmp(&arguments_test, Label::kNear);
+
+ __ bind(&arguments_loop);
+ __ subp(rdx, Immediate(kPointerSize));
+ __ movp(r9, Operand(rdx, 0));
+ __ movp(FieldOperand(rdi, r8,
+ times_pointer_size,
+ FixedArray::kHeaderSize),
+ r9);
+ __ addp(r8, Immediate(1));
+
+ __ bind(&arguments_test);
+ __ cmpp(r8, r11);
+ __ j(less, &arguments_loop, Label::kNear);
+
+ // Return.
+ __ ret(0);
+
+ // Do the runtime call to allocate the arguments object.
+ // r11 = argument count (untagged)
+ __ bind(&runtime);
+ __ Integer32ToSmi(r11, r11);
+ __ PopReturnAddressTo(rax);
+ __ Push(rdi); // Push function.
+ __ Push(rdx); // Push parameters pointer.
+ __ Push(r11); // Push parameter count.
+ __ PushReturnAddressFrom(rax);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
+
+
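The size computation in try_allocate is easiest to read as three summands: the parameter map (a FixedArray header plus two extra words for context and backing store plus one slot per mapped parameter, omitted entirely when nothing is mapped), the backing store (header plus one slot per actual argument), and the JSSloppyArgumentsObject itself. A hedged C++ model in pointer-sized words, with illustrative constants:

#include <algorithm>

int SloppyArgumentsWords(int formal_count, int actual_count) {
  const int kFixedArrayHeader = 2;  // map + length
  const int kSloppyObjectSize = 5;  // JSSloppyArgumentsObject::kSize in words
  const int mapped = std::min(formal_count, actual_count);
  const int parameter_map =
      mapped == 0 ? 0 : kFixedArrayHeader + 2 /* context + store */ + mapped;
  const int backing_store = kFixedArrayHeader + actual_count;
  return parameter_map + backing_store + kSloppyObjectSize;
}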
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rdi : function
+ // -- rsi : context
+ // -- rbp : frame pointer
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ AssertFunction(rdi);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make rdx point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ movp(rdx, rbp);
+ __ jmp(&loop_entry, Label::kNear);
+ __ bind(&loop);
+ __ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kMarkerOffset));
+ __ j(not_equal, &loop);
+ }
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ movp(rbx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
+ __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ j(equal, &arguments_adaptor, Label::kNear);
+ {
+ __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadSharedFunctionInfoSpecialField(
+ rax, rax, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ leap(rbx, Operand(rdx, rax, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+ }
+ __ jmp(&arguments_done, Label::kNear);
+ __ bind(&arguments_adaptor);
+ {
+ __ SmiToInteger32(
+ rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ leap(rbx, Operand(rbx, rax, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset -
+ 1 * kPointerSize));
+ }
+ __ bind(&arguments_done);
+
+ // ----------- S t a t e -------------
+ // -- rax : number of arguments
+ // -- rbx : pointer to the first argument
+ // -- rsi : context
+ // -- rsp[0] : return address
+ // -----------------------------------
+
+ // Allocate space for the strict arguments object plus the backing store.
+ Label allocate, done_allocate;
+ __ leal(rcx, Operand(rax, times_pointer_size, JSStrictArgumentsObject::kSize +
+ FixedArray::kHeaderSize));
+ __ Allocate(rcx, rdx, rdi, no_reg, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Compute the arguments.length in rdi.
+ __ Integer32ToSmi(rdi, rax);
+
+ // Set up the elements array in rdx.
+ __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
+ __ movp(FieldOperand(rdx, FixedArray::kMapOffset), rcx);
+ __ movp(FieldOperand(rdx, FixedArray::kLengthOffset), rdi);
+ {
+ Label loop, done_loop;
+ __ Set(rcx, 0);
+ __ bind(&loop);
+ __ cmpl(rcx, rax);
+ __ j(equal, &done_loop, Label::kNear);
+ __ movp(kScratchRegister, Operand(rbx, 0 * kPointerSize));
+ __ movp(
+ FieldOperand(rdx, rcx, times_pointer_size, FixedArray::kHeaderSize),
+ kScratchRegister);
+ __ subp(rbx, Immediate(1 * kPointerSize));
+ __ addl(rcx, Immediate(1));
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Set up the strict arguments object in rax.
+ __ leap(rax,
+ Operand(rdx, rax, times_pointer_size, FixedArray::kHeaderSize));
+ __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, rcx);
+ __ movp(FieldOperand(rax, JSStrictArgumentsObject::kMapOffset), rcx);
+ __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+ __ movp(FieldOperand(rax, JSStrictArgumentsObject::kPropertiesOffset), rcx);
+ __ movp(FieldOperand(rax, JSStrictArgumentsObject::kElementsOffset), rdx);
+ __ movp(FieldOperand(rax, JSStrictArgumentsObject::kLengthOffset), rdi);
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Integer32ToSmi(rax, rax);
+ __ Integer32ToSmi(rcx, rcx);
+ __ Push(rax);
+ __ Push(rbx);
+ __ Push(rcx);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ movp(rdx, rax);
+ __ Pop(rbx);
+ __ Pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
+ __ jmp(&done_allocate);
+}
+
+
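The element copy in the loop above runs its two pointers in opposite directions: the stack arguments sit at decreasing addresses starting from the first one (hence the subp on rbx), while the FixedArray is filled from index 0 upward. A hedged sketch with plain C++ pointers:

// Copy count stack arguments into a FixedArray payload. first_arg points at
// the highest-addressed argument; elements is the array's first slot.
void CopyArguments(void** first_arg, int count, void** elements) {
  void** src = first_arg;
  for (int i = 0; i < count; ++i) {
    elements[i] = *src;  // store into FixedArray::kHeaderSize + i
    --src;               // subp rbx, kPointerSize
  }
}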
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context_reg = rsi;
Register slot_reg = rbx;
@@ -5205,11 +5422,10 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ jmp(&leave_exit_frame);
}
-
static void CallApiFunctionStubHelper(MacroAssembler* masm,
const ParameterCount& argc,
bool return_first_arg,
- bool call_data_undefined) {
+ bool call_data_undefined, bool is_lazy) {
// ----------- S t a t e -------------
// -- rdi : callee
// -- rbx : call_data
@@ -5272,8 +5488,10 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// Push return address back on stack.
__ PushReturnAddressFrom(return_address);
- // load context from callee
- __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
+ if (!is_lazy) {
+ // Load context from callee.
+ __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
+ }
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@@ -5346,7 +5564,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
bool call_data_undefined = this->call_data_undefined();
CallApiFunctionStubHelper(masm, ParameterCount(rax), false,
- call_data_undefined);
+ call_data_undefined, false);
}
@@ -5354,18 +5572,19 @@ void CallApiAccessorStub::Generate(MacroAssembler* masm) {
bool is_store = this->is_store();
int argc = this->argc();
bool call_data_undefined = this->call_data_undefined();
+ bool is_lazy = this->is_lazy();
CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined);
+ call_data_undefined, is_lazy);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rsp[0] : return address
- // -- rsp[8] : name
- // -- rsp[16 - kArgsLength*8] : PropertyCallbackArguments object
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16 .. (16 + kArgsLength*8)] : v8::PropertyCallbackInfo::args_
// -- ...
- // -- r8 : api_function_address
+ // -- r8 : api_function_address
// -----------------------------------
#if defined(__MINGW64__) || defined(_WIN64)
@@ -5381,23 +5600,25 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
DCHECK(api_function_address.is(r8));
Register scratch = rax;
- // v8::Arguments::values_ and handler for name.
- const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
- // Allocate v8::AccessorInfo in non-GCed stack space.
+ // Allocate v8::PropertyCallbackInfo in non-GCed stack space.
const int kArgStackSpace = 1;
- __ leap(name_arg, Operand(rsp, kPCOnStackSize));
+ // Load address of v8::PropertyCallbackInfo::args_ array.
+ __ leap(scratch, Operand(rsp, 2 * kPointerSize));
PrepareCallApiFunction(masm, kArgStackSpace);
- __ leap(scratch, Operand(name_arg, 1 * kPointerSize));
-
- // v8::PropertyAccessorInfo::args_.
- __ movp(StackSpaceOperand(0), scratch);
+ // Create v8::PropertyCallbackInfo object on the stack and initialize
+ // its args_ field.
+ Operand info_object = StackSpaceOperand(0);
+ __ movp(info_object, scratch);
+ __ leap(name_arg, Operand(scratch, -kPointerSize));
// The context register (rsi) has been saved in PrepareCallApiFunction and
// could be used to pass arguments.
- __ leap(accessor_info_arg, StackSpaceOperand(0));
+ __ leap(accessor_info_arg, info_object);
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
@@ -5407,13 +5628,12 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
DCHECK(!api_function_address.is(accessor_info_arg) &&
!api_function_address.is(name_arg));
- // The name handler is counted as an argument.
- StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
- Operand return_value_operand = args.GetArgumentOperand(
- PropertyCallbackArguments::kArgsLength - 1 -
- PropertyCallbackArguments::kReturnValueOffset);
+ // +3 is to skip the prolog, return address and name handle.
+ Operand return_value_operand(
+ rbp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, getter_arg,
- kStackSpace, nullptr, return_value_operand, NULL);
+ kStackUnwindSpace, nullptr, return_value_operand,
+ NULL);
}
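The new return_value_operand replaces the StackArgumentsAccessor arithmetic with a direct rbp-relative offset: kReturnValueOffset slots into the args_ array, plus three extra slots for the pushed frame pointer (the prolog), the return address, and the name handle that sit between rbp and the array. A hedged constexpr restatement of that arithmetic:

constexpr int kPointerSize = 8;  // x64

// The +3 skips the saved frame pointer, the return address and the name
// handle that lie between rbp and v8::PropertyCallbackInfo::args_.
constexpr int ReturnValueOperandOffset(int return_value_offset) {
  return (return_value_offset + 3) * kPointerSize;
}
static_assert(ReturnValueOperandOffset(0) == 3 * kPointerSize,
              "three skipped slots");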
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index c2fd970c67..ddf59eb470 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -88,26 +88,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers rbp and rsp are set to the correct values though.
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uintptr_at(tos + i));
- }
-}
-
-
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
intptr_t handler =
@@ -125,8 +105,7 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
// There is no dynamic alignment padding on x64 in the input frame.
return false;
}
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 05b199d558..a9532dc3ad 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -956,6 +956,12 @@ int DisassemblerX64::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x0a:
+ AppendToBuffer("vroundss %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", *current++);
+ break;
case 0x0b:
AppendToBuffer("vroundsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1516,6 +1522,12 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightOperand(current);
AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
current += 1;
+ } else if (third_byte == 0x0a) {
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("roundss %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(",0x%x", (*current) & 3);
+ current += 1;
} else if (third_byte == 0x0b) {
get_modrm(*current, &mod, &regop, &rm);
// roundsd xmm, xmm/m64, imm8
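
A note on the immediate decoded above: for roundss/roundsd, when bit 2 of imm8 is clear, the low two bits select the rounding mode, which is why the code masks with & 3. A small illustrative decoder:

  // Illustrative only; mirrors the "(*current) & 3" masking above.
  static const char* RoundingModeName(uint8_t imm8) {
    switch (imm8 & 3) {
      case 0:  return "nearest";
      case 1:  return "down";
      case 2:  return "up";
      default: return "truncate";
    }
  }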
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 79315c70a0..0913d1c1d9 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -54,20 +54,6 @@ const Register StringCompareDescriptor::LeftRegister() { return rdx; }
const Register StringCompareDescriptor::RightRegister() { return rax; }
-const Register ArgumentsAccessReadDescriptor::index() { return rdx; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return rax; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return rdi; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return rcx; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return rdx; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return rcx; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return rdx; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return rbx; }
-
-
const Register ApiGetterDescriptor::function_address() { return r8; }
@@ -96,6 +82,30 @@ void FastNewContextDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdi, rdx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdi};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdi};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {rdi};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void TypeofDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -121,6 +131,10 @@ const Register ToStringDescriptor::ReceiverRegister() { return rax; }
// static
+const Register ToNameDescriptor::ReceiverRegister() { return rax; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return rax; }
@@ -166,13 +180,6 @@ void CreateWeakCellDescriptor::InitializePlatformSpecific(
}
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rcx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rdi};
@@ -406,6 +413,14 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+ kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister,
+ kInterpreterDispatchTableRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -417,7 +432,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -429,7 +443,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 9952eb3b65..e72d40b4ae 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -251,38 +251,9 @@ void MacroAssembler::InNewSpace(Register object,
Condition cc,
Label* branch,
Label::Distance distance) {
- if (serializer_enabled()) {
- // Can't do arithmetic on external references if it might get serialized.
- // The mask isn't really an address. We load it as an external reference in
- // case the size of the new space is different between the snapshot maker
- // and the running system.
- if (scratch.is(object)) {
- Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
- andp(scratch, kScratchRegister);
- } else {
- Move(scratch, ExternalReference::new_space_mask(isolate()));
- andp(scratch, object);
- }
- Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
- cmpp(scratch, kScratchRegister);
- j(cc, branch, distance);
- } else {
- DCHECK(kPointerSize == kInt64Size
- ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
- : kPointerSize == kInt32Size);
- intptr_t new_space_start =
- reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
- Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
- Assembler::RelocInfoNone());
- if (scratch.is(object)) {
- addp(scratch, kScratchRegister);
- } else {
- leap(scratch, Operand(object, kScratchRegister, times_1, 0));
- }
- andp(scratch,
- Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
- j(cc, branch, distance);
- }
+ const int mask =
+ (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+ CheckPageFlag(object, scratch, mask, cc, branch, distance);
}
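
Conceptually, the rewritten InNewSpace replaces address-range arithmetic with a mask of the containing page's flag word. A rough C++ sketch, where the page size and the flags-word offset are assumptions rather than V8's actual values:

  // Assumed: pages are 2^19-byte aligned and begin with a 32-bit flags word.
  static bool PageFlagsSet(uintptr_t object_address, uint32_t mask) {
    const uintptr_t kPageAlignmentMask = (uintptr_t{1} << 19) - 1;
    uintptr_t page_start = object_address & ~kPageAlignmentMask;
    uint32_t flags = *reinterpret_cast<uint32_t*>(page_start);
    return (flags & mask) != 0;
  }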
@@ -507,6 +478,90 @@ void MacroAssembler::RecordWrite(
}
}
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+ Register code_entry,
+ Register scratch) {
+ const int offset = JSFunction::kCodeEntryOffset;
+
+ // The input registers are fixed to make calling the C write barrier function
+ // easier.
+ DCHECK(js_function.is(rdi));
+ DCHECK(code_entry.is(rcx));
+ DCHECK(scratch.is(rax));
+
+ // Since a code entry (value) is always in old space, we don't need to update
+ // the remembered set. If incremental marking is off, there is nothing for us
+ // to do.
+ if (!FLAG_incremental_marking) return;
+
+ AssertNotSmi(js_function);
+
+ if (emit_debug_code()) {
+ Label ok;
+ leap(scratch, FieldOperand(js_function, offset));
+ cmpp(code_entry, Operand(scratch, 0));
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into the young generation.
+ Label done;
+
+ CheckPageFlag(code_entry, scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
+ Label::kNear);
+ CheckPageFlag(js_function, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
+ Label::kNear);
+
+ // Save input registers.
+ Push(js_function);
+ Push(code_entry);
+
+ const Register dst = scratch;
+ leap(dst, FieldOperand(js_function, offset));
+
+ // Save caller-saved registers.
+ PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);
+
+ int argument_count = 3;
+ PrepareCallCFunction(argument_count);
+
+ // Load the argument registers.
+ if (arg_reg_1.is(rcx)) {
+ // Windows calling convention.
+ DCHECK(arg_reg_2.is(rdx) && arg_reg_3.is(r8));
+
+ movp(arg_reg_1, js_function); // rcx gets rdi.
+ movp(arg_reg_2, dst); // rdx gets rax.
+ } else {
+ // AMD64 calling convention.
+ DCHECK(arg_reg_1.is(rdi) && arg_reg_2.is(rsi) && arg_reg_3.is(rdx));
+
+ // rdi is already loaded with js_function.
+ movp(arg_reg_2, dst); // rsi gets rax.
+ }
+ Move(arg_reg_3, ExternalReference::isolate_address(isolate()));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(this);
+ CallCFunction(
+ ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate()),
+ argument_count);
+ }
+
+ // Restore caller-saved registers.
+ PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);
+
+ // Restore input registers.
+ Pop(code_entry);
+ Pop(js_function);
+
+ bind(&done);
+}
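
The two CheckPageFlag tests above form the usual write-barrier fast-path filter; a sketch of the predicate they implement, using the flag names from the diff and the page lookup from the previous sketch:

  static bool NeedsCodeEntryBarrier(uint32_t value_page_flags,
                                    uint32_t host_page_flags) {
    // Take the slow path only when the value's page tracks incoming pointers
    // and the holder's page tracks outgoing pointers.
    return (value_page_flags &
            MemoryChunk::kPointersToHereAreInterestingMask) != 0 &&
           (host_page_flags &
            MemoryChunk::kPointersFromHereAreInterestingMask) != 0;
  }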
void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
if (emit_debug_code()) Check(cc, reason);
@@ -589,9 +644,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
}
// Control will not return here.
int3();
@@ -690,18 +745,6 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
}
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- // Fake a parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- LoadNativeContextSlot(native_context_index, rdi);
- InvokeFunctionCode(rdi, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
#define REG(Name) \
{ Register::kCode_##Name }
@@ -823,6 +866,30 @@ void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
}
+void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorps(dst, dst, dst);
+ vcvtlsi2ss(dst, dst, src);
+ } else {
+ xorps(dst, dst);
+ cvtlsi2ss(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvtlsi2ss(XMMRegister dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vxorps(dst, dst, dst);
+ vcvtlsi2ss(dst, dst, src);
+ } else {
+ xorps(dst, dst);
+ cvtlsi2ss(dst, src);
+ }
+}
+
+
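
These Cvtlsi2ss helpers (and the Cvttss2si ones below) follow the pattern already used by Cvtlsi2sd: zero the destination with vxorps/xorps so the scalar convert does not inherit a false dependency on the register's stale upper bits, then prefer the VEX encoding when AVX is available. A condensed sketch of that dispatch shape, illustrative rather than the real helper:

  template <typename AvxEmit, typename SseEmit>
  void EmitWithAvxFallback(MacroAssembler* masm, AvxEmit avx, SseEmit sse) {
    if (CpuFeatures::IsSupported(AVX)) {
      CpuFeatureScope scope(masm, AVX);
      avx();  // VEX-encoded form
    } else {
      sse();  // legacy SSE form
    }
  }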
void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -918,6 +985,26 @@ void MacroAssembler::Cvtsd2si(Register dst, XMMRegister src) {
}
+void MacroAssembler::Cvttss2si(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttss2si(dst, src);
+ } else {
+ cvttss2si(dst, src);
+ }
+}
+
+
+void MacroAssembler::Cvttss2si(Register dst, const Operand& src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcvttss2si(dst, src);
+ } else {
+ cvttss2si(dst, src);
+ }
+}
+
+
void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -3865,6 +3952,19 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
+void MacroAssembler::AssertReceiver(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAReceiver);
+ Push(object);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, object);
+ Pop(object);
+ Check(above_equal, kOperandIsNotAReceiver);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
Label done_checking;
@@ -4168,7 +4268,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -5040,7 +5140,7 @@ void MacroAssembler::CopyBytes(Register destination,
incp(source);
incp(destination);
decl(length);
- j(not_zero, &short_loop);
+ j(not_zero, &short_loop, Label::kNear);
}
bind(&done);
@@ -5051,13 +5151,13 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {
Label loop, entry;
- jmp(&entry);
+ jmp(&entry, Label::kNear);
bind(&loop);
movp(Operand(current_address, 0), filler);
addp(current_address, Immediate(kPointerSize));
bind(&entry);
cmpp(current_address, end_address);
- j(below, &loop);
+ j(below, &loop, Label::kNear);
}
@@ -5363,7 +5463,7 @@ void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
}
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
Label next, start;
Register empty_fixed_array_value = r8;
LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
@@ -5404,10 +5504,11 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
bind(&no_elements);
movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- cmpp(rcx, null_value);
+ CompareRoot(rcx, Heap::kNullValueRootIndex);
j(not_equal, &next);
}
+
void MacroAssembler::TestJSArrayForAllocationMemento(
Register receiver_reg,
Register scratch_reg,
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 1aa2c74f22..9c0b7964b3 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -18,6 +18,7 @@ namespace internal {
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_rax};
const Register kReturnRegister1 = {Register::kCode_rdx};
+const Register kReturnRegister2 = {Register::kCode_r8};
const Register kJSFunctionRegister = {Register::kCode_rdi};
const Register kContextRegister = {Register::kCode_rsi};
const Register kInterpreterAccumulatorRegister = {Register::kCode_rax};
@@ -217,7 +218,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* branch,
Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, not_equal, branch, distance);
+ InNewSpace(object, scratch, zero, branch, distance);
}
// Check if object is in new space. Jumps if the object is in new space.
@@ -226,7 +227,7 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* branch,
Label::Distance distance = Label::kFar) {
- InNewSpace(object, scratch, equal, branch, distance);
+ InNewSpace(object, scratch, not_zero, branch, distance);
}
// Check if an object has the black incremental marking color. Also uses rcx!
@@ -293,6 +294,11 @@ class MacroAssembler: public Assembler {
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
+ // Notify the garbage collector that we wrote a code entry into a
+ // JSFunction. Only scratch is clobbered by the operation.
+ void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+ Register scratch);
+
void RecordWriteForMap(
Register object,
Register map,
@@ -395,10 +401,6 @@ class MacroAssembler: public Assembler {
InvokeFlag flag,
const CallWrapper& call_wrapper);
- // Invoke specified builtin JavaScript function.
- void InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
@@ -430,6 +432,12 @@ class MacroAssembler: public Assembler {
void SmiToInteger64(Register dst, Register src);
void SmiToInteger64(Register dst, const Operand& src);
+ // Convert smi to double.
+ void SmiToDouble(XMMRegister dst, Register src) {
+ SmiToInteger32(kScratchRegister, src);
+ Cvtlsi2sd(dst, kScratchRegister);
+ }
+
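
The new SmiToDouble helper goes through kScratchRegister because x64 smis keep a 32-bit payload in the upper half of the tagged word (an assumption about the tagging scheme); in plain C++ the semantics are roughly:

  static double SmiToDoubleValue(intptr_t tagged_smi) {
    int32_t value = static_cast<int32_t>(tagged_smi >> 32);  // assumed 64-bit smis
    return static_cast<double>(value);
  }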
// Multiply a positive smi's integer value by a power of two.
// Provides result as 64-bit integer value.
void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
@@ -811,6 +819,8 @@ class MacroAssembler: public Assembler {
void Cvtlsi2sd(XMMRegister dst, Register src);
void Cvtlsi2sd(XMMRegister dst, const Operand& src);
+ void Cvtlsi2ss(XMMRegister dst, Register src);
+ void Cvtlsi2ss(XMMRegister dst, const Operand& src);
void Cvtqsi2ss(XMMRegister dst, Register src);
void Cvtqsi2ss(XMMRegister dst, const Operand& src);
@@ -822,6 +832,8 @@ class MacroAssembler: public Assembler {
void Cvtsd2si(Register dst, XMMRegister src);
+ void Cvttss2si(Register dst, XMMRegister src);
+ void Cvttss2si(Register dst, const Operand& src);
void Cvttsd2si(Register dst, XMMRegister src);
void Cvttsd2si(Register dst, const Operand& src);
void Cvttss2siq(Register dst, XMMRegister src);
@@ -1204,6 +1216,9 @@ class MacroAssembler: public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+ void AssertReceiver(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
@@ -1540,8 +1555,7 @@ class MacroAssembler: public Assembler {
// Expects object in rax and returns map with validated enum cache
// in rax. Assumes that any other register can be used as a scratch.
- void CheckEnumCache(Register null_value,
- Label* call_runtime);
+ void CheckEnumCache(Label* call_runtime);
// AllocationMemento support. Arrays may have an associated
// AllocationMemento object that can be checked for in order to pretransition
diff --git a/deps/v8/src/x87/assembler-x87-inl.h b/deps/v8/src/x87/assembler-x87-inl.h
index 0e529c7ab6..7af1d02f32 100644
--- a/deps/v8/src/x87/assembler-x87-inl.h
+++ b/deps/v8/src/x87/assembler-x87-inl.h
@@ -204,10 +204,8 @@ void RelocInfo::set_target_cell(Cell* cell,
Assembler::FlushICache(isolate_, pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
- // TODO(1550) We are passing NULL as a slot because cell can never be on
- // evacuation candidate.
- host()->GetHeap()->incremental_marking()->RecordWrite(
- host(), NULL, cell);
+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
+ cell);
}
}
@@ -269,16 +267,6 @@ void RelocInfo::WipeOut() {
}
-bool RelocInfo::IsPatchedReturnSequence() {
- return *pc_ == kCallOpcode;
-}
-
-
-bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
- return !Assembler::IsNop(pc());
-}
-
-
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
diff --git a/deps/v8/src/x87/assembler-x87.cc b/deps/v8/src/x87/assembler-x87.cc
index 53919486d6..66fda5787f 100644
--- a/deps/v8/src/x87/assembler-x87.cc
+++ b/deps/v8/src/x87/assembler-x87.cc
@@ -674,6 +674,11 @@ void Assembler::cmp(Register reg, const Operand& op) {
emit_operand(reg, op);
}
+void Assembler::cmp(const Operand& op, Register reg) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x39);
+ emit_operand(reg, op);
+}
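
The new overload emits opcode 0x39 (CMP r/m32, r32), so the register argument fills ModRM.reg while the operand supplies ModRM.rm, which is why emit_operand(reg, op) keeps that argument order. An illustrative self-check of the expected bytes:

  #include <cassert>
  #include <cstdint>

  // Illustrative: cmp(Operand(eax, 0), ebx) should assemble to 39 18.
  static void CheckCmpEncoding(const uint8_t* emitted) {
    assert(emitted[0] == 0x39);  // CMP r/m32, r32
    assert(emitted[1] == 0x18);  // ModRM: mod=00, reg=ebx(011), rm=eax(000)
  }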
void Assembler::cmp(const Operand& op, const Immediate& imm) {
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
index 668dc7bb40..15fc29c29f 100644
--- a/deps/v8/src/x87/assembler-x87.h
+++ b/deps/v8/src/x87/assembler-x87.h
@@ -186,6 +186,9 @@ const DoubleRegister no_double_reg = {DoubleRegister::kCode_no_reg};
typedef DoubleRegister X87Register;
+// TODO(x87) Define SIMD registers.
+typedef DoubleRegister Simd128Register;
+
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
@@ -668,6 +671,7 @@ class Assembler : public AssemblerBase {
void cmp(Register reg0, Register reg1) { cmp(reg0, Operand(reg1)); }
void cmp(Register reg, const Operand& op);
void cmp(Register reg, const Immediate& imm) { cmp(Operand(reg), imm); }
+ void cmp(const Operand& op, Register reg);
void cmp(const Operand& op, const Immediate& imm);
void cmp(const Operand& op, Handle<Object> handle);
@@ -935,7 +939,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const SourcePosition position);
+ void RecordDeoptReason(const int reason, int raw_position);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
diff --git a/deps/v8/src/x87/builtins-x87.cc b/deps/v8/src/x87/builtins-x87.cc
index 55ec55fc6f..ce07908d93 100644
--- a/deps/v8/src/x87/builtins-x87.cc
+++ b/deps/v8/src/x87/builtins-x87.cc
@@ -60,42 +60,45 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
-
-static void CallRuntimePassFunction(
- MacroAssembler* masm, Runtime::FunctionId function_id) {
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
// ----------- S t a t e -------------
+ // -- eax : argument count (preserved for callee)
// -- edx : new target (preserved for callee)
// -- edi : target function (preserved for callee)
// -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push the number of arguments to the callee.
+ __ SmiTag(eax);
+ __ push(eax);
+ // Push a copy of the target function and the new target.
+ __ push(edi);
+ __ push(edx);
+ // Function is also the parameter to the runtime call.
+ __ push(edi);
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Push a copy of the target function and the new target.
- __ push(edi);
- __ push(edx);
- // Function is also the parameter to the runtime call.
- __ push(edi);
-
- __ CallRuntime(function_id, 1);
- // Restore target function and new target.
- __ pop(edx);
- __ pop(edi);
-}
+ __ CallRuntime(function_id, 1);
+ __ mov(ebx, eax);
+ // Restore target function and new target.
+ __ pop(edx);
+ __ pop(edi);
+ __ pop(eax);
+ __ SmiUntag(eax);
+ }
-static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kCodeOffset));
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
+ __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+ __ jmp(ebx);
}
-
-static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
- __ lea(eax, FieldOperand(eax, Code::kHeaderSize));
- __ jmp(eax);
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
+ __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+ __ jmp(ebx);
}
-
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
// Checking whether the queued function is ready for install is optional,
// since we come across interrupts and stack checks elsewhere. However,
@@ -108,17 +111,16 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kTryInstallOptimizedCode);
__ bind(&ok);
GenerateTailCallToSharedCode(masm);
}
-
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool create_implicit_receiver) {
+ bool create_implicit_receiver,
+ bool check_derived_construct) {
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
@@ -137,148 +139,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ push(eax);
if (create_implicit_receiver) {
- __ push(edi);
- __ push(edx);
-
- // Try to allocate the object without transitioning into C code. If any of
- // the preconditions is not met, the code bails out to the runtime call.
- Label rt_call, allocated;
- if (FLAG_inline_new) {
- // Verify that the new target is a JSFunction.
- __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Load the initial map and verify that it is in fact a map.
- // edx: new target
- __ mov(eax,
- FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
- // Will both indicate a NULL and a Smi
- __ JumpIfSmi(eax, &rt_call);
- // edi: constructor
- // eax: initial map (if proven valid below)
- __ CmpObjectType(eax, MAP_TYPE, ebx);
- __ j(not_equal, &rt_call);
-
- // Fall back to runtime if the expected base constructor and base
- // constructor differ.
- __ cmp(edi, FieldOperand(eax, Map::kConstructorOrBackPointerOffset));
- __ j(not_equal, &rt_call);
-
- // Check that the constructor is not constructing a JSFunction (see
- // comments in Runtime_NewObject in runtime.cc). In which case the
- // initial map's instance type would be JS_FUNCTION_TYPE.
- // edi: constructor
- // eax: initial map
- __ CmpInstanceType(eax, JS_FUNCTION_TYPE);
- __ j(equal, &rt_call);
-
- // Now allocate the JSObject on the heap.
- // edi: constructor
- // eax: initial map
- __ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
- __ shl(edi, kPointerSizeLog2);
-
- __ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
-
- Factory* factory = masm->isolate()->factory();
-
- // Allocated the JSObject, now initialize the fields.
- // eax: initial map
- // ebx: JSObject (not HeapObject tagged - the actual address).
- // edi: start of next object
- __ mov(Operand(ebx, JSObject::kMapOffset), eax);
- __ mov(ecx, factory->empty_fixed_array());
- __ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
- __ mov(Operand(ebx, JSObject::kElementsOffset), ecx);
- __ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
-
- // Add the object tag to make the JSObject real, so that we can continue
- // and jump into the continuation code at any time from now on.
- __ or_(ebx, Immediate(kHeapObjectTag));
-
- // Fill all the in-object properties with the appropriate filler.
- // ebx: JSObject (tagged)
- // ecx: First in-object property of JSObject (not tagged)
- __ mov(edx, factory->undefined_value());
-
- if (!is_api_function) {
- Label no_inobject_slack_tracking;
-
- // The code below relies on these assumptions.
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- // Check if slack tracking is enabled.
- __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
- __ shr(esi, Map::ConstructionCounter::kShift);
- __ j(zero, &no_inobject_slack_tracking); // Map::kNoSlackTracking
- __ push(esi); // Save allocation count value.
- // Decrease generous allocation count.
- __ sub(FieldOperand(eax, Map::kBitField3Offset),
- Immediate(1 << Map::ConstructionCounter::kShift));
-
- // Allocate object with a slack.
- __ movzx_b(esi, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
- __ neg(esi);
- __ lea(esi, Operand(edi, esi, times_pointer_size, 0));
- // esi: offset of first field after pre-allocated fields
- if (FLAG_debug_code) {
- __ cmp(ecx, esi);
- __ Assert(less_equal,
- kUnexpectedNumberOfPreAllocatedPropertyFields);
- }
- __ InitializeFieldsWithFiller(ecx, esi, edx);
-
- // To allow truncation fill the remaining fields with one pointer
- // filler map.
- __ mov(edx, factory->one_pointer_filler_map());
- __ InitializeFieldsWithFiller(ecx, edi, edx);
-
- __ pop(esi); // Restore allocation count value before decreasing.
- __ cmp(esi, Map::kSlackTrackingCounterEnd);
- __ j(not_equal, &allocated);
-
- // Push the object to the stack, and then the initial map as
- // an argument to the runtime call.
- __ push(ebx);
- __ push(eax); // initial map
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ pop(ebx);
-
- // Continue with JSObject being successfully allocated
- // ebx: JSObject (tagged)
- __ jmp(&allocated);
-
- __ bind(&no_inobject_slack_tracking);
- }
-
- __ InitializeFieldsWithFiller(ecx, edi, edx);
-
- // Continue with JSObject being successfully allocated
- // ebx: JSObject (tagged)
- __ jmp(&allocated);
- }
-
- // Allocate the new receiver object using the runtime call.
- // edx: new target
- __ bind(&rt_call);
- int offset = kPointerSize;
-
- // Must restore esi (context) and edi (constructor) before calling
- // runtime.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(edi, Operand(esp, offset));
- __ push(edi); // constructor function
- __ push(edx); // new target
- __ CallRuntime(Runtime::kNewObject);
- __ mov(ebx, eax); // store result in ebx
-
- // New object allocated.
- // ebx: newly allocated object
- __ bind(&allocated);
+ // Allocate the new receiver object.
+ __ Push(edi);
+ __ Push(edx);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(ebx, eax);
+ __ Pop(edx);
+ __ Pop(edi);
- // Restore the parameters.
- __ pop(edx); // new.target
- __ pop(edi); // Constructor function.
+ // ----------- S t a t e -------------
+ // -- edi: constructor function
+ // -- ebx: newly allocated object
+ // -- edx: new target
+ // -----------------------------------
// Retrieve smi-tagged arguments count from the stack.
__ mov(eax, Operand(esp, 0));
@@ -359,6 +233,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Leave construct frame.
}
+ // ES6 9.2.2. Step 13+
+ // Check that the result is not a Smi; a Smi result indicates that the
+ // derived class constructor returned neither undefined nor an Object.
+ if (check_derived_construct) {
+ Label dont_throw;
+ __ JumpIfNotSmi(eax, &dont_throw);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+ }
+ __ bind(&dont_throw);
+ }
+
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ pop(ecx);
@@ -372,17 +259,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, true);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, false);
+}
+
+
+void Builtins::Generate_JSBuiltinsConstructStubForDerived(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubHelper(masm, false, false, true);
}
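
The new check_derived_construct path, wired up in Generate_JSBuiltinsConstructStubForDerived above, enforces ES6 9.2.2 step 13; a hedged pseudo-C++ rendering with hypothetical helpers:

  void CheckDerivedConstructResult(Object* result) {
    // A smi can be neither undefined nor a JSReceiver, so the derived
    // constructor must have returned some other primitive: throw.
    if (result->IsSmi()) ThrowDerivedConstructorReturnedNonObject();  // hypothetical
  }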
@@ -513,10 +406,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
// o ebp: the caller's frame pointer
// o esp: stack pointer (pointing to return address)
//
-// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-ia32.h for its layout.
-// TODO(rmcilroy): We will need to include the current bytecode pointer in the
-// frame.
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
@@ -528,14 +419,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ push(edi); // Callee's JS function.
__ push(edx); // Callee's new target.
- // Push zero for bytecode array offset.
- __ push(Immediate(0));
-
// Get the bytecode array from the function object and load the pointer to the
// first entry into edi (InterpreterBytecodeRegister).
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+
+ Label load_debug_bytecode_array, bytecode_array_loaded;
+ __ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
+ Immediate(DebugInfo::uninitialized()));
+ __ j(not_equal, &load_debug_bytecode_array);
__ mov(kInterpreterBytecodeArrayRegister,
FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
+ __ bind(&bytecode_array_loaded);
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -545,6 +439,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Push bytecode array.
+ __ push(kInterpreterBytecodeArrayRegister);
+ // Push zero for bytecode array offset.
+ __ push(Immediate(0));
+
// Allocate the local and temporary register file on the stack.
{
// Load frame size from the BytecodeArray object.
@@ -578,24 +477,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy): List of things not currently dealt with here but done in
// fullcodegen's prologue:
- // - Support profiler (specifically profiling_counter).
// - Call ProfileEntryHookStub when isolate has a function_entry_hook.
- // - Allow simulator stop operations if FLAG_stop_at is set.
// - Code aging of the BytecodeArray object.
- // Perform stack guard check.
- {
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok);
- __ push(kInterpreterBytecodeArrayRegister);
- __ CallRuntime(Runtime::kStackGuard);
- __ pop(kInterpreterBytecodeArrayRegister);
- __ bind(&ok);
- }
-
// Load accumulator, register file, bytecode offset, dispatch table into
// registers.
__ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);
@@ -604,10 +488,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
- // Since the dispatch table root might be set after builtins are generated,
- // load directly from the roots table.
- __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
- __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
// Push dispatch table as a stack located parameter to the bytecode handler.
DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
@@ -625,8 +507,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// and header removal.
__ add(ebx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(ebx);
- __ nop(); // Ensure that return address still counts as interpreter entry
- // trampoline.
+
+ // Even though the first bytecode handler was called, control never returns here.
+ __ Abort(kUnexpectedReturnFromBytecodeHandler);
+
+ // Load debug copy of the bytecode array.
+ __ bind(&load_debug_bytecode_array);
+ Register debug_info = kInterpreterBytecodeArrayRegister;
+ __ mov(debug_info, FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset));
+ __ mov(kInterpreterBytecodeArrayRegister,
+ FieldOperand(debug_info, DebugInfo::kAbstractCodeIndex));
+ __ jmp(&bytecode_array_loaded);
}
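
The trampoline's final step amounts to indexing the dispatch table with the current bytecode and jumping to the handler; a stripped-down sketch with assumed types and names:

  typedef void (*BytecodeHandler)();

  static void DispatchFirstBytecode(const uint8_t* bytecodes, intptr_t offset,
                                    const BytecodeHandler* dispatch_table) {
    uint8_t bytecode = bytecodes[offset];
    // The generated code adds Code::kHeaderSize and calls the handler;
    // control never comes back, hence the Abort after the call above.
    dispatch_table[bytecode]();
  }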
@@ -671,7 +562,8 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
-void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterPushArgsAndCallImpl(
+ MacroAssembler* masm, TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- ebx : the address of the first argument to be pushed. Subsequent
@@ -694,7 +586,9 @@ void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
// Call the target.
__ Push(edx); // Re-push return address.
- __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+ tail_call_mode),
+ RelocInfo::CODE_TARGET);
}
@@ -739,33 +633,16 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
}
-static void Generate_InterpreterNotifyDeoptimizedHelper(
- MacroAssembler* masm, Deoptimizer::BailoutType type) {
- // Enter an internal frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(kInterpreterAccumulatorRegister); // Save accumulator register.
-
- // Pass the deoptimization type to the runtime system.
- __ Push(Smi::FromInt(static_cast<int>(type)));
-
- __ CallRuntime(Runtime::kNotifyDeoptimized);
-
- __ Pop(kInterpreterAccumulatorRegister); // Restore accumulator register.
- // Tear down internal frame.
- }
-
+static void Generate_EnterBytecodeDispatch(MacroAssembler* masm) {
// Initialize register file register.
__ mov(kInterpreterRegisterFileRegister, ebp);
__ add(kInterpreterRegisterFileRegister,
Immediate(InterpreterFrameConstants::kRegisterFilePointerFromFp));
// Get the bytecode array pointer from the frame.
- __ mov(ebx, Operand(kInterpreterRegisterFileRegister,
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
- __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
__ mov(kInterpreterBytecodeArrayRegister,
- FieldOperand(ebx, SharedFunctionInfo::kFunctionDataOffset));
+ Operand(kInterpreterRegisterFileRegister,
+ InterpreterFrameConstants::kBytecodeArrayFromRegisterPointer));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -782,12 +659,13 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
InterpreterFrameConstants::kBytecodeOffsetFromRegisterPointer));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
- // Push dispatch table as a stack located parameter to the bytecode handler -
- // overwrite the state slot (we don't use these for interpreter deopts).
- __ LoadRoot(ebx, Heap::kInterpreterTableRootIndex);
- __ add(ebx, Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
+ // Push dispatch table as a stack located parameter to the bytecode handler.
+ __ mov(ebx, Immediate(ExternalReference::interpreter_dispatch_table_address(
+ masm->isolate())));
DCHECK_EQ(-1, kInterpreterDispatchTableSpillSlot);
- __ mov(Operand(esp, kPointerSize), ebx);
+ __ Pop(esi);
+ __ Push(ebx);
+ __ Push(esi);
// Dispatch to the target bytecode.
__ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
@@ -795,8 +673,6 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
__ mov(ebx, Operand(ebx, esi, times_pointer_size, 0));
// Get the context from the frame.
- // TODO(rmcilroy): Update interpreter frame to expect current context at the
- // context slot instead of the function context.
__ mov(kContextRegister,
Operand(kInterpreterRegisterFileRegister,
InterpreterFrameConstants::kContextFromRegisterPointer));
@@ -808,6 +684,32 @@ static void Generate_InterpreterNotifyDeoptimizedHelper(
}
+static void Generate_InterpreterNotifyDeoptimizedHelper(
+ MacroAssembler* masm, Deoptimizer::BailoutType type) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ // Tear down internal frame.
+ }
+
+ // Drop the state (not used for interpreter deopts), pop the accumulator
+ // value into the accumulator register, and push the PC on top of the stack
+ // (to simulate the initial call to the bytecode handler in the interpreter
+ // entry trampoline).
+ __ Pop(ebx);
+ __ Drop(1);
+ __ Pop(kInterpreterAccumulatorRegister);
+ __ Push(ebx);
+
+ // Enter the bytecode dispatch.
+ Generate_EnterBytecodeDispatch(masm);
+}
+
+
void Builtins::Generate_InterpreterNotifyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}
@@ -822,22 +724,30 @@ void Builtins::Generate_InterpreterNotifyLazyDeoptimized(MacroAssembler* masm) {
Generate_InterpreterNotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}
+void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+ // Set the address of the interpreter entry trampoline as a return address.
+ // This simulates the initial call to bytecode handlers in the interpreter
+ // entry trampoline. The return will never actually be taken, but our stack
+ // walker uses this address to determine whether a frame is interpreted.
+ __ Push(masm->isolate()->builtins()->InterpreterEntryTrampoline());
+
+ Generate_EnterBytecodeDispatch(masm);
+}
+
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileLazy);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_NotConcurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm,
+ Runtime::kCompileOptimized_NotConcurrent);
}
void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileOptimized_Concurrent);
- GenerateTailCallToReturnedCode(masm);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileOptimized_Concurrent);
}
@@ -1375,6 +1285,140 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// static
+void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 8] : receiver
+ // -----------------------------------
+ Condition const cc = (kind == MathMaxMinKind::kMin) ? below : above;
+ Heap::RootListIndex const root_index =
+ (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex
+ : Heap::kMinusInfinityValueRootIndex;
+ const int reg_sel = (kind == MathMaxMinKind::kMin) ? 1 : 0;
+
+ // Load the accumulator with the default return value (either -Infinity or
+ // +Infinity), with the tagged value in edx and the double value in stx_0.
+ __ LoadRoot(edx, root_index);
+ __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
+ __ Move(ecx, eax);
+
+ Label done_loop, loop;
+ __ bind(&loop);
+ {
+ // Check if all parameters done.
+ __ test(ecx, ecx);
+ __ j(zero, &done_loop);
+
+ // Load the next parameter tagged value into ebx.
+ __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
+
+ // Load the double value of the parameter into stx_1, converting the
+ // parameter to a number first using the ToNumberStub if necessary.
+ Label convert, convert_smi, convert_number, done_convert;
+ __ bind(&convert);
+ __ JumpIfSmi(ebx, &convert_smi);
+ __ JumpIfRoot(FieldOperand(ebx, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex, &convert_number);
+ {
+ // Parameter is not a Number, use the ToNumberStub to convert it.
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(eax);
+ __ SmiTag(ecx);
+ __ Push(eax);
+ __ Push(ecx);
+ __ Push(edx);
+ __ mov(eax, ebx);
+ ToNumberStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(ebx, eax);
+ __ Pop(edx);
+ __ Pop(ecx);
+ __ Pop(eax);
+ {
+ // Restore the double accumulator value (stX_0).
+ Label restore_smi, done_restore;
+ __ JumpIfSmi(edx, &restore_smi, Label::kNear);
+ __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
+ __ jmp(&done_restore, Label::kNear);
+ __ bind(&restore_smi);
+ __ SmiUntag(edx);
+ __ push(edx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(edx);
+ __ SmiTag(edx);
+ __ bind(&done_restore);
+ }
+ __ SmiUntag(ecx);
+ __ SmiUntag(eax);
+ }
+ __ jmp(&convert);
+ __ bind(&convert_number);
+ // Load the next parameter's double value into stx_1.
+ __ fld_d(FieldOperand(ebx, HeapNumber::kValueOffset));
+ __ fxch();
+ __ jmp(&done_convert, Label::kNear);
+ __ bind(&convert_smi);
+ __ SmiUntag(ebx);
+ __ push(ebx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ebx);
+ __ fxch();
+ __ SmiTag(ebx);
+ __ bind(&done_convert);
+
+ // Perform the actual comparison with the accumulator value on the left hand
+ // side (stx_0) and the next parameter value on the right hand side (stx_1).
+ Label compare_equal, compare_nan, compare_swap, done_compare;
+
+ // Duplicate the two float values for FCmp.
+ __ fld(1);
+ __ fld(1);
+ __ FCmp();
+ __ j(parity_even, &compare_nan, Label::kNear);
+ __ j(cc, &done_compare, Label::kNear);
+ __ j(equal, &compare_equal, Label::kNear);
+
+ // Result is on the right hand side (stx_0).
+ __ bind(&compare_swap);
+ __ fxch();
+ __ mov(edx, ebx);
+ __ jmp(&done_compare, Label::kNear);
+
+ // At least one side is NaN, which means that the result will be NaN too.
+ __ bind(&compare_nan);
+ // Set the result on the right hand side (stx_0) to NaN.
+ __ fstp(0);
+ __ LoadRoot(edx, Heap::kNanValueRootIndex);
+ __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
+ __ jmp(&done_compare, Label::kNear);
+
+ // Left and right hand side are equal, check for -0 vs. +0.
+ __ bind(&compare_equal);
+ // Check the sign of the value in reg_sel.
+ __ fld(reg_sel);
+ __ FXamSign();
+ __ j(not_zero, &compare_swap);
+
+ __ bind(&done_compare);
+ // The result is on the right hand side (stx_0),
+ // so the now-unneeded stx_1 can be removed.
+ __ fxch();
+ __ fstp(0);
+ __ dec(ecx);
+ __ jmp(&loop);
+ }
+
+ __ bind(&done_loop);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ __ mov(eax, edx);
+ __ Ret();
+}
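
For reference, the loop above implements the following semantics; a hedged C++ rendering (the stub additionally round-trips non-numbers through ToNumberStub before comparing):

  #include <cmath>

  double MathMaxMin(bool is_min, const double* args, int argc) {
    // The accumulator starts at the identity element, matching the root-list
    // load of +Infinity (min) or -Infinity (max).
    double acc = is_min ? INFINITY : -INFINITY;
    for (int i = argc - 1; i >= 0; --i) {
      double v = args[i];
      if (std::isnan(v) || std::isnan(acc)) { acc = NAN; continue; }
      if (v == acc) {
        // Equal compare: break the -0/+0 tie by sign, like the FXamSign check.
        if (std::signbit(v) != std::signbit(acc) && std::signbit(v) == is_min)
          acc = v;
      } else if (is_min ? (v < acc) : (v > acc)) {
        acc = v;
      }
    }
    return acc;
  }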
+
+// static
void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
@@ -1472,9 +1516,8 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(ebx); // the first argument
- __ Push(edi); // constructor function
- __ Push(edx); // new target
- __ CallRuntime(Runtime::kNewObject);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
}
__ Ret();
@@ -1606,9 +1649,8 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(ebx); // the first argument
- __ Push(edi); // constructor function
- __ Push(edx); // new target
- __ CallRuntime(Runtime::kNewObject);
+ FastNewObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
}
__ Ret();
@@ -1724,9 +1766,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Try to create the list from an arguments object.
__ bind(&create_arguments);
- __ mov(ebx,
- FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ mov(ebx, FieldOperand(eax, JSArgumentsObject::kLengthOffset));
__ mov(ecx, FieldOperand(eax, JSObject::kElementsOffset));
__ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
__ j(not_equal, &create_runtime);
@@ -1815,10 +1855,138 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
+namespace {
+
+// Drops the top JavaScript frame and an arguments adaptor frame below it (if
+// present), preserving all the arguments prepared for the current call.
+// Does nothing if the debugger is currently active.
+// ES6 14.6.3. PrepareForTailCall
+//
+// Stack structure for the function g() tail calling f():
+//
+// ------- Caller frame: -------
+// | ...
+// | g()'s arg M
+// | ...
+// | g()'s arg 1
+// | g()'s receiver arg
+// | g()'s caller pc
+// ------- g()'s frame: -------
+// | g()'s caller fp <- fp
+// | g()'s context
+// | function pointer: g
+// | -------------------------
+// | ...
+// | ...
+// | f()'s arg N
+// | ...
+// | f()'s arg 1
+// | f()'s receiver arg
+// | f()'s caller pc <- sp
+// ----------------------
+//
+void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
+ Register scratch1, Register scratch2,
+ Register scratch3) {
+ DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
+ Comment cmnt(masm, "[ PrepareForTailCall");
+
+ // Prepare for tail call only if the debugger is not active.
+ Label done;
+ ExternalReference debug_is_active =
+ ExternalReference::debug_is_active_address(masm->isolate());
+ __ movzx_b(scratch1, Operand::StaticVariable(debug_is_active));
+ __ cmp(scratch1, Immediate(0));
+ __ j(not_equal, &done, Label::kNear);
+
+ // Drop possible interpreter handler/stub frame.
+ {
+ Label no_interpreter_frame;
+ __ cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
+ Immediate(Smi::FromInt(StackFrame::STUB)));
+ __ j(not_equal, &no_interpreter_frame, Label::kNear);
+ __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&no_interpreter_frame);
+ }
+
+ // Check if next frame is an arguments adaptor frame.
+ Label no_arguments_adaptor, formal_parameter_count_loaded;
+ __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &no_arguments_adaptor, Label::kNear);
+
+ // Drop arguments adaptor frame and load arguments count.
+ __ mov(ebp, scratch2);
+ __ mov(scratch1, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(scratch1);
+ __ jmp(&formal_parameter_count_loaded, Label::kNear);
+
+ __ bind(&no_arguments_adaptor);
+ // Load the caller's formal parameter count.
+ __ mov(scratch1, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(scratch1,
+ FieldOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(
+ scratch1,
+ FieldOperand(scratch1, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(scratch1);
+
+ __ bind(&formal_parameter_count_loaded);
+
+ // Calculate the destination address where we will put the return address
+ // after we drop current frame.
+ Register new_sp_reg = scratch2;
+ __ sub(scratch1, args_reg);
+ __ lea(new_sp_reg, Operand(ebp, scratch1, times_pointer_size,
+ StandardFrameConstants::kCallerPCOffset));
+
+ if (FLAG_debug_code) {
+ __ cmp(esp, new_sp_reg);
+ __ Check(below, kStackAccessBelowStackPointer);
+ }
+
+ // Copy receiver and return address as well.
+ Register count_reg = scratch1;
+ __ lea(count_reg, Operand(args_reg, 2));
+
+ // Copy the return address from the caller's frame into the current frame's
+ // return address slot so it is not trashed, and let the following loop copy
+ // it to the right place.
+ Register tmp_reg = scratch3;
+ __ mov(tmp_reg, Operand(ebp, StandardFrameConstants::kCallerPCOffset));
+ __ mov(Operand(esp, 0), tmp_reg);
+
+ // Restore caller's frame pointer now as it could be overwritten by
+ // the copying loop.
+ __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+ Operand src(esp, count_reg, times_pointer_size, 0);
+ Operand dst(new_sp_reg, count_reg, times_pointer_size, 0);
+
+ // Now copy the callee arguments to the caller frame, going backwards to
+ // avoid corrupting them (the source and destination areas could overlap).
+ Label loop, entry;
+ __ jmp(&entry, Label::kNear);
+ __ bind(&loop);
+ __ dec(count_reg);
+ __ mov(tmp_reg, src);
+ __ mov(dst, tmp_reg);
+ __ bind(&entry);
+ __ cmp(count_reg, Immediate(0));
+ __ j(not_equal, &loop, Label::kNear);
+
+ // Leave current frame.
+ __ mov(esp, new_sp_reg);
+
+ __ bind(&done);
+}
+} // namespace
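
The destination stack pointer computed above follows directly from the frame diagram: the caller's formal parameters are dropped while the callee's freshly pushed arguments are kept. In plain arithmetic, with the constants taken from the standard frame layout:

  static uintptr_t NewSpAfterTailCall(uintptr_t fp, intptr_t caller_param_count,
                                      intptr_t callee_arg_count) {
    intptr_t delta = caller_param_count - callee_arg_count;
    return fp + delta * kPointerSize + StandardFrameConstants::kCallerPCOffset;
  }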
// static
void Builtins::Generate_CallFunction(MacroAssembler* masm,
- ConvertReceiverMode mode) {
+ ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSFunction)
@@ -1907,6 +2075,12 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- esi : the function context.
// -----------------------------------
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, eax, ebx, ecx, edx);
+ // Reload shared function info.
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ }
+
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ SmiUntag(ebx);
@@ -2012,13 +2186,18 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
-void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(edi);
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, eax, ebx, ecx, edx);
+ }
+
// Patch the receiver to [[BoundThis]].
__ mov(ebx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ebx);
@@ -2036,7 +2215,8 @@ void Builtins::Generate_CallBoundFunction(MacroAssembler* masm) {
// static
-void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
+ TailCallMode tail_call_mode) {
// ----------- S t a t e -------------
// -- eax : the number of arguments (not including the receiver)
// -- edi : the target to call (can be any Object).
@@ -2046,14 +2226,24 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
__ JumpIfSmi(edi, &non_callable);
__ bind(&non_smi);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(equal, masm->isolate()->builtins()->CallFunction(mode),
+ __ j(equal, masm->isolate()->builtins()->CallFunction(mode, tail_call_mode),
RelocInfo::CODE_TARGET);
__ CmpInstanceType(ecx, JS_BOUND_FUNCTION_TYPE);
- __ j(equal, masm->isolate()->builtins()->CallBoundFunction(),
+ __ j(equal, masm->isolate()->builtins()->CallBoundFunction(tail_call_mode),
RelocInfo::CODE_TARGET);
+
+ // Check if target has a [[Call]] internal method.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ j(zero, &non_callable);
+
__ CmpInstanceType(ecx, JS_PROXY_TYPE);
__ j(not_equal, &non_function);
+ // 0. Prepare for tail call if necessary.
+ if (tail_call_mode == TailCallMode::kAllow) {
+ PrepareForTailCall(masm, eax, ebx, ecx, edx);
+ }
+
// 1. Runtime fallback for Proxy [[Call]].
__ PopReturnAddressTo(ecx);
__ Push(edi);
@@ -2068,15 +2258,12 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// 2. Call to something else, which might have a [[Call]] internal method (if
// not we raise an exception).
__ bind(&non_function);
- // Check if target has a [[Call]] internal method.
- __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
- __ j(zero, &non_callable, Label::kNear);
// Overwrite the original receiver with the (original) target.
__ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
// Let the "call_as_function_delegate" take care of the rest.
__ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
__ Jump(masm->isolate()->builtins()->CallFunction(
- ConvertReceiverMode::kNotNullOrUndefined),
+ ConvertReceiverMode::kNotNullOrUndefined, tail_call_mode),
RelocInfo::CODE_TARGET);
// 3. Call to something that is not callable.
@@ -2394,14 +2581,12 @@ static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
// Load the next prototype.
__ bind(&next_prototype);
__ mov(receiver, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ test(FieldOperand(receiver, Map::kBitField3Offset),
+ Immediate(Map::HasHiddenPrototype::kMask));
+ __ j(zero, receiver_check_failed);
+
__ mov(receiver, FieldOperand(receiver, Map::kPrototypeOffset));
- // End if the prototype is null or not hidden.
- __ CompareRoot(receiver, Heap::kNullValueRootIndex);
- __ j(equal, receiver_check_failed);
__ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
- __ test(FieldOperand(scratch0, Map::kBitField3Offset),
- Immediate(Map::IsHiddenPrototype::kMask));
- __ j(zero, receiver_check_failed);
// Iterate.
__ jmp(&prototype_loop_start, Label::kNear);
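
The restructured walk tests the hidden-prototype bit on the current map before loading the prototype, which makes the old explicit null comparison redundant (a map with the bit set always has a real prototype); schematically, with a hypothetical matching predicate:

  static bool HasCompatibleReceiver(Map* map) {
    for (;;) {
      if (MatchesSignature(map)) return true;          // hypothetical predicate
      if (!map->has_hidden_prototype()) return false;  // bit checked before load
      map = map->prototype()->map();
    }
  }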
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index 1da5f41a88..e1ad6a7f5a 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -327,7 +327,6 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = edx;
const Register scratch = ecx;
- Counters* counters = isolate()->counters();
Label call_runtime;
// We will call runtime helper function directly.
@@ -340,7 +339,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// as heap number in exponent.
__ AllocateHeapNumber(eax, scratch, base, &call_runtime);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ IncrementCounter(counters->math_pow(), 1);
__ ret(2 * kPointerSize);
} else {
// Currently it's only called from full-compiler and exponent type is
@@ -431,456 +429,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- // The key is in edx and the parameter count is in eax.
- DCHECK(edx.is(ArgumentsAccessReadDescriptor::index()));
- DCHECK(eax.is(ArgumentsAccessReadDescriptor::parameter_count()));
-
- // The displacement is used for skipping the frame pointer on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
-
- // Check that the key is a smi.
- Label slow;
- __ JumpIfNotSmi(edx, &slow, Label::kNear);
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor, Label::kNear);
-
- // Check index against formal parameters count limit passed in
- // through register eax. Use unsigned comparison to get negative
- // check for free.
- __ cmp(edx, eax);
- __ j(above_equal, &slow, Label::kNear);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebp, eax, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Arguments adaptor case: Check index against actual arguments
- // limit found in the arguments adaptor frame. Use unsigned
- // comparison to get negative check for free.
- __ bind(&adaptor);
- __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(edx, ecx);
- __ j(above_equal, &slow, Label::kNear);
-
- // Read the argument from the stack and return it.
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
- __ lea(ebx, Operand(ebx, ecx, times_2, 0));
- __ neg(edx);
- __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
- __ ret(0);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- __ bind(&slow);
- __ pop(ebx); // Return address.
- __ push(edx);
- __ push(ebx);
- __ TailCallRuntime(Runtime::kArguments);
-}
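
The removed GenerateReadElement relied twice on the "unsigned comparison gets the negative check for free" idiom its comments mention. A self-contained C++ equivalent of that single-branch bounds check (names are illustrative only):

    // A negative index becomes huge after the cast to unsigned, so one
    // unsigned >= covers both index < 0 and index >= count.
    int ReadArgument(int index, int count, const int* args, int fallback) {
      if (static_cast<unsigned>(index) >= static_cast<unsigned>(count)) {
        return fallback;  // the stub's slow path: Runtime::kArguments
      }
      return args[index];
    }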
-
-
-void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // ecx : number of parameters (tagged)
- // edx : parameters pointer
- // edi : function
- // esp[0] : return address
-
- DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &runtime, Label::kNear);
-
- // Patch the arguments.length and the parameters pointer.
- __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(edx,
- Operand(ebx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
-
- __ bind(&runtime);
- __ pop(eax); // Pop return address.
- __ push(edi); // Push function.
- __ push(edx); // Push parameters pointer.
- __ push(ecx); // Push parameter count.
- __ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // ecx : number of parameters (tagged)
- // edx : parameters pointer
- // edi : function
- // esp[0] : return address
-
- DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame, Label::kNear);
-
- // No adaptor, parameter count = argument count.
- __ mov(ebx, ecx);
- __ push(ecx);
- __ jmp(&try_allocate, Label::kNear);
-
- // We have an adaptor frame. Patch the parameters pointer.
- __ bind(&adaptor_frame);
- __ mov(ebx, ecx);
- __ push(ecx);
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
-
- // ebx = parameter count (tagged)
- // ecx = argument count (smi-tagged)
- // Compute the mapped parameter count = min(ebx, ecx) in ebx.
- __ cmp(ebx, ecx);
- __ j(less_equal, &try_allocate, Label::kNear);
- __ mov(ebx, ecx);
-
- // Save mapped parameter count and function.
- __ bind(&try_allocate);
- __ push(edi);
- __ push(ebx);
-
- // Compute the sizes of backing store, parameter map, and arguments object.
- // 1. Parameter map, has 2 extra words containing context and backing store.
- const int kParameterMapHeaderSize =
- FixedArray::kHeaderSize + 2 * kPointerSize;
- Label no_parameter_map;
- __ test(ebx, ebx);
- __ j(zero, &no_parameter_map, Label::kNear);
- __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
- __ bind(&no_parameter_map);
-
- // 2. Backing store.
- __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
-
- // 3. Arguments object.
- __ add(ebx, Immediate(Heap::kSloppyArgumentsObjectSize));
-
- // Do the allocation of all three objects in one go.
- __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
-
- // eax = address of new object(s) (tagged)
- // ecx = argument count (smi-tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[4] = function
- // esp[8] = parameter count (tagged)
- // Get the arguments map from the current native context into edi.
- Label has_mapped_parameters, instantiate;
- __ mov(edi, NativeContextOperand());
- __ mov(ebx, Operand(esp, 0 * kPointerSize));
- __ test(ebx, ebx);
- __ j(not_zero, &has_mapped_parameters, Label::kNear);
- __ mov(
- edi,
- Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX)));
- __ jmp(&instantiate, Label::kNear);
-
- __ bind(&has_mapped_parameters);
- __ mov(edi, Operand(edi, Context::SlotOffset(
- Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
- __ bind(&instantiate);
-
- // eax = address of new object (tagged)
- // ebx = mapped parameter count (tagged)
- // ecx = argument count (smi-tagged)
- // edi = address of arguments map (tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[4] = function
- // esp[8] = parameter count (tagged)
- // Copy the JS object part.
- __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- masm->isolate()->factory()->empty_fixed_array());
-
- // Set up the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ mov(edi, Operand(esp, 1 * kPointerSize));
- __ AssertNotSmi(edi);
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- edi);
-
- // Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(ecx);
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- ecx);
-
- // Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, edi will point there, otherwise to the
- // backing store.
- __ lea(edi, Operand(eax, Heap::kSloppyArgumentsObjectSize));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
-
- // eax = address of new object (tagged)
- // ebx = mapped parameter count (tagged)
- // ecx = argument count (tagged)
- // edx = address of receiver argument
- // edi = address of parameter map or backing store (tagged)
- // esp[0] = mapped parameter count (tagged)
- // esp[4] = function
- // esp[8] = parameter count (tagged)
- // Free two registers.
- __ push(edx);
- __ push(eax);
-
- // Initialize parameter map. If there are no mapped arguments, we're done.
- Label skip_parameter_map;
- __ test(ebx, ebx);
- __ j(zero, &skip_parameter_map);
-
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
- __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
- __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
-
- // Copy the parameter slots and the holes in the arguments.
- // We need to fill in mapped_parameter_count slots. They index the context,
- // where parameters are stored in reverse order, at
- // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
- // The mapped parameters thus need to get indices
- // MIN_CONTEXT_SLOTS+parameter_count-1 ..
- // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
- // We loop from right to left.
- Label parameters_loop, parameters_test;
- __ push(ecx);
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ add(ebx, Operand(esp, 5 * kPointerSize));
- __ sub(ebx, eax);
- __ mov(ecx, isolate()->factory()->the_hole_value());
- __ mov(edx, edi);
- __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
- // eax = loop variable (tagged)
- // ebx = mapping index (tagged)
- // ecx = the hole value
- // edx = address of parameter map (tagged)
- // edi = address of backing store (tagged)
- // esp[0] = argument count (tagged)
- // esp[4] = address of new object (tagged)
- // esp[8] = address of receiver argument
- // esp[12] = mapped parameter count (tagged)
- // esp[16] = function
- // esp[20] = parameter count (tagged)
- __ jmp(&parameters_test, Label::kNear);
-
- __ bind(&parameters_loop);
- __ sub(eax, Immediate(Smi::FromInt(1)));
- __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
- __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
- __ add(ebx, Immediate(Smi::FromInt(1)));
- __ bind(&parameters_test);
- __ test(eax, eax);
- __ j(not_zero, &parameters_loop, Label::kNear);
- __ pop(ecx);
-
- __ bind(&skip_parameter_map);
-
- // ecx = argument count (tagged)
- // edi = address of backing store (tagged)
- // esp[0] = address of new object (tagged)
- // esp[4] = address of receiver argument
- // esp[8] = mapped parameter count (tagged)
- // esp[12] = function
- // esp[16] = parameter count (tagged)
- // Copy arguments header and remaining slots (if there are any).
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
-
- Label arguments_loop, arguments_test;
- __ mov(ebx, Operand(esp, 2 * kPointerSize));
- __ mov(edx, Operand(esp, 1 * kPointerSize));
- __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
- __ sub(edx, ebx);
- __ jmp(&arguments_test, Label::kNear);
-
- __ bind(&arguments_loop);
- __ sub(edx, Immediate(kPointerSize));
- __ mov(eax, Operand(edx, 0));
- __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
- __ add(ebx, Immediate(Smi::FromInt(1)));
-
- __ bind(&arguments_test);
- __ cmp(ebx, ecx);
- __ j(less, &arguments_loop, Label::kNear);
-
- // Restore.
- __ pop(eax); // Address of arguments object.
- __ Drop(4);
-
- // Return.
- __ ret(0);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ pop(eax); // Remove saved mapped parameter count.
- __ pop(edi); // Pop saved function.
- __ pop(eax); // Remove saved parameter count.
- __ pop(eax); // Pop return address.
- __ push(edi); // Push function.
- __ push(edx); // Push parameters pointer.
- __ push(ecx); // Push parameter count.
- __ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // ecx : number of parameters (tagged)
- // edx : parameters pointer
- // edi : function
- // esp[0] : return address
-
- DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
- DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
- DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
-
- // Check if the calling frame is an arguments adaptor frame.
- Label try_allocate, runtime;
- __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
- __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &try_allocate, Label::kNear);
-
- // Patch the arguments.length and the parameters pointer.
- __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(edx,
- Operand(ebx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
-
- // Try the new space allocation. Start out with computing the size of
- // the arguments object and the elements array.
- Label add_arguments_object;
- __ bind(&try_allocate);
- __ mov(eax, ecx);
- __ test(eax, eax);
- __ j(zero, &add_arguments_object, Label::kNear);
- __ lea(eax, Operand(eax, times_2, FixedArray::kHeaderSize));
- __ bind(&add_arguments_object);
- __ add(eax, Immediate(Heap::kStrictArgumentsObjectSize));
-
- // Do the allocation of both objects in one go.
- __ Allocate(eax, eax, ebx, no_reg, &runtime, TAG_OBJECT);
-
- // Get the arguments map from the current native context.
- __ mov(edi, NativeContextOperand());
- __ mov(edi, ContextOperand(edi, Context::STRICT_ARGUMENTS_MAP_INDEX));
-
- __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- masm->isolate()->factory()->empty_fixed_array());
-
- // Get the length (smi tagged) and set that as an in-object property too.
- STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ AssertSmi(ecx);
- __ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- ecx);
-
- // If there are no actual arguments, we're done.
- Label done;
- __ test(ecx, ecx);
- __ j(zero, &done, Label::kNear);
-
- // Set up the elements pointer in the allocated arguments object and
- // initialize the header in the elements fixed array.
- __ lea(edi, Operand(eax, Heap::kStrictArgumentsObjectSize));
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
- __ mov(FieldOperand(edi, FixedArray::kMapOffset),
- Immediate(isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
-
- // Untag the length for the loop below.
- __ SmiUntag(ecx);
-
- // Copy the fixed array slots.
- Label loop;
- __ bind(&loop);
- __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
- __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
- __ add(edi, Immediate(kPointerSize));
- __ sub(edx, Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &loop);
-
- // Return.
- __ bind(&done);
- __ ret(0);
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ pop(eax); // Pop return address.
- __ push(edi); // Push function.
- __ push(edx); // Push parameters pointer.
- __ push(ecx); // Push parameter count.
- __ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // ecx : number of parameters (tagged)
- // edx : parameters pointer
- // ebx : rest parameter index (tagged)
- // esp[0] : return address
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ mov(edi, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(eax, Operand(edi, StandardFrameConstants::kContextOffset));
- __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &runtime);
-
- // Patch the arguments.length and the parameters pointer.
- __ mov(ecx, Operand(edi, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(edx,
- Operand(edi, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
-
- __ bind(&runtime);
- __ pop(eax); // Save return address.
- __ push(ecx); // Push number of parameters.
- __ push(edx); // Push parameters pointer.
- __ push(ebx); // Push rest parameter index.
- __ push(eax); // Push return address.
- __ TailCallRuntime(Runtime::kNewRestParam);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if the regexp entry in generated code is turned off by a runtime switch or
@@ -1384,16 +932,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
__ cmp(edx, isolate()->factory()->undefined_value());
- if (is_strong(strength())) {
- // In strong mode, this comparison must throw, so call the runtime.
- __ j(equal, &runtime_call, Label::kFar);
- } else {
- Label check_for_nan;
- __ j(not_equal, &check_for_nan, Label::kNear);
- __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- __ ret(0);
- __ bind(&check_for_nan);
- }
+ Label check_for_nan;
+ __ j(not_equal, &check_for_nan, Label::kNear);
+ __ Move(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ ret(0);
+ __ bind(&check_for_nan);
}
// Test for NaN. Compare heap numbers in a general way,
@@ -1413,12 +956,6 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call runtime on identical SIMD values since we must throw a TypeError.
__ cmpb(ecx, static_cast<uint8_t>(SIMD128_VALUE_TYPE));
__ j(equal, &runtime_call, Label::kFar);
- if (is_strong(strength())) {
- // We have already tested for smis and heap numbers, so if both
- // arguments are not strings we must proceed to the slow case.
- __ test(ecx, Immediate(kIsNotStringMask));
- __ j(not_zero, &runtime_call, Label::kFar);
- }
}
__ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
@@ -1575,7 +1112,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
- Label return_unequal;
+ Label return_unequal, undetectable;
// At most one is a smi, so we can test for smi by adding the two.
// A smi plus a heap object has the low bit set, a heap object plus
// a heap object has the low bit clear.
@@ -1584,26 +1121,32 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ lea(ecx, Operand(eax, edx, times_1, 0));
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &runtime_call, Label::kNear);
- __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+
+ __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(not_zero, &undetectable, Label::kNear);
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
+ __ j(not_zero, &return_unequal, Label::kNear);
+
+ __ CmpInstanceType(ebx, FIRST_JS_RECEIVER_TYPE);
__ j(below, &runtime_call, Label::kNear);
- __ CmpObjectType(edx, FIRST_JS_RECEIVER_TYPE, ebx);
+ __ CmpInstanceType(ecx, FIRST_JS_RECEIVER_TYPE);
__ j(below, &runtime_call, Label::kNear);
- // We do not bail out after this point. Both are JSObjects, and
- // they are equal if and only if both are undetectable.
- // The and of the undetectable flags is 1 if and only if they are equal.
+
+ __ bind(&return_unequal);
+ // Return non-equal by returning the non-zero object pointer in eax.
+ __ ret(0); // eax, edx were pushed
+
+ __ bind(&undetectable);
__ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
__ j(zero, &return_unequal, Label::kNear);
- __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(zero, &return_unequal, Label::kNear);
- // The objects are both undetectable, so they both compare as the value
- // undefined, and are equal.
__ Move(eax, Immediate(EQUAL));
- __ bind(&return_unequal);
- // Return non-equal by returning the non-zero object pointer in eax,
- // or return equal if we fell through to here.
- __ ret(0); // rax, rdx were pushed
+ __ ret(0); // eax, edx were pushed
}
__ bind(&runtime_call);
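
The equality block above leans on V8's ia32 pointer tagging, where smis have the low bit clear and heap objects have it set (kSmiTag == 0, kHeapObjectTag == 1, kSmiTagMask == 1): adding the two tagged words leaves the low bit set exactly when one operand is a smi, so a single test handles the mixed case. A tiny standalone statement of the invariant:

    #include <cstdint>

    // 0+1 and 1+0 set the low bit of the sum; 0+0 and 1+1 leave it clear.
    bool ExactlyOneSmi(uintptr_t tagged_a, uintptr_t tagged_b) {
      return ((tagged_a + tagged_b) & 1) != 0;
    }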
@@ -1624,8 +1167,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
- : Runtime::kCompare);
+ __ TailCallRuntime(Runtime::kCompare);
}
__ bind(&miss);
@@ -1854,7 +1396,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&call_function);
__ Set(eax, argc);
- __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
+ tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
@@ -1893,7 +1436,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&call);
__ Set(eax, argc);
- __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
+ __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
@@ -2015,16 +1558,22 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ProfileEntryHookStub::MaybeCallEntryHook(masm);
+ // Reserve space on the stack for the three arguments passed to the call. If
+ // the result size is greater than what can be returned in registers, also
+ // reserve space for the hidden argument holding the result location, and
+ // for the result itself.
+ int arg_stack_space = result_size() < 3 ? 3 : 4 + result_size();
+
// Enter the exit frame that transitions from JavaScript to C++.
if (argv_in_register()) {
DCHECK(!save_doubles());
- __ EnterApiExitFrame(3);
+ __ EnterApiExitFrame(arg_stack_space);
// Move argc and argv into the correct registers.
__ mov(esi, ecx);
__ mov(edi, eax);
} else {
- __ EnterExitFrame(save_doubles());
+ __ EnterExitFrame(arg_stack_space, save_doubles());
}
// ebx: pointer to C function (C callee-saved)
@@ -2039,14 +1588,36 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ CheckStackAlignment();
}
-
// Call C function.
- __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
- __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
- __ mov(Operand(esp, 2 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
+ if (result_size() <= 2) {
+ __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
+ __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ } else {
+ DCHECK_EQ(3, result_size());
+ // Pass a pointer to the result location as the first argument.
+ __ lea(eax, Operand(esp, 4 * kPointerSize));
+ __ mov(Operand(esp, 0 * kPointerSize), eax);
+ __ mov(Operand(esp, 1 * kPointerSize), edi); // argc.
+ __ mov(Operand(esp, 2 * kPointerSize), esi); // argv.
+ __ mov(Operand(esp, 3 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ }
__ call(ebx);
- // Result is in eax or edx:eax - do not destroy these registers!
+
+ if (result_size() > 2) {
+ DCHECK_EQ(3, result_size());
+#ifndef _WIN32
+ // Restore the "hidden" argument on the stack, which was popped by the callee.
+ __ sub(esp, Immediate(kPointerSize));
+#endif
+ // Read the result values from the stack; they are stored above the arguments.
+ __ mov(kReturnRegister0, Operand(esp, 4 * kPointerSize));
+ __ mov(kReturnRegister1, Operand(esp, 5 * kPointerSize));
+ __ mov(kReturnRegister2, Operand(esp, 6 * kPointerSize));
+ }
+ // Result is in eax, edx:eax or edi:edx:eax - do not destroy these registers!
// Check result for exception sentinel.
Label exception_returned;
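
The result_size() == 3 path exists because ia32 C calling conventions return large aggregates through a hidden pointer passed as an implicit first argument, and on non-Windows ABIs the callee pops that pointer itself, which the sub(esp, Immediate(kPointerSize)) above compensates for. A sketch of the C++ side of such a call, with ThreeWords as an assumed stand-in for the real return type:

    #include <cstdint>

    struct ThreeWords { uintptr_t a, b, c; };  // a three-word result

    // On ia32 the compiler lowers this to a write through the hidden
    // result pointer; the stub reads the words back into eax/edx/edi.
    ThreeWords ReturnTriple(uintptr_t base) {
      return {base, base + 1, base + 2};
    }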
@@ -2117,6 +1688,16 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Compute the handler entry address and jump to it.
__ mov(edi, Operand::StaticVariable(pending_handler_code_address));
__ mov(edx, Operand::StaticVariable(pending_handler_offset_address));
+ // Check whether it is turbofanned exception handler code before jumping to it.
+ Label not_turbo;
+ __ push(eax);
+ __ mov(eax, Operand(edi, Code::kKindSpecificFlags1Offset - kHeapObjectTag));
+ __ and_(eax, Immediate(1 << Code::kIsTurbofannedBit));
+ __ j(zero, &not_turbo);
+ __ fninit();
+ __ fld1();
+ __ bind(&not_turbo);
+ __ pop(eax);
__ lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
__ jmp(edi);
}
@@ -2840,6 +2421,42 @@ void ToStringStub::Generate(MacroAssembler* masm) {
}
+void ToNameStub::Generate(MacroAssembler* masm) {
+ // The ToName stub takes one argument in eax.
+ Label is_number;
+ __ JumpIfSmi(eax, &is_number, Label::kNear);
+
+ Label not_name;
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ CmpObjectType(eax, LAST_NAME_TYPE, edi);
+ // eax: receiver
+ // edi: receiver map
+ __ j(above, &not_name, Label::kNear);
+ __ Ret();
+ __ bind(&not_name);
+
+ Label not_heap_number;
+ __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &not_heap_number, Label::kNear);
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ CmpInstanceType(edi, ODDBALL_TYPE);
+ __ j(not_equal, &not_oddball, Label::kNear);
+ __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
+ __ Ret();
+ __ bind(&not_oddball);
+
+ __ pop(ecx); // Pop return address.
+ __ push(eax); // Push argument.
+ __ push(ecx); // Push return address.
+ __ TailCallRuntime(Runtime::kToName);
+}
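
The dispatch order of the new ToNameStub, restated over stand-in kinds (not the real V8 type hierarchy): names pass through unchanged, smis and heap numbers reuse NumberToStringStub, oddballs return their cached string, and everything else falls back to the runtime.

    enum class Kind { kSmi, kName, kHeapNumber, kOddball, kOther };

    // Mirrors the label structure of ToNameStub::Generate above.
    const char* ToNamePath(Kind k) {
      switch (k) {
        case Kind::kName:       return "return the receiver unchanged";
        case Kind::kSmi:
        case Kind::kHeapNumber: return "tail-call NumberToStringStub";
        case Kind::kOddball:    return "load Oddball::kToStringOffset";
        default:                return "tail-call Runtime::kToName";
      }
    }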
+
+
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -3043,19 +2660,15 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ JumpIfNotRoot(ecx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
__ JumpIfNotRoot(ebx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
- if (op() != Token::EQ_STRICT && is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
- } else {
- if (!Token::IsEqualityOp(op())) {
- __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
- __ AssertSmi(eax);
- __ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
- __ AssertSmi(edx);
- __ xchg(eax, edx);
- }
- __ sub(eax, edx);
- __ Ret();
+ if (!Token::IsEqualityOp(op())) {
+ __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
+ __ AssertSmi(eax);
+ __ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
+ __ AssertSmi(edx);
+ __ xchg(eax, edx);
}
+ __ sub(eax, edx);
+ __ Ret();
__ bind(&miss);
GenerateMiss(masm);
@@ -3117,7 +2730,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ bind(&unordered);
__ bind(&generic_stub);
- CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
+ CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
CompareICState::GENERIC, CompareICState::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -3360,8 +2973,6 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
if (Token::IsEqualityOp(op())) {
__ sub(eax, edx);
__ ret(0);
- } else if (is_strong(strength())) {
- __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
} else {
__ PopReturnAddressTo(ecx);
__ Push(edx);
@@ -3665,11 +3276,8 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.scratch0(),
&dont_need_remembered_set);
- __ CheckPageFlag(regs_.object(),
- regs_.scratch0(),
- 1 << MemoryChunk::SCAN_ON_SCAVENGE,
- not_zero,
- &dont_need_remembered_set);
+ __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
+ &dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
@@ -4370,7 +3978,6 @@ static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
__ jmp(&compare_map);
}
-
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // edx
Register key = VectorStoreICDescriptor::NameRegister(); // ecx
@@ -4426,14 +4033,12 @@ void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
KeyedStoreIC::GenerateMiss(masm);
}
-
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(ebx);
CallICStub stub(isolate(), state());
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -4441,7 +4046,6 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
}
}
-
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Save volatile registers.
const int kNumSavedRegisters = 3;
@@ -4473,18 +4077,15 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
-
-template<class T>
+template <class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
- T stub(masm->isolate(),
- GetInitialFastElementsKind(),
- mode);
+ T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
@@ -4502,7 +4103,6 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}
}
-
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
// ebx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
@@ -4534,14 +4134,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
- ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
- holey_initial,
- DISABLE_ALLOCATION_SITES);
+ ArraySingleArgumentConstructorStub stub_holey(
+ masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
- ArraySingleArgumentConstructorStub stub(masm->isolate(),
- initial,
+ ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
@@ -4564,8 +4162,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
Immediate(Smi::FromInt(kFastElementsKindPackedToHoley)));
__ bind(&normal_sequence);
- int last_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int last_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
Label next;
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
@@ -4583,11 +4181,10 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}
}
-
-template<class T>
+template <class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
- int to_index = GetSequenceIndexFromFastElementsKind(
- TERMINAL_FAST_ELEMENTS_KIND);
+ int to_index =
+ GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(isolate, kind);
@@ -4599,7 +4196,6 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
}
}
-
void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
isolate);
@@ -4609,10 +4205,9 @@ void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
isolate);
}
-
void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
Isolate* isolate) {
- ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+ ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
@@ -4624,10 +4219,8 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
}
}
-
void ArrayConstructorStub::GenerateDispatchToArrayStub(
- MacroAssembler* masm,
- AllocationSiteOverrideMode mode) {
+ MacroAssembler* masm, AllocationSiteOverrideMode mode) {
if (argument_count() == ANY) {
Label not_zero_case, not_one_case;
__ test(eax, eax);
@@ -4652,7 +4245,6 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
}
}
-
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc (only if argument_count() is ANY or MORE_THAN_ONE)
@@ -4726,9 +4318,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}
-
-void InternalArrayConstructorStub::GenerateCase(
- MacroAssembler* masm, ElementsKind kind) {
+void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
+ ElementsKind kind) {
Label not_zero_case, not_one_case;
Label normal_sequence;
@@ -4748,8 +4339,8 @@ void InternalArrayConstructorStub::GenerateCase(
__ test(ecx, ecx);
__ j(zero, &normal_sequence);
- InternalArraySingleArgumentConstructorStub
- stub1_holey(isolate(), GetHoleyElementsKind(kind));
+ InternalArraySingleArgumentConstructorStub stub1_holey(
+ isolate(), GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey);
}
@@ -4762,7 +4353,6 @@ void InternalArrayConstructorStub::GenerateCase(
__ TailCallStub(&stubN);
}
-
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
@@ -4798,8 +4388,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmp(ecx, Immediate(FAST_ELEMENTS));
__ j(equal, &done);
__ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
- __ Assert(equal,
- kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+ __ Assert(equal, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
@@ -4812,6 +4401,639 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
+void FastNewObjectStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edi : target
+ // -- edx : new target
+ // -- esi : context
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ AssertFunction(edi);
+ __ AssertReceiver(edx);
+
+ // Verify that the new target is a JSFunction.
+ Label new_object;
+ __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
+ __ j(not_equal, &new_object);
+
+ // Load the initial map and verify that it's in fact a map.
+ __ mov(ecx, FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
+ __ JumpIfSmi(ecx, &new_object);
+ __ CmpObjectType(ecx, MAP_TYPE, ebx);
+ __ j(not_equal, &new_object);
+
+ // Fall back to runtime if the target differs from the new target's
+ // initial map constructor.
+ __ cmp(edi, FieldOperand(ecx, Map::kConstructorOrBackPointerOffset));
+ __ j(not_equal, &new_object);
+
+ // Allocate the JSObject on the heap.
+ Label allocate, done_allocate;
+ __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
+ __ lea(ebx, Operand(ebx, times_pointer_size, 0));
+ __ Allocate(ebx, eax, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
+ __ bind(&done_allocate);
+
+ // Initialize the JSObject fields.
+ __ mov(Operand(eax, JSObject::kMapOffset), ecx);
+ __ mov(Operand(eax, JSObject::kPropertiesOffset),
+ masm->isolate()->factory()->empty_fixed_array());
+ __ mov(Operand(eax, JSObject::kElementsOffset),
+ masm->isolate()->factory()->empty_fixed_array());
+ STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
+ __ lea(ebx, Operand(eax, JSObject::kHeaderSize));
+
+ // ----------- S t a t e -------------
+ // -- eax : result (untagged)
+ // -- ebx : result fields (untagged)
+ // -- edi : result end (untagged)
+ // -- ecx : initial map
+ // -- esi : context
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Perform in-object slack tracking if requested.
+ Label slack_tracking;
+ STATIC_ASSERT(Map::kNoSlackTracking == 0);
+ __ test(FieldOperand(ecx, Map::kBitField3Offset),
+ Immediate(Map::ConstructionCounter::kMask));
+ __ j(not_zero, &slack_tracking, Label::kNear);
+ {
+ // Initialize all in-object fields with undefined.
+ __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+ __ InitializeFieldsWithFiller(ebx, edi, edx);
+
+ // Add the object tag to make the JSObject real.
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ inc(eax);
+ __ Ret();
+ }
+ __ bind(&slack_tracking);
+ {
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ __ sub(FieldOperand(ecx, Map::kBitField3Offset),
+ Immediate(1 << Map::ConstructionCounter::kShift));
+
+ // Initialize the in-object fields with undefined.
+ __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
+ __ neg(edx);
+ __ lea(edx, Operand(edi, edx, times_pointer_size, 0));
+ __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ InitializeFieldsWithFiller(ebx, edx, edi);
+
+ // Initialize the remaining (reserved) fields with one pointer filler map.
+ __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
+ __ lea(edx, Operand(ebx, edx, times_pointer_size, 0));
+ __ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(ebx, edx, edi);
+
+ // Add the object tag to make the JSObject real.
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ inc(eax);
+
+ // Check if we can finalize the instance size.
+ Label finalize;
+ STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
+ __ test(FieldOperand(ecx, Map::kBitField3Offset),
+ Immediate(Map::ConstructionCounter::kMask));
+ __ j(zero, &finalize, Label::kNear);
+ __ Ret();
+
+ // Finalize the instance size.
+ __ bind(&finalize);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(eax);
+ __ Push(ecx);
+ __ CallRuntime(Runtime::kFinalizeInstanceSize);
+ __ Pop(eax);
+ }
+ __ Ret();
+ }
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(ebx);
+ __ Push(ecx);
+ __ Push(ebx);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ Pop(ecx);
+ }
+ STATIC_ASSERT(kHeapObjectTag == 1);
+ __ dec(eax);
+ __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
+ __ lea(edi, Operand(eax, ebx, times_pointer_size, 0));
+ __ jmp(&done_allocate);
+
+ // Fall back to %NewObject.
+ __ bind(&new_object);
+ __ PopReturnAddressTo(ecx);
+ __ Push(edi);
+ __ Push(edx);
+ __ PushReturnAddressFrom(ecx);
+ __ TailCallRuntime(Runtime::kNewObject);
+}
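
A rough standalone model of the in-object slack tracking exercised by the slack_tracking block above (the counter and field counts live in the real Map's bit field 3; the names here are stand-ins): every construction decrements the counter, unused tail fields are pre-filled with a one-pointer filler instead of undefined, and when the counter reaches its end the instance size is finalized through Runtime::kFinalizeInstanceSize.

    struct FakeMap {
      int construction_counter;    // Map::ConstructionCounter, bit field 3
      int unused_property_fields;  // fields to pre-fill with filler maps
    };

    // Returns true when this allocation should finalize the instance size.
    bool TrackSlackOnAllocate(FakeMap* map) {
      if (map->construction_counter == 0) return false;  // tracking is over
      --map->construction_counter;
      return map->construction_counter == 0;
    }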
+
+void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edi : function
+ // -- esi : context
+ // -- ebp : frame pointer
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ AssertFunction(edi);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make edx point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ mov(edx, ebp);
+ __ jmp(&loop_entry, Label::kNear);
+ __ bind(&loop);
+ __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+ __ j(not_equal, &loop);
+ }
+
+ // Check if we have rest parameters (only possible if we have an
+ // arguments adaptor frame below the function frame).
+ Label no_rest_parameters;
+ __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &no_rest_parameters, Label::kNear);
+
+ // Check if the arguments adaptor frame contains more arguments than
+ // specified by the function's internal formal parameter count.
+ Label rest_parameters;
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sub(eax,
+ FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ j(greater, &rest_parameters);
+
+ // Return an empty rest parameter array.
+ __ bind(&no_rest_parameters);
+ {
+ // ----------- S t a t e -------------
+ // -- esi : context
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Allocate an empty rest parameter array.
+ Label allocate, done_allocate;
+ __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Set up the (empty) rest parameter array in eax.
+ __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
+ __ mov(FieldOperand(eax, JSArray::kMapOffset), ecx);
+ __ mov(ecx, isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSArray::kPropertiesOffset), ecx);
+ __ mov(FieldOperand(eax, JSArray::kElementsOffset), ecx);
+ __ mov(FieldOperand(eax, JSArray::kLengthOffset),
+ Immediate(Smi::FromInt(0)));
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(Smi::FromInt(JSArray::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ }
+ __ jmp(&done_allocate);
+ }
+
+ __ bind(&rest_parameters);
+ {
+ // Compute the pointer to the first rest parameter (skipping the receiver).
+ __ lea(ebx,
+ Operand(ebx, eax, times_half_pointer_size,
+ StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+
+ // ----------- S t a t e -------------
+ // -- esi : context
+ // -- eax : number of rest parameters (tagged)
+ // -- ebx : pointer to first rest parameters
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Allocate space for the rest parameter array plus the backing store.
+ Label allocate, done_allocate;
+ __ lea(ecx, Operand(eax, times_half_pointer_size,
+ JSArray::kSize + FixedArray::kHeaderSize));
+ __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Set up the elements array in edx.
+ __ mov(FieldOperand(edx, FixedArray::kMapOffset),
+ isolate()->factory()->fixed_array_map());
+ __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
+ {
+ Label loop, done_loop;
+ __ Move(ecx, Smi::FromInt(0));
+ __ bind(&loop);
+ __ cmp(ecx, eax);
+ __ j(equal, &done_loop, Label::kNear);
+ __ mov(edi, Operand(ebx, 0 * kPointerSize));
+ __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ edi);
+ __ sub(ebx, Immediate(1 * kPointerSize));
+ __ add(ecx, Immediate(Smi::FromInt(1)));
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Set up the rest parameter array in edi.
+ __ lea(edi,
+ Operand(edx, eax, times_half_pointer_size, FixedArray::kHeaderSize));
+ __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
+ __ mov(FieldOperand(edi, JSArray::kMapOffset), ecx);
+ __ mov(FieldOperand(edi, JSArray::kPropertiesOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(edi, JSArray::kElementsOffset), edx);
+ __ mov(FieldOperand(edi, JSArray::kLengthOffset), eax);
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ __ mov(eax, edi);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(ecx);
+ __ Push(eax);
+ __ Push(ebx);
+ __ Push(ecx);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ mov(edx, eax);
+ __ Pop(ebx);
+ __ Pop(eax);
+ }
+ __ jmp(&done_allocate);
+ }
+}
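
The rest-parameter count the stub derives above is just the adaptor frame's argument count minus the function's formal parameter count, clamped at zero (with no adaptor frame the counts match, so the empty-array path is taken). As a one-line equivalent:

    #include <algorithm>

    int RestParameterCount(int actual_args, int formal_params) {
      return std::max(0, actual_args - formal_params);
    }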
+
+void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edi : function
+ // -- esi : context
+ // -- ebp : frame pointer
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ AssertFunction(edi);
+
+ // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx,
+ FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ lea(edx, Operand(ebp, ecx, times_half_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
+
+ // ecx : number of parameters (tagged)
+ // edx : parameters pointer
+ // edi : function
+ // esp[0] : return address
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, try_allocate, runtime;
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &adaptor_frame, Label::kNear);
+
+ // No adaptor, parameter count = argument count.
+ __ mov(ebx, ecx);
+ __ push(ecx);
+ __ jmp(&try_allocate, Label::kNear);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ mov(ebx, ecx);
+ __ push(ecx);
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx,
+ Operand(edx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
+
+ // ebx = parameter count (tagged)
+ // ecx = argument count (smi-tagged)
+ // Compute the mapped parameter count = min(ebx, ecx) in ebx.
+ __ cmp(ebx, ecx);
+ __ j(less_equal, &try_allocate, Label::kNear);
+ __ mov(ebx, ecx);
+
+ // Save mapped parameter count and function.
+ __ bind(&try_allocate);
+ __ push(edi);
+ __ push(ebx);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+ // 1. Parameter map: it has 2 extra words containing context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ Label no_parameter_map;
+ __ test(ebx, ebx);
+ __ j(zero, &no_parameter_map, Label::kNear);
+ __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
+ __ bind(&no_parameter_map);
+
+ // 2. Backing store.
+ __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ add(ebx, Immediate(JSSloppyArgumentsObject::kSize));
+
+ // Do the allocation of all three objects in one go.
+ __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
+
+ // eax = address of new object(s) (tagged)
+ // ecx = argument count (smi-tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[4] = function
+ // esp[8] = parameter count (tagged)
+ // Get the arguments map from the current native context into edi.
+ Label has_mapped_parameters, instantiate;
+ __ mov(edi, NativeContextOperand());
+ __ mov(ebx, Operand(esp, 0 * kPointerSize));
+ __ test(ebx, ebx);
+ __ j(not_zero, &has_mapped_parameters, Label::kNear);
+ __ mov(
+ edi,
+ Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX)));
+ __ jmp(&instantiate, Label::kNear);
+
+ __ bind(&has_mapped_parameters);
+ __ mov(edi, Operand(edi, Context::SlotOffset(
+ Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
+ __ bind(&instantiate);
+
+ // eax = address of new object (tagged)
+ // ebx = mapped parameter count (tagged)
+ // ecx = argument count (smi-tagged)
+ // edi = address of arguments map (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[4] = function
+ // esp[8] = parameter count (tagged)
+ // Copy the JS object part.
+ __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+ masm->isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+ masm->isolate()->factory()->empty_fixed_array());
+
+ // Set up the callee in-object property.
+ STATIC_ASSERT(JSSloppyArgumentsObject::kCalleeIndex == 1);
+ __ mov(edi, Operand(esp, 1 * kPointerSize));
+ __ AssertNotSmi(edi);
+ __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kCalleeOffset), edi);
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(ecx);
+ __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kLengthOffset), ecx);
+
+ // Set up the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, edi will point there, otherwise to the
+ // backing store.
+ __ lea(edi, Operand(eax, JSSloppyArgumentsObject::kSize));
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
+
+ // eax = address of new object (tagged)
+ // ebx = mapped parameter count (tagged)
+ // ecx = argument count (tagged)
+ // edx = address of receiver argument
+ // edi = address of parameter map or backing store (tagged)
+ // esp[0] = mapped parameter count (tagged)
+ // esp[4] = function
+ // esp[8] = parameter count (tagged)
+ // Free two registers.
+ __ push(edx);
+ __ push(eax);
+
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ test(ebx, ebx);
+ __ j(zero, &skip_parameter_map);
+
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
+ __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
+ __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
+ __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+ // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ push(ecx);
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ add(ebx, Operand(esp, 5 * kPointerSize));
+ __ sub(ebx, eax);
+ __ mov(ecx, isolate()->factory()->the_hole_value());
+ __ mov(edx, edi);
+ __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
+ // eax = loop variable (tagged)
+ // ebx = mapping index (tagged)
+ // ecx = the hole value
+ // edx = address of parameter map (tagged)
+ // edi = address of backing store (tagged)
+ // esp[0] = argument count (tagged)
+ // esp[4] = address of new object (tagged)
+ // esp[8] = address of receiver argument
+ // esp[12] = mapped parameter count (tagged)
+ // esp[16] = function
+ // esp[20] = parameter count (tagged)
+ __ jmp(&parameters_test, Label::kNear);
+
+ __ bind(&parameters_loop);
+ __ sub(eax, Immediate(Smi::FromInt(1)));
+ __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
+ __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
+ __ add(ebx, Immediate(Smi::FromInt(1)));
+ __ bind(&parameters_test);
+ __ test(eax, eax);
+ __ j(not_zero, &parameters_loop, Label::kNear);
+ __ pop(ecx);
+
+ __ bind(&skip_parameter_map);
+
+ // ecx = argument count (tagged)
+ // edi = address of backing store (tagged)
+ // esp[0] = address of new object (tagged)
+ // esp[4] = address of receiver argument
+ // esp[8] = mapped parameter count (tagged)
+ // esp[12] = function
+ // esp[16] = parameter count (tagged)
+ // Copy arguments header and remaining slots (if there are any).
+ __ mov(FieldOperand(edi, FixedArray::kMapOffset),
+ Immediate(isolate()->factory()->fixed_array_map()));
+ __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+
+ Label arguments_loop, arguments_test;
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
+ __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
+ __ sub(edx, ebx);
+ __ jmp(&arguments_test, Label::kNear);
+
+ __ bind(&arguments_loop);
+ __ sub(edx, Immediate(kPointerSize));
+ __ mov(eax, Operand(edx, 0));
+ __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
+ __ add(ebx, Immediate(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ cmp(ebx, ecx);
+ __ j(less, &arguments_loop, Label::kNear);
+
+ // Restore.
+ __ pop(eax); // Address of arguments object.
+ __ Drop(4);
+
+ // Return.
+ __ ret(0);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ pop(eax); // Remove saved mapped parameter count.
+ __ pop(edi); // Pop saved function.
+ __ pop(eax); // Remove saved parameter count.
+ __ pop(eax); // Pop return address.
+ __ push(edi); // Push function.
+ __ push(edx); // Push parameters pointer.
+ __ push(ecx); // Push parameter count.
+ __ push(eax); // Push return address.
+ __ TailCallRuntime(Runtime::kNewSloppyArguments);
+}
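
A worked example of the mapped-parameter indexing described in the comment block inside the stub above, assuming Context::MIN_CONTEXT_SLOTS == 4 (an assumption here, not stated in the diff): with parameter_count == 3 and mapped_parameter_count == 2, the right-to-left loop fills context indices 6 and then 5.

    // Context slot for the i-th mapped parameter, counting i from the
    // right starting at 0, as in the parameters_loop above.
    int MappedParameterContextIndex(int min_context_slots, int parameter_count,
                                    int i) {
      return min_context_slots + parameter_count - 1 - i;
    }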
+
+void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- edi : function
+ // -- esi : context
+ // -- ebp : frame pointer
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ AssertFunction(edi);
+
+ // For Ignition we need to skip all possible handler/stub frames until
+ // we reach the JavaScript frame for the function (similar to what the
+ // runtime fallback implementation does). So make edx point to that
+ // JavaScript frame.
+ {
+ Label loop, loop_entry;
+ __ mov(edx, ebp);
+ __ jmp(&loop_entry, Label::kNear);
+ __ bind(&loop);
+ __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+ __ bind(&loop_entry);
+ __ cmp(edi, Operand(edx, StandardFrameConstants::kMarkerOffset));
+ __ j(not_equal, &loop);
+ }
+
+ // Check if we have an arguments adaptor frame below the function frame.
+ Label arguments_adaptor, arguments_done;
+ __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
+ __ cmp(Operand(ebx, StandardFrameConstants::kContextOffset),
+ Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(equal, &arguments_adaptor, Label::kNear);
+ {
+ __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(eax,
+ FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ lea(ebx,
+ Operand(edx, eax, times_half_pointer_size,
+ StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+ }
+ __ jmp(&arguments_done, Label::kNear);
+ __ bind(&arguments_adaptor);
+ {
+ __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(ebx,
+ Operand(ebx, eax, times_half_pointer_size,
+ StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
+ }
+ __ bind(&arguments_done);
+
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments (tagged)
+ // -- ebx : pointer to the first argument
+ // -- esi : context
+ // -- esp[0] : return address
+ // -----------------------------------
+
+ // Allocate space for the strict arguments object plus the backing store.
+ Label allocate, done_allocate;
+ __ lea(ecx,
+ Operand(eax, times_half_pointer_size,
+ JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+ __ Allocate(ecx, edx, edi, no_reg, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Set up the elements array in edx.
+ __ mov(FieldOperand(edx, FixedArray::kMapOffset),
+ isolate()->factory()->fixed_array_map());
+ __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
+ {
+ Label loop, done_loop;
+ __ Move(ecx, Smi::FromInt(0));
+ __ bind(&loop);
+ __ cmp(ecx, eax);
+ __ j(equal, &done_loop, Label::kNear);
+ __ mov(edi, Operand(ebx, 0 * kPointerSize));
+ __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
+ FixedArray::kHeaderSize),
+ edi);
+ __ sub(ebx, Immediate(1 * kPointerSize));
+ __ add(ecx, Immediate(Smi::FromInt(1)));
+ __ jmp(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Set up the strict arguments object in edi.
+ __ lea(edi,
+ Operand(edx, eax, times_half_pointer_size, FixedArray::kHeaderSize));
+ __ LoadGlobalFunction(Context::STRICT_ARGUMENTS_MAP_INDEX, ecx);
+ __ mov(FieldOperand(edi, JSStrictArgumentsObject::kMapOffset), ecx);
+ __ mov(FieldOperand(edi, JSStrictArgumentsObject::kPropertiesOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(edi, JSStrictArgumentsObject::kElementsOffset), edx);
+ __ mov(FieldOperand(edi, JSStrictArgumentsObject::kLengthOffset), eax);
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ __ mov(eax, edi);
+ __ Ret();
+
+ // Fall back to %AllocateInNewSpace.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(ecx);
+ __ Push(eax);
+ __ Push(ebx);
+ __ Push(ecx);
+ __ CallRuntime(Runtime::kAllocateInNewSpace);
+ __ mov(edx, eax);
+ __ Pop(ebx);
+ __ Pop(eax);
+ }
+ __ jmp(&done_allocate);
+}
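
// Aside: a minimal standalone sketch of the Smi-scaled addressing used
// above. On 32-bit targets a Smi is the value shifted left by one bit,
// so scaling a tagged count with times_half_pointer_size (factor 2)
// spans exactly value * kPointerSize bytes. Constants assumed for
// illustration:
constexpr int kSmiTagSizeSketch = 1;   // Smi = value << 1 on ia32/x87
constexpr int kPointerSizeSketch = 4;  // 32-bit pointer width
constexpr int kArgcSketch = 3;
static_assert((kArgcSketch << kSmiTagSizeSketch) * (kPointerSizeSketch / 2) ==
                  kArgcSketch * kPointerSizeSketch,
              "a Smi-tagged count scaled by 2 spans argc pointer slots");
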
void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
Register context_reg = esi;
@@ -5150,11 +5372,10 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ jmp(&leave_exit_frame);
}
-
static void CallApiFunctionStubHelper(MacroAssembler* masm,
const ParameterCount& argc,
bool return_first_arg,
- bool call_data_undefined) {
+ bool call_data_undefined, bool is_lazy) {
// ----------- S t a t e -------------
// -- edi : callee
// -- ebx : call_data
@@ -5228,8 +5449,10 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// push return address
__ push(return_address);
- // load context from callee
- __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+ if (!is_lazy) {
+ // load context from callee
+ __ mov(context, FieldOperand(callee, JSFunction::kContextOffset));
+ }
// API function gets a reference to the v8::Arguments. If the CPU profiler
// is enabled, the wrapper function will be called and we need to pass
@@ -5301,7 +5524,7 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
void CallApiFunctionStub::Generate(MacroAssembler* masm) {
bool call_data_undefined = this->call_data_undefined();
CallApiFunctionStubHelper(masm, ParameterCount(eax), false,
- call_data_undefined);
+ call_data_undefined, false);
}
@@ -5309,45 +5532,58 @@ void CallApiAccessorStub::Generate(MacroAssembler* masm) {
bool is_store = this->is_store();
int argc = this->argc();
bool call_data_undefined = this->call_data_undefined();
+ bool is_lazy = this->is_lazy();
CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
- call_data_undefined);
+ call_data_undefined, is_lazy);
}
void CallApiGetterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- esp[0] : return address
- // -- esp[4] : name
- // -- esp[8 - kArgsLength*4] : PropertyCallbackArguments object
+ // -- esp[0] : return address
+ // -- esp[4] : name
+ // -- esp[8 .. (8 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_
// -- ...
- // -- edx : api_function_address
+ // -- edx : api_function_address
// -----------------------------------
DCHECK(edx.is(ApiGetterDescriptor::function_address()));
- // array for v8::Arguments::values_, handler for name and pointer
- // to the values (it considered as smi in GC).
- const int kStackSpace = PropertyCallbackArguments::kArgsLength + 2;
- // Allocate space for opional callback address parameter in case
- // CPU profiler is active.
- const int kApiArgc = 2 + 1;
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Allocate v8::PropertyCallbackInfo object, arguments for callback and
+ // space for optional callback address parameter (in case CPU profiler is
+ // active) in non-GCed stack space.
+ const int kApiArgc = 3 + 1;
Register api_function_address = edx;
Register scratch = ebx;
- // load address of name
- __ lea(scratch, Operand(esp, 1 * kPointerSize));
+ // Load address of v8::PropertyCallbackInfo::args_ array.
+ __ lea(scratch, Operand(esp, 2 * kPointerSize));
PrepareCallApiFunction(masm, kApiArgc);
+ // Create v8::PropertyCallbackInfo object on the stack and initialize
+ // its args_ field.
+ Operand info_object = ApiParameterOperand(3);
+ __ mov(info_object, scratch);
+
+ __ sub(scratch, Immediate(kPointerSize));
__ mov(ApiParameterOperand(0), scratch); // name.
- __ add(scratch, Immediate(kPointerSize));
+ __ lea(scratch, info_object);
__ mov(ApiParameterOperand(1), scratch); // arguments pointer.
+ // Reserve space for optional callback address parameter.
+ Operand thunk_last_arg = ApiParameterOperand(2);
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
+ // +3 is to skip prologue, return address and name handle.
+ Operand return_value_operand(
+ ebp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- ApiParameterOperand(2), kStackSpace, nullptr,
- Operand(ebp, 7 * kPointerSize), NULL);
+ thunk_last_arg, kStackUnwindSpace, nullptr,
+ return_value_operand, NULL);
}
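
// Aside: a hypothetical view (field names invented for the sketch) of
// the stack this getter stub sees on entry, matching the State comment
// above. kStackUnwindSpace counts the name handle plus the args_ array;
// the return address is excluded because ret pops it.
struct GetterStubStackViewSketch {
  void* return_address;    // esp[0]
  void* name_handle;       // esp[4]
  void* callback_args[7];  // esp[8]...: v8::PropertyCallbackInfo::args_;
                           // length assumed here, really
                           // PropertyCallbackArguments::kArgsLength
};
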
diff --git a/deps/v8/src/x87/code-stubs-x87.h b/deps/v8/src/x87/code-stubs-x87.h
index a6a2a13057..39a4603626 100644
--- a/deps/v8/src/x87/code-stubs-x87.h
+++ b/deps/v8/src/x87/code-stubs-x87.h
@@ -271,24 +271,12 @@ class RecordWriteStub: public PlatformCodeStub {
// registers are eax, ecx and edx. The three scratch registers (incl. ecx)
// will be restored by other means so we don't bother pushing them here.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
- if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
- if (mode == kSaveFPRegs) {
- // Save FPU state in m108byte.
- masm->sub(esp, Immediate(108));
- masm->fnsave(Operand(esp, 0));
- }
+ masm->PushCallerSaved(mode, ecx, scratch0_, scratch1_);
}
inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
SaveFPRegsMode mode) {
- if (mode == kSaveFPRegs) {
- // Restore FPU state in m108byte.
- masm->frstor(Operand(esp, 0));
- masm->add(esp, Immediate(108));
- }
- if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
- if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
+ masm->PopCallerSaved(mode, ecx, scratch0_, scratch1_);
}
inline Register object() { return object_; }
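
// Aside: the 108 bytes that the old code reserved for fnsave/frstor (and
// that PushCallerSaved/PopCallerSaved now reserve internally) are the
// 32-bit x87 FNSAVE image: a 28-byte environment plus eight 10-byte
// data registers.
static_assert(28 + 8 * 10 == 108, "x87 FNSAVE image size in 32-bit mode");
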
diff --git a/deps/v8/src/x87/deoptimizer-x87.cc b/deps/v8/src/x87/deoptimizer-x87.cc
index 5a1951a0ed..3b90276a93 100644
--- a/deps/v8/src/x87/deoptimizer-x87.cc
+++ b/deps/v8/src/x87/deoptimizer-x87.cc
@@ -169,27 +169,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
- // Set the register values. The values are not important as there are no
- // callee saved registers in JavaScript frames, so all registers are
- // spilled. Registers ebp and esp are set to the correct values though.
-
- for (int i = 0; i < Register::kNumRegisters; i++) {
- input_->SetRegister(i, i * 4);
- }
- input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
- input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < X87Register::kMaxNumRegisters; i++) {
- input_->SetDoubleRegister(i, 0.0);
- }
-
- // Fill the frame content from the actual data on the frame.
- for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
- input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
- }
-}
-
-
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
intptr_t handler =
@@ -207,10 +186,8 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
}
}
-
-bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
- int parameter_count =
- function->shared()->internal_formal_parameter_count() + 1;
+bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
+ int parameter_count = shared->internal_formal_parameter_count() + 1;
unsigned input_frame_size = input_->GetFrameSize();
unsigned alignment_state_offset =
input_frame_size - parameter_count * kPointerSize -
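
// Aside (sketch): the "+ 1" above accounts for the implicit receiver,
// which occupies a parameter slot in the frame next to the declared
// formals.
constexpr int kFormalsSketch = 2;                       // function f(a, b) {}
constexpr int kFrameParamsSketch = kFormalsSketch + 1;  // + implicit receiver
static_assert(kFrameParamsSketch == 3, "receiver takes one extra slot");
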
diff --git a/deps/v8/src/x87/disasm-x87.cc b/deps/v8/src/x87/disasm-x87.cc
index bf2200ca11..a3f1939b73 100644
--- a/deps/v8/src/x87/disasm-x87.cc
+++ b/deps/v8/src/x87/disasm-x87.cc
@@ -28,32 +28,30 @@ struct ByteMnemonic {
OperandOrder op_order_;
};
-
static const ByteMnemonic two_operands_instr[] = {
- {0x01, "add", OPER_REG_OP_ORDER},
- {0x03, "add", REG_OPER_OP_ORDER},
- {0x09, "or", OPER_REG_OP_ORDER},
- {0x0B, "or", REG_OPER_OP_ORDER},
- {0x1B, "sbb", REG_OPER_OP_ORDER},
- {0x21, "and", OPER_REG_OP_ORDER},
- {0x23, "and", REG_OPER_OP_ORDER},
- {0x29, "sub", OPER_REG_OP_ORDER},
- {0x2A, "subb", REG_OPER_OP_ORDER},
- {0x2B, "sub", REG_OPER_OP_ORDER},
- {0x31, "xor", OPER_REG_OP_ORDER},
- {0x33, "xor", REG_OPER_OP_ORDER},
- {0x38, "cmpb", OPER_REG_OP_ORDER},
- {0x3A, "cmpb", REG_OPER_OP_ORDER},
- {0x3B, "cmp", REG_OPER_OP_ORDER},
- {0x84, "test_b", REG_OPER_OP_ORDER},
- {0x85, "test", REG_OPER_OP_ORDER},
- {0x87, "xchg", REG_OPER_OP_ORDER},
- {0x8A, "mov_b", REG_OPER_OP_ORDER},
- {0x8B, "mov", REG_OPER_OP_ORDER},
- {0x8D, "lea", REG_OPER_OP_ORDER},
- {-1, "", UNSET_OP_ORDER}
-};
-
+ {0x01, "add", OPER_REG_OP_ORDER},
+ {0x03, "add", REG_OPER_OP_ORDER},
+ {0x09, "or", OPER_REG_OP_ORDER},
+ {0x0B, "or", REG_OPER_OP_ORDER},
+ {0x1B, "sbb", REG_OPER_OP_ORDER},
+ {0x21, "and", OPER_REG_OP_ORDER},
+ {0x23, "and", REG_OPER_OP_ORDER},
+ {0x29, "sub", OPER_REG_OP_ORDER},
+ {0x2A, "subb", REG_OPER_OP_ORDER},
+ {0x2B, "sub", REG_OPER_OP_ORDER},
+ {0x31, "xor", OPER_REG_OP_ORDER},
+ {0x33, "xor", REG_OPER_OP_ORDER},
+ {0x38, "cmpb", OPER_REG_OP_ORDER},
+ {0x39, "cmp", OPER_REG_OP_ORDER},
+ {0x3A, "cmpb", REG_OPER_OP_ORDER},
+ {0x3B, "cmp", REG_OPER_OP_ORDER},
+ {0x84, "test_b", REG_OPER_OP_ORDER},
+ {0x85, "test", REG_OPER_OP_ORDER},
+ {0x87, "xchg", REG_OPER_OP_ORDER},
+ {0x8A, "mov_b", REG_OPER_OP_ORDER},
+ {0x8B, "mov", REG_OPER_OP_ORDER},
+ {0x8D, "lea", REG_OPER_OP_ORDER},
+ {-1, "", UNSET_OP_ORDER}};
static const ByteMnemonic zero_operands_instr[] = {
{0xC3, "ret", UNSET_OP_ORDER},
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index 5bd84fc298..bfed342eb8 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -59,20 +59,6 @@ const Register StringCompareDescriptor::LeftRegister() { return edx; }
const Register StringCompareDescriptor::RightRegister() { return eax; }
-const Register ArgumentsAccessReadDescriptor::index() { return edx; }
-const Register ArgumentsAccessReadDescriptor::parameter_count() { return eax; }
-
-
-const Register ArgumentsAccessNewDescriptor::function() { return edi; }
-const Register ArgumentsAccessNewDescriptor::parameter_count() { return ecx; }
-const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return edx; }
-
-
-const Register RestParamAccessDescriptor::parameter_count() { return ecx; }
-const Register RestParamAccessDescriptor::parameter_pointer() { return edx; }
-const Register RestParamAccessDescriptor::rest_parameter_index() { return ebx; }
-
-
const Register ApiGetterDescriptor::function_address() { return edx; }
@@ -101,6 +87,29 @@ void FastNewContextDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi, edx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void FastNewRestParameterDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
+
+void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {edi};
+ data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
void ToNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -119,6 +128,10 @@ const Register ToStringDescriptor::ReceiverRegister() { return eax; }
// static
+const Register ToNameDescriptor::ReceiverRegister() { return eax; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return eax; }
@@ -171,13 +184,6 @@ void CreateWeakCellDescriptor::InitializePlatformSpecific(
}
-void StoreArrayLiteralElementDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ecx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
void CallFunctionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi};
@@ -413,6 +419,13 @@ void ApiAccessorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void InterpreterDispatchDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ kInterpreterAccumulatorRegister, kInterpreterRegisterFileRegister,
+ kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeArrayRegister};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -424,7 +437,6 @@ void InterpreterPushArgsAndCallDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
@@ -436,7 +448,6 @@ void InterpreterPushArgsAndConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
-
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index 7a0beb57bc..0c459ebfd4 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -120,29 +120,56 @@ void MacroAssembler::PushRoot(Heap::RootListIndex index) {
Push(isolate()->heap()->root_handle(index));
}
+#define REG(Name) \
+ { Register::kCode_##Name }
-void MacroAssembler::InNewSpace(
- Register object,
- Register scratch,
- Condition cc,
- Label* condition_met,
- Label::Distance condition_met_distance) {
- DCHECK(cc == equal || cc == not_equal);
- if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
- } else {
- mov(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
+static const Register saved_regs[] = {REG(eax), REG(ecx), REG(edx)};
+
+#undef REG
+
+static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
+
+void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1, Register exclusion2,
+ Register exclusion3) {
+ // We don't allow a GC during a store buffer overflow so there is no need to
+ // store the registers in any particular way, but we do have to store and
+ // restore them.
+ for (int i = 0; i < kNumberOfSavedRegs; i++) {
+ Register reg = saved_regs[i];
+ if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ push(reg);
+ }
+ }
+ if (fp_mode == kSaveFPRegs) {
+ // Save FPU state in m108byte.
+ sub(esp, Immediate(108));
+ fnsave(Operand(esp, 0));
}
- // Check that we can use a test_b.
- DCHECK(MemoryChunk::IN_FROM_SPACE < 8);
- DCHECK(MemoryChunk::IN_TO_SPACE < 8);
- int mask = (1 << MemoryChunk::IN_FROM_SPACE)
- | (1 << MemoryChunk::IN_TO_SPACE);
- // If non-zero, the page belongs to new-space.
- test_b(Operand(scratch, MemoryChunk::kFlagsOffset),
- static_cast<uint8_t>(mask));
- j(cc, condition_met, condition_met_distance);
+}
+
+void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ if (fp_mode == kSaveFPRegs) {
+ // Restore FPU state in m108byte.
+ frstor(Operand(esp, 0));
+ add(esp, Immediate(108));
+ }
+
+ for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
+ Register reg = saved_regs[i];
+ if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
+ pop(reg);
+ }
+ }
+}
+
+void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cc,
+ Label* condition_met,
+ Label::Distance distance) {
+ const int mask =
+ (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+ CheckPageFlag(object, scratch, mask, cc, condition_met, distance);
}
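
// Aside: a self-contained sketch (constants assumed, not V8's real
// values; needs <cstdint>) of the test CheckPageFlag performs here: mask
// the object's address down to its page header, then test bits in the
// chunk's flag word.
static inline bool InNewSpaceSketch(uintptr_t object_addr, uint32_t mask) {
  const uintptr_t kPageAlignmentMaskSketch = (1u << 20) - 1;  // assumed size
  const uintptr_t kFlagsOffsetSketch = 2 * sizeof(void*);     // assumed offset
  uintptr_t chunk = object_addr & ~kPageAlignmentMaskSketch;  // page start
  uint32_t flags = *reinterpret_cast<uint32_t*>(chunk + kFlagsOffsetSketch);
  return (flags & mask) != 0;  // an IN_FROM_SPACE / IN_TO_SPACE bit is set
}
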
@@ -492,6 +519,75 @@ void MacroAssembler::RecordWrite(
}
}
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+ Register code_entry,
+ Register scratch) {
+ const int offset = JSFunction::kCodeEntryOffset;
+
+ // Since a code entry (value) is always in old space, we don't need to
+ // update the remembered set. If incremental marking is off, there is
+ // nothing for us to do.
+ if (!FLAG_incremental_marking) return;
+
+ DCHECK(!js_function.is(code_entry));
+ DCHECK(!js_function.is(scratch));
+ DCHECK(!code_entry.is(scratch));
+ AssertNotSmi(js_function);
+
+ if (emit_debug_code()) {
+ Label ok;
+ lea(scratch, FieldOperand(js_function, offset));
+ cmp(code_entry, Operand(scratch, 0));
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
+ Label done;
+
+ CheckPageFlag(code_entry, scratch,
+ MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
+ Label::kNear);
+ CheckPageFlag(js_function, scratch,
+ MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
+ Label::kNear);
+
+ // Save input registers.
+ push(js_function);
+ push(code_entry);
+
+ const Register dst = scratch;
+ lea(dst, FieldOperand(js_function, offset));
+
+ // Save caller-saved registers.
+ PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);
+
+ int argument_count = 3;
+ PrepareCallCFunction(argument_count, code_entry);
+ mov(Operand(esp, 0 * kPointerSize), js_function);
+ mov(Operand(esp, 1 * kPointerSize), dst); // Slot.
+ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(this);
+ CallCFunction(
+ ExternalReference::incremental_marking_record_write_code_entry_function(
+ isolate()),
+ argument_count);
+ }
+
+ // Restore caller-saved registers.
+ PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);
+
+ // Restore input registers.
+ pop(code_entry);
+ pop(js_function);
+
+ bind(&done);
+}
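
// Aside: the barrier above boils down to this decision (sketch; the two
// booleans stand in for the CheckPageFlag tests):
static inline bool NeedsCodeEntryBarrierSketch(bool incremental_marking,
                                               bool value_page_flagged,
                                               bool holder_page_flagged) {
  if (!incremental_marking) return false;  // old-space value, no RS update
  // The slow C call is taken only when both pages are flagged as
  // interesting; each CheckPageFlag above jumps to done when its mask
  // bits are zero.
  return value_page_flagged && holder_page_flagged;
}
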
void MacroAssembler::DebugBreak() {
Move(eax, Immediate(0));
@@ -804,6 +900,17 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
}
+void MacroAssembler::AssertReceiver(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAReceiver);
+ Push(object);
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, object);
+ Pop(object);
+ Check(above_equal, kOperandIsNotAReceiver);
+ }
+}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
@@ -936,7 +1043,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
}
-void MacroAssembler::EnterExitFrame(bool save_doubles) {
+void MacroAssembler::EnterExitFrame(int argc, bool save_doubles) {
EnterExitFramePrologue();
// Set up argc and argv in callee-saved registers.
@@ -945,7 +1052,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) {
lea(esi, Operand(ebp, eax, times_4, offset));
// Reserve space for argc, argv and isolate.
- EnterExitFrameEpilogue(3, save_doubles);
+ EnterExitFrameEpilogue(argc, save_doubles);
}
@@ -1734,13 +1841,13 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Register end_address,
Register filler) {
Label loop, entry;
- jmp(&entry);
+ jmp(&entry, Label::kNear);
bind(&loop);
mov(Operand(current_address, 0), filler);
add(current_address, Immediate(kPointerSize));
bind(&entry);
cmp(current_address, end_address);
- j(below, &loop);
+ j(below, &loop, Label::kNear);
}
@@ -1762,9 +1869,9 @@ void MacroAssembler::NegativeZeroTest(Register result,
Label* then_label) {
Label ok;
test(result, result);
- j(not_zero, &ok);
+ j(not_zero, &ok, Label::kNear);
test(op, op);
- j(sign, then_label);
+ j(sign, then_label, Label::kNear);
bind(&ok);
}
@@ -1776,10 +1883,10 @@ void MacroAssembler::NegativeZeroTest(Register result,
Label* then_label) {
Label ok;
test(result, result);
- j(not_zero, &ok);
+ j(not_zero, &ok, Label::kNear);
mov(scratch, op1);
or_(scratch, op2);
- j(sign, then_label);
+ j(sign, then_label, Label::kNear);
bind(&ok);
}
@@ -2009,7 +2116,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -2111,26 +2218,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
}
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper) {
- // You can't call a builtin without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
-
- // Fake a parameter count to avoid emitting code to do the check.
- ParameterCount expected(0);
- GetBuiltinFunction(edi, native_context_index);
- InvokeFunctionCode(edi, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
-void MacroAssembler::GetBuiltinFunction(Register target,
- int native_context_index) {
- // Load the JavaScript builtin function from the builtins object.
- mov(target, NativeContextOperand());
- mov(target, ContextOperand(target, native_context_index));
-}
-
-
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
@@ -2524,9 +2611,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
} else {
- CallRuntime(Runtime::kAbort, 1);
+ CallRuntime(Runtime::kAbort);
}
// will not return here
int3();
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
index 9b6c5e8a0a..fc493610c4 100644
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ b/deps/v8/src/x87/macro-assembler-x87.h
@@ -16,6 +16,7 @@ namespace internal {
// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_eax};
const Register kReturnRegister1 = {Register::kCode_edx};
+const Register kReturnRegister2 = {Register::kCode_edi};
const Register kJSFunctionRegister = {Register::kCode_edi};
const Register kContextRegister = {Register::kCode_esi};
const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
@@ -106,6 +107,16 @@ class MacroAssembler: public Assembler {
j(not_equal, if_not_equal, if_not_equal_distance);
}
+ // These functions do not arrange the registers in any particular order so
+ // they are not useful for calls that can cause a GC. The caller can
+ // exclude up to 3 registers that do not need to be saved and restored.
+ void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+ void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+
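
// Aside: a hypothetical call pattern (sketch, assuming this header's
// declarations), mirroring RecordWriteCodeEntryField in the .cc file;
// the push and the pop must name the same exclusions in the same mode.
inline void EmitGuardedCCallSketch(MacroAssembler* masm, Register r0,
                                   Register r1) {
  masm->PushCallerSaved(kDontSaveFPRegs, r0, r1);
  // ... PrepareCallCFunction / CallCFunction clobbering eax/ecx/edx ...
  masm->PopCallerSaved(kDontSaveFPRegs, r0, r1);  // same exclusions
}
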
// ---------------------------------------------------------------------------
// GC Support
enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
@@ -206,6 +217,11 @@ class MacroAssembler: public Assembler {
PointersToHereCheck pointers_to_here_check_for_value =
kPointersToHereMaybeInteresting);
+ // Notify the garbage collector that we wrote a code entry into a
+ // JSFunction. Only scratch is clobbered by the operation.
+ void RecordWriteCodeEntryField(Register js_function, Register code_entry,
+ Register scratch);
+
// For page containing |object| mark the region covering the object's map
// dirty. |object| is the object being stored into, |map| is the Map object
// that was stored.
@@ -225,7 +241,7 @@ class MacroAssembler: public Assembler {
// arguments in register eax and sets up the number of arguments in
// register edi and the pointer to the first argument in register
// esi.
- void EnterExitFrame(bool save_doubles);
+ void EnterExitFrame(int argc, bool save_doubles);
void EnterApiExitFrame(int argc);
@@ -270,6 +286,9 @@ class MacroAssembler: public Assembler {
void StoreToSafepointRegisterSlot(Register dst, Immediate src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
+ // Nop, because x87 does not have a root register.
+ void InitializeRootRegister() {}
+
void LoadHeapObject(Register result, Handle<HeapObject> object);
void CmpHeapObject(Register reg, Handle<HeapObject> object);
void PushHeapObject(Handle<HeapObject> object);
@@ -325,13 +344,6 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- // Invoke specified builtin JavaScript function.
- void InvokeBuiltin(int native_context_index, InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
-
- // Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, int native_context_index);
-
// Expression support
// Support for constant splitting.
@@ -517,6 +529,9 @@ class MacroAssembler: public Assembler {
// enabled via --debug-code.
void AssertBoundFunction(Register object);
+ // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
+ void AssertReceiver(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
diff --git a/deps/v8/test/bot_default.gyp b/deps/v8/test/bot_default.gyp
index ccdf42a7d7..9b39f58412 100644
--- a/deps/v8/test/bot_default.gyp
+++ b/deps/v8/test/bot_default.gyp
@@ -11,6 +11,7 @@
'type': 'none',
'dependencies': [
'cctest/cctest.gyp:cctest_run',
+ 'fuzzer/fuzzer.gyp:fuzzer_run',
'intl/intl.gyp:intl_run',
'message/message.gyp:message_run',
'mjsunit/mjsunit.gyp:mjsunit_run',
diff --git a/deps/v8/test/bot_default.isolate b/deps/v8/test/bot_default.isolate
index 32773587c2..d6e4aa3474 100644
--- a/deps/v8/test/bot_default.isolate
+++ b/deps/v8/test/bot_default.isolate
@@ -2,8 +2,14 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
+ 'variables': {
+ 'command': [
+ '../tools/run-tests.py',
+ ],
+ },
'includes': [
'cctest/cctest.isolate',
+ 'fuzzer/fuzzer.isolate',
'intl/intl.isolate',
'message/message.isolate',
'mjsunit/mjsunit.isolate',
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 708b767a10..3b76ce1778 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -90,6 +90,8 @@
'expression-type-collector.h',
'interpreter/test-bytecode-generator.cc',
'interpreter/test-interpreter.cc',
+ 'interpreter/bytecode-expectations-printer.cc',
+ 'interpreter/bytecode-expectations-printer.h',
'gay-fixed.cc',
'gay-precision.cc',
'gay-shortest.cc',
@@ -343,12 +345,35 @@
}
],
},
+ {
+ 'target_name': 'generate-bytecode-expectations',
+ 'type': 'executable',
+ 'dependencies': [
+ '../../tools/gyp/v8.gyp:v8_libplatform',
+ ],
+ 'conditions': [
+ ['component=="shared_library"', {
+ # Same as cctest, we need to depend on the underlying static target.
+ 'dependencies': ['../../tools/gyp/v8.gyp:v8_maybe_snapshot'],
+ }, {
+ 'dependencies': ['../../tools/gyp/v8.gyp:v8'],
+ }],
+ ],
+ 'include_dirs+': [
+ '../..',
+ ],
+ 'sources': [
+ 'interpreter/bytecode-expectations-printer.cc',
+ 'interpreter/bytecode-expectations-printer.h',
+ 'interpreter/generate-bytecode-expectations.cc',
+ ],
+ },
],
'conditions': [
['test_isolation_mode != "noop"', {
'targets': [
{
- 'target_name': 'cctest_run',
+ 'target_name': 'cctest_exe_run',
'type': 'none',
'dependencies': [
'cctest',
@@ -357,6 +382,19 @@
'../../build/isolate.gypi',
],
'sources': [
+ 'cctest_exe.isolate',
+ ],
+ },
+ {
+ 'target_name': 'cctest_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'cctest_exe_run',
+ ],
+ 'includes': [
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
'cctest.isolate',
],
},
diff --git a/deps/v8/test/cctest/cctest.isolate b/deps/v8/test/cctest/cctest.isolate
index aee8d83f85..ab55466214 100644
--- a/deps/v8/test/cctest/cctest.isolate
+++ b/deps/v8/test/cctest/cctest.isolate
@@ -4,13 +4,12 @@
{
'variables': {
'files': [
- '<(PRODUCT_DIR)/cctest<(EXECUTABLE_SUFFIX)',
'./cctest.status',
'./testcfg.py',
],
},
'includes': [
- '../../src/base.isolate',
+ 'cctest_exe.isolate',
'../../tools/testrunner/testrunner.isolate',
],
-}
\ No newline at end of file
+}
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 80837534ce..8eaa9515b1 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -71,20 +71,9 @@
# This tests API threading, no point in running several variants.
'test-api/Threading*': [PASS, NO_VARIANTS],
- # The cpu profiler tests are notoriously flaky.
- # BUG(2999). (test/cpu-profiler/CollectCpuProfile)
- # BUG(3287). (test-cpu-profiler/SampleWhenFrameIsNotSetup)
- 'test-cpu-profiler/CollectCpuProfile': [SKIP],
- 'test-cpu-profiler/CollectCpuProfileSamples': [SKIP],
- 'test-cpu-profiler/FunctionApplySample': [SKIP],
- 'test-cpu-profiler/FunctionCallSample': [SKIP],
- 'test-cpu-profiler/SampleWhenFrameIsNotSetup': [SKIP],
- 'test-cpu-profiler/HotDeoptNoFrameEntry': [SKIP],
- 'test-cpu-profiler/BoundFunctionCall': [SKIP],
+ # BUG(2999). The cpu profiler tests are notoriously flaky.
'test-cpu-profiler/CpuProfileDeepStack': [SKIP],
- 'test-cpu-profiler/JsNativeJsSample': [SKIP],
- 'test-cpu-profiler/JsNativeJsRuntimeJsSample': [SKIP],
- 'test-cpu-profiler/JsNative1JsNative2JsSample': [SKIP],
+ 'test-cpu-profiler/HotDeoptNoFrameEntry': [SKIP],
# BUG(3525). Test crashes flakily.
'test-debug/RecursiveBreakpoints': [PASS, FLAKY],
@@ -100,6 +89,10 @@
'test-func-name-inference/UpperCaseClass': [FAIL],
'test-func-name-inference/LowerCaseClass': [FAIL],
+ # BUG(3956). Strong mode is being deprecated. Decide about these tests.
+ 'test-api/StrongModeAccessCheckAllowed': [FAIL],
+ 'test-api/StrongModeAccessCheckBlocked': [FAIL],
+
##############################################################################
# TurboFan compiler failures.
@@ -256,8 +249,11 @@
##############################################################################
['byteorder == big', {
# TODO(mips-team): Fix Wasm for big-endian.
+ 'test-run-wasm-module/Run_WasmModule_CallAdd': [SKIP],
'test-run-wasm-module/Run_WasmModule_CallAdd_rev': [SKIP],
+ 'test-run-wasm-module/Run_WasmModule_CallMain_recursive': [SKIP],
'test-run-wasm-module/Run_WasmModule_ReadLoadedDataSegment': [SKIP],
+ 'test-run-wasm-module/Run_WasmModule_Return114': [SKIP],
'test-run-wasm-module/Run_WasmModule_CheckMemoryIsZero': [SKIP],
'test-run-wasm-module/Run_WasmModule_Global': [SKIP],
'test-run-wasm/Run_WasmInt32*': [SKIP],
@@ -271,6 +267,12 @@
'test-run-wasm/Run_Wasm_LoadStoreI64_sx': [SKIP],
'test-run-wasm/Run_WASM_Int64DivS_byzero_const': [SKIP],
'test-run-wasm/Run_TestI64WasmRunner': [SKIP],
+ 'test-run-wasm-js/Run_JSSelect_1': [SKIP],
+ 'test-run-wasm-js/Run_JSSelect_2': [SKIP],
+ 'test-run-wasm-js/Run_JSSelect_3': [SKIP],
+ 'test-run-wasm-js/Run_JSSelect_4': [SKIP],
+ 'test-run-wasm-js/Run_JSSelect_5': [SKIP],
+ 'test-run-wasm-js/Run_JSSelect_6': [SKIP],
}], # 'byteorder == big'
##############################################################################
@@ -497,93 +499,120 @@
}], # 'arch == ppc64 and simulator_run == True'
['ignition == True', {
- 'test-api/*' : [SKIP],
- 'test-cpu-profiler/*' : [SKIP],
- 'test-debug/*' : [SKIP],
- 'test-func-name-inference/*' : [SKIP],
- 'test-inobject-slack-tracking/*' : [SKIP],
- 'test-run-jsexceptions/*' : [SKIP],
- 'test-serialize/*' : [SKIP],
-
- 'test-api-interceptors/InterceptorCallICInvalidatedConstantFunctionViaGlobal': [SKIP],
- 'test-api-interceptors/InterceptorLoadICInvalidatedCallbackViaGlobal': [SKIP],
- 'test-api-interceptors/InterceptorLoadICInvalidatedFieldViaGlobal': [SKIP],
- 'test-bytecode-generator/TryCatch': [SKIP],
- 'test-bytecode-generator/TryFinally': [SKIP],
- 'test-compiler/C2JSFrames': [SKIP],
- 'test-compiler/FeedbackVectorPreservedAcrossRecompiles': [SKIP],
- 'test-compiler/FeedbackVectorUnaffectedByScopeChanges': [SKIP],
- 'test-compiler/OptimizedCodeSharing2': [SKIP],
- 'test-compiler/OptimizedCodeSharing3': [SKIP],
- 'test-compiler/Print': [SKIP],
- 'test-compiler/UncaughtThrow': [SKIP],
- 'test-decls/CrossScriptDynamicLookup': [SKIP],
- 'test-decls/Regress425510': [SKIP],
- 'test-feedback-vector/VectorCallICStates': [SKIP],
- 'test-heap/AddInstructionChangesNewSpacePromotion': [SKIP],
- 'test-heap/ArrayShiftSweeping': [SKIP],
- 'test-heap/CanonicalSharedFunctionInfo': [SKIP],
- 'test-heap/CellsInOptimizedCodeAreWeak': [SKIP],
- 'test-heap/CompilationCacheCachingBehavior': [SKIP],
- 'test-heap/CountForcedGC': [SKIP],
- 'test-heap/IncrementalMarkingClearsMonomorphicConstructor': [SKIP],
- 'test-heap/IncrementalMarkingPreservesMonomorphicCallIC': [SKIP],
- 'test-heap/IncrementalMarkingPreservesMonomorphicConstructor': [SKIP],
- 'test-heap/NoWeakHashTableLeakWithIncrementalMarking': [SKIP],
- 'test-heap-profiler/HeapSnapshotCollection': [SKIP],
- 'test-heap-profiler/HeapSnapshotSimd': [SKIP],
- 'test-heap-profiler/HeapSnapshotWeakCollection': [SKIP],
- 'test-heap/OptimizedAllocationAlwaysInNewSpace': [SKIP],
- 'test-heap/PromotionQueue': [SKIP],
- 'test-heap/Regress169209': [SKIP],
- 'test-heap/Regress1878': [SKIP],
- 'test-heap/Regress357137': [SKIP],
- 'test-heap/Regress3631': [SKIP],
- 'test-heap/Regress388880': [SKIP],
- 'test-heap/TestCodeFlushingIncrementalAbort': [SKIP],
- 'test-heap/TestCodeFlushingIncrementalScavenge': [SKIP],
- 'test-heap/TestCodeFlushingIncremental': [SKIP],
- 'test-heap/TestCodeFlushingPreAged': [SKIP],
- 'test-heap/TestCodeFlushing': [SKIP],
- 'test-heap/WeakFunctionInConstructor': [SKIP],
- 'test-log-stack-tracer/CFromJSStackTrace': [SKIP],
- 'test-log-stack-tracer/JsEntrySp': [SKIP],
- 'test-log-stack-tracer/PureCStackTrace': [SKIP],
- 'test-log-stack-tracer/PureJSStackTrace': [SKIP],
- 'test-parsing/DestructuringNegativeTests': [SKIP],
- 'test-parsing/StrongModeFreeVariablesDeclaredByLanguage': [SKIP],
- 'test-parsing/StrongModeFreeVariablesDeclaredByPreviousScript': [SKIP],
- 'test-parsing/StrongModeFreeVariablesDeclaredInGlobalPrototype': [SKIP],
- 'test-pipeline/PipelineGeneric': [SKIP],
- 'test-pipeline/PipelineTyped': [SKIP],
- 'test-profile-generator/BailoutReason': [SKIP],
- 'test-profile-generator/LineNumber': [SKIP],
- 'test-profile-generator/ProfileNodeScriptId': [SKIP],
- 'test-profile-generator/RecordStackTraceAtStartProfiling': [SKIP],
- 'test-run-inlining/InlineTwice': [SKIP],
- 'test-run-jsbranches/ForOfContinueStatement': [SKIP],
- 'test-run-jscalls/LookupCall': [SKIP],
- 'test-run-jsobjects/ArgumentsRest': [SKIP],
- 'test-run-jsops/ClassLiteral': [SKIP],
- 'test-run-jsops/LookupLoad': [SKIP],
- 'test-run-jsops/LookupStore': [SKIP],
- 'test-run-variables/ContextInitializeVariables': [SKIP],
- 'test-run-variables/ContextLoadVariables': [SKIP],
- 'test-run-variables/ContextStoreVariables': [SKIP],
- 'test-run-variables/StackInitializeVariables': [SKIP],
- 'test-run-variables/StackLoadVariables': [SKIP],
- 'test-run-variables/StackStoreVariables': [SKIP],
- 'test-sampler-api/StackFramesConsistent': [SKIP],
- 'test-thread-termination/TerminateCancelTerminateFromThreadItself': [SKIP],
- 'test-thread-termination/TerminateFromOtherThreadWhileMicrotaskRunning': [SKIP],
- 'test-thread-termination/TerminateOnlyV8ThreadFromThreadItselfNoLoop': [SKIP],
- 'test-thread-termination/TerminationInInnerTryCall': [SKIP],
- 'test-unscopables-hidden-prototype/Unscopables': [SKIP],
+ # TODO(yangguo,4690): Test failures in debugger tests.
+ 'test-debug/DebugStepLocals': [FAIL],
+ 'test-debug/DebugStepKeyedLoadLoop': [FAIL],
+ 'test-debug/DebugStepKeyedStoreLoop': [FAIL],
+ 'test-debug/DebugStepIf': [FAIL],
+ 'test-debug/DebugStepNamedLoadLoop': [FAIL],
+ 'test-debug/DebugStepDeclarations': [FAIL],
+ 'test-debug/BreakPointConstructCallWithGC': [PASS, FAIL],
+ 'test-debug/DebugStepNamedStoreLoop': [FAIL],
+ 'test-debug/DebugStepLinearMixedICs': [FAIL],
+ 'test-debug/DebugStepSwitch': [FAIL],
+ 'test-debug/DebugStepWhile': [FAIL],
+ 'test-debug/DebugStepFor': [FAIL],
+ 'test-debug/DebugStepForContinue': [FAIL],
+ 'test-debug/DebugStepForIn': [FAIL],
+ 'test-debug/DebugStepDoWhile': [FAIL],
+ 'test-debug/DebugConditional': [FAIL],
+ 'test-debug/DebugStepForBreak': [FAIL],
+ 'test-debug/DebugStepWith': [FAIL],
+ 'test-debug/DebugStepFunctionApply': [FAIL],
+ 'test-debug/StepInOutBranch': [FAIL],
+ 'test-debug/DebugStepFunctionCall': [FAIL],
+
+ # TODO(yangguo,4690): Required DebuggerStatement support.
+ 'test-profile-generator/BailoutReason': [FAIL],
+
+ # TODO(rmcilroy,4680): Check failed: toplevel_test_code_event_found.
+ 'test-serialize/SerializeToplevelIsolates': [FAIL],
+
+ # BUG(4333). Function name inferrer does not work for ES6 classes.
+ 'test-func-name-inference/UpperCaseClass': [TIMEOUT],
+ 'test-func-name-inference/LowerCaseClass': [TIMEOUT],
+
+ # TODO(rmcilroy,4681): Requires support for generators.
+ 'test-inobject-slack-tracking/JSGeneratorObjectBasic': [FAIL],
+ 'test-inobject-slack-tracking/JSGeneratorObjectBasicNoInlineNew': [FAIL],
+ 'test-api/IsGeneratorFunctionOrObject': [FAIL],
+
+ # TODO(rmcilroy,4680): Strong mode failures.
+ 'test-api/AccessorShouldThrowOnError': [FAIL],
+ 'test-api/InterceptorShouldThrowOnError': [FAIL],
+
+ # TODO(rmcilroy,4680): The function_data field should be a BytecodeArray on interpreter entry
+ 'test-api/SetFunctionEntryHook': [FAIL],
+
+ # TODO(rmcilroy,4680): Fail on shared_function_data()->IsUndefined in
+ # compiler.cc
+ 'test-heap/CanonicalSharedFunctionInfo': [PASS, ['mode == debug or dcheck_always_on == True', FAIL]],
+
+ # TODO(rmcilroy,4680): Check failed: !function->shared()->is_compiled() || function->IsOptimized().
+ 'test-heap/TestCodeFlushingPreAged': [FAIL],
+ 'test-heap/TestCodeFlushingIncrementalScavenge': [FAIL],
+ 'test-heap/TestCodeFlushing': [FAIL],
+ 'test-heap/TestCodeFlushingIncremental': [FAIL],
+ 'test-heap/TestCodeFlushingIncrementalAbort': [PASS, ['mode == debug or dcheck_always_on == True', FAIL]],
+
+ # TODO(rmcilroy,4680): Check failed: fun1->IsOptimized() || !CcTest::i_isolate()->use_crankshaft().
+ 'test-compiler/OptimizedCodeSharing2': [FAIL],
+ 'test-compiler/OptimizedCodeSharing3': [FAIL],
+
+ # TODO(rmcilroy,4689): Stack trace line number failures.
+ 'test-run-jsexceptions/ThrowMessagePosition': [FAIL],
+ 'test-api/TryCatchMixedNesting': [FAIL],
+
+ # TODO(rmcilroy,4680): Test assert errors.
+ 'test-cpu-profiler/CodeEvents': [FAIL],
+ 'test-cpu-profiler/TickEvents': [FAIL],
+ 'test-cpu-profiler/BoundFunctionCall': [FAIL],
+ 'test-cpu-profiler/CollectCpuProfile': [FAIL],
+ 'test-cpu-profiler/CollectSampleAPI': [FAIL],
+ 'test-cpu-profiler/CpuProfileDeepStack': [FAIL],
+ 'test-cpu-profiler/FunctionApplySample': [FAIL],
+ 'test-cpu-profiler/FunctionCallSample': [FAIL],
+ 'test-cpu-profiler/FunctionDetails': [FAIL],
+ 'test-cpu-profiler/HotDeoptNoFrameEntry': [FAIL],
+ 'test-cpu-profiler/JsNative1JsNative2JsSample': [FAIL],
+ 'test-cpu-profiler/JsNativeJsRuntimeJsSample': [FAIL],
+ 'test-cpu-profiler/JsNativeJsRuntimeJsSampleMultiple': [FAIL],
+ 'test-cpu-profiler/JsNativeJsSample': [FAIL],
+ 'test-cpu-profiler/NativeMethodUninitializedIC': [FAIL],
+ 'test-cpu-profiler/NativeMethodMonomorphicIC': [FAIL],
+ 'test-cpu-profiler/NativeAccessorUninitializedIC': [FAIL],
+ 'test-cpu-profiler/NativeAccessorMonomorphicIC': [FAIL],
+ 'test-cpu-profiler/SampleWhenFrameIsNotSetup': [FAIL],
+ 'test-sampler-api/StackFramesConsistent': [FAIL],
+ 'test-profile-generator/LineNumber': [FAIL],
+ 'test-profile-generator/ProfileNodeScriptId': [FAIL],
+ 'test-profile-generator/RecordStackTraceAtStartProfiling': [FAIL],
+ 'test-feedback-vector/VectorCallICStates': [FAIL],
+ 'test-compiler/FeedbackVectorPreservedAcrossRecompiles': [FAIL],
+ 'test-api/PromiseRejectCallback': [FAIL],
+ 'test-api/SetJitCodeEventHandler': [FAIL],
+ 'test-heap/WeakFunctionInConstructor': [FAIL],
+ 'test-heap/Regress169209': [FAIL],
+ 'test-heap/IncrementalMarkingClearsMonomorphicConstructor': [FAIL],
+ 'test-heap/IncrementalMarkingPreservesMonomorphicConstructor': [FAIL],
+ 'test-heap/IncrementalMarkingPreservesMonomorphicCallIC': [FAIL],
+ 'test-heap/CompilationCacheCachingBehavior': [FAIL],
+ 'test-heap/CellsInOptimizedCodeAreWeak': [FAIL],
+ 'test-run-inlining/InlineTwice': [FAIL],
+ 'test-serialize/SerializeInternalReference': [FAIL, ['arch == arm or arch == arm64', PASS]],
}], # ignition == True
-['ignition == True and arch == arm64', {
- 'test-js-arm64-variables/lookup_slots': [SKIP],
- 'test-spaces/SizeOfFirstPageIsLargeEnough': [SKIP],
-}], # ignition == True and arch == arm64
+['ignition == True and arch == x64', {
+ # TODO(rmcilroy,4680): The function_data field should be a BytecodeArray on interpreter entry
+ 'test-serialize/PerIsolateSnapshotBlobsOutdatedContextWithOverflow': [PASS, ['mode == debug', FAIL]],
+ 'test-serialize/PerIsolateSnapshotBlobsWithLocker': [PASS, ['mode == debug', FAIL]],
+ 'test-serialize/SnapshotBlobsStackOverflow': [PASS, ['mode == debug', FAIL]],
+ 'test-serialize/PerIsolateSnapshotBlobs': [PASS, ['mode == debug', FAIL]],
+ 'test-serialize/SerializationMemoryStats': [PASS, ['mode == debug', FAIL]],
+
+ # TODO(rmcilroy,4680): Test assert errors.
+ 'test-heap-profiler/HeapSnapshotSimd': [PASS, ['mode == debug', FAIL]],
+ 'test-api/InitializeDefaultIsolateOnSecondaryThread1': [PASS, ['mode == debug', FAIL]],
+}],
]
diff --git a/deps/v8/test/cctest/cctest_exe.isolate b/deps/v8/test/cctest/cctest_exe.isolate
new file mode 100644
index 0000000000..da5327846e
--- /dev/null
+++ b/deps/v8/test/cctest/cctest_exe.isolate
@@ -0,0 +1,13 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/cctest<(EXECUTABLE_SUFFIX)',
+ ],
+ },
+ 'includes': [
+ '../../src/base.isolate',
+ ],
+}
diff --git a/deps/v8/test/cctest/compiler/c-signature.h b/deps/v8/test/cctest/compiler/c-signature.h
index 13ef38aaed..1c2f9638f4 100644
--- a/deps/v8/test/cctest/compiler/c-signature.h
+++ b/deps/v8/test/cctest/compiler/c-signature.h
@@ -53,16 +53,16 @@ class CSignature : public MachineSignature {
public:
template <typename P1 = void, typename P2 = void, typename P3 = void,
typename P4 = void, typename P5 = void>
- void VerifyParams() {
+ static void VerifyParams(MachineSignature* sig) {
// Verifies the C signature against the machine types. Maximum {5} params.
- CHECK_LT(parameter_count(), 6u);
+ CHECK_LT(sig->parameter_count(), 6u);
const int kMax = 5;
MachineType params[] = {MachineTypeForC<P1>(), MachineTypeForC<P2>(),
MachineTypeForC<P3>(), MachineTypeForC<P4>(),
MachineTypeForC<P5>()};
for (int p = kMax - 1; p >= 0; p--) {
- if (p < static_cast<int>(parameter_count())) {
- CHECK_EQ(GetParam(p), params[p]);
+ if (p < static_cast<int>(sig->parameter_count())) {
+ CHECK_EQ(sig->GetParam(p), params[p]);
} else {
CHECK_EQ(MachineType::None(), params[p]);
}
diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h
index e60f7172fa..8ee6b99f3b 100644
--- a/deps/v8/test/cctest/compiler/call-tester.h
+++ b/deps/v8/test/cctest/compiler/call-tester.h
@@ -119,7 +119,7 @@ struct ParameterTraits<uint32_t> {
template <typename R>
class CallHelper {
public:
- explicit CallHelper(Isolate* isolate, CSignature* csig)
+ explicit CallHelper(Isolate* isolate, MachineSignature* csig)
: csig_(csig), isolate_(isolate) {
USE(isolate_);
}
@@ -127,47 +127,47 @@ class CallHelper {
R Call() {
typedef R V8_CDECL FType();
- csig_->VerifyParams();
+ CSignature::VerifyParams(csig_);
return DoCall(FUNCTION_CAST<FType*>(Generate()));
}
template <typename P1>
R Call(P1 p1) {
typedef R V8_CDECL FType(P1);
- csig_->VerifyParams<P1>();
+ CSignature::VerifyParams<P1>(csig_);
return DoCall(FUNCTION_CAST<FType*>(Generate()), p1);
}
template <typename P1, typename P2>
R Call(P1 p1, P2 p2) {
typedef R V8_CDECL FType(P1, P2);
- csig_->VerifyParams<P1, P2>();
+ CSignature::VerifyParams<P1, P2>(csig_);
return DoCall(FUNCTION_CAST<FType*>(Generate()), p1, p2);
}
template <typename P1, typename P2, typename P3>
R Call(P1 p1, P2 p2, P3 p3) {
typedef R V8_CDECL FType(P1, P2, P3);
- csig_->VerifyParams<P1, P2, P3>();
+ CSignature::VerifyParams<P1, P2, P3>(csig_);
return DoCall(FUNCTION_CAST<FType*>(Generate()), p1, p2, p3);
}
template <typename P1, typename P2, typename P3, typename P4>
R Call(P1 p1, P2 p2, P3 p3, P4 p4) {
typedef R V8_CDECL FType(P1, P2, P3, P4);
- csig_->VerifyParams<P1, P2, P3, P4>();
+ CSignature::VerifyParams<P1, P2, P3, P4>(csig_);
return DoCall(FUNCTION_CAST<FType*>(Generate()), p1, p2, p3, p4);
}
template <typename P1, typename P2, typename P3, typename P4, typename P5>
R Call(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
typedef R V8_CDECL FType(P1, P2, P3, P4, P5);
- csig_->VerifyParams<P1, P2, P3, P4, P5>();
+ CSignature::VerifyParams<P1, P2, P3, P4, P5>(csig_);
return DoCall(FUNCTION_CAST<FType*>(Generate()), p1, p2, p3, p4, p5);
}
protected:
- CSignature* csig_;
+ MachineSignature* csig_;
virtual byte* Generate() = 0;
@@ -342,7 +342,7 @@ class CallHelper {
template <typename T>
class CodeRunner : public CallHelper<T> {
public:
- CodeRunner(Isolate* isolate, Handle<Code> code, CSignature* csig)
+ CodeRunner(Isolate* isolate, Handle<Code> code, MachineSignature* csig)
: CallHelper<T>(isolate, csig), code_(code) {}
virtual ~CodeRunner() {}
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index 56e90c65b7..5d670bfee8 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -35,10 +35,13 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
Linkage::GetSimplifiedCDescriptor(
main_zone(),
CSignature::New(main_zone(), MachineTypeForC<ReturnType>(), p0,
- p1, p2, p3, p4)),
+ p1, p2, p3, p4),
+ true),
MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags()) {}
+ virtual ~RawMachineAssemblerTester() {}
+
void CheckNumber(double expected, Object* number) {
CHECK(this->isolate()->factory()->NewNumber(expected)->SameValue(number));
}
@@ -85,6 +88,7 @@ class BufferedRawMachineAssemblerTester
: BufferedRawMachineAssemblerTester(ComputeParameterCount(p0, p1, p2, p3),
p0, p1, p2, p3) {}
+ virtual byte* Generate() { return RawMachineAssemblerTester::Generate(); }
// The BufferedRawMachineAssemblerTester does not pass parameters directly
// to the constructed IR graph. Instead it passes a pointer to the parameter
@@ -92,11 +96,10 @@ class BufferedRawMachineAssemblerTester
// parameters from memory. Thereby it is possible to pass 64 bit parameters
// to the IR graph.
Node* Parameter(size_t index) {
- CHECK(index >= 0 && index < 4);
+ CHECK(index < 4);
return parameter_nodes_[index];
}
-
// The BufferedRawMachineAssemblerTester adds a Store node to the IR graph
// to store the graph's return value in memory. The memory address for the
// Store node is provided as a parameter. By storing the return value in
@@ -110,7 +113,7 @@ class BufferedRawMachineAssemblerTester
ReturnType Call() {
ReturnType return_value;
- test_graph_signature_->VerifyParams();
+ CSignature::VerifyParams(test_graph_signature_);
CallHelper<int32_t>::Call(reinterpret_cast<void*>(&return_value));
return return_value;
}
@@ -118,7 +121,7 @@ class BufferedRawMachineAssemblerTester
template <typename P0>
ReturnType Call(P0 p0) {
ReturnType return_value;
- test_graph_signature_->VerifyParams<P0>();
+ CSignature::VerifyParams<P0>(test_graph_signature_);
CallHelper<int32_t>::Call(reinterpret_cast<void*>(&p0),
reinterpret_cast<void*>(&return_value));
return return_value;
@@ -127,7 +130,7 @@ class BufferedRawMachineAssemblerTester
template <typename P0, typename P1>
ReturnType Call(P0 p0, P1 p1) {
ReturnType return_value;
- test_graph_signature_->VerifyParams<P0, P1>();
+ CSignature::VerifyParams<P0, P1>(test_graph_signature_);
CallHelper<int32_t>::Call(reinterpret_cast<void*>(&p0),
reinterpret_cast<void*>(&p1),
reinterpret_cast<void*>(&return_value));
@@ -137,7 +140,7 @@ class BufferedRawMachineAssemblerTester
template <typename P0, typename P1, typename P2>
ReturnType Call(P0 p0, P1 p1, P2 p2) {
ReturnType return_value;
- test_graph_signature_->VerifyParams<P0, P1, P2>();
+ CSignature::VerifyParams<P0, P1, P2>(test_graph_signature_);
CallHelper<int32_t>::Call(
reinterpret_cast<void*>(&p0), reinterpret_cast<void*>(&p1),
reinterpret_cast<void*>(&p2), reinterpret_cast<void*>(&return_value));
@@ -147,7 +150,7 @@ class BufferedRawMachineAssemblerTester
template <typename P0, typename P1, typename P2, typename P3>
ReturnType Call(P0 p0, P1 p1, P2 p2, P3 p3) {
ReturnType return_value;
- test_graph_signature_->VerifyParams<P0, P1, P2, P3>();
+ CSignature::VerifyParams<P0, P1, P2, P3>(test_graph_signature_);
CallHelper<int32_t>::Call(
reinterpret_cast<void*>(&p0), reinterpret_cast<void*>(&p1),
reinterpret_cast<void*>(&p2), reinterpret_cast<void*>(&p3),
@@ -245,6 +248,7 @@ class BufferedRawMachineAssemblerTester<void>
: Load(p3, RawMachineAssembler::Parameter(3));
}
+ virtual byte* Generate() { return RawMachineAssemblerTester::Generate(); }
// The BufferedRawMachineAssemblerTester does not pass parameters directly
// to the constructed IR graph. Instead it passes a pointer to the parameter
@@ -258,26 +262,26 @@ class BufferedRawMachineAssemblerTester<void>
void Call() {
- test_graph_signature_->VerifyParams();
+ CSignature::VerifyParams(test_graph_signature_);
CallHelper<void>::Call();
}
template <typename P0>
void Call(P0 p0) {
- test_graph_signature_->VerifyParams<P0>();
+ CSignature::VerifyParams<P0>(test_graph_signature_);
CallHelper<void>::Call(reinterpret_cast<void*>(&p0));
}
template <typename P0, typename P1>
void Call(P0 p0, P1 p1) {
- test_graph_signature_->VerifyParams<P0, P1>();
+ CSignature::VerifyParams<P0, P1>(test_graph_signature_);
CallHelper<void>::Call(reinterpret_cast<void*>(&p0),
reinterpret_cast<void*>(&p1));
}
template <typename P0, typename P1, typename P2>
void Call(P0 p0, P1 p1, P2 p2) {
- test_graph_signature_->VerifyParams<P0, P1, P2>();
+ CSignature::VerifyParams<P0, P1, P2>(test_graph_signature_);
CallHelper<void>::Call(reinterpret_cast<void*>(&p0),
reinterpret_cast<void*>(&p1),
reinterpret_cast<void*>(&p2));
@@ -285,7 +289,7 @@ class BufferedRawMachineAssemblerTester<void>
template <typename P0, typename P1, typename P2, typename P3>
void Call(P0 p0, P1 p1, P2 p2, P3 p3) {
- test_graph_signature_->VerifyParams<P0, P1, P2, P3>();
+ CSignature::VerifyParams<P0, P1, P2, P3>(test_graph_signature_);
CallHelper<void>::Call(
reinterpret_cast<void*>(&p0), reinterpret_cast<void*>(&p1),
reinterpret_cast<void*>(&p2), reinterpret_cast<void*>(&p3));
@@ -397,7 +401,6 @@ class Uint32BinopTester : public BinopTester<uint32_t, USE_RETURN_REGISTER> {
// A helper class for testing code sequences that take two float parameters and
// return a float value.
-// TODO(titzer): figure out how to return floats correctly on ia32.
class Float32BinopTester : public BinopTester<float, USE_RESULT_BUFFER> {
public:
explicit Float32BinopTester(RawMachineAssemblerTester<int32_t>* tester)
@@ -407,7 +410,6 @@ class Float32BinopTester : public BinopTester<float, USE_RESULT_BUFFER> {
// A helper class for testing code sequences that take two double parameters and
// return a double value.
-// TODO(titzer): figure out how to return doubles correctly on ia32.
class Float64BinopTester : public BinopTester<double, USE_RESULT_BUFFER> {
public:
explicit Float64BinopTester(RawMachineAssemblerTester<int32_t>* tester)
@@ -524,7 +526,8 @@ class BinopGen {
// and run the generated code to ensure it produces the correct results.
class Int32BinopInputShapeTester {
public:
- explicit Int32BinopInputShapeTester(BinopGen<int32_t>* g) : gen(g) {}
+ explicit Int32BinopInputShapeTester(BinopGen<int32_t>* g)
+ : gen(g), input_a(0), input_b(0) {}
void TestAllInputShapes();
@@ -537,24 +540,6 @@ class Int32BinopInputShapeTester {
void RunLeft(RawMachineAssemblerTester<int32_t>* m);
void RunRight(RawMachineAssemblerTester<int32_t>* m);
};
-
-// TODO(bmeurer): Drop this crap once we switch to GTest/Gmock.
-static inline void CheckFloatEq(volatile float x, volatile float y) {
- if (std::isnan(x)) {
- CHECK(std::isnan(y));
- } else {
- CHECK_EQ(x, y);
- }
-}
-
-static inline void CheckDoubleEq(volatile double x, volatile double y) {
- if (std::isnan(x)) {
- CHECK(std::isnan(y));
- } else {
- CHECK_EQ(x, y);
- }
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/function-tester.h b/deps/v8/test/cctest/compiler/function-tester.h
index 2fcd35398c..c6093ce6e4 100644
--- a/deps/v8/test/cctest/compiler/function-tester.h
+++ b/deps/v8/test/cctest/compiler/function-tester.h
@@ -162,15 +162,26 @@ class FunctionTester : public InitializedHandleScope {
Handle<Object> false_value() { return isolate->factory()->false_value(); }
+ static Handle<JSFunction> ForMachineGraph(Graph* graph, int param_count) {
+ JSFunction* p = NULL;
+ { // because of the implicit handle scope of FunctionTester.
+ FunctionTester f(graph, param_count);
+ p = *f.function;
+ }
+ return Handle<JSFunction>(p); // allocated in outer handle scope.
+ }
+
+ private:
+ uint32_t flags_;
+
Handle<JSFunction> Compile(Handle<JSFunction> function) {
-// TODO(titzer): make this method private.
Zone zone;
ParseInfo parse_info(&zone, function);
CompilationInfo info(&parse_info);
info.MarkAsDeoptimizationEnabled();
CHECK(Parser::ParseStatic(info.parse_info()));
- info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
+ info.SetOptimizing();
if (flags_ & CompilationInfo::kFunctionContextSpecializing) {
info.MarkAsFunctionContextSpecializing();
}
@@ -192,26 +203,13 @@ class FunctionTester : public InitializedHandleScope {
return function;
}
- static Handle<JSFunction> ForMachineGraph(Graph* graph, int param_count) {
- JSFunction* p = NULL;
- { // because of the implicit handle scope of FunctionTester.
- FunctionTester f(graph, param_count);
- p = *f.function;
- }
- return Handle<JSFunction>(p); // allocated in outer handle scope.
- }
-
- private:
- uint32_t flags_;
-
std::string BuildFunction(int param_count) {
std::string function_string = "(function(";
if (param_count > 0) {
- char next = 'a';
- function_string += next;
- while (param_count-- > 0) {
+ function_string += 'a';
+ for (int i = 1; i < param_count; i++) {
function_string += ',';
- function_string += ++next;
+ function_string += static_cast<char>('a' + i);
}
}
function_string += "){})";
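
// Aside: the rewritten loop also fixes an off-by-one; the old ++next
// version appended param_count + 1 names. A standalone sketch of the new
// behavior (needs <string>):
static inline std::string BuildFunctionSketch(int param_count) {
  std::string s = "(function(";
  if (param_count > 0) {
    s += 'a';
    for (int i = 1; i < param_count; i++) {
      s += ',';                         // ",b", ",c", ...
      s += static_cast<char>('a' + i);
    }
  }
  return s + "){})";
}
// BuildFunctionSketch(1) == "(function(a){})"; the old loop yielded
// "(function(a,b){})" because it appended 'a' unconditionally and then
// looped param_count more times.
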
@@ -231,8 +229,7 @@ class FunctionTester : public InitializedHandleScope {
CompilationInfo info(&parse_info);
CHECK(Parser::ParseStatic(info.parse_info()));
- info.SetOptimizing(BailoutId::None(),
- Handle<Code>(function->shared()->code()));
+ info.SetOptimizing();
CHECK(Compiler::Analyze(info.parse_info()));
CHECK(Compiler::EnsureDeoptimizationSupport(&info));
diff --git a/deps/v8/test/cctest/compiler/test-code-stub-assembler.cc b/deps/v8/test/cctest/compiler/test-code-stub-assembler.cc
index d7a7a8198a..0306561020 100644
--- a/deps/v8/test/cctest/compiler/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/compiler/test-code-stub-assembler.cc
@@ -16,7 +16,7 @@ class CodeStubAssemblerTester : public CodeStubAssembler {
CodeStubAssemblerTester(Isolate* isolate,
const CallInterfaceDescriptor& descriptor)
: CodeStubAssembler(isolate, isolate->runtime_zone(), descriptor,
- Code::STUB, "test"),
+ Code::ComputeFlags(Code::STUB), "test"),
scope_(isolate) {}
private:
@@ -120,6 +120,133 @@ TEST(SimpleTailCallRuntime2Arg) {
CHECK_EQ(16, Handle<Smi>::cast(result.ToHandleChecked())->value());
}
+TEST(VariableMerge1) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ CodeStubAssembler::Variable var1(&m, MachineRepresentation::kTagged);
+ CodeStubAssembler::Label l1(&m), l2(&m), merge(&m);
+ Node* temp = m.Int32Constant(0);
+ var1.Bind(temp);
+ m.Branch(m.Int32Constant(1), &l1, &l2);
+ m.Bind(&l1);
+ CHECK_EQ(var1.value(), temp);
+ m.Goto(&merge);
+ m.Bind(&l2);
+ CHECK_EQ(var1.value(), temp);
+ m.Goto(&merge);
+ m.Bind(&merge);
+ CHECK_EQ(var1.value(), temp);
+}
+
+TEST(VariableMerge2) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ CodeStubAssembler::Variable var1(&m, MachineRepresentation::kTagged);
+ CodeStubAssembler::Label l1(&m), l2(&m), merge(&m);
+ Node* temp = m.Int32Constant(0);
+ var1.Bind(temp);
+ m.Branch(m.Int32Constant(1), &l1, &l2);
+ m.Bind(&l1);
+ CHECK_EQ(var1.value(), temp);
+ m.Goto(&merge);
+ m.Bind(&l2);
+ Node* temp2 = m.Int32Constant(2);
+ var1.Bind(temp2);
+ CHECK_EQ(var1.value(), temp2);
+ m.Goto(&merge);
+ m.Bind(&merge);
+ CHECK_NE(var1.value(), temp);
+}
+
+TEST(VariableMerge3) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ CodeStubAssembler::Variable var1(&m, MachineRepresentation::kTagged);
+ CodeStubAssembler::Variable var2(&m, MachineRepresentation::kTagged);
+ CodeStubAssembler::Label l1(&m), l2(&m), merge(&m);
+ Node* temp = m.Int32Constant(0);
+ var1.Bind(temp);
+ var2.Bind(temp);
+ m.Branch(m.Int32Constant(1), &l1, &l2);
+ m.Bind(&l1);
+ CHECK_EQ(var1.value(), temp);
+ m.Goto(&merge);
+ m.Bind(&l2);
+ Node* temp2 = m.Int32Constant(2);
+ var1.Bind(temp2);
+ CHECK_EQ(var1.value(), temp2);
+ m.Goto(&merge);
+ m.Bind(&merge);
+ CHECK_NE(var1.value(), temp);
+ CHECK_NE(var1.value(), temp2);
+ CHECK_EQ(var2.value(), temp);
+}
+
+TEST(VariableMergeBindFirst) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ CodeStubAssembler::Variable var1(&m, MachineRepresentation::kTagged);
+ CodeStubAssembler::Label l1(&m), l2(&m), merge(&m, &var1), end(&m);
+ Node* temp = m.Int32Constant(0);
+ var1.Bind(temp);
+ m.Branch(m.Int32Constant(1), &l1, &l2);
+ m.Bind(&l1);
+ CHECK_EQ(var1.value(), temp);
+ m.Goto(&merge);
+ m.Bind(&merge);
+ CHECK(var1.value() != temp);
+ CHECK(var1.value() != nullptr);
+ m.Goto(&end);
+ m.Bind(&l2);
+ Node* temp2 = m.Int32Constant(2);
+ var1.Bind(temp2);
+ CHECK_EQ(var1.value(), temp2);
+ m.Goto(&merge);
+ m.Bind(&end);
+ CHECK(var1.value() != temp);
+ CHECK(var1.value() != nullptr);
+}
+
+TEST(VariableMergeSwitch) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ CodeStubAssembler::Variable var1(&m, MachineRepresentation::kTagged);
+ CodeStubAssembler::Label l1(&m), l2(&m), default_label(&m);
+ CodeStubAssembler::Label* labels[] = {&l1, &l2};
+ int32_t values[] = {1, 2};
+ Node* temp = m.Int32Constant(0);
+ var1.Bind(temp);
+ m.Switch(m.Int32Constant(2), &default_label, values, labels, 2);
+ m.Bind(&l1);
+ DCHECK_EQ(temp, var1.value());
+ m.Return(temp);
+ m.Bind(&l2);
+ DCHECK_EQ(temp, var1.value());
+ m.Return(temp);
+ m.Bind(&default_label);
+ DCHECK_EQ(temp, var1.value());
+ m.Return(temp);
+}
+
+TEST(FixedArrayAccessSmiIndex) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(5);
+ array->set(4, Smi::FromInt(733));
+ m.Return(m.LoadFixedArrayElementSmiIndex(m.HeapConstant(array),
+ m.SmiTag(m.Int32Constant(4))));
+ Handle<Code> code = m.GenerateCode();
+ FunctionTester ft(descriptor, code);
+ MaybeHandle<Object> result = ft.Call();
+ CHECK_EQ(733, Handle<Smi>::cast(result.ToHandleChecked())->value());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
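
The five VariableMerge tests added above pin down one contract of CodeStubAssembler variables: at a control-flow join, a variable bound to the same node on every incoming edge keeps that node (the CHECK_EQ cases), while differing bindings yield a fresh phi distinct from all of its inputs (the CHECK_NE cases). The toy model below is not V8 code; it restates that contract as a few lines of standalone C++ so the assertions are easy to read back.

#include <cassert>
#include <string>
#include <vector>

// Toy stand-in for a graph node; only identity matters here.
struct Node { std::string label; };

// Merge a variable's bindings from each incoming edge: agreement keeps
// the node, disagreement materializes a fresh phi node.
Node* Merge(std::vector<Node*> inputs, std::vector<Node*>* graph_owned) {
  for (Node* n : inputs) {
    if (n != inputs[0]) {
      graph_owned->push_back(new Node{"phi"});
      return graph_owned->back();
    }
  }
  return inputs[0];
}

int main() {
  std::vector<Node*> owned;
  Node a{"0"}, b{"2"};
  assert(Merge({&a, &a}, &owned) == &a);  // VariableMerge1: same binding
  Node* phi = Merge({&a, &b}, &owned);    // VariableMerge2: bindings differ
  assert(phi != &a && phi != &b);         // fresh phi, unlike both inputs
  for (Node* n : owned) delete n;
  return 0;
}
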
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index 43b7665459..c7cd47a55c 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -225,8 +225,8 @@ TEST(SpecializeToContext) {
t.graph()->NewNode(t.simplified()->ChangeTaggedToInt32(), other_load);
Node* add = t.graph()->NewNode(
- t.javascript()->Add(LanguageMode::SLOPPY, BinaryOperationHints::Any()),
- value_use, other_use, param_context, t.jsgraph()->EmptyFrameState(),
+ t.javascript()->Add(BinaryOperationHints::Any()), value_use, other_use,
+ param_context, t.jsgraph()->EmptyFrameState(),
t.jsgraph()->EmptyFrameState(), other_load, start);
Node* ret =
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index c8b7734eb2..24db6a532e 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -17,19 +17,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-#ifndef TEST_WITH_STRONG
-#define TEST_WITH_STRONG(Name) \
- static void Test##Name(); \
- static void TestWithStrong##Name(LanguageMode language_mode); \
- CcTest register_test_##Name(Test##Name, __FILE__, #Name, NULL, true, true); \
- static void Test##Name() { \
- TestWithStrong##Name(LanguageMode::SLOPPY); \
- TestWithStrong##Name(LanguageMode::STRONG); \
- } \
- static void TestWithStrong##Name(LanguageMode language_mode)
-#endif
-
-
class JSTypedLoweringTester : public HandleAndZoneScope {
public:
explicit JSTypedLoweringTester(int num_parameters = 0)
@@ -163,17 +150,8 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
}
Node* UseForEffect(Node* node) {
- // TODO(titzer): use EffectPhi after fixing EffectCount
- if (OperatorProperties::GetFrameStateInputCount(javascript.ToNumber()) >
- 0) {
- CHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(
- javascript.ToNumber()));
- return graph.NewNode(javascript.ToNumber(), node, context(),
- EmptyFrameState(context()), node, control());
- } else {
- return graph.NewNode(javascript.ToNumber(), node, context(), node,
- control());
- }
+ Node* merge = graph.NewNode(common.Merge(1), start());
+ return graph.NewNode(common.EffectPhi(1), node, merge);
}
void CheckEffectInput(Node* effect, Node* use) {
@@ -240,7 +218,7 @@ static IrOpcode::Value NumberToI32(bool is_signed) {
// TODO(turbofan): Lowering of StringAdd is disabled for now.
#if 0
-TEST_WITH_STRONG(StringBinops) {
+TEST(StringBinops) {
JSTypedLoweringTester R;
for (size_t i = 0; i < arraysize(kStringTypes); ++i) {
@@ -249,7 +227,7 @@ TEST_WITH_STRONG(StringBinops) {
for (size_t j = 0; j < arraysize(kStringTypes); ++j) {
Node* p1 = R.Parameter(kStringTypes[j], 1);
- Node* add = R.Binop(R.javascript.Add(language_mode), p0, p1);
+ Node* add = R.Binop(R.javascript.Add(), p0, p1);
Node* r = R.reduce(add);
R.CheckBinop(IrOpcode::kStringAdd, r);
@@ -260,14 +238,12 @@ TEST_WITH_STRONG(StringBinops) {
}
#endif
-
-TEST_WITH_STRONG(AddNumber1) {
+TEST(AddNumber1) {
JSTypedLoweringTester R;
for (size_t i = 0; i < arraysize(kNumberTypes); ++i) {
Node* p0 = R.Parameter(kNumberTypes[i], 0);
Node* p1 = R.Parameter(kNumberTypes[i], 1);
- Node* add = R.Binop(
- R.javascript.Add(language_mode, BinaryOperationHints::Any()), p0, p1);
+ Node* add = R.Binop(R.javascript.Add(BinaryOperationHints::Any()), p0, p1);
Node* r = R.reduce(add);
R.CheckBinop(IrOpcode::kNumberAdd, r);
@@ -276,20 +252,14 @@ TEST_WITH_STRONG(AddNumber1) {
}
}
-
-TEST_WITH_STRONG(NumberBinops) {
+TEST(NumberBinops) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.Add(language_mode, R.hints),
- R.simplified.NumberAdd(),
- R.javascript.Subtract(language_mode, R.hints),
- R.simplified.NumberSubtract(),
- R.javascript.Multiply(language_mode, R.hints),
- R.simplified.NumberMultiply(),
- R.javascript.Divide(language_mode, R.hints),
- R.simplified.NumberDivide(),
- R.javascript.Modulus(language_mode, R.hints),
- R.simplified.NumberModulus(),
+ R.javascript.Add(R.hints), R.simplified.NumberAdd(),
+ R.javascript.Subtract(R.hints), R.simplified.NumberSubtract(),
+ R.javascript.Multiply(R.hints), R.simplified.NumberMultiply(),
+ R.javascript.Divide(R.hints), R.simplified.NumberDivide(),
+ R.javascript.Modulus(R.hints), R.simplified.NumberModulus(),
};
for (size_t i = 0; i < arraysize(kNumberTypes); ++i) {
@@ -329,14 +299,13 @@ static void CheckToI32(Node* old_input, Node* new_input, bool is_signed) {
// A helper class for testing lowering of bitwise shift operators.
class JSBitwiseShiftTypedLoweringTester : public JSTypedLoweringTester {
public:
- explicit JSBitwiseShiftTypedLoweringTester(LanguageMode language_mode)
- : JSTypedLoweringTester(), language_mode_(language_mode) {
+ JSBitwiseShiftTypedLoweringTester() : JSTypedLoweringTester() {
int i = 0;
- set(i++, javascript.ShiftLeft(language_mode_, hints), true);
+ set(i++, javascript.ShiftLeft(hints), true);
set(i++, simplified.NumberShiftLeft(), false);
- set(i++, javascript.ShiftRight(language_mode_, hints), true);
+ set(i++, javascript.ShiftRight(hints), true);
set(i++, simplified.NumberShiftRight(), false);
- set(i++, javascript.ShiftRightLogical(language_mode_, hints), false);
+ set(i++, javascript.ShiftRightLogical(hints), false);
set(i++, simplified.NumberShiftRightLogical(), false);
}
static const int kNumberOps = 6;
@@ -344,7 +313,6 @@ class JSBitwiseShiftTypedLoweringTester : public JSTypedLoweringTester {
bool signedness[kNumberOps];
private:
- LanguageMode language_mode_;
void set(int idx, const Operator* op, bool s) {
ops[idx] = op;
signedness[idx] = s;
@@ -353,7 +321,7 @@ class JSBitwiseShiftTypedLoweringTester : public JSTypedLoweringTester {
TEST(Int32BitwiseShifts) {
- JSBitwiseShiftTypedLoweringTester R(LanguageMode::SLOPPY);
+ JSBitwiseShiftTypedLoweringTester R;
Type* types[] = {
Type::SignedSmall(), Type::UnsignedSmall(), Type::Negative32(),
@@ -387,14 +355,13 @@ TEST(Int32BitwiseShifts) {
// A helper class for testing lowering of bitwise operators.
class JSBitwiseTypedLoweringTester : public JSTypedLoweringTester {
public:
- explicit JSBitwiseTypedLoweringTester(LanguageMode language_mode)
- : JSTypedLoweringTester(), language_mode_(language_mode) {
+ JSBitwiseTypedLoweringTester() : JSTypedLoweringTester() {
int i = 0;
- set(i++, javascript.BitwiseOr(language_mode_, hints), true);
+ set(i++, javascript.BitwiseOr(hints), true);
set(i++, simplified.NumberBitwiseOr(), true);
- set(i++, javascript.BitwiseXor(language_mode_, hints), true);
+ set(i++, javascript.BitwiseXor(hints), true);
set(i++, simplified.NumberBitwiseXor(), true);
- set(i++, javascript.BitwiseAnd(language_mode_, hints), true);
+ set(i++, javascript.BitwiseAnd(hints), true);
set(i++, simplified.NumberBitwiseAnd(), true);
}
static const int kNumberOps = 6;
@@ -402,7 +369,6 @@ class JSBitwiseTypedLoweringTester : public JSTypedLoweringTester {
bool signedness[kNumberOps];
private:
- LanguageMode language_mode_;
void set(int idx, const Operator* op, bool s) {
ops[idx] = op;
signedness[idx] = s;
@@ -411,7 +377,7 @@ class JSBitwiseTypedLoweringTester : public JSTypedLoweringTester {
TEST(Int32BitwiseBinops) {
- JSBitwiseTypedLoweringTester R(LanguageMode::SLOPPY);
+ JSBitwiseTypedLoweringTester R;
Type* types[] = {
Type::SignedSmall(), Type::UnsignedSmall(), Type::Unsigned32(),
@@ -558,7 +524,6 @@ TEST(JSToString1) {
{ // ToString(number)
Node* r = R.ReduceUnop(op, Type::Number());
- // TODO(titzer): could remove effects
CHECK_EQ(IrOpcode::kJSToString, r->opcode());
}
@@ -602,17 +567,14 @@ TEST(JSToString_replacement) {
}
}
-
-TEST_WITH_STRONG(StringComparison) {
+TEST(StringComparison) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.LessThan(language_mode), R.simplified.StringLessThan(),
- R.javascript.LessThanOrEqual(language_mode),
- R.simplified.StringLessThanOrEqual(),
- R.javascript.GreaterThan(language_mode), R.simplified.StringLessThan(),
- R.javascript.GreaterThanOrEqual(language_mode),
- R.simplified.StringLessThanOrEqual()};
+ R.javascript.LessThan(), R.simplified.StringLessThan(),
+ R.javascript.LessThanOrEqual(), R.simplified.StringLessThanOrEqual(),
+ R.javascript.GreaterThan(), R.simplified.StringLessThan(),
+ R.javascript.GreaterThanOrEqual(), R.simplified.StringLessThanOrEqual()};
for (size_t i = 0; i < arraysize(kStringTypes); i++) {
Node* p0 = R.Parameter(kStringTypes[i], 0);
@@ -652,17 +614,14 @@ static void CheckIsConvertedToNumber(Node* val, Node* converted) {
}
}
-
-TEST_WITH_STRONG(NumberComparison) {
+TEST(NumberComparison) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.LessThan(language_mode), R.simplified.NumberLessThan(),
- R.javascript.LessThanOrEqual(language_mode),
- R.simplified.NumberLessThanOrEqual(),
- R.javascript.GreaterThan(language_mode), R.simplified.NumberLessThan(),
- R.javascript.GreaterThanOrEqual(language_mode),
- R.simplified.NumberLessThanOrEqual()};
+ R.javascript.LessThan(), R.simplified.NumberLessThan(),
+ R.javascript.LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+ R.javascript.GreaterThan(), R.simplified.NumberLessThan(),
+ R.javascript.GreaterThanOrEqual(), R.simplified.NumberLessThanOrEqual()};
Node* const p0 = R.Parameter(Type::Number(), 0);
Node* const p1 = R.Parameter(Type::Number(), 1);
@@ -684,8 +643,7 @@ TEST_WITH_STRONG(NumberComparison) {
}
}
-
-TEST_WITH_STRONG(MixedComparison1) {
+TEST(MixedComparison1) {
JSTypedLoweringTester R;
Type* types[] = {Type::Number(), Type::String(),
@@ -697,16 +655,15 @@ TEST_WITH_STRONG(MixedComparison1) {
for (size_t j = 0; j < arraysize(types); j++) {
Node* p1 = R.Parameter(types[j], 1);
{
- const Operator* less_than = R.javascript.LessThan(language_mode);
+ const Operator* less_than = R.javascript.LessThan();
Node* cmp = R.Binop(less_than, p0, p1);
Node* r = R.reduce(cmp);
if (types[i]->Is(Type::String()) && types[j]->Is(Type::String())) {
R.CheckBinop(R.simplified.StringLessThan(), r);
} else if ((types[i]->Is(Type::Number()) &&
types[j]->Is(Type::Number())) ||
- (!is_strong(language_mode) &&
- (!types[i]->Maybe(Type::String()) ||
- !types[j]->Maybe(Type::String())))) {
+ (!types[i]->Maybe(Type::String()) ||
+ !types[j]->Maybe(Type::String()))) {
R.CheckBinop(R.simplified.NumberLessThan(), r);
} else {
// No reduction of mixed types.
@@ -717,8 +674,7 @@ TEST_WITH_STRONG(MixedComparison1) {
}
}
-
-TEST_WITH_STRONG(RemoveToNumberEffects) {
+TEST(RemoveToNumberEffects) {
JSTypedLoweringTester R;
Node* effect_use = NULL;
@@ -744,14 +700,14 @@ TEST_WITH_STRONG(RemoveToNumberEffects) {
case 2:
effect_use = R.graph.NewNode(R.common.EffectPhi(1), ton, R.start());
case 3:
- effect_use = R.graph.NewNode(R.javascript.Add(language_mode, R.hints),
- ton, ton, R.context(), frame_state,
- frame_state, ton, R.start());
+ effect_use =
+ R.graph.NewNode(R.javascript.Add(R.hints), ton, ton, R.context(),
+ frame_state, frame_state, ton, R.start());
break;
case 4:
- effect_use = R.graph.NewNode(R.javascript.Add(language_mode, R.hints),
- p0, p0, R.context(), frame_state,
- frame_state, ton, R.start());
+ effect_use =
+ R.graph.NewNode(R.javascript.Add(R.hints), p0, p0, R.context(),
+ frame_state, frame_state, ton, R.start());
break;
case 5:
effect_use = R.graph.NewNode(R.common.Return(), p0, ton, R.start());
@@ -896,9 +852,16 @@ TEST(StrictEqualityForRefEqualTypes) {
Node* p1 = R.Parameter(types[i]);
CheckEqualityReduction(&R, true, p0, p1, IrOpcode::kReferenceEqual);
}
- // TODO(titzer): Equal(RefEqualTypes)
}
+TEST(StrictEqualityForUnique) {
+ JSTypedLoweringTester R;
+
+ Node* p0 = R.Parameter(Type::Unique());
+ Node* p1 = R.Parameter(Type::Unique());
+ CheckEqualityReduction(&R, true, p0, p1, IrOpcode::kReferenceEqual);
+ CheckEqualityReduction(&R, true, p1, p0, IrOpcode::kReferenceEqual);
+}
TEST(StringEquality) {
JSTypedLoweringTester R;
@@ -909,27 +872,18 @@ TEST(StringEquality) {
CheckEqualityReduction(&R, false, p0, p1, IrOpcode::kStringEqual);
}
-
-TEST_WITH_STRONG(RemovePureNumberBinopEffects) {
+TEST(RemovePureNumberBinopEffects) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.Equal(),
- R.simplified.NumberEqual(),
- R.javascript.Add(language_mode, R.hints),
- R.simplified.NumberAdd(),
- R.javascript.Subtract(language_mode, R.hints),
- R.simplified.NumberSubtract(),
- R.javascript.Multiply(language_mode, R.hints),
- R.simplified.NumberMultiply(),
- R.javascript.Divide(language_mode, R.hints),
- R.simplified.NumberDivide(),
- R.javascript.Modulus(language_mode, R.hints),
- R.simplified.NumberModulus(),
- R.javascript.LessThan(language_mode),
- R.simplified.NumberLessThan(),
- R.javascript.LessThanOrEqual(language_mode),
- R.simplified.NumberLessThanOrEqual(),
+ R.javascript.Equal(), R.simplified.NumberEqual(),
+ R.javascript.Add(R.hints), R.simplified.NumberAdd(),
+ R.javascript.Subtract(R.hints), R.simplified.NumberSubtract(),
+ R.javascript.Multiply(R.hints), R.simplified.NumberMultiply(),
+ R.javascript.Divide(R.hints), R.simplified.NumberDivide(),
+ R.javascript.Modulus(R.hints), R.simplified.NumberModulus(),
+ R.javascript.LessThan(), R.simplified.NumberLessThan(),
+ R.javascript.LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
};
for (size_t j = 0; j < arraysize(ops); j += 2) {
@@ -950,12 +904,9 @@ TEST(OrderNumberBinopEffects1) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.Subtract(LanguageMode::SLOPPY, R.hints),
- R.simplified.NumberSubtract(),
- R.javascript.Multiply(LanguageMode::SLOPPY, R.hints),
- R.simplified.NumberMultiply(),
- R.javascript.Divide(LanguageMode::SLOPPY, R.hints),
- R.simplified.NumberDivide(),
+ R.javascript.Subtract(R.hints), R.simplified.NumberSubtract(),
+ R.javascript.Multiply(R.hints), R.simplified.NumberMultiply(),
+ R.javascript.Divide(R.hints), R.simplified.NumberDivide(),
};
for (size_t j = 0; j < arraysize(ops); j += 2) {
@@ -978,14 +929,10 @@ TEST(OrderNumberBinopEffects2) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.Add(LanguageMode::SLOPPY, R.hints),
- R.simplified.NumberAdd(),
- R.javascript.Subtract(LanguageMode::SLOPPY, R.hints),
- R.simplified.NumberSubtract(),
- R.javascript.Multiply(LanguageMode::SLOPPY, R.hints),
- R.simplified.NumberMultiply(),
- R.javascript.Divide(LanguageMode::SLOPPY, R.hints),
- R.simplified.NumberDivide(),
+ R.javascript.Add(R.hints), R.simplified.NumberAdd(),
+ R.javascript.Subtract(R.hints), R.simplified.NumberSubtract(),
+ R.javascript.Multiply(R.hints), R.simplified.NumberMultiply(),
+ R.javascript.Divide(R.hints), R.simplified.NumberDivide(),
};
for (size_t j = 0; j < arraysize(ops); j += 2) {
@@ -1020,10 +967,8 @@ TEST(OrderCompareEffects) {
JSTypedLoweringTester R;
const Operator* ops[] = {
- R.javascript.GreaterThan(LanguageMode::SLOPPY),
- R.simplified.NumberLessThan(),
- R.javascript.GreaterThanOrEqual(LanguageMode::SLOPPY),
- R.simplified.NumberLessThanOrEqual(),
+ R.javascript.GreaterThan(), R.simplified.NumberLessThan(),
+ R.javascript.GreaterThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
};
for (size_t j = 0; j < arraysize(ops); j += 2) {
@@ -1070,7 +1015,7 @@ TEST(OrderCompareEffects) {
TEST(Int32BinopEffects) {
- JSBitwiseTypedLoweringTester R(LanguageMode::SLOPPY);
+ JSBitwiseTypedLoweringTester R;
for (int j = 0; j < R.kNumberOps; j += 2) {
bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
BinopEffectsTester B(R.ops[j], I32Type(signed_left), I32Type(signed_right));
@@ -1150,10 +1095,9 @@ TEST(Int32BinopEffects) {
}
}
-
-TEST_WITH_STRONG(Int32AddNarrowing) {
+TEST(Int32AddNarrowing) {
{
- JSBitwiseTypedLoweringTester R(language_mode);
+ JSBitwiseTypedLoweringTester R;
for (int o = 0; o < R.kNumberOps; o += 2) {
for (size_t i = 0; i < arraysize(kInt32Types); i++) {
@@ -1176,7 +1120,7 @@ TEST_WITH_STRONG(Int32AddNarrowing) {
}
}
{
- JSBitwiseShiftTypedLoweringTester R(language_mode);
+ JSBitwiseShiftTypedLoweringTester R;
for (int o = 0; o < R.kNumberOps; o += 2) {
for (size_t i = 0; i < arraysize(kInt32Types); i++) {
@@ -1199,7 +1143,7 @@ TEST_WITH_STRONG(Int32AddNarrowing) {
}
}
{
- JSBitwiseTypedLoweringTester R(language_mode);
+ JSBitwiseTypedLoweringTester R;
for (int o = 0; o < R.kNumberOps; o += 2) {
Node* n0 = R.Parameter(I32Type(R.signedness[o]));
@@ -1222,8 +1166,7 @@ TEST_WITH_STRONG(Int32AddNarrowing) {
}
}
-
-TEST_WITH_STRONG(Int32Comparisons) {
+TEST(Int32Comparisons) {
JSTypedLoweringTester R;
struct Entry {
@@ -1235,17 +1178,16 @@ TEST_WITH_STRONG(Int32Comparisons) {
};
Entry ops[] = {
- {R.javascript.LessThan(language_mode), R.machine.Uint32LessThan(),
+ {R.javascript.LessThan(), R.machine.Uint32LessThan(),
R.machine.Int32LessThan(), R.simplified.NumberLessThan(), false},
- {R.javascript.LessThanOrEqual(language_mode),
- R.machine.Uint32LessThanOrEqual(), R.machine.Int32LessThanOrEqual(),
- R.simplified.NumberLessThanOrEqual(), false},
- {R.javascript.GreaterThan(language_mode), R.machine.Uint32LessThan(),
+ {R.javascript.LessThanOrEqual(), R.machine.Uint32LessThanOrEqual(),
+ R.machine.Int32LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+ false},
+ {R.javascript.GreaterThan(), R.machine.Uint32LessThan(),
R.machine.Int32LessThan(), R.simplified.NumberLessThan(), true},
- {R.javascript.GreaterThanOrEqual(language_mode),
- R.machine.Uint32LessThanOrEqual(), R.machine.Int32LessThanOrEqual(),
- R.simplified.NumberLessThanOrEqual(), true}
- };
+ {R.javascript.GreaterThanOrEqual(), R.machine.Uint32LessThanOrEqual(),
+ R.machine.Int32LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+ true}};
for (size_t o = 0; o < arraysize(ops); o++) {
for (size_t i = 0; i < arraysize(kNumberTypes); i++) {
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index 8c02012e0a..71f774f562 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -108,7 +108,7 @@ class TestCode : public HandleAndZoneScope {
void VerifyForwarding(TestCode& code, int count, int* expected) {
Zone local_zone;
ZoneVector<RpoNumber> result(&local_zone);
- JumpThreading::ComputeForwarding(&local_zone, result, &code.sequence_);
+ JumpThreading::ComputeForwarding(&local_zone, result, &code.sequence_, true);
CHECK(count == static_cast<int>(result.size()));
for (int i = 0; i < count; i++) {
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 939b144731..6722f59d60 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -71,20 +71,6 @@ TEST(TestLinkageJSFunctionIncoming) {
}
-TEST(TestLinkageCodeStubIncoming) {
- Isolate* isolate = CcTest::InitIsolateOnce();
- Zone zone;
- ToNumberStub stub(isolate);
- CompilationInfo info(&stub, isolate, &zone);
- CallDescriptor* descriptor = Linkage::ComputeIncoming(&zone, &info);
- CHECK(descriptor);
- CHECK_EQ(0, static_cast<int>(descriptor->StackParameterCount()));
- CHECK_EQ(1, static_cast<int>(descriptor->ReturnCount()));
- CHECK_EQ(Operator::kNoProperties, descriptor->properties());
- CHECK_EQ(false, descriptor->IsJSFunctionCall());
-}
-
-
TEST(TestLinkageJSCall) {
HandleAndZoneScope handles;
Handle<JSFunction> function = Compile("a + c");
@@ -109,6 +95,20 @@ TEST(TestLinkageRuntimeCall) {
TEST(TestLinkageStubCall) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ Zone zone;
+ ToNumberStub stub(isolate);
+ CompilationInfo info("test", isolate, &zone, Code::ComputeFlags(Code::STUB));
+ CallInterfaceDescriptor interface_descriptor =
+ stub.GetCallInterfaceDescriptor();
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate, &zone, interface_descriptor, stub.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties);
+ CHECK(descriptor);
+ CHECK_EQ(0, static_cast<int>(descriptor->StackParameterCount()));
+ CHECK_EQ(1, static_cast<int>(descriptor->ReturnCount()));
+ CHECK_EQ(Operator::kNoProperties, descriptor->properties());
+ CHECK_EQ(false, descriptor->IsJSFunctionCall());
// TODO(titzer): test linkage creation for outgoing stub calls.
}
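
A recurring cleanup in the test-run-bytecode-graph-builder.cc hunks further below swaps the manual sizeof(snippets) / sizeof(snippets[0]) count for arraysize(snippets). The sketch below is a minimal standalone rendering of that macro, modeled on V8's base/macros.h rather than copied from it: a function template whose return type encodes the array length, so the count is a compile-time constant and pointer misuse fails to compile, which the plain sizeof division silently accepts.

#include <cstddef>
#include <iostream>

// Declaration only: returns a reference to char[N], so sizeof() of the
// call expression is N. No definition is needed because sizeof does not
// evaluate its operand.
template <typename T, size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];
#define arraysize(array) (sizeof(ArraySizeHelper(array)))

int main() {
  int snippets[7] = {0};
  std::cout << arraysize(snippets) << "\n";  // prints 7
  // int* decayed = snippets;
  // arraysize(decayed);  // compile error, unlike sizeof division
  return 0;
}
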
diff --git a/deps/v8/test/cctest/compiler/test-pipeline.cc b/deps/v8/test/cctest/compiler/test-pipeline.cc
index f4ffd02296..35e342765b 100644
--- a/deps/v8/test/cctest/compiler/test-pipeline.cc
+++ b/deps/v8/test/cctest/compiler/test-pipeline.cc
@@ -18,7 +18,7 @@ static void RunPipeline(Zone* zone, const char* source) {
ParseInfo parse_info(zone, function);
CHECK(Compiler::ParseAndAnalyze(&parse_info));
CompilationInfo info(&parse_info);
- info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
+ info.SetOptimizing();
Pipeline pipeline(&info);
Handle<Code> code = pipeline.GenerateCode();
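
The file that follows opens by defining SHARD_TEST_BY_2 and SHARD_TEST_BY_4, which split one slow graph-builder test body, parameterized by a shard index, into several registered cctest cases. The standalone sketch below shows the expansion with a stand-in TEST macro; cctest's real TEST also registers the function with the harness, which is omitted here.

#include <cstddef>
#include <iostream>

// Stand-in for cctest's TEST: just declares a runnable function.
#define TEST(name) void Test_##name()
#define SHARD_TEST_BY_2(x) \
  TEST(x##_0) { Test##x(0); } \
  TEST(x##_1) { Test##x(1); }

// Test body parameterized by shard; each shard takes every other case,
// matching the "(i % 2) != shard" filter used in the hunks below.
void TestExample(size_t shard) {
  for (size_t i = 0; i < 6; i++) {
    if ((i % 2) != shard) continue;
    std::cout << "shard " << shard << " handles case " << i << "\n";
  }
}
SHARD_TEST_BY_2(Example)  // expands to Test_Example_0 and Test_Example_1

int main() {
  Test_Example_0();  // cases 0, 2, 4
  Test_Example_1();  // cases 1, 3, 5
  return 0;
}
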
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index 88555b7d57..9a038221a1 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -16,6 +16,14 @@ namespace v8 {
namespace internal {
namespace compiler {
+#define SHARD_TEST_BY_2(x) \
+ TEST(x##_0) { Test##x(0); } \
+ TEST(x##_1) { Test##x(1); }
+#define SHARD_TEST_BY_4(x) \
+ TEST(x##_0) { Test##x(0); } \
+ TEST(x##_1) { Test##x(1); } \
+ TEST(x##_2) { Test##x(2); } \
+ TEST(x##_3) { Test##x(3); }
static const char kFunctionName[] = "f";
@@ -70,7 +78,7 @@ class BytecodeGraphTester {
i::FLAG_ignition = true;
i::FLAG_always_opt = false;
i::FLAG_allow_natives_syntax = true;
- i::FLAG_ignition_fallback_on_eval_and_catch = false;
+ i::FLAG_loop_assignment_analysis = false;
// Set ignition filter flag via SetFlagsFromString to avoid double-free
// (or potential leak with StrDup() based on ownership confusion).
ScopedVector<char> ignition_filter(64);
@@ -119,13 +127,13 @@ class BytecodeGraphTester {
Handle<JSFunction>::cast(v8::Utils::OpenHandle(*api_function));
CHECK(function->shared()->HasBytecodeArray());
+ // TODO(mstarzinger): We should be able to prime CompilationInfo without
+ // having to instantiate a ParseInfo first. Fix this!
ParseInfo parse_info(zone_, function);
CompilationInfo compilation_info(&parse_info);
- compilation_info.SetOptimizing(BailoutId::None(), Handle<Code>());
+ compilation_info.SetOptimizing();
compilation_info.MarkAsDeoptimizationEnabled();
- // TODO(mythria): Remove this step once parse_info is not needed.
- CHECK(Compiler::ParseAndAnalyze(&parse_info));
compiler::Pipeline pipeline(&compilation_info);
Handle<Code> code = pipeline.GenerateCode();
function->ReplaceCode(*code);
@@ -205,8 +213,7 @@ TEST(BytecodeGraphBuilderReturnStatements) {
{"return 'catfood';", {factory->NewStringFromStaticChars("catfood")}},
{"return NaN;", {factory->nan_value()}}};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -233,8 +240,7 @@ TEST(BytecodeGraphBuilderPrimitiveExpressions) {
{"return 25 % 7;", {factory->NewNumberFromInt(4)}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -292,8 +298,7 @@ TEST(BytecodeGraphBuilderTwoParameterTests) {
factory->NewStringFromStaticChars("abc"),
factory->NewStringFromStaticChars("def")}}};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s(p1, p2) { %s }\n%s(0, 0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -337,8 +342,7 @@ TEST(BytecodeGraphBuilderNamedLoad) {
BytecodeGraphTester::NewObject("({ name : 'abc'})")}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(2048);
SNPrintF(script, "function %s(p1) { %s };\n%s(0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -394,8 +398,7 @@ TEST(BytecodeGraphBuilderKeyedLoad) {
factory->NewNumberFromInt(100)}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(2048);
SNPrintF(script, "function %s(p1, p2) { %s };\n%s(0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -409,8 +412,7 @@ TEST(BytecodeGraphBuilderKeyedLoad) {
}
}
-
-TEST(BytecodeGraphBuilderNamedStore) {
+void TestBytecodeGraphBuilderNamedStore(size_t shard) {
HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate();
Zone* zone = scope.main_zone();
@@ -445,8 +447,8 @@ TEST(BytecodeGraphBuilderNamedStore) {
BytecodeGraphTester::NewObject("({ name : 'abc'})")}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ if ((i % 2) != shard) continue;
ScopedVector<char> script(3072);
SNPrintF(script, "function %s(p1) { %s };\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -459,8 +461,9 @@ TEST(BytecodeGraphBuilderNamedStore) {
}
}
+SHARD_TEST_BY_2(BytecodeGraphBuilderNamedStore)
-TEST(BytecodeGraphBuilderKeyedStore) {
+void TestBytecodeGraphBuilderKeyedStore(size_t shard) {
HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate();
Zone* zone = scope.main_zone();
@@ -503,8 +506,8 @@ TEST(BytecodeGraphBuilderKeyedStore) {
factory->NewNumberFromInt(100)}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ if ((i % 2) != shard) continue;
ScopedVector<char> script(2048);
SNPrintF(script, "function %s(p1, p2) { %s };\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -517,6 +520,7 @@ TEST(BytecodeGraphBuilderKeyedStore) {
}
}
+SHARD_TEST_BY_2(BytecodeGraphBuilderKeyedStore)
TEST(BytecodeGraphBuilderPropertyCall) {
HandleAndZoneScope scope;
@@ -538,8 +542,7 @@ TEST(BytecodeGraphBuilderPropertyCall) {
" return a + b + c + d + e + f + g + h;}})")}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(2048);
SNPrintF(script, "function %s(p1) { %s };\n%s({func() {}});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -582,8 +585,7 @@ TEST(BytecodeGraphBuilderCallNew) {
{factory->NewNumberFromInt(25)}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
BytecodeGraphTester tester(isolate, zone, snippets[i].code_snippet);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
@@ -621,8 +623,7 @@ TEST(BytecodeGraphBuilderCreateClosure) {
{factory->NewNumberFromInt(25)}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
BytecodeGraphTester tester(isolate, zone, snippets[i].code_snippet);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
@@ -649,8 +650,7 @@ TEST(BytecodeGraphBuilderCallRuntime) {
BytecodeGraphTester::NewObject("[1, 2, 3]")}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
BytecodeGraphTester tester(isolate, zone, snippets[i].code_snippet);
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_value =
@@ -659,8 +659,7 @@ TEST(BytecodeGraphBuilderCallRuntime) {
}
}
-
-TEST(BytecodeGraphBuilderGlobals) {
+void TestBytecodeGraphBuilderGlobals(size_t shard) {
HandleAndZoneScope scope;
Isolate* isolate = scope.main_isolate();
Zone* zone = scope.main_zone();
@@ -700,8 +699,8 @@ TEST(BytecodeGraphBuilderGlobals) {
{factory->NewStringFromStaticChars("number")}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ if ((i % 2) != shard) continue;
BytecodeGraphTester tester(isolate, zone, snippets[i].code_snippet);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
@@ -709,6 +708,7 @@ TEST(BytecodeGraphBuilderGlobals) {
}
}
+SHARD_TEST_BY_2(BytecodeGraphBuilderGlobals)
TEST(BytecodeGraphBuilderToObject) {
// TODO(mythria): tests for ToObject. Needs ForIn.
@@ -746,8 +746,7 @@ TEST(BytecodeGraphBuilderToName) {
{factory->NewNumberFromInt(10)}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s() { %s }\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -778,8 +777,7 @@ TEST(BytecodeGraphBuilderLogicalNot) {
{factory->false_value(), factory->NewStringFromStaticChars("abc")}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -816,8 +814,7 @@ TEST(BytecodeGraphBuilderTypeOf) {
factory->NewStringFromStaticChars("abc")}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -871,8 +868,7 @@ TEST(BytecodeGraphBuilderCountOperation) {
{factory->nan_value(), factory->NewStringFromStaticChars("String")}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -911,8 +907,7 @@ TEST(BytecodeGraphBuilderDelete) {
BytecodeGraphTester::NewObject("({val : 10, name:'abc'})")}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -966,8 +961,7 @@ TEST(BytecodeGraphBuilderDeleteGlobal) {
{factory->true_value()}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "%s %s({});", snippets[i].code_snippet, kFunctionName);
@@ -1003,8 +997,7 @@ TEST(BytecodeGraphBuilderDeleteLookupSlot) {
{"return delete z;", {factory->false_value()}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "%s %s %s", function_prologue, snippets[i].code_snippet,
function_epilogue);
@@ -1045,8 +1038,7 @@ TEST(BytecodeGraphBuilderLookupSlot) {
{"'use strict'; obj.val = 23.456; return obj.val;",
{factory->NewNumber(23.456)}}};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "%s %s %s", function_prologue, snippets[i].code_snippet,
function_epilogue);
@@ -1089,8 +1081,7 @@ TEST(BytecodeGraphBuilderLookupSlotWide) {
{"'use strict';" REPEAT_256(SPACE, "y = 2.3;") "return obj.val = 23.456;",
{factory->NewNumber(23.456)}}};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(3072);
SNPrintF(script, "%s %s %s", function_prologue, snippets[i].code_snippet,
function_epilogue);
@@ -1120,8 +1111,7 @@ TEST(BytecodeGraphBuilderCallLookupSlot) {
{handle(Smi::FromInt(30), isolate)}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -1173,8 +1163,7 @@ TEST(BytecodeGraphBuilderEval) {
{factory->NewStringFromStaticChars("object")}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -1202,8 +1191,7 @@ TEST(BytecodeGraphBuilderEvalParams) {
{handle(Smi::FromInt(30), isolate), handle(Smi::FromInt(20), isolate)}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s(p1) { %s }\n%s(0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -1234,8 +1222,7 @@ TEST(BytecodeGraphBuilderEvalGlobal) {
{factory->NewStringFromStaticChars("undefined")}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
BytecodeGraphTester tester(isolate, zone, snippets[i].code_snippet);
auto callable = tester.GetCallable<>();
Handle<Object> return_value = callable().ToHandleChecked();
@@ -1366,8 +1353,7 @@ TEST(BytecodeGraphBuilderTestIn) {
factory->NewNumberFromInt(1)}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s(p1, p2) { %s }\n%s({}, {});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -1399,8 +1385,7 @@ TEST(BytecodeGraphBuilderTestInstanceOf) {
{factory->true_value(), factory->undefined_value()}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s(p1) { %s }\n%s({});", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -1413,6 +1398,98 @@ TEST(BytecodeGraphBuilderTestInstanceOf) {
}
}
+TEST(BytecodeGraphBuilderTryCatch) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"var a = 1; try { a = 2 } catch(e) { a = 3 }; return a;",
+ {handle(Smi::FromInt(2), isolate)}},
+ {"var a; try { undef.x } catch(e) { a = 2 }; return a;",
+ {handle(Smi::FromInt(2), isolate)}},
+ {"var a; try { throw 1 } catch(e) { a = e + 2 }; return a;",
+ {handle(Smi::FromInt(3), isolate)}},
+ {"var a; try { throw 1 } catch(e) { a = e + 2 };"
+ " try { throw a } catch(e) { a = e + 3 }; return a;",
+ {handle(Smi::FromInt(6), isolate)}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+TEST(BytecodeGraphBuilderTryFinally1) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"var a = 1; try { a = a + 1; } finally { a = a + 2; }; return a;",
+ {handle(Smi::FromInt(4), isolate)}},
+ {"var a = 1; try { a = 2; return 23; } finally { a = 3 }; return a;",
+ {handle(Smi::FromInt(23), isolate)}},
+ {"var a = 1; try { a = 2; throw 23; } finally { return a; };",
+ {handle(Smi::FromInt(2), isolate)}},
+ {"var a = 1; for (var i = 10; i < 20; i += 5) {"
+ " try { a = 2; break; } finally { a = 3; }"
+ "} return a + i;",
+ {handle(Smi::FromInt(13), isolate)}},
+ {"var a = 1; for (var i = 10; i < 20; i += 5) {"
+ " try { a = 2; continue; } finally { a = 3; }"
+ "} return a + i;",
+ {handle(Smi::FromInt(23), isolate)}},
+ {"var a = 1; try { a = 2;"
+ " try { a = 3; throw 23; } finally { a = 4; }"
+ "} catch(e) { a = a + e; } return a;",
+ {handle(Smi::FromInt(27), isolate)}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+TEST(BytecodeGraphBuilderTryFinally2) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
+ ExpectedSnippet<0, const char*> snippets[] = {
+ {"var a = 1; try { a = 2; throw 23; } finally { a = 3 }; return a;",
+ {"Uncaught 23"}},
+ {"var a = 1; try { a = 2; throw 23; } finally { throw 42; };",
+ {"Uncaught 42"}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ v8::Local<v8::String> message = tester.CheckThrowsReturnMessage()->Get();
+ v8::Local<v8::String> expected_string = v8_str(snippets[i].return_value());
+ CHECK(
+ message->Equals(CcTest::isolate()->GetCurrentContext(), expected_string)
+ .FromJust());
+ }
+}
TEST(BytecodeGraphBuilderThrow) {
HandleAndZoneScope scope;
@@ -1426,15 +1503,14 @@ TEST(BytecodeGraphBuilderThrow) {
{"throw 1;", {"Uncaught 1"}},
{"throw 'Error';", {"Uncaught Error"}},
{"throw 'Error1'; throw 'Error2'", {"Uncaught Error1"}},
- // TODO(mythria): Enable these tests when JumpIfTrue is supported.
- // {"var a = true; if (a) { throw 'Error'; }", {"Error"}},
+ {"var a = true; if (a) { throw 'Error'; }", {"Uncaught Error"}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
+
BytecodeGraphTester tester(isolate, zone, script.start());
v8::Local<v8::String> message = tester.CheckThrowsReturnMessage()->Get();
v8::Local<v8::String> expected_string = v8_str(snippets[i].return_value());
@@ -1492,8 +1568,7 @@ TEST(BytecodeGraphBuilderContext) {
{factory->NewStringFromStaticChars("innermost inner_changed outer")}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "%s", snippets[i].code_snippet);
@@ -1558,8 +1633,7 @@ TEST(BytecodeGraphBuilderLoadContext) {
"f(0);",
{factory->NewNumberFromInt(24), factory->NewNumberFromInt(4)}}};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "%s", snippets[i].code_snippet);
@@ -1585,10 +1659,13 @@ TEST(BytecodeGraphBuilderCreateArgumentsNoParameters) {
{factory->undefined_value()}},
{"function f(a) {'use strict'; return arguments[0];}",
{factory->undefined_value()}},
+ {"function f(...restArgs) {return restArgs[0];}",
+ {factory->undefined_value()}},
+ {"function f(a, ...restArgs) {return restArgs[0];}",
+ {factory->undefined_value()}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "%s\n%s();", snippets[i].code_snippet, kFunctionName);
@@ -1631,8 +1708,7 @@ TEST(BytecodeGraphBuilderCreateArguments) {
factory->NewNumberFromInt(2), factory->NewNumberFromInt(30)}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(1024);
SNPrintF(script, "%s\n%s();", snippets[i].code_snippet, kFunctionName);
@@ -1647,6 +1723,48 @@ TEST(BytecodeGraphBuilderCreateArguments) {
}
}
+TEST(BytecodeGraphBuilderCreateRestArguments) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<3> snippets[] = {
+ {"function f(...restArgs) {return restArgs[0];}",
+ {factory->NewNumberFromInt(1), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(2), factory->NewNumberFromInt(3)}},
+ {"function f(a, b, ...restArgs) {return restArgs[0];}",
+ {factory->NewNumberFromInt(3), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(2), factory->NewNumberFromInt(3)}},
+ {"function f(a, b, ...restArgs) {return arguments[2];}",
+ {factory->NewNumberFromInt(3), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(2), factory->NewNumberFromInt(3)}},
+ {"function f(a, ...restArgs) { return restArgs[2];}",
+ {factory->undefined_value(), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(2), factory->NewNumberFromInt(3)}},
+ {"function f(a, ...restArgs) { return arguments[0] + restArgs[1];}",
+ {factory->NewNumberFromInt(4), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(2), factory->NewNumberFromInt(3)}},
+ {"function inline_func(a, ...restArgs) { return restArgs[0] }"
+ "function f(a, b, c) {return inline_func(b, c) + arguments[0];}",
+ {factory->NewNumberFromInt(31), factory->NewNumberFromInt(1),
+ factory->NewNumberFromInt(2), factory->NewNumberFromInt(30)}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "%s\n%s();", snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable =
+ tester.GetCallable<Handle<Object>, Handle<Object>, Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0), snippets[i].parameter(1),
+ snippets[i].parameter(2))
+ .ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
TEST(BytecodeGraphBuilderRegExpLiterals) {
HandleAndZoneScope scope;
@@ -1671,8 +1789,7 @@ TEST(BytecodeGraphBuilderRegExpLiterals) {
{factory->NewStringFromStaticChars("AbC")}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(4096);
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -1712,8 +1829,7 @@ TEST(BytecodeGraphBuilderArrayLiterals) {
{"var t = 't'; return [[t, t + 'est'], [1 + t]][1][0];",
{factory->NewStringFromStaticChars("1t")}}};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(4096);
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -1778,8 +1894,7 @@ TEST(BytecodeGraphBuilderObjectLiterals) {
{factory->NewNumberFromInt(987)}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(4096);
SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -1856,10 +1971,38 @@ TEST(BytecodeGraphBuilderIf) {
" if (p1 < -10) { return -2; } else { return -1; }\n"
"}",
{factory->NewNumberFromInt(-1), factory->NewNumberFromInt(-10)}},
+ {"var b = 20, c;"
+ "if (p1 >= 0) {\n"
+ " if (b > 0) { c = 2; } else { c = 3; }\n"
+ "} else {\n"
+ " if (b < -10) { c = -2; } else { c = -1; }\n"
+ "}"
+ "return c;",
+ {factory->NewNumberFromInt(-1), factory->NewNumberFromInt(-1)}},
+ {"var b = 20, c = 10;"
+ "if (p1 >= 0) {\n"
+ " if (b < 0) { c = 2; }\n"
+ "} else {\n"
+ " if (b < -10) { c = -2; } else { c = -1; }\n"
+ "}"
+ "return c;",
+ {factory->NewNumberFromInt(10), factory->NewNumberFromInt(1)}},
+ {"var x = 2, a = 10, b = 20, c, d;"
+ "x = 0;"
+ "if (a) {\n"
+ " b = x;"
+ " if (b > 0) { c = 2; } else { c = 3; }\n"
+ " x = 4; d = 2;"
+ "} else {\n"
+ " d = 3;\n"
+ "}"
+ "x = d;"
+ "function f1() {x}"
+ "return x + c;",
+ {factory->NewNumberFromInt(5), factory->NewNumberFromInt(-1)}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(2048);
SNPrintF(script, "function %s(p1) { %s };\n%s(0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -1890,8 +2033,7 @@ TEST(BytecodeGraphBuilderConditionalOperator) {
{factory->NewNumberFromInt(-10), factory->NewNumberFromInt(20)}},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
+ for (size_t i = 0; i < arraysize(snippets); i++) {
ScopedVector<char> script(2048);
SNPrintF(script, "function %s(p1) { %s };\n%s(0);", kFunctionName,
snippets[i].code_snippet, kFunctionName);
@@ -1952,6 +2094,54 @@ TEST(BytecodeGraphBuilderSwitch) {
}
}
+TEST(BytecodeGraphBuilderSwitchMerge) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ const char* switch_code =
+ "var x = 10;"
+ "switch (p1) {\n"
+ " case 1: x = 0;\n"
+ " case 2: x = 1;\n"
+ " case 3:\n"
+ " case 4: x = 2; break;\n"
+ " case 5: x = 3;\n"
+ " case 9: break;\n"
+ " default: x = 4;\n"
+ "}\n"
+ "return x;";
+
+ ExpectedSnippet<1> snippets[] = {
+ {switch_code,
+ {factory->NewNumberFromInt(2), factory->NewNumberFromInt(1)}},
+ {switch_code,
+ {factory->NewNumberFromInt(2), factory->NewNumberFromInt(2)}},
+ {switch_code,
+ {factory->NewNumberFromInt(2), factory->NewNumberFromInt(3)}},
+ {switch_code,
+ {factory->NewNumberFromInt(2), factory->NewNumberFromInt(4)}},
+ {switch_code,
+ {factory->NewNumberFromInt(3), factory->NewNumberFromInt(5)}},
+ {switch_code,
+ {factory->NewNumberFromInt(10), factory->NewNumberFromInt(9)}},
+ {switch_code,
+ {factory->NewNumberFromInt(4), factory->NewNumberFromInt(6)}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(2048);
+ SNPrintF(script, "function %s(p1) { %s };\n%s(0);", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0)).ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
TEST(BytecodeGraphBuilderNestedSwitch) {
HandleAndZoneScope scope;
@@ -2294,12 +2484,102 @@ TEST(BytecodeGraphBuilderForIn) {
}
-TEST(JumpWithConstantsAndWideConstants) {
+TEST(BytecodeGraphBuilderForOf) {
HandleAndZoneScope scope;
- auto isolate = scope.main_isolate();
- const int kStep = 19;
- int start = 7;
- for (int constants = start; constants < 256 + 3 * kStep; constants += kStep) {
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+ ExpectedSnippet<0> snippets[] = {
+ {" var r = 0;\n"
+ " for (var a of [0,6,7,9]) { r += a; }\n"
+ " return r;\n",
+ {handle(Smi::FromInt(22), isolate)}},
+ {" var r = '';\n"
+ " for (var a of 'foobar') { r = a + r; }\n"
+ " return r;\n",
+ {factory->NewStringFromStaticChars("raboof")}},
+ {" var a = [1, 2, 3];\n"
+ " a.name = 4;\n"
+ " var r = 0;\n"
+ " for (var x of a) { r += x; }\n"
+ " return r;\n",
+ {handle(Smi::FromInt(6), isolate)}},
+ {" var r = '';\n"
+ " var data = [1, 2, 3]; \n"
+ " for (a of data) { delete data[0]; r += a; } return r;",
+ {factory->NewStringFromStaticChars("123")}},
+ {" var r = '';\n"
+ " var data = [1, 2, 3]; \n"
+ " for (a of data) { delete data[2]; r += a; } return r;",
+ {factory->NewStringFromStaticChars("12undefined")}},
+ {" var r = '';\n"
+ " var data = [1, 2, 3]; \n"
+ " for (a of data) { delete data; r += a; } return r;",
+ {factory->NewStringFromStaticChars("123")}},
+ {" var r = '';\n"
+ " var input = 'foobar';\n"
+ " for (var a of input) {\n"
+ " if (a == 'b') break;\n"
+ " r += a;\n"
+ " }\n"
+ " return r;\n",
+ {factory->NewStringFromStaticChars("foo")}},
+ {" var r = '';\n"
+ " var input = 'foobar';\n"
+ " for (var a of input) {\n"
+ " if (a == 'b') continue;\n"
+ " r += a;\n"
+ " }\n"
+ " return r;\n",
+ {factory->NewStringFromStaticChars("fooar")}},
+ {" var r = '';\n"
+ " var data = [1, 2, 3, 4]; \n"
+ " for (a of data) { data[2] = 567; r += a; }\n"
+ " return r;\n",
+ {factory->NewStringFromStaticChars("125674")}},
+ {" var r = '';\n"
+ " var data = [1, 2, 3, 4]; \n"
+ " for (a of data) { data[4] = 567; r += a; }\n"
+ " return r;\n",
+ {factory->NewStringFromStaticChars("1234567")}},
+ {" var r = '';\n"
+ " var data = [1, 2, 3, 4]; \n"
+ " for (a of data) { data[5] = 567; r += a; }\n"
+ " return r;\n",
+ {factory->NewStringFromStaticChars("1234undefined567")}},
+ {" var r = '';\n"
+ " var obj = new Object();\n"
+ " obj[Symbol.iterator] = function() { return {\n"
+ " index: 3,\n"
+ " data: ['a', 'b', 'c', 'd'],"
+ " next: function() {"
+ " return {"
+ " done: this.index == -1,\n"
+ " value: this.index < 0 ? undefined : this.data[this.index--]\n"
+ " }\n"
+ " }\n"
+ " }}\n"
+ " for (a of obj) { r += a }\n"
+ " return r;\n",
+ {factory->NewStringFromStaticChars("dcba")}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+void TestJumpWithConstantsAndWideConstants(size_t shard) {
+ const int kStep = 46;
+ int start = static_cast<int>(7 + 17 * shard);
+ for (int constants = start; constants < 300; constants += kStep) {
std::stringstream filler_os;
// Generate a string that consumes constant pool entries and
// spread out branch distances in script below.
@@ -2321,11 +2601,14 @@ TEST(JumpWithConstantsAndWideConstants) {
script_os << "}\n";
script_os << kFunctionName << "(0);\n";
std::string script(script_os.str());
+
+ HandleAndZoneScope scope;
+ auto isolate = scope.main_isolate();
auto factory = isolate->factory();
auto zone = scope.main_zone();
+ BytecodeGraphTester tester(isolate, zone, script.c_str());
+ auto callable = tester.GetCallable<Handle<Object>>();
for (int a = 0; a < 3; a++) {
- BytecodeGraphTester tester(isolate, zone, script.c_str());
- auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> return_val =
callable(factory->NewNumberFromInt(a)).ToHandleChecked();
static const int results[] = {11, 12, 2};
@@ -2334,6 +2617,368 @@ TEST(JumpWithConstantsAndWideConstants) {
}
}
+SHARD_TEST_BY_4(JumpWithConstantsAndWideConstants)
+
+TEST(BytecodeGraphBuilderDoExpressions) {
+ bool old_flag = FLAG_harmony_do_expressions;
+ FLAG_harmony_do_expressions = true;
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+ ExpectedSnippet<0> snippets[] = {
+ {"var a = do {}; return a;", {factory->undefined_value()}},
+ {"var a = do { var x = 100; }; return a;", {factory->undefined_value()}},
+ {"var a = do { var x = 100; }; return a;", {factory->undefined_value()}},
+ {"var a = do { var x = 100; x++; }; return a;",
+ {handle(Smi::FromInt(100), isolate)}},
+ {"var i = 0; for (; i < 5;) { i = do { if (i == 3) { break; }; i + 1; }};"
+ "return i;",
+ {handle(Smi::FromInt(3), isolate)}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+
+ FLAG_harmony_do_expressions = old_flag;
+}
+
+TEST(BytecodeGraphBuilderWithStatement) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
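+  // Bindings introduced by 'with' should shadow outer variables during
+  // lookup.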
+ ExpectedSnippet<0> snippets[] = {
+ {"with({x:42}) return x;", {handle(Smi::FromInt(42), isolate)}},
+ {"with({}) { var y = 10; return y;}",
+ {handle(Smi::FromInt(10), isolate)}},
+ {"var y = {x:42};"
+ " function inner() {"
+ " var x = 20;"
+ " with(y) return x;"
+ "}"
+ "return inner();",
+ {handle(Smi::FromInt(42), isolate)}},
+ {"var y = {x:42};"
+ " function inner(o) {"
+ " var x = 20;"
+ " with(o) return x;"
+ "}"
+ "return inner(y);",
+ {handle(Smi::FromInt(42), isolate)}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+TEST(BytecodeGraphBuilderConstDeclaration) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
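+  // Each snippet below runs twice: once in sloppy and once in strict mode.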
+ ExpectedSnippet<0> snippets[] = {
+ {"const x = 3; return x;", {handle(Smi::FromInt(3), isolate)}},
+ {"let x = 10; x = x + 20; return x;",
+ {handle(Smi::FromInt(30), isolate)}},
+ {"let x = 10; x = 20; return x;", {handle(Smi::FromInt(20), isolate)}},
+ {"let x; x = 20; return x;", {handle(Smi::FromInt(20), isolate)}},
+ {"let x; return x;", {factory->undefined_value()}},
+ {"var x = 10; { let x = 30; } return x;",
+ {handle(Smi::FromInt(10), isolate)}},
+ {"let x = 10; { let x = 20; } return x;",
+ {handle(Smi::FromInt(10), isolate)}},
+ {"var x = 10; eval('let x = 20;'); return x;",
+ {handle(Smi::FromInt(10), isolate)}},
+ {"var x = 10; eval('const x = 20;'); return x;",
+ {handle(Smi::FromInt(10), isolate)}},
+ {"var x = 10; { const x = 20; } return x;",
+ {handle(Smi::FromInt(10), isolate)}},
+ {"var x = 10; { const x = 20; return x;} return -1;",
+ {handle(Smi::FromInt(20), isolate)}},
+ {"var a = 10;\n"
+ "for (var i = 0; i < 10; ++i) {\n"
+ " const x = i;\n" // const declarations are block scoped.
+ " a = a + x;\n"
+ "}\n"
+ "return a;\n",
+ {handle(Smi::FromInt(55), isolate)}},
+ };
+
+ // Tests for sloppy mode.
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+
+ // Tests for strict mode.
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() {'use strict'; %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+TEST(BytecodeGraphBuilderConstDeclarationLookupSlots) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"const x = 3; function f1() {return x;}; return x;",
+ {handle(Smi::FromInt(3), isolate)}},
+ {"let x = 10; x = x + 20; function f1() {return x;}; return x;",
+ {handle(Smi::FromInt(30), isolate)}},
+ {"let x; x = 20; function f1() {return x;}; return x;",
+ {handle(Smi::FromInt(20), isolate)}},
+ {"let x; function f1() {return x;}; return x;",
+ {factory->undefined_value()}},
+ };
+
+ // Tests for sloppy mode.
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+
+ // Tests for strict mode.
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() {'use strict'; %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+TEST(BytecodeGraphBuilderConstInLookupContextChain) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
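+  // The prologue/epilogue wrap each snippet three functions deep, so the
+  // outer bindings are reached through the lookup context chain.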
+ const char* prologue =
+ "function OuterMost() {\n"
+ " const outerConst = 10;\n"
+ " let outerLet = 20;\n"
+ " function Outer() {\n"
+ " function Inner() {\n"
+ " this.innerFunc = function() { ";
+ const char* epilogue =
+ " }\n"
+ " }\n"
+ " this.getInnerFunc ="
+ " function() {return new Inner().innerFunc;}\n"
+ " }\n"
+ " this.getOuterFunc ="
+ " function() {return new Outer().getInnerFunc();}"
+ "}\n"
+ "var f = new OuterMost().getOuterFunc();\n"
+ "f();\n";
+
+  // Tests for let / const declarations.
+ ExpectedSnippet<0> const_decl[] = {
+ {"return outerConst;", {handle(Smi::FromInt(10), isolate)}},
+ {"return outerLet;", {handle(Smi::FromInt(20), isolate)}},
+ {"outerLet = 30; return outerLet;", {handle(Smi::FromInt(30), isolate)}},
+ {"var outerLet = 40; return outerLet;",
+ {handle(Smi::FromInt(40), isolate)}},
+ {"var outerConst = 50; return outerConst;",
+ {handle(Smi::FromInt(50), isolate)}},
+ {"try { outerConst = 30 } catch(e) { return -1; }",
+ {handle(Smi::FromInt(-1), isolate)}}};
+
+ for (size_t i = 0; i < arraysize(const_decl); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "%s %s %s", prologue, const_decl[i].code_snippet,
+ epilogue);
+
+ BytecodeGraphTester tester(isolate, zone, script.start(), "*");
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*const_decl[i].return_value()));
+ }
+
+  // Tests for legacy const declarations.
+ bool old_flag_legacy_const = FLAG_legacy_const;
+ FLAG_legacy_const = true;
+
+ ExpectedSnippet<0> legacy_const_decl[] = {
+ {"return outerConst = 23;", {handle(Smi::FromInt(23), isolate)}},
+ {"outerConst = 30; return outerConst;",
+ {handle(Smi::FromInt(10), isolate)}},
+ };
+
+ for (size_t i = 0; i < arraysize(legacy_const_decl); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "%s %s %s", prologue, legacy_const_decl[i].code_snippet,
+ epilogue);
+
+ BytecodeGraphTester tester(isolate, zone, script.start(), "*");
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*legacy_const_decl[i].return_value()));
+ }
+
+ FLAG_legacy_const = old_flag_legacy_const;
+}
+
+TEST(BytecodeGraphBuilderIllegalConstDeclaration) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
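+  // Each snippet is expected to throw; the expected "return value" is the
+  // text of the error message.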
+ ExpectedSnippet<0, const char*> illegal_const_decl[] = {
+ {"const x = x = 10 + 3; return x;",
+ {"Uncaught ReferenceError: x is not defined"}},
+ {"const x = 10; x = 20; return x;",
+ {"Uncaught TypeError: Assignment to constant variable."}},
+ {"const x = 10; { x = 20; } return x;",
+ {"Uncaught TypeError: Assignment to constant variable."}},
+ {"const x = 10; eval('x = 20;'); return x;",
+ {"Uncaught TypeError: Assignment to constant variable."}},
+ {"let x = x + 10; return x;",
+ {"Uncaught ReferenceError: x is not defined"}},
+ {"'use strict'; (function f1() { f1 = 123; })() ",
+ {"Uncaught TypeError: Assignment to constant variable."}},
+ };
+
+ // Tests for sloppy mode.
+ for (size_t i = 0; i < arraysize(illegal_const_decl); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ illegal_const_decl[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ v8::Local<v8::String> message = tester.CheckThrowsReturnMessage()->Get();
+ v8::Local<v8::String> expected_string =
+ v8_str(illegal_const_decl[i].return_value());
+ CHECK(
+ message->Equals(CcTest::isolate()->GetCurrentContext(), expected_string)
+ .FromJust());
+ }
+
+ // Tests for strict mode.
+ for (size_t i = 0; i < arraysize(illegal_const_decl); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() {'use strict'; %s }\n%s();", kFunctionName,
+ illegal_const_decl[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ v8::Local<v8::String> message = tester.CheckThrowsReturnMessage()->Get();
+ v8::Local<v8::String> expected_string =
+ v8_str(illegal_const_decl[i].return_value());
+ CHECK(
+ message->Equals(CcTest::isolate()->GetCurrentContext(), expected_string)
+ .FromJust());
+ }
+}
+
+TEST(BytecodeGraphBuilderLegacyConstDeclaration) {
+ bool old_flag_legacy_const = FLAG_legacy_const;
+ FLAG_legacy_const = true;
+
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"const x = (x = 10) + 3; return x;",
+ {handle(Smi::FromInt(13), isolate)}},
+ {"const x = 10; x = 20; return x;", {handle(Smi::FromInt(10), isolate)}},
+ {"var a = 10;\n"
+ "for (var i = 0; i < 10; ++i) {\n"
+ " const x = i;\n" // Legacy constants are not block scoped.
+ " a = a + x;\n"
+ "}\n"
+ "return a;\n",
+ {handle(Smi::FromInt(10), isolate)}},
+ {"const x = 20; eval('x = 10;'); return x;",
+ {handle(Smi::FromInt(20), isolate)}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+
+ FLAG_legacy_const = old_flag_legacy_const;
+}
+
+TEST(BytecodeGraphBuilderDebuggerStatement) {
+ FLAG_expose_debug_as = "debug";
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
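+  // The debugger statement should produce exactly one debug break event.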
+ ExpectedSnippet<0> snippet = {
+ "var Debug = debug.Debug;"
+ "var count = 0;"
+ "function f() {"
+ " debugger;"
+ "}"
+ "function listener(event) {"
+ " if (event == Debug.DebugEvent.Break) count++;"
+ "}"
+ "Debug.setListener(listener);"
+ "f();"
+ "Debug.setListener(null);"
+ "return count;",
+ {handle(Smi::FromInt(1), isolate)}};
+
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippet.code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippet.return_value()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-deopt.cc b/deps/v8/test/cctest/compiler/test-run-deopt.cc
index 8b4c9dccb1..76dc9acae3 100644
--- a/deps/v8/test/cctest/compiler/test-run-deopt.cc
+++ b/deps/v8/test/cctest/compiler/test-run-deopt.cc
@@ -84,7 +84,6 @@ TEST(DeoptExceptionHandlerCatch) {
TEST(DeoptExceptionHandlerFinally) {
FLAG_allow_natives_syntax = true;
- FLAG_turbo_try_finally = true;
FunctionTester T(
"(function f() {"
@@ -98,9 +97,7 @@ TEST(DeoptExceptionHandlerFinally) {
CompileRun("function DeoptAndThrow(f) { %DeoptimizeFunction(f); throw 0; }");
InstallIsOptimizedHelper(CcTest::isolate());
-#if 0 // TODO(4195,mstarzinger): Reproduces on MIPS64, re-enable once fixed.
T.CheckCall(T.false_value());
-#endif
}
diff --git a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
index b2017114b4..6e9ebf2282 100644
--- a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
+++ b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
@@ -104,18 +104,6 @@ TEST(IsFunction) {
}
-TEST(IsMinusZero) {
- FunctionTester T("(function(a) { return %_IsMinusZero(a); })", flags);
-
- T.CheckFalse(T.Val(1));
- T.CheckFalse(T.Val(1.1));
- T.CheckTrue(T.Val(-0.0));
- T.CheckFalse(T.Val(-2));
- T.CheckFalse(T.Val(-2.3));
- T.CheckFalse(T.undefined());
-}
-
-
TEST(IsRegExp) {
FunctionTester T("(function(a) { return %_IsRegExp(a); })", flags);
@@ -148,19 +136,6 @@ TEST(IsSmi) {
}
-TEST(ObjectEquals) {
- FunctionTester T("(function(a,b) { return %_ObjectEquals(a,b); })", flags);
- CompileRun("var o = {}");
-
- T.CheckTrue(T.NewObject("(o)"), T.NewObject("(o)"));
- T.CheckTrue(T.Val("internal"), T.Val("internal"));
- T.CheckTrue(T.true_value(), T.true_value());
- T.CheckFalse(T.true_value(), T.false_value());
- T.CheckFalse(T.NewObject("({})"), T.NewObject("({})"));
- T.CheckFalse(T.Val("a"), T.Val("b"));
-}
-
-
TEST(OneByteSeqStringGetChar) {
FunctionTester T("(function(a,b) { return %_OneByteSeqStringGetChar(a,b); })",
flags);
@@ -192,15 +167,6 @@ TEST(OneByteSeqStringSetChar) {
}
-TEST(SetValueOf) {
- FunctionTester T("(function(a,b) { return %_SetValueOf(a,b); })", flags);
-
- T.CheckCall(T.Val("a"), T.NewObject("(new String)"), T.Val("a"));
- T.CheckCall(T.Val(123), T.NewObject("(new Number)"), T.Val(123));
- T.CheckCall(T.Val("x"), T.undefined(), T.Val("x"));
-}
-
-
TEST(StringAdd) {
FunctionTester T("(function(a,b) { return %_StringAdd(a,b); })", flags);
diff --git a/deps/v8/test/cctest/compiler/test-run-jscalls.cc b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
index 474453da7d..c28295857e 100644
--- a/deps/v8/test/cctest/compiler/test-run-jscalls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
@@ -21,37 +21,36 @@ TEST(SimpleCall) {
TEST(SimpleCall2) {
FunctionTester T("(function(foo,a) { return foo(a); })");
- Handle<JSFunction> foo = T.NewFunction("(function(a) { return a; })");
- T.Compile(foo);
+ FunctionTester U("(function(a) { return a; })");
- T.CheckCall(T.Val(3), foo, T.Val(3));
- T.CheckCall(T.Val(3.1), foo, T.Val(3.1));
- T.CheckCall(foo, foo, foo);
- T.CheckCall(T.Val("Abba"), foo, T.Val("Abba"));
+ T.CheckCall(T.Val(3), U.function, T.Val(3));
+ T.CheckCall(T.Val(3.1), U.function, T.Val(3.1));
+ T.CheckCall(U.function, U.function, U.function);
+ T.CheckCall(T.Val("Abba"), U.function, T.Val("Abba"));
}
TEST(ConstCall) {
FunctionTester T("(function(foo,a) { return foo(a,3); })");
- Handle<JSFunction> foo = T.NewFunction("(function(a,b) { return a + b; })");
- T.Compile(foo);
+ FunctionTester U("(function(a,b) { return a + b; })");
- T.CheckCall(T.Val(6), foo, T.Val(3));
- T.CheckCall(T.Val(6.1), foo, T.Val(3.1));
- T.CheckCall(T.Val("function (a,b) { return a + b; }3"), foo, foo);
- T.CheckCall(T.Val("Abba3"), foo, T.Val("Abba"));
+ T.CheckCall(T.Val(6), U.function, T.Val(3));
+ T.CheckCall(T.Val(6.1), U.function, T.Val(3.1));
+ T.CheckCall(T.Val("function (a,b) { return a + b; }3"), U.function,
+ U.function);
+ T.CheckCall(T.Val("Abba3"), U.function, T.Val("Abba"));
}
TEST(ConstCall2) {
FunctionTester T("(function(foo,a) { return foo(a,\"3\"); })");
- Handle<JSFunction> foo = T.NewFunction("(function(a,b) { return a + b; })");
- T.Compile(foo);
+ FunctionTester U("(function(a,b) { return a + b; })");
- T.CheckCall(T.Val("33"), foo, T.Val(3));
- T.CheckCall(T.Val("3.13"), foo, T.Val(3.1));
- T.CheckCall(T.Val("function (a,b) { return a + b; }3"), foo, foo);
- T.CheckCall(T.Val("Abba3"), foo, T.Val("Abba"));
+ T.CheckCall(T.Val("33"), U.function, T.Val(3));
+ T.CheckCall(T.Val("3.13"), U.function, T.Val(3.1));
+ T.CheckCall(T.Val("function (a,b) { return a + b; }3"), U.function,
+ U.function);
+ T.CheckCall(T.Val("Abba3"), U.function, T.Val("Abba"));
}
@@ -130,7 +129,6 @@ TEST(ConstructorCall) {
}
-// TODO(titzer): factor these out into test-runtime-calls.cc
TEST(RuntimeCallCPP2) {
FLAG_allow_natives_syntax = true;
FunctionTester T("(function(a,b) { return %NumberImul(a, b); })");
diff --git a/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc b/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
index 37b2a2d243..ab8c42a979 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
@@ -61,7 +61,6 @@ TEST(ThrowMessageDirectly) {
TEST(ThrowMessageIndirectly) {
- i::FLAG_turbo_try_finally = true;
static const char* src =
"(function(a, b) {"
" try {"
@@ -170,7 +169,6 @@ TEST(CatchCall) {
TEST(Finally) {
- i::FLAG_turbo_try_finally = true;
const char* src =
"(function(a,b) {"
" var r = '-';"
@@ -188,7 +186,6 @@ TEST(Finally) {
TEST(FinallyBreak) {
- i::FLAG_turbo_try_finally = true;
const char* src =
"(function(a,b) {"
" var r = '-';"
@@ -244,7 +241,6 @@ TEST(DeoptCatch) {
TEST(DeoptFinallyReturn) {
- i::FLAG_turbo_try_finally = true;
const char* src =
"(function f(a) {"
" try {"
@@ -261,7 +257,6 @@ TEST(DeoptFinallyReturn) {
TEST(DeoptFinallyReThrow) {
- i::FLAG_turbo_try_finally = true;
const char* src =
"(function f(a) {"
" try {"
@@ -272,9 +267,7 @@ TEST(DeoptFinallyReThrow) {
"})";
FunctionTester T(src);
-#if 0 // TODO(mstarzinger): Enable once we can.
T.CheckThrows(T.NewObject("new Error"), T.Val(1));
-#endif
}
} // namespace compiler
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 11a3582cbb..fba9e0e1a5 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -29,6 +29,25 @@ TEST(RunInt32Add) {
}
+TEST(RunWord32ReverseBits) {
+ BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
+ if (!m.machine()->Word32ReverseBits().IsSupported()) {
+ // We can only test the operator if it exists on the testing platform.
+ return;
+ }
+ m.Return(m.AddNode(m.machine()->Word32ReverseBits().op(), m.Parameter(0)));
+
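+  // Each expected value is the 32-bit input with its bit order reversed.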
+ CHECK_EQ(uint32_t(0x00000000), m.Call(uint32_t(0x00000000)));
+ CHECK_EQ(uint32_t(0x12345678), m.Call(uint32_t(0x1e6a2c48)));
+ CHECK_EQ(uint32_t(0xfedcba09), m.Call(uint32_t(0x905d3b7f)));
+ CHECK_EQ(uint32_t(0x01010101), m.Call(uint32_t(0x80808080)));
+ CHECK_EQ(uint32_t(0x01020408), m.Call(uint32_t(0x10204080)));
+ CHECK_EQ(uint32_t(0xf0703010), m.Call(uint32_t(0x080c0e0f)));
+ CHECK_EQ(uint32_t(0x1f8d0a3a), m.Call(uint32_t(0x5c50b1f8)));
+ CHECK_EQ(uint32_t(0xffffffff), m.Call(uint32_t(0xffffffff)));
+}
+
+
TEST(RunWord32Ctz) {
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
if (!m.machine()->Word32Ctz().IsSupported()) {
@@ -72,7 +91,6 @@ TEST(RunWord32Ctz) {
CHECK_EQ(0, m.Call(uint32_t(0x9afdbc81)));
}
-
TEST(RunWord32Clz) {
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32());
m.Return(m.Word32Clz(m.Parameter(0)));
@@ -133,6 +151,25 @@ TEST(RunWord32Popcnt) {
#if V8_TARGET_ARCH_64_BIT
+TEST(RunWord64ReverseBits) {
+ RawMachineAssemblerTester<uint64_t> m(MachineType::Uint64());
+ if (!m.machine()->Word64ReverseBits().IsSupported()) {
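+    // The operator can only be tested if it exists on the testing platform.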
+ return;
+ }
+
+ m.Return(m.AddNode(m.machine()->Word64ReverseBits().op(), m.Parameter(0)));
+
+ CHECK_EQ(uint64_t(0x0000000000000000), m.Call(uint64_t(0x0000000000000000)));
+ CHECK_EQ(uint64_t(0x1234567890abcdef), m.Call(uint64_t(0xf7b3d5091e6a2c48)));
+ CHECK_EQ(uint64_t(0xfedcba0987654321), m.Call(uint64_t(0x84c2a6e1905d3b7f)));
+ CHECK_EQ(uint64_t(0x0101010101010101), m.Call(uint64_t(0x8080808080808080)));
+ CHECK_EQ(uint64_t(0x0102040803060c01), m.Call(uint64_t(0x803060c010204080)));
+ CHECK_EQ(uint64_t(0xf0703010e060200f), m.Call(uint64_t(0xf0040607080c0e0f)));
+ CHECK_EQ(uint64_t(0x2f8a6df01c21fa3b), m.Call(uint64_t(0xdc5f84380fb651f4)));
+ CHECK_EQ(uint64_t(0xffffffffffffffff), m.Call(uint64_t(0xffffffffffffffff)));
+}
+
+
TEST(RunWord64Clz) {
BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint64());
m.Return(m.Word64Clz(m.Parameter(0)));
@@ -3534,7 +3571,7 @@ static void RunLoadImmIndex(MachineType rep) {
const int kNumElems = 3;
Type buffer[kNumElems];
- // initialize the buffer with raw data.
+ // initialize the buffer with some raw data.
byte* raw = reinterpret_cast<byte*>(buffer);
for (size_t i = 0; i < sizeof(buffer); i++) {
raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
@@ -3543,14 +3580,14 @@ static void RunLoadImmIndex(MachineType rep) {
// Test with various large and small offsets.
for (int offset = -1; offset <= 200000; offset *= -5) {
for (int i = 0; i < kNumElems; i++) {
- RawMachineAssemblerTester<Type> m;
+ BufferedRawMachineAssemblerTester<Type> m;
Node* base = m.PointerConstant(buffer - offset);
Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
m.Return(m.Load(rep, base, index));
- Type expected = buffer[i];
- Type actual = m.Call();
- CHECK(expected == actual);
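+      // volatile prevents the compiler from caching the values in registers.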
+ volatile Type expected = buffer[i];
+ volatile Type actual = m.Call();
+ CHECK_EQ(expected, actual);
}
}
}
@@ -3564,9 +3601,11 @@ TEST(RunLoadImmIndex) {
RunLoadImmIndex<int32_t>(MachineType::Int32());
RunLoadImmIndex<uint32_t>(MachineType::Uint32());
RunLoadImmIndex<int32_t*>(MachineType::AnyTagged());
-
- // TODO(titzer): test kRepBit loads
- // TODO(titzer): test MachineType::Float64() loads
+ RunLoadImmIndex<float>(MachineType::Float32());
+ RunLoadImmIndex<double>(MachineType::Float64());
+ if (kPointerSize == 8) {
+ RunLoadImmIndex<int64_t>(MachineType::Int64());
+ }
// TODO(titzer): test various indexing modes.
}
@@ -4124,6 +4163,43 @@ TEST(RunChangeUint32ToFloat64) {
}
+TEST(RunTruncateFloat32ToInt32) {
+ BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Float32());
+ m.Return(m.TruncateFloat32ToInt32(m.Parameter(0)));
+ FOR_FLOAT32_INPUTS(i) {
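+    // Casting an out-of-range float to int32 is undefined, so skip those.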
+ if (*i <= static_cast<float>(std::numeric_limits<int32_t>::max()) &&
+ *i >= static_cast<float>(std::numeric_limits<int32_t>::min())) {
+ CheckFloatEq(static_cast<int32_t>(*i), m.Call(*i));
+ }
+ }
+}
+
+
+TEST(RunTruncateFloat32ToUint32) {
+ BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Float32());
+ m.Return(m.TruncateFloat32ToUint32(m.Parameter(0)));
+ {
+ FOR_UINT32_INPUTS(i) {
+ float input = static_cast<float>(*i);
+ // This condition on 'input' is required because
+ // static_cast<float>(std::numeric_limits<uint32_t>::max()) results in a
+ // value outside uint32 range.
+ if (input < static_cast<float>(std::numeric_limits<uint32_t>::max())) {
+ CHECK_EQ(static_cast<uint32_t>(input), m.Call(input));
+ }
+ }
+ }
+ {
+ FOR_FLOAT32_INPUTS(i) {
+ if (*i <= static_cast<float>(std::numeric_limits<uint32_t>::max()) &&
+ *i >= static_cast<float>(std::numeric_limits<uint32_t>::min())) {
+ CheckFloatEq(static_cast<uint32_t>(*i), m.Call(*i));
+ }
+ }
+ }
+}
+
+
TEST(RunChangeFloat64ToInt32_A) {
BufferedRawMachineAssemblerTester<int32_t> m;
double magic = 11.1;
@@ -5577,6 +5653,79 @@ TEST(RunCallCFunction8) {
}
#endif // USE_SIMULATOR
+template <typename T>
+void TestExternalReferenceFunction(
+ BufferedRawMachineAssemblerTester<int32_t>* m, ExternalReference ref,
+ T (*comparison)(T)) {
+ T parameter;
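+  // The wrapped C function reads its argument through this pointer and
+  // writes the result back in place; the graph's own return value is a dummy.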
+
+ Node* function = m->ExternalConstant(ref);
+ m->CallCFunction1(MachineType::Pointer(), MachineType::Pointer(), function,
+ m->PointerConstant(&parameter));
+ m->Return(m->Int32Constant(4356));
+ FOR_FLOAT64_INPUTS(i) {
+ parameter = *i;
+ m->Call();
+ CheckDoubleEq(comparison(*i), parameter);
+ }
+}
+
+TEST(RunCallExternalReferenceF32Trunc) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref =
+ ExternalReference::f32_trunc_wrapper_function(m.isolate());
+ TestExternalReferenceFunction<float>(&m, ref, truncf);
+}
+
+TEST(RunCallExternalReferenceF32Floor) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref =
+ ExternalReference::f32_floor_wrapper_function(m.isolate());
+ TestExternalReferenceFunction<float>(&m, ref, floorf);
+}
+
+TEST(RunCallExternalReferenceF32Ceil) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref =
+ ExternalReference::f32_ceil_wrapper_function(m.isolate());
+ TestExternalReferenceFunction<float>(&m, ref, ceilf);
+}
+
+TEST(RunCallExternalReferenceF32RoundTiesEven) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref =
+ ExternalReference::f32_nearest_int_wrapper_function(m.isolate());
+ TestExternalReferenceFunction<float>(&m, ref, nearbyintf);
+}
+
+TEST(RunCallExternalReferenceF64Trunc) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref =
+ ExternalReference::f64_trunc_wrapper_function(m.isolate());
+ TestExternalReferenceFunction<double>(&m, ref, trunc);
+}
+
+TEST(RunCallExternalReferenceF64Floor) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref =
+ ExternalReference::f64_floor_wrapper_function(m.isolate());
+ TestExternalReferenceFunction<double>(&m, ref, floor);
+}
+
+TEST(RunCallExternalReferenceF64Ceil) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref =
+ ExternalReference::f64_ceil_wrapper_function(m.isolate());
+ TestExternalReferenceFunction<double>(&m, ref, ceil);
+}
+
+TEST(RunCallExternalReferenceF64RoundTiesEven) {
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ ExternalReference ref =
+ ExternalReference::f64_nearest_int_wrapper_function(m.isolate());
+ TestExternalReferenceFunction<double>(&m, ref, nearbyint);
+}
+
#if V8_TARGET_ARCH_64_BIT
// TODO(titzer): run int64 tests on all platforms when supported.
TEST(RunCheckedLoadInt64) {
@@ -6001,6 +6150,26 @@ TEST(RunBitcastFloat32ToInt32) {
}
+TEST(RunRoundInt32ToFloat32) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Int32());
+ m.Return(m.RoundInt32ToFloat32(m.Parameter(0)));
+ FOR_INT32_INPUTS(i) {
+ volatile float expected = static_cast<float>(*i);
+ CHECK_EQ(expected, m.Call(*i));
+ }
+}
+
+
+TEST(RunRoundUint32ToFloat32) {
+ BufferedRawMachineAssemblerTester<float> m(MachineType::Uint32());
+ m.Return(m.RoundUint32ToFloat32(m.Parameter(0)));
+ FOR_UINT32_INPUTS(i) {
+ volatile float expected = static_cast<float>(*i);
+ CHECK_EQ(expected, m.Call(*i));
+ }
+}
+
+
TEST(RunBitcastInt32ToFloat32) {
int32_t input = 1;
float output = 0.0;
@@ -6068,6 +6237,27 @@ TEST(RunComputedCodeObject) {
CHECK_EQ(44, r.Call(0));
}
+TEST(ParentFramePointer) {
+ RawMachineAssemblerTester<int32_t> r(MachineType::Int32());
+ RawMachineLabel tlabel;
+ RawMachineLabel flabel;
+ RawMachineLabel merge;
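+  // Loading through the frame pointer yields the caller's frame pointer,
+  // which must match LoadParentFramePointer; the phi then selects 1.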
+ Node* frame = r.LoadFramePointer();
+ Node* parent_frame = r.LoadParentFramePointer();
+ frame = r.Load(MachineType::IntPtr(), frame);
+ r.Branch(r.WordEqual(frame, parent_frame), &tlabel, &flabel);
+ r.Bind(&tlabel);
+ Node* fa = r.Int32Constant(1);
+ r.Goto(&merge);
+ r.Bind(&flabel);
+ Node* fb = r.Int32Constant(0);
+ r.Goto(&merge);
+ r.Bind(&merge);
+ Node* phi = r.Phi(MachineRepresentation::kWord32, fa, fb);
+ r.Return(phi);
+ CHECK_EQ(1, r.Call(1));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 791b0d7ae5..89114097d8 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -1157,6 +1157,99 @@ TEST(MixedParams_1) { MixedParamTest(1); }
TEST(MixedParams_2) { MixedParamTest(2); }
TEST(MixedParams_3) { MixedParamTest(3); }
+template <typename T>
+void TestStackSlot(MachineType slot_type, T expected) {
+  // Test: Generate a function f which reserves a stack slot, and call an
+  // inner function g from f that writes into f's stack slot.
+
+ if (RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
+ ->num_allocatable_double_registers() < 2)
+ return;
+
+ Isolate* isolate = CcTest::InitIsolateOnce();
+
+  // Lots of code to generate the call descriptor for the inner function.
+ int parray_gp[] = {
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
+ ->GetAllocatableGeneralCode(0),
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
+ ->GetAllocatableGeneralCode(1)};
+ int rarray_gp[] = {
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
+ ->GetAllocatableGeneralCode(0)};
+ int parray_fp[] = {
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
+ ->GetAllocatableDoubleCode(0),
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
+ ->GetAllocatableDoubleCode(1)};
+ int rarray_fp[] = {
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
+ ->GetAllocatableDoubleCode(0)};
+ Allocator palloc(parray_gp, 2, parray_fp, 2);
+ Allocator ralloc(rarray_gp, 1, rarray_fp, 1);
+ RegisterConfig config(palloc, ralloc);
+
+ Zone zone;
+ HandleScope scope(isolate);
+ MachineSignature::Builder builder(&zone, 1, 12);
+ builder.AddReturn(MachineType::Int32());
+ for (int i = 0; i < 10; i++) {
+ builder.AddParam(MachineType::Int32());
+ }
+ builder.AddParam(slot_type);
+ builder.AddParam(MachineType::Pointer());
+ MachineSignature* sig = builder.Build();
+ CallDescriptor* desc = config.Create(&zone, sig);
+
+  // Create the inner function g. g has lots of parameters so that some of
+  // them are passed on the stack.
+ Handle<Code> inner;
+ Graph graph(&zone);
+ RawMachineAssembler g(isolate, &graph, desc);
+
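+  // Parameter 10 is the value to store; parameter 11 is the address of f's
+  // stack slot. The ten leading int32 parameters push both onto the stack.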
+ g.Store(slot_type.representation(), g.Parameter(11), g.Parameter(10),
+ WriteBarrierKind::kNoWriteBarrier);
+ g.Return(g.Parameter(9));
+ inner = CompileGraph("Compute", desc, &graph, g.Export());
+
+ // Create function f with a stack slot which calls the inner function g.
+ BufferedRawMachineAssemblerTester<T> f(slot_type);
+ Node* target = f.HeapConstant(inner);
+ Node* stack_slot = f.StackSlot(slot_type.representation());
+ Node* args[12];
+ for (int i = 0; i < 10; i++) {
+ args[i] = f.Int32Constant(i);
+ }
+ args[10] = f.Parameter(0);
+ args[11] = stack_slot;
+
+ f.CallN(desc, target, args);
+ f.Return(f.Load(slot_type, stack_slot, f.IntPtrConstant(0)));
+
+ CHECK_EQ(expected, f.Call(expected));
+}
+
+TEST(RunStackSlotInt32) {
+ int32_t magic = 0x12345678;
+ TestStackSlot(MachineType::Int32(), magic);
+}
+
+#if !V8_TARGET_ARCH_32_BIT
+TEST(RunStackSlotInt64) {
+ int64_t magic = 0x123456789abcdef0;
+ TestStackSlot(MachineType::Int64(), magic);
+}
+#endif
+
+TEST(RunStackSlotFloat32) {
+ float magic = 1234.125f;
+ TestStackSlot(MachineType::Float32(), magic);
+}
+
+TEST(RunStackSlotFloat64) {
+ double magic = 3456.375;
+ TestStackSlot(MachineType::Float64(), magic);
+}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/compiler/test-run-stubs.cc b/deps/v8/test/cctest/compiler/test-run-stubs.cc
index 7a2a09405c..c7452191bf 100644
--- a/deps/v8/test/cctest/compiler/test-run-stubs.cc
+++ b/deps/v8/test/cctest/compiler/test-run-stubs.cc
@@ -27,8 +27,13 @@ TEST(RunStringLengthStub) {
// Create code and an accompanying descriptor.
StringLengthStub stub(isolate);
Handle<Code> code = stub.GenerateCode();
- CompilationInfo info(&stub, isolate, zone);
- CallDescriptor* descriptor = Linkage::ComputeIncoming(zone, &info);
+ CompilationInfo info("test", isolate, zone,
+ Code::ComputeFlags(Code::HANDLER));
+ CallInterfaceDescriptor interface_descriptor =
+ stub.GetCallInterfaceDescriptor();
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate, zone, interface_descriptor, stub.GetStackParameterCount(),
+ CallDescriptor::kNoFlags, Operator::kNoProperties);
// Create a function to call the code using the descriptor.
Graph graph(zone);
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index cbde9a7417..83cd33c5b0 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -312,6 +312,23 @@ class ValueHelper {
#define FOR_UINT32_SHIFTS(var) for (uint32_t var = 0; var < 32; var++)
+// TODO(bmeurer): Drop this crap once we switch to GTest/Gmock.
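+// NaN never compares equal to itself, hence the explicit isnan handling.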
+static inline void CheckFloatEq(volatile float x, volatile float y) {
+ if (std::isnan(x)) {
+ CHECK(std::isnan(y));
+ } else {
+ CHECK_EQ(x, y);
+ }
+}
+
+static inline void CheckDoubleEq(volatile double x, volatile double y) {
+ if (std::isnan(x)) {
+ CHECK(std::isnan(y));
+ } else {
+ CHECK_EQ(x, y);
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/heap-tester.h b/deps/v8/test/cctest/heap/heap-tester.h
index 0a0860bcc4..5d098f57ab 100644
--- a/deps/v8/test/cctest/heap/heap-tester.h
+++ b/deps/v8/test/cctest/heap/heap-tester.h
@@ -28,9 +28,9 @@
V(StressHandles) \
V(TestMemoryReducerSampleJsCalls) \
V(TestSizeOfObjects) \
+ V(Regress587004) \
V(WriteBarriersInCopyJSObject)
-
#define HEAP_TEST(Name) \
CcTest register_test_##Name(v8::internal::HeapTester::Test##Name, __FILE__, \
#Name, NULL, true, true); \
@@ -59,25 +59,6 @@ class HeapTester {
/* test-api.cc */
static void ResetWeakHandle(bool global_gc);
-
- /* test-spaces.cc */
- static CompactionSpaceCollection** InitializeCompactionSpaces(Heap* heap,
- int num_spaces);
- static void DestroyCompactionSpaces(CompactionSpaceCollection** spaces,
- int num_spaces);
- static void MergeCompactionSpaces(PagedSpace* space,
- CompactionSpaceCollection** spaces,
- int num_spaces);
- static void AllocateInCompactionSpaces(CompactionSpaceCollection** spaces,
- AllocationSpace id, int num_spaces,
- int num_objects, int object_size);
- static void CompactionStats(CompactionSpaceCollection** spaces,
- AllocationSpace id, int num_spaces,
- intptr_t* capacity, intptr_t* size);
- static void TestCompactionSpaceDivide(int num_additional_objects,
- int object_size,
- int num_compaction_spaces,
- int additional_capacity_in_bytes);
};
} // namespace internal
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 726887a23a..88aee8adf8 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -33,6 +33,7 @@
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/factory.h"
+#include "src/field-type.h"
#include "src/global-handles.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/memory-reducer.h"
@@ -1515,6 +1516,50 @@ TEST(TestCodeFlushingIncrementalAbort) {
CHECK(function->is_compiled() || !function->IsOptimized());
}
+TEST(TestUseOfIncrementalBarrierOnCompileLazy) {
+ // Turn off always_opt because it interferes with running the built-in for
+ // the last call to g().
+ i::FLAG_always_opt = false;
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ v8::HandleScope scope(CcTest::isolate());
+
+ CompileRun(
+ "function make_closure(x) {"
+ " return function() { return x + 3 };"
+ "}"
+ "var f = make_closure(5); f();"
+ "var g = make_closure(5);");
+
+ // Check f is compiled.
+ Handle<String> f_name = factory->InternalizeUtf8String("f");
+ Handle<Object> f_value =
+ Object::GetProperty(isolate->global_object(), f_name).ToHandleChecked();
+ Handle<JSFunction> f_function = Handle<JSFunction>::cast(f_value);
+ CHECK(f_function->is_compiled());
+
+ // Check g is not compiled.
+ Handle<String> g_name = factory->InternalizeUtf8String("g");
+ Handle<Object> g_value =
+ Object::GetProperty(isolate->global_object(), g_name).ToHandleChecked();
+ Handle<JSFunction> g_function = Handle<JSFunction>::cast(g_value);
+  // TODO(mvstanton): change to check that g is *not* compiled when optimized
+  // cache map lookup moves to the compile lazy builtin.
+ CHECK(g_function->is_compiled());
+
+ SimulateIncrementalMarking(heap);
+ CompileRun("%OptimizeFunctionOnNextCall(f); f();");
+
+  // g should now have an optimized function available, unmarked by gc. The
+ // CompileLazy built-in will discover it and install it in the closure, and
+ // the incremental write barrier should be used.
+ CompileRun("g();");
+ CHECK(g_function->is_compiled());
+}
TEST(CompilationCacheCachingBehavior) {
// If we do not flush code, or have the compilation cache turned off, this
@@ -3514,6 +3559,13 @@ TEST(ReleaseOverReservedPages) {
// The optimizer can allocate stuff, messing up the test.
i::FLAG_crankshaft = false;
i::FLAG_always_opt = false;
+ // Parallel compaction increases fragmentation, depending on how existing
+ // memory is distributed. Since this is non-deterministic because of
+ // concurrent sweeping, we disable it for this test.
+ i::FLAG_parallel_compaction = false;
+  // Concurrent sweeping adds non-determinism, depending on when memory is
+ // available for further reuse.
+ i::FLAG_concurrent_sweeping = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
@@ -4163,9 +4215,6 @@ TEST(Regress169209) {
CHECK(shared1->code()->gc_metadata() != NULL);
// Optimize function and make sure the unoptimized code is replaced.
-#ifdef DEBUG
- FLAG_stop_at = "f";
-#endif
CompileRun("%OptimizeFunctionOnNextCall(g);"
"g(false);");
@@ -5555,8 +5604,8 @@ TEST(Regress507979) {
Handle<FixedArray> o1 = isolate->factory()->NewFixedArray(kFixedArrayLen);
Handle<FixedArray> o2 = isolate->factory()->NewFixedArray(kFixedArrayLen);
- CHECK(heap->InNewSpace(o1->address()));
- CHECK(heap->InNewSpace(o2->address()));
+ CHECK(heap->InNewSpace(*o1));
+ CHECK(heap->InNewSpace(*o2));
HeapIterator it(heap, i::HeapIterator::kFilterUnreachable);
@@ -5571,33 +5620,6 @@ TEST(Regress507979) {
}
-TEST(ArrayShiftSweeping) {
- i::FLAG_expose_gc = true;
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
-
- v8::Local<v8::Value> result = CompileRun(
- "var array = new Array(400);"
- "var tmp = new Array(1000);"
- "array[0] = 10;"
- "gc();"
- "gc();"
- "array.shift();"
- "array;");
-
- Handle<JSObject> o = Handle<JSObject>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result)));
- CHECK(heap->InOldSpace(o->elements()));
- CHECK(heap->InOldSpace(*o));
- Page* page = Page::FromAddress(o->elements()->address());
- CHECK(page->parallel_sweeping_state().Value() <=
- MemoryChunk::kSweepingFinalize ||
- Marking::IsBlack(Marking::MarkBitFrom(o->elements())));
-}
-
-
UNINITIALIZED_TEST(PromotionQueue) {
i::FLAG_expose_gc = true;
i::FLAG_max_semi_space_size = 2 * (Page::kPageSize / MB);
@@ -5681,10 +5703,12 @@ TEST(Regress388880) {
Heap* heap = isolate->heap();
Handle<Map> map1 = Map::Create(isolate, 1);
+ Handle<String> name = factory->NewStringFromStaticChars("foo");
+ name = factory->InternalizeString(name);
Handle<Map> map2 =
- Map::CopyWithField(map1, factory->NewStringFromStaticChars("foo"),
- HeapType::Any(isolate), NONE, Representation::Tagged(),
- OMIT_TRANSITION).ToHandleChecked();
+ Map::CopyWithField(map1, name, FieldType::Any(isolate), NONE,
+ Representation::Tagged(), OMIT_TRANSITION)
+ .ToHandleChecked();
int desired_offset = Page::kPageSize - map1->instance_size();
@@ -6232,7 +6256,6 @@ TEST(MessageObjectLeak) {
const char* flag = "--turbo-filter=*";
FlagList::SetFlagsFromString(flag, StrLength(flag));
FLAG_always_opt = true;
- FLAG_turbo_try_finally = true;
CompileRun(test);
}
@@ -6470,6 +6493,43 @@ HEAP_TEST(TestMemoryReducerSampleJsCalls) {
CheckDoubleEquals(2, calls_per_ms);
}
+HEAP_TEST(Regress587004) {
+ FLAG_concurrent_sweeping = false;
+#ifdef VERIFY_HEAP
+ FLAG_verify_heap = false;
+#endif
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
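+  // Build a large tenured array full of new-space pointers, right-trim it,
+  // fill old space up to OOM, then force a new-space GC (the regression
+  // scenario of bug 587004).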
+ const int N = (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) /
+ kPointerSize;
+ Handle<FixedArray> array = factory->NewFixedArray(N, TENURED);
+ CHECK(heap->old_space()->Contains(*array));
+ Handle<Object> number = factory->NewHeapNumber(1.0);
+ CHECK(heap->InNewSpace(*number));
+ for (int i = 0; i < N; i++) {
+ array->set(i, *number);
+ }
+ heap->CollectGarbage(OLD_SPACE);
+ SimulateFullSpace(heap->old_space());
+ heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(*array, N - 1);
+ heap->mark_compact_collector()->EnsureSweepingCompleted();
+ ByteArray* byte_array;
+ const int M = 256;
+ // Don't allow old space expansion. The test works without this flag too,
+ // but becomes very slow.
+ heap->set_force_oom(true);
+ while (heap->AllocateByteArray(M, TENURED).To(&byte_array)) {
+ for (int j = 0; j < M; j++) {
+ byte_array->set(j, 0x31);
+ }
+ }
+ // Re-enable old space expansion to avoid OOM crash.
+ heap->set_force_oom(false);
+ heap->CollectGarbage(NEW_SPACE);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 2fe099d2e3..41345bc7d1 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -448,236 +448,6 @@ TEST(CompactionSpace) {
}
-TEST(CompactionSpaceUsingExternalMemory) {
- const int kObjectSize = 512;
-
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- MemoryAllocator* allocator = new MemoryAllocator(isolate);
- CHECK(allocator != nullptr);
- CHECK(allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
- TestMemoryAllocatorScope test_scope(isolate, allocator);
-
- CompactionSpaceCollection* collection = new CompactionSpaceCollection(heap);
- CompactionSpace* compaction_space = collection->Get(OLD_SPACE);
- CHECK(compaction_space != NULL);
- CHECK(compaction_space->SetUp());
-
- OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
- CHECK(old_space != NULL);
- CHECK(old_space->SetUp());
-
- // The linear allocation area already counts as used bytes, making
- // exact testing impossible.
- heap->DisableInlineAllocation();
-
- // Test:
- // * Allocate a backing store in old_space.
-  // * Compute the number num_rest_objects (kNumRestObjects) of kObjectSize
-  //   objects that fit into the rest of available memory.
- // * Add the rest of available memory to the compaction space.
- // * Allocate kNumRestObjects in the compaction space.
- // * Allocate one object more.
- // * Merge the compaction space and compare the expected number of pages.
-
- // Allocate a single object in old_space to initialize a backing page.
- old_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
- // Compute the number of objects that fit into the rest in old_space.
- intptr_t rest = static_cast<int>(old_space->Available());
- CHECK_GT(rest, 0);
- intptr_t num_rest_objects = rest / kObjectSize;
- // After allocating num_rest_objects in compaction_space we allocate a bit
- // more.
- const intptr_t kAdditionalCompactionMemory = kObjectSize;
- // We expect a single old_space page.
- const intptr_t kExpectedInitialOldSpacePages = 1;
- // We expect a single additional page in compaction space because we mostly
- // use external memory.
- const intptr_t kExpectedCompactionPages = 1;
- // We expect two pages to be reachable from old_space in the end.
- const intptr_t kExpectedOldSpacePagesAfterMerge = 2;
-
- CHECK_EQ(old_space->CountTotalPages(), kExpectedInitialOldSpacePages);
- CHECK_EQ(compaction_space->CountTotalPages(), 0);
- CHECK_EQ(compaction_space->Capacity(), 0);
- // Make the rest of memory available for compaction.
- old_space->DivideUponCompactionSpaces(&collection, 1, rest);
- CHECK_EQ(compaction_space->CountTotalPages(), 0);
- CHECK_EQ(compaction_space->Capacity(), rest);
- while (num_rest_objects-- > 0) {
- compaction_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
- }
- // We only used external memory so far.
- CHECK_EQ(compaction_space->CountTotalPages(), 0);
- // Additional allocation.
- compaction_space->AllocateRawUnaligned(kAdditionalCompactionMemory)
- .ToObjectChecked();
-  // Now the compaction space should have also acquired a page.
- CHECK_EQ(compaction_space->CountTotalPages(), kExpectedCompactionPages);
-
- old_space->MergeCompactionSpace(compaction_space);
- CHECK_EQ(old_space->CountTotalPages(), kExpectedOldSpacePagesAfterMerge);
-
- delete collection;
- delete old_space;
-
- allocator->TearDown();
- delete allocator;
-}
-
-
-CompactionSpaceCollection** HeapTester::InitializeCompactionSpaces(
- Heap* heap, int num_spaces) {
- CompactionSpaceCollection** spaces =
- new CompactionSpaceCollection*[num_spaces];
- for (int i = 0; i < num_spaces; i++) {
- spaces[i] = new CompactionSpaceCollection(heap);
- }
- return spaces;
-}
-
-
-void HeapTester::DestroyCompactionSpaces(CompactionSpaceCollection** spaces,
- int num_spaces) {
- for (int i = 0; i < num_spaces; i++) {
- delete spaces[i];
- }
- delete[] spaces;
-}
-
-
-void HeapTester::MergeCompactionSpaces(PagedSpace* space,
- CompactionSpaceCollection** spaces,
- int num_spaces) {
- AllocationSpace id = space->identity();
- for (int i = 0; i < num_spaces; i++) {
- space->MergeCompactionSpace(spaces[i]->Get(id));
- CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(), 0);
- CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Capacity(), 0);
- CHECK_EQ(spaces[i]->Get(id)->Waste(), 0);
- }
-}
-
-
-void HeapTester::AllocateInCompactionSpaces(CompactionSpaceCollection** spaces,
- AllocationSpace id, int num_spaces,
- int num_objects, int object_size) {
- for (int i = 0; i < num_spaces; i++) {
- for (int j = 0; j < num_objects; j++) {
- spaces[i]->Get(id)->AllocateRawUnaligned(object_size).ToObjectChecked();
- }
- spaces[i]->Get(id)->EmptyAllocationInfo();
- CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(),
- num_objects * object_size);
- CHECK_GE(spaces[i]->Get(id)->accounting_stats_.Capacity(),
- spaces[i]->Get(id)->accounting_stats_.Size());
- }
-}
-
-
-void HeapTester::CompactionStats(CompactionSpaceCollection** spaces,
- AllocationSpace id, int num_spaces,
- intptr_t* capacity, intptr_t* size) {
- *capacity = 0;
- *size = 0;
- for (int i = 0; i < num_spaces; i++) {
- *capacity += spaces[i]->Get(id)->accounting_stats_.Capacity();
- *size += spaces[i]->Get(id)->accounting_stats_.Size();
- }
-}
-
-
-void HeapTester::TestCompactionSpaceDivide(int num_additional_objects,
- int object_size,
- int num_compaction_spaces,
- int additional_capacity_in_bytes) {
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
- CHECK(old_space != nullptr);
- CHECK(old_space->SetUp());
- old_space->AllocateRawUnaligned(object_size).ToObjectChecked();
- old_space->EmptyAllocationInfo();
-
- intptr_t rest_capacity = old_space->accounting_stats_.Capacity() -
- old_space->accounting_stats_.Size();
- intptr_t capacity_for_compaction_space =
- rest_capacity / num_compaction_spaces;
- int num_objects_in_compaction_space =
- static_cast<int>(capacity_for_compaction_space) / object_size +
- num_additional_objects;
- CHECK_GT(num_objects_in_compaction_space, 0);
- intptr_t initial_old_space_capacity = old_space->accounting_stats_.Capacity();
-
- CompactionSpaceCollection** spaces =
- InitializeCompactionSpaces(heap, num_compaction_spaces);
- old_space->DivideUponCompactionSpaces(spaces, num_compaction_spaces,
- capacity_for_compaction_space);
-
- intptr_t compaction_capacity = 0;
- intptr_t compaction_size = 0;
- CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
- &compaction_capacity, &compaction_size);
-
- intptr_t old_space_capacity = old_space->accounting_stats_.Capacity();
- intptr_t old_space_size = old_space->accounting_stats_.Size();
- // Compaction space memory is subtracted from the original space's capacity.
- CHECK_EQ(old_space_capacity,
- initial_old_space_capacity - compaction_capacity);
- CHECK_EQ(compaction_size, 0);
-
- AllocateInCompactionSpaces(spaces, OLD_SPACE, num_compaction_spaces,
- num_objects_in_compaction_space, object_size);
-
- // Old space size and capacity should be the same as after dividing.
- CHECK_EQ(old_space->accounting_stats_.Size(), old_space_size);
- CHECK_EQ(old_space->accounting_stats_.Capacity(), old_space_capacity);
-
- CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
- &compaction_capacity, &compaction_size);
- MergeCompactionSpaces(old_space, spaces, num_compaction_spaces);
-
- CHECK_EQ(old_space->accounting_stats_.Capacity(),
- old_space_capacity + compaction_capacity);
- CHECK_EQ(old_space->accounting_stats_.Size(),
- old_space_size + compaction_size);
- // We check against the expected end capacity.
- CHECK_EQ(old_space->accounting_stats_.Capacity(),
- initial_old_space_capacity + additional_capacity_in_bytes);
-
- DestroyCompactionSpaces(spaces, num_compaction_spaces);
- delete old_space;
-}
-
-
-HEAP_TEST(CompactionSpaceDivideSinglePage) {
- const int kObjectSize = KB;
- const int kCompactionSpaces = 4;
- // Since the bound for objects is tight and the dividing is best effort, we
- // subtract some objects to make sure we still fit in the initial page.
- // A CHECK makes sure that the overall number of allocated objects stays
- // > 0.
- const int kAdditionalObjects = -10;
- const int kAdditionalCapacityRequired = 0;
- TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
- kAdditionalCapacityRequired);
-}
-
-
-HEAP_TEST(CompactionSpaceDivideMultiplePages) {
- const int kObjectSize = KB;
- const int kCompactionSpaces = 4;
- // Allocate half a page of objects to ensure that we need one more page per
- // compaction space.
- const int kAdditionalObjects = (Page::kPageSize / kObjectSize / 2);
- const int kAdditionalCapacityRequired =
- Page::kAllocatableMemory * kCompactionSpaces;
- TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
- kAdditionalCapacityRequired);
-}
-
-
TEST(LargeObjectSpace) {
v8::V8::Initialize();
@@ -744,50 +514,6 @@ TEST(SizeOfFirstPageIsLargeEnough) {
CHECK(isolate->heap()->lo_space()->IsEmpty());
}
-
-UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
- FLAG_target_semi_space_size = 2 * (Page::kPageSize / MB);
- if (FLAG_optimize_for_size) return;
-
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = v8::Isolate::New(create_params);
- {
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
- v8::Context::New(isolate)->Enter();
-
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
-
- NewSpace* new_space = i_isolate->heap()->new_space();
-
- // This test doesn't work if we start with a non-default new space
- // configuration.
- if (new_space->InitialTotalCapacity() == Page::kPageSize) {
- CHECK_EQ(new_space->CommittedMemory(), new_space->InitialTotalCapacity());
-
- // Fill up the first (and only) page of the semi space.
- FillCurrentPage(new_space);
-    // Try to allocate out of the new space. A new page should be added and
-    // the allocation should succeed.
- // allocation should succeed.
- v8::internal::AllocationResult allocation =
- new_space->AllocateRawUnaligned(80);
- CHECK(!allocation.IsRetry());
- CHECK_EQ(new_space->CommittedMemory(), 2 * Page::kPageSize);
-
- // Turn the allocation into a proper object so isolate teardown won't
- // crash.
- HeapObject* free_space = NULL;
- CHECK(allocation.To(&free_space));
- new_space->heap()->CreateFillerObjectAt(free_space->address(), 80);
- }
- }
- isolate->Dispose();
-}
-
-
static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
AllocationResult allocation = space->AllocateRawUnaligned(size);
CHECK(!allocation.IsRetry());
@@ -797,10 +523,27 @@ static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
return filler;
}
-class Observer : public InlineAllocationObserver {
+static HeapObject* AllocateUnaligned(PagedSpace* space, int size) {
+ AllocationResult allocation = space->AllocateRaw(size, kDoubleUnaligned);
+ CHECK(!allocation.IsRetry());
+ HeapObject* filler = NULL;
+ CHECK(allocation.To(&filler));
+ space->heap()->CreateFillerObjectAt(filler->address(), size);
+ return filler;
+}
+
+static HeapObject* AllocateUnaligned(LargeObjectSpace* space, int size) {
+ AllocationResult allocation = space->AllocateRaw(size, EXECUTABLE);
+ CHECK(!allocation.IsRetry());
+ HeapObject* filler = NULL;
+ CHECK(allocation.To(&filler));
+ return filler;
+}
+
+class Observer : public AllocationObserver {
public:
explicit Observer(intptr_t step_size)
- : InlineAllocationObserver(step_size), count_(0) {}
+ : AllocationObserver(step_size), count_(0) {}
void Step(int bytes_allocated, Address, size_t) override { count_++; }
@@ -810,85 +553,93 @@ class Observer : public InlineAllocationObserver {
int count_;
};
+template <typename T>
+void testAllocationObserver(Isolate* i_isolate, T* space) {
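+  // T is any space type with Add/RemoveAllocationObserver support.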
+ Observer observer1(128);
+ space->AddAllocationObserver(&observer1);
-UNINITIALIZED_TEST(InlineAllocationObserver) {
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = v8::Isolate::New(create_params);
- {
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
- v8::Context::New(isolate)->Enter();
+  // The observer should not get notified as long as we have allocated less
+  // than 128 bytes.
+ AllocateUnaligned(space, 64);
+ CHECK_EQ(observer1.count(), 0);
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ // The observer should get called when we have allocated exactly 128 bytes.
+ AllocateUnaligned(space, 64);
+ CHECK_EQ(observer1.count(), 1);
- NewSpace* new_space = i_isolate->heap()->new_space();
+ // Another >128 bytes should get another notification.
+ AllocateUnaligned(space, 136);
+ CHECK_EQ(observer1.count(), 2);
- Observer observer1(128);
- new_space->AddInlineAllocationObserver(&observer1);
+ // Allocating a large object should get only one notification.
+ AllocateUnaligned(space, 1024);
+ CHECK_EQ(observer1.count(), 3);
- // The observer should not get notified if we have only allocated less than
- // 128 bytes.
- AllocateUnaligned(new_space, 64);
- CHECK_EQ(observer1.count(), 0);
+ // Allocating another 2048 bytes in small objects should get 16
+ // notifications.
+ for (int i = 0; i < 64; ++i) {
+ AllocateUnaligned(space, 32);
+ }
+ CHECK_EQ(observer1.count(), 19);
- // The observer should get called when we have allocated exactly 128 bytes.
- AllocateUnaligned(new_space, 64);
- CHECK_EQ(observer1.count(), 1);
+ // Multiple observers should work.
+ Observer observer2(96);
+ space->AddAllocationObserver(&observer2);
- // Another >128 bytes should get another notification.
- AllocateUnaligned(new_space, 136);
- CHECK_EQ(observer1.count(), 2);
+ AllocateUnaligned(space, 2048);
+ CHECK_EQ(observer1.count(), 20);
+ CHECK_EQ(observer2.count(), 1);
- // Allocating a large object should get only one notification.
- AllocateUnaligned(new_space, 1024);
- CHECK_EQ(observer1.count(), 3);
+ AllocateUnaligned(space, 104);
+ CHECK_EQ(observer1.count(), 20);
+ CHECK_EQ(observer2.count(), 2);
- // Allocating another 2048 bytes in small objects should get 16
- // notifications.
- for (int i = 0; i < 64; ++i) {
- AllocateUnaligned(new_space, 32);
- }
- CHECK_EQ(observer1.count(), 19);
+ // Callback should stop getting called after an observer is removed.
+ space->RemoveAllocationObserver(&observer1);
- // Multiple observers should work.
- Observer observer2(96);
- new_space->AddInlineAllocationObserver(&observer2);
+ AllocateUnaligned(space, 384);
+ CHECK_EQ(observer1.count(), 20); // no more notifications.
+ CHECK_EQ(observer2.count(), 3); // this one is still active.
- AllocateUnaligned(new_space, 2048);
- CHECK_EQ(observer1.count(), 20);
- CHECK_EQ(observer2.count(), 1);
-
- AllocateUnaligned(new_space, 104);
- CHECK_EQ(observer1.count(), 20);
- CHECK_EQ(observer2.count(), 2);
+  // Ensure that PauseAllocationObserversScope works correctly.
+ AllocateUnaligned(space, 48);
+ CHECK_EQ(observer2.count(), 3);
+ {
+ PauseAllocationObserversScope pause_observers(i_isolate->heap());
+ CHECK_EQ(observer2.count(), 3);
+ AllocateUnaligned(space, 384);
+ CHECK_EQ(observer2.count(), 3);
+ }
+ CHECK_EQ(observer2.count(), 3);
+ // Coupled with the 48 bytes allocated before the pause, another 48 bytes
+ // allocated here should trigger a notification.
+ AllocateUnaligned(space, 48);
+ CHECK_EQ(observer2.count(), 4);
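+ // (observer2 uses a 96-byte step: the 48 bytes allocated before the pause
+ // plus these 48 bytes add up to exactly 96.)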
+
+ space->RemoveAllocationObserver(&observer2);
+ AllocateUnaligned(space, 384);
+ CHECK_EQ(observer1.count(), 20);
+ CHECK_EQ(observer2.count(), 4);
+}
- // Callback should stop getting called after an observer is removed.
- new_space->RemoveInlineAllocationObserver(&observer1);
+UNINITIALIZED_TEST(AllocationObserver) {
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::New(isolate)->Enter();
- AllocateUnaligned(new_space, 384);
- CHECK_EQ(observer1.count(), 20); // no more notifications.
- CHECK_EQ(observer2.count(), 3); // this one is still active.
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
- // Ensure that PauseInlineAllocationObserversScope work correctly.
- AllocateUnaligned(new_space, 48);
- CHECK_EQ(observer2.count(), 3);
- {
- PauseInlineAllocationObserversScope pause_observers(new_space);
- CHECK_EQ(observer2.count(), 3);
- AllocateUnaligned(new_space, 384);
- CHECK_EQ(observer2.count(), 3);
- }
- CHECK_EQ(observer2.count(), 3);
- // Coupled with the 48 bytes allocated before the pause, another 48 bytes
- // allocated here should trigger a notification.
- AllocateUnaligned(new_space, 48);
- CHECK_EQ(observer2.count(), 4);
-
- new_space->RemoveInlineAllocationObserver(&observer2);
- AllocateUnaligned(new_space, 384);
- CHECK_EQ(observer1.count(), 20);
- CHECK_EQ(observer2.count(), 4);
+ testAllocationObserver<NewSpace>(i_isolate, i_isolate->heap()->new_space());
+ // Old space is used here, but the code path is shared by all
+ // classes inheriting from PagedSpace.
+ testAllocationObserver<PagedSpace>(i_isolate,
+ i_isolate->heap()->old_space());
+ testAllocationObserver<LargeObjectSpace>(i_isolate,
+ i_isolate->heap()->lo_space());
}
isolate->Dispose();
}
@@ -908,16 +659,16 @@ UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
NewSpace* new_space = i_isolate->heap()->new_space();
Observer observer1(512);
- new_space->AddInlineAllocationObserver(&observer1);
+ new_space->AddAllocationObserver(&observer1);
Observer observer2(576);
- new_space->AddInlineAllocationObserver(&observer2);
+ new_space->AddAllocationObserver(&observer2);
for (int i = 0; i < 512; ++i) {
AllocateUnaligned(new_space, 32);
}
- new_space->RemoveInlineAllocationObserver(&observer1);
- new_space->RemoveInlineAllocationObserver(&observer2);
+ new_space->RemoveAllocationObserver(&observer1);
+ new_space->RemoveAllocationObserver(&observer2);
CHECK_EQ(observer1.count(), 32);
CHECK_EQ(observer2.count(), 28);
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
new file mode 100644
index 0000000000..d5e0456511
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -0,0 +1,301 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/interpreter/bytecode-expectations-printer.h"
+
+#include <iostream>
+#include <vector>
+
+#include "include/libplatform/libplatform.h"
+#include "include/v8.h"
+
+#include "src/base/logging.h"
+#include "src/base/smart-pointers.h"
+#include "src/compiler.h"
+
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-generator.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/interpreter/interpreter.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// static
+const char* const BytecodeExpectationsPrinter::kDefaultTopFunctionName =
+ "__genbckexp_wrapper__";
+
+v8::Local<v8::String> BytecodeExpectationsPrinter::V8StringFromUTF8(
+ const char* data) const {
+ return v8::String::NewFromUtf8(isolate_, data, v8::NewStringType::kNormal)
+ .ToLocalChecked();
+}
+
+std::string BytecodeExpectationsPrinter::WrapCodeInFunction(
+ const char* function_name, const std::string& function_body) const {
+ std::ostringstream program_stream;
+ program_stream << "function " << function_name << "() {" << function_body
+ << "}\n"
+ << function_name << "();";
+
+ return program_stream.str();
+}
+
+v8::Local<v8::Script> BytecodeExpectationsPrinter::Compile(
+ const char* program) const {
+ v8::Local<v8::String> source = V8StringFromUTF8(program);
+ return v8::Script::Compile(isolate_->GetCurrentContext(), source)
+ .ToLocalChecked();
+}
+
+void BytecodeExpectationsPrinter::Run(v8::Local<v8::Script> script) const {
+ (void)script->Run(isolate_->GetCurrentContext());
+}
+
+i::Handle<v8::internal::BytecodeArray>
+BytecodeExpectationsPrinter::GetBytecodeArrayForGlobal(
+ const char* global_name) const {
+ const v8::Local<v8::Context>& context = isolate_->GetCurrentContext();
+ v8::Local<v8::String> v8_global_name = V8StringFromUTF8(global_name);
+ v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+ context->Global()->Get(context, v8_global_name).ToLocalChecked());
+ i::Handle<i::JSFunction> js_function =
+ i::Handle<i::JSFunction>::cast(v8::Utils::OpenHandle(*function));
+
+ i::Handle<i::BytecodeArray> bytecodes =
+ i::handle(js_function->shared()->bytecode_array(), i_isolate());
+
+ return bytecodes;
+}
+
+i::Handle<i::BytecodeArray>
+BytecodeExpectationsPrinter::GetBytecodeArrayForScript(
+ v8::Local<v8::Script> script) const {
+ i::Handle<i::JSFunction> js_function = v8::Utils::OpenHandle(*script);
+ return i::handle(js_function->shared()->bytecode_array(), i_isolate());
+}
+
+void BytecodeExpectationsPrinter::PrintEscapedString(
+ std::ostream& stream, const std::string& string) const {
+ for (char c : string) {
+ switch (c) {
+ case '"':
+ stream << "\\\"";
+ break;
+ case '\\':
+ stream << "\\\\";
+ break;
+ default:
+ stream << c;
+ break;
+ }
+ }
+}
+
+void BytecodeExpectationsPrinter::PrintBytecodeOperand(
+ std::ostream& stream, const BytecodeArrayIterator& bytecode_iter,
+ const Bytecode& bytecode, int op_index, int parameter_count) const {
+ OperandType op_type = Bytecodes::GetOperandType(bytecode, op_index);
+ OperandSize op_size = Bytecodes::GetOperandSize(bytecode, op_index);
+
+ const char* size_tag;
+ switch (op_size) {
+ case OperandSize::kByte:
+ size_tag = "8";
+ break;
+ case OperandSize::kShort:
+ size_tag = "16";
+ break;
+ default:
+ UNREACHABLE();
+ return;
+ }
+
+ if (Bytecodes::IsRegisterOperandType(op_type)) {
+ Register register_value = bytecode_iter.GetRegisterOperand(op_index);
+ stream << 'R';
+ if (op_size != OperandSize::kByte) stream << size_tag;
+ if (register_value.is_new_target()) {
+ stream << "(new_target)";
+ } else if (register_value.is_current_context()) {
+ stream << "(context)";
+ } else if (register_value.is_function_closure()) {
+ stream << "(closure)";
+ } else if (register_value.is_parameter()) {
+ int parameter_index = register_value.ToParameterIndex(parameter_count);
+ if (parameter_index == 0) {
+ stream << "(this)";
+ } else {
+ stream << "(arg" << (parameter_index - 1) << ')';
+ }
+ } else {
+ stream << '(' << register_value.index() << ')';
+ }
+ } else {
+ stream << 'U' << size_tag << '(';
+
+ if (Bytecodes::IsImmediateOperandType(op_type)) {
+ // We need a cast, otherwise the result would be printed as a char.
+ stream << static_cast<int>(bytecode_iter.GetImmediateOperand(op_index));
+ } else if (Bytecodes::IsRegisterCountOperandType(op_type)) {
+ stream << bytecode_iter.GetRegisterCountOperand(op_index);
+ } else if (Bytecodes::IsIndexOperandType(op_type)) {
+ stream << bytecode_iter.GetIndexOperand(op_index);
+ } else {
+ UNREACHABLE();
+ }
+
+ stream << ')';
+ }
+}
+
+void BytecodeExpectationsPrinter::PrintBytecode(
+ std::ostream& stream, const BytecodeArrayIterator& bytecode_iter,
+ int parameter_count) const {
+ Bytecode bytecode = bytecode_iter.current_bytecode();
+
+ stream << "B(" << Bytecodes::ToString(bytecode) << ')';
+
+ int operands_count = Bytecodes::NumberOfOperands(bytecode);
+ for (int op_index = 0; op_index < operands_count; ++op_index) {
+ stream << ", ";
+ PrintBytecodeOperand(stream, bytecode_iter, bytecode, op_index,
+ parameter_count);
+ }
+}
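+
+// A printed bytecode thus looks like, e.g., "B(LdaSmi8), U8(1)" or
+// "B(Ldar), R(0)", matching the B/U8/R macros used in
+// test-bytecode-generator.cc.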
+
+void BytecodeExpectationsPrinter::PrintV8String(std::ostream& stream,
+ i::String* string) const {
+ stream << '"';
+ for (int i = 0, length = string->length(); i < length; ++i) {
+ stream << i::AsEscapedUC16ForJSON(string->Get(i));
+ }
+ stream << '"';
+}
+
+void BytecodeExpectationsPrinter::PrintConstant(
+ std::ostream& stream, i::Handle<i::Object> constant) const {
+ switch (const_pool_type_) {
+ case ConstantPoolType::kString:
+ CHECK(constant->IsString());
+ PrintV8String(stream, i::String::cast(*constant));
+ break;
+ case ConstantPoolType::kNumber:
+ if (constant->IsSmi()) {
+ i::Smi::cast(*constant)->SmiPrint(stream);
+ } else if (constant->IsHeapNumber()) {
+ i::HeapNumber::cast(*constant)->HeapNumberPrint(stream);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case ConstantPoolType::kMixed:
+ if (constant->IsSmi()) {
+ stream << "kInstanceTypeDontCare";
+ } else {
+ stream << "InstanceType::"
+ << i::HeapObject::cast(*constant)->map()->instance_type();
+ }
+ break;
+ case ConstantPoolType::kUnknown:
+ default:
+ UNREACHABLE();
+ return;
+ }
+}
+
+void BytecodeExpectationsPrinter::PrintFrameSize(
+ std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
+ const int kPointerSize = sizeof(void*);
+ int frame_size = bytecode_array->frame_size();
+
+ DCHECK_EQ(frame_size % kPointerSize, 0);
+ stream << "frame size: " << frame_size / kPointerSize;
+ if (frame_size > 0) stream << " # in multiples of sizeof(void*)";
+ stream << "\nparameter count: " << bytecode_array->parameter_count() << '\n';
+}
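+
+// Example: a 16-byte frame on a 64-bit target prints as
+// "frame size: 2 # in multiples of sizeof(void*)".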
+
+void BytecodeExpectationsPrinter::PrintBytecodeSequence(
+ std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
+ stream << "bytecodes: [\n";
+ BytecodeArrayIterator bytecode_iter(bytecode_array);
+ for (; !bytecode_iter.done(); bytecode_iter.Advance()) {
+ stream << " ";
+ PrintBytecode(stream, bytecode_iter, bytecode_array->parameter_count());
+ stream << ",\n";
+ }
+ stream << "]\n";
+}
+
+void BytecodeExpectationsPrinter::PrintConstantPool(
+ std::ostream& stream, i::FixedArray* constant_pool) const {
+ stream << "constant pool: [\n";
+ int num_constants = constant_pool->length();
+ if (num_constants > 0) {
+ for (int i = 0; i < num_constants; ++i) {
+ stream << " ";
+ PrintConstant(stream, i::FixedArray::get(constant_pool, i, i_isolate()));
+ stream << ",\n";
+ }
+ }
+ stream << "]\n";
+}
+
+void BytecodeExpectationsPrinter::PrintCodeSnippet(
+ std::ostream& stream, const std::string& body) const {
+ stream << "snippet: \"\n";
+ std::stringstream body_stream(body);
+ std::string body_line;
+ while (std::getline(body_stream, body_line)) {
+ stream << " ";
+ PrintEscapedString(stream, body_line);
+ stream << '\n';
+ }
+ stream << "\"\n";
+}
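+
+// PrintCodeSnippet() renders, e.g., the body "return 1;\n" as:
+//   snippet: "
+//     return 1;
+//   "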
+
+void BytecodeExpectationsPrinter::PrintHandlers(
+ std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
+ stream << "handlers: [\n";
+ HandlerTable* table = HandlerTable::cast(bytecode_array->handler_table());
+ for (int i = 0, num_entries = table->NumberOfRangeEntries(); i < num_entries;
+ ++i) {
+ stream << " [" << table->GetRangeStart(i) << ", " << table->GetRangeEnd(i)
+ << ", " << table->GetRangeHandler(i) << "],\n";
+ }
+ stream << "]\n";
+}
+
+void BytecodeExpectationsPrinter::PrintBytecodeArray(
+ std::ostream& stream, i::Handle<i::BytecodeArray> bytecode_array) const {
+ PrintFrameSize(stream, bytecode_array);
+ PrintBytecodeSequence(stream, bytecode_array);
+ PrintConstantPool(stream, bytecode_array->constant_pool());
+ PrintHandlers(stream, bytecode_array);
+}
+
+void BytecodeExpectationsPrinter::PrintExpectation(
+ std::ostream& stream, const std::string& snippet) const {
+ std::string source_code =
+ wrap_ ? WrapCodeInFunction(test_function_name_.c_str(), snippet)
+ : snippet;
+
+ v8::Local<v8::Script> script = Compile(source_code.c_str());
+
+ if (execute_) Run(script);
+
+ i::Handle<i::BytecodeArray> bytecode_array =
+ top_level_ ? GetBytecodeArrayForScript(script)
+ : GetBytecodeArrayForGlobal(test_function_name_.c_str());
+
+ stream << "---\n";
+ PrintCodeSnippet(stream, snippet);
+ PrintBytecodeArray(stream, bytecode_array);
+ stream << '\n';
+}
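+
+// Each emitted record is therefore "---", the escaped snippet, the frame
+// size and parameter count, then the bytecodes, constant pool and handler
+// lists, followed by a blank line.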
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
new file mode 100644
index 0000000000..236a7d4190
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
@@ -0,0 +1,119 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TEST_CCTEST_INTERPRETER_BYTECODE_EXPECTATIONS_PRINTER_H_
+#define TEST_CCTEST_INTERPRETER_BYTECODE_EXPECTATIONS_PRINTER_H_
+
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include "src/interpreter/bytecodes.h"
+#include "src/objects.h"
+
+namespace v8 {
+
+class Isolate;
+
+namespace internal {
+namespace interpreter {
+
+class BytecodeArrayIterator;
+
+class BytecodeExpectationsPrinter final {
+ public:
+ enum class ConstantPoolType {
+ kUnknown,
+ kString,
+ kNumber,
+ kMixed,
+ };
+
+ BytecodeExpectationsPrinter(v8::Isolate* i,
+ ConstantPoolType t = ConstantPoolType::kMixed)
+ : isolate_(i),
+ const_pool_type_(t),
+ execute_(true),
+ wrap_(true),
+ top_level_(false),  // Initialize explicitly; it is read via top_level().
+ test_function_name_(kDefaultTopFunctionName) {}
+
+ void PrintExpectation(std::ostream& stream, // NOLINT
+ const std::string& snippet) const;
+
+ void set_constant_pool_type(ConstantPoolType const_pool_type) {
+ const_pool_type_ = const_pool_type;
+ }
+ ConstantPoolType const_pool_type() const { return const_pool_type_; }
+
+ void set_execute(bool execute) { execute_ = execute; }
+ bool execute() const { return execute_; }
+
+ void set_wrap(bool wrap) { wrap_ = wrap; }
+ bool wrap() const { return wrap_; }
+
+ void set_top_level(bool top_level) { top_level_ = top_level; }
+ bool top_level() const { return top_level_; }
+
+ void set_test_function_name(const std::string& test_function_name) {
+ test_function_name_ = test_function_name;
+ }
+ std::string test_function_name() const { return test_function_name_; }
+
+ private:
+ void PrintEscapedString(std::ostream& stream, // NOLINT
+ const std::string& string) const;
+ void PrintBytecodeOperand(std::ostream& stream, // NOLINT
+ const BytecodeArrayIterator& bytecode_iter,
+ const Bytecode& bytecode, int op_index,
+ int parameter_count) const;
+ void PrintBytecode(std::ostream& stream, // NOLINT
+ const BytecodeArrayIterator& bytecode_iter,
+ int parameter_count) const;
+ void PrintV8String(std::ostream& stream, // NOLINT
+ i::String* string) const;
+ void PrintConstant(std::ostream& stream, // NOLINT
+ i::Handle<i::Object> constant) const;
+ void PrintFrameSize(std::ostream& stream, // NOLINT
+ i::Handle<i::BytecodeArray> bytecode_array) const;
+ void PrintBytecodeSequence(std::ostream& stream, // NOLINT
+ i::Handle<i::BytecodeArray> bytecode_array) const;
+ void PrintConstantPool(std::ostream& stream, // NOLINT
+ i::FixedArray* constant_pool) const;
+ void PrintCodeSnippet(std::ostream& stream, // NOLINT
+ const std::string& body) const;
+ void PrintBytecodeArray(std::ostream& stream, // NOLINT
+ i::Handle<i::BytecodeArray> bytecode_array) const;
+ void PrintHandlers(std::ostream& stream, // NOLINT
+ i::Handle<i::BytecodeArray> bytecode_array) const;
+
+ v8::Local<v8::String> V8StringFromUTF8(const char* data) const;
+ std::string WrapCodeInFunction(const char* function_name,
+ const std::string& function_body) const;
+
+ v8::Local<v8::Script> Compile(const char* program) const;
+ void Run(v8::Local<v8::Script> script) const;
+ i::Handle<i::BytecodeArray> GetBytecodeArrayForGlobal(
+ const char* global_name) const;
+ i::Handle<v8::internal::BytecodeArray> GetBytecodeArrayForScript(
+ v8::Local<v8::Script> script) const;
+
+ i::Isolate* i_isolate() const {
+ return reinterpret_cast<i::Isolate*>(isolate_);
+ }
+
+ v8::Isolate* isolate_;
+ ConstantPoolType const_pool_type_;
+ bool execute_;
+ bool wrap_;
+ bool top_level_;
+ std::string test_function_name_;
+
+ static const char* const kDefaultTopFunctionName;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // TEST_CCTEST_INTERPRETER_BYTECODE_EXPECTATIONS_PRINTER_H_
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
new file mode 100644
index 0000000000..567aa41a8e
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -0,0 +1,469 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstring>
+#include <fstream>
+
+#include "test/cctest/interpreter/bytecode-expectations-printer.h"
+
+#include "include/libplatform/libplatform.h"
+#include "include/v8.h"
+
+#include "src/base/logging.h"
+#include "src/base/smart-pointers.h"
+#include "src/compiler.h"
+#include "src/interpreter/interpreter.h"
+
+using v8::internal::interpreter::BytecodeExpectationsPrinter;
+
+namespace {
+
+class ProgramOptions final {
+ public:
+ static ProgramOptions FromCommandLine(int argc, char** argv);
+
+ ProgramOptions()
+ : parsing_failed_(false),
+ print_help_(false),
+ read_raw_js_snippet_(false),
+ read_from_stdin_(false),
+ rebaseline_(false),
+ wrap_(true),
+ execute_(true),
+ top_level_(false),
+ legacy_const_(false),
+ do_expressions_(false),
+ const_pool_type_(
+ BytecodeExpectationsPrinter::ConstantPoolType::kMixed) {}
+
+ bool Validate() const;
+ void UpdateFromHeader(std::istream& stream); // NOLINT
+ void PrintHeader(std::ostream& stream) const; // NOLINT
+
+ bool parsing_failed() const { return parsing_failed_; }
+ bool print_help() const { return print_help_; }
+ bool read_raw_js_snippet() const { return read_raw_js_snippet_; }
+ bool read_from_stdin() const { return read_from_stdin_; }
+ bool write_to_stdout() const {
+ return output_filename_.empty() && !rebaseline_;
+ }
+ bool rebaseline() const { return rebaseline_; }
+ bool wrap() const { return wrap_; }
+ bool execute() const { return execute_; }
+ bool top_level() const { return top_level_; }
+ bool legacy_const() const { return legacy_const_; }
+ bool do_expressions() const { return do_expressions_; }
+ BytecodeExpectationsPrinter::ConstantPoolType const_pool_type() const {
+ return const_pool_type_;
+ }
+ std::string input_filename() const { return input_filename_; }
+ std::string output_filename() const { return output_filename_; }
+ std::string test_function_name() const { return test_function_name_; }
+
+ private:
+ bool parsing_failed_;
+ bool print_help_;
+ bool read_raw_js_snippet_;
+ bool read_from_stdin_;
+ bool rebaseline_;
+ bool wrap_;
+ bool execute_;
+ bool top_level_;
+ bool legacy_const_;
+ bool do_expressions_;
+ BytecodeExpectationsPrinter::ConstantPoolType const_pool_type_;
+ std::string input_filename_;
+ std::string output_filename_;
+ std::string test_function_name_;
+};
+
+class ArrayBufferAllocator final : public v8::ArrayBuffer::Allocator {
+ public:
+ void* Allocate(size_t length) override {
+ void* data = AllocateUninitialized(length);
+ if (data != nullptr) memset(data, 0, length);
+ return data;
+ }
+ void* AllocateUninitialized(size_t length) override { return malloc(length); }
+ void Free(void* data, size_t) override { free(data); }
+};
+
+class V8InitializationScope final {
+ public:
+ explicit V8InitializationScope(const char* exec_path);
+ ~V8InitializationScope();
+
+ v8::Platform* platform() const { return platform_.get(); }
+ v8::Isolate* isolate() const { return isolate_; }
+
+ private:
+ v8::base::SmartPointer<v8::Platform> platform_;
+ v8::Isolate* isolate_;
+
+ DISALLOW_COPY_AND_ASSIGN(V8InitializationScope);
+};
+
+BytecodeExpectationsPrinter::ConstantPoolType ParseConstantPoolType(
+ const char* type_string) {
+ if (strcmp(type_string, "number") == 0) {
+ return BytecodeExpectationsPrinter::ConstantPoolType::kNumber;
+ } else if (strcmp(type_string, "string") == 0) {
+ return BytecodeExpectationsPrinter::ConstantPoolType::kString;
+ } else if (strcmp(type_string, "mixed") == 0) {
+ return BytecodeExpectationsPrinter::ConstantPoolType::kMixed;
+ }
+ return BytecodeExpectationsPrinter::ConstantPoolType::kUnknown;
+}
+
+const char* ConstantPoolTypeToString(
+ BytecodeExpectationsPrinter::ConstantPoolType type) {
+ switch (type) {
+ case BytecodeExpectationsPrinter::ConstantPoolType::kNumber:
+ return "number";
+ case BytecodeExpectationsPrinter::ConstantPoolType::kMixed:
+ return "mixed";
+ case BytecodeExpectationsPrinter::ConstantPoolType::kString:
+ return "string";
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
+bool ParseBoolean(const char* string) {
+ if (strcmp(string, "yes") == 0) {
+ return true;
+ } else if (strcmp(string, "no") == 0) {
+ return false;
+ } else {
+ UNREACHABLE();
+ return false;
+ }
+}
+
+const char* BooleanToString(bool value) { return value ? "yes" : "no"; }
+
+// static
+ProgramOptions ProgramOptions::FromCommandLine(int argc, char** argv) {
+ ProgramOptions options;
+
+ for (int i = 1; i < argc; ++i) {
+ if (strcmp(argv[i], "--help") == 0) {
+ options.print_help_ = true;
+ } else if (strcmp(argv[i], "--raw-js") == 0) {
+ options.read_raw_js_snippet_ = true;
+ } else if (strncmp(argv[i], "--pool-type=", 12) == 0) {
+ options.const_pool_type_ = ParseConstantPoolType(argv[i] + 12);
+ } else if (strcmp(argv[i], "--stdin") == 0) {
+ options.read_from_stdin_ = true;
+ } else if (strcmp(argv[i], "--rebaseline") == 0) {
+ options.rebaseline_ = true;
+ } else if (strcmp(argv[i], "--no-wrap") == 0) {
+ options.wrap_ = false;
+ } else if (strcmp(argv[i], "--no-execute") == 0) {
+ options.execute_ = false;
+ } else if (strcmp(argv[i], "--top-level") == 0) {
+ options.top_level_ = true;
+ } else if (strcmp(argv[i], "--legacy-const") == 0) {
+ options.legacy_const_ = true;
+ } else if (strcmp(argv[i], "--do-expressions") == 0) {
+ options.do_expressions_ = true;
+ } else if (strncmp(argv[i], "--output=", 9) == 0) {
+ options.output_filename_ = argv[i] + 9;
+ } else if (strncmp(argv[i], "--test-function-name=", 21) == 0) {
+ options.test_function_name_ = argv[i] + 21;
+ } else if (strncmp(argv[i], "--", 2) != 0) { // It doesn't start with --
+ if (!options.input_filename_.empty()) {
+ std::cerr << "ERROR: More than one input file specified\n";
+ options.parsing_failed_ = true;
+ break;
+ }
+ options.input_filename_ = argv[i];
+ } else {
+ std::cerr << "ERROR: Unknonwn option " << argv[i] << "\n";
+ options.parsing_failed_ = true;
+ break;
+ }
+ }
+
+ return options;
+}
+
+bool ProgramOptions::Validate() const {
+ if (parsing_failed_) return false;
+ if (print_help_) return true;
+
+ if (const_pool_type_ ==
+ BytecodeExpectationsPrinter::ConstantPoolType::kUnknown) {
+ std::cerr << "ERROR: Unknown constant pool type.\n";
+ return false;
+ }
+
+ if (!read_from_stdin_ && input_filename_.empty()) {
+ std::cerr << "ERROR: No input file specified.\n";
+ return false;
+ }
+
+ if (read_from_stdin_ && !input_filename_.empty()) {
+ std::cerr << "ERROR: Reading from stdin, but input files supplied.\n";
+ return false;
+ }
+
+ if (rebaseline_ && read_raw_js_snippet_) {
+ std::cerr << "ERROR: Cannot use --rebaseline on a raw JS snippet.\n";
+ return false;
+ }
+
+ if (top_level_ && !test_function_name_.empty()) {
+ std::cerr << "ERROR: test function name specified while processing "
+ "top level code.\n";
+ return false;
+ }
+
+ return true;
+}
+
+void ProgramOptions::UpdateFromHeader(std::istream& stream) {
+ std::string line;
+
+ // Skip to the beginning of the options header
+ while (std::getline(stream, line)) {
+ if (line == "---") break;
+ }
+
+ while (std::getline(stream, line)) {
+ if (line.compare(0, 11, "pool type: ") == 0) {
+ const_pool_type_ = ParseConstantPoolType(line.c_str() + 11);
+ } else if (line.compare(0, 9, "execute: ") == 0) {
+ execute_ = ParseBoolean(line.c_str() + 9);
+ } else if (line.compare(0, 6, "wrap: ") == 0) {
+ wrap_ = ParseBoolean(line.c_str() + 6);
+ } else if (line.compare(0, 20, "test function name: ") == 0) {
+ test_function_name_ = line.c_str() + 20;
+ } else if (line.compare(0, 11, "top level: ") == 0) {
+ top_level_ = ParseBoolean(line.c_str() + 11);
+ } else if (line.compare(0, 14, "legacy const: ") == 0) {
+ legacy_const_ = ParseBoolean(line.c_str() + 14);
+ } else if (line.compare(0, 16, "do expressions: ") == 0) {
+ do_expressions_ = ParseBoolean(line.c_str() + 16);
+ } else if (line == "---") {
+ break;
+ } else if (line.empty()) {
+ continue;
+ } else {
+ UNREACHABLE();
+ return;
+ }
+ }
+}
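+
+// The header consumed above sits between the first "---" and the next one
+// (which opens the first expectation record), e.g.:
+//   ---
+//   pool type: mixed
+//   execute: yes
+//   wrap: yes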
+
+void ProgramOptions::PrintHeader(std::ostream& stream) const { // NOLINT
+ stream << "---"
+ "\npool type: "
+ << ConstantPoolTypeToString(const_pool_type_)
+ << "\nexecute: " << BooleanToString(execute_)
+ << "\nwrap: " << BooleanToString(wrap_);
+
+ if (!test_function_name_.empty()) {
+ stream << "\ntest function name: " << test_function_name_;
+ }
+
+ if (top_level_) stream << "\ntop level: yes";
+ if (legacy_const_) stream << "\nlegacy const: yes";
+ if (do_expressions_) stream << "\ndo expressions: yes";
+
+ stream << "\n\n";
+}
+
+V8InitializationScope::V8InitializationScope(const char* exec_path)
+ : platform_(v8::platform::CreateDefaultPlatform()) {
+ i::FLAG_ignition = true;
+ i::FLAG_always_opt = false;
+ i::FLAG_allow_natives_syntax = true;
+
+ v8::V8::InitializeICU();
+ v8::V8::InitializeExternalStartupData(exec_path);
+ v8::V8::InitializePlatform(platform_.get());
+ v8::V8::Initialize();
+
+ // The isolate keeps a pointer to this allocator after the constructor
+ // returns, so give it static storage duration to avoid a dangling pointer.
+ static ArrayBufferAllocator allocator;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = &allocator;
+
+ isolate_ = v8::Isolate::New(create_params);
+}
+
+V8InitializationScope::~V8InitializationScope() {
+ isolate_->Dispose();
+ v8::V8::Dispose();
+ v8::V8::ShutdownPlatform();
+}
+
+std::string ReadRawJSSnippet(std::istream& stream) { // NOLINT
+ std::stringstream body_buffer;
+ CHECK(body_buffer << stream.rdbuf());
+ return body_buffer.str();
+}
+
+bool ReadNextSnippet(std::istream& stream, std::string* string_out) { // NOLINT
+ std::string line;
+ bool found_begin_snippet = false;
+ string_out->clear();
+ while (std::getline(stream, line)) {
+ if (line == "snippet: \"") {
+ found_begin_snippet = true;
+ continue;
+ }
+ if (!found_begin_snippet) continue;
+ if (line == "\"") return true;
+ CHECK_GE(line.size(), 2u); // We should have the indent
+ string_out->append(line.begin() + 2, line.end());
+ *string_out += '\n';
+ }
+ return false;
+}
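+
+// ReadNextSnippet() expects the format produced by
+// BytecodeExpectationsPrinter::PrintCodeSnippet(): a 'snippet: "' line, the
+// two-space-indented escaped body, and a closing '"' line.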
+
+std::string UnescapeString(const std::string& escaped_string) {
+ std::string unescaped_string;
+ bool previous_was_backslash = false;
+ for (char c : escaped_string) {
+ if (previous_was_backslash) {
+ // If it was not an escape sequence, emit the previous backslash
+ if (c != '\\' && c != '"') unescaped_string += '\\';
+ unescaped_string += c;
+ previous_was_backslash = false;
+ } else {
+ if (c == '\\') {
+ previous_was_backslash = true;
+ // Defer emission to the point where we can check if it was an escape.
+ } else {
+ unescaped_string += c;
+ }
+ }
+ }
+ return unescaped_string;
+}
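+
+// E.g. the escaped body line print(\"hi\") becomes print("hi"); only \\ and
+// \" are treated as escapes, any other backslash is emitted verbatim.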
+
+void ExtractSnippets(std::vector<std::string>* snippet_list,
+ std::istream& body_stream, // NOLINT
+ bool read_raw_js_snippet) {
+ if (read_raw_js_snippet) {
+ snippet_list->push_back(ReadRawJSSnippet(body_stream));
+ } else {
+ std::string snippet;
+ while (ReadNextSnippet(body_stream, &snippet)) {
+ snippet_list->push_back(UnescapeString(snippet));
+ }
+ }
+}
+
+void GenerateExpectationsFile(std::ostream& stream, // NOLINT
+ const std::vector<std::string>& snippet_list,
+ const ProgramOptions& options,
+ const char* exec_path) {
+ V8InitializationScope platform(exec_path);
+ {
+ v8::Isolate::Scope isolate_scope(platform.isolate());
+ v8::HandleScope handle_scope(platform.isolate());
+ v8::Local<v8::Context> context = v8::Context::New(platform.isolate());
+ v8::Context::Scope context_scope(context);
+
+ BytecodeExpectationsPrinter printer(platform.isolate(),
+ options.const_pool_type());
+ printer.set_wrap(options.wrap());
+ printer.set_execute(options.execute());
+ printer.set_top_level(options.top_level());
+ if (!options.test_function_name().empty()) {
+ printer.set_test_function_name(options.test_function_name());
+ }
+
+ if (options.legacy_const()) i::FLAG_legacy_const = true;
+ if (options.do_expressions()) i::FLAG_harmony_do_expressions = true;
+
+ stream << "#\n# Autogenerated by generate-bytecode-expectations\n#\n\n";
+ options.PrintHeader(stream);
+ for (const std::string& snippet : snippet_list) {
+ printer.PrintExpectation(stream, snippet);
+ }
+ }
+}
+
+void PrintUsage(const char* exec_path) {
+ std::cerr
+ << "\nUsage: " << exec_path
+ << " [OPTIONS]... [INPUT FILE]\n\n"
+ "Options:\n"
+ " --help Print this help message.\n"
+ " --raw-js Read raw JavaScript, instead of the output format.\n"
+ " --stdin Read from standard input instead of file.\n"
+ " --rebaseline Rebaseline input snippet file.\n"
+ " --no-wrap Do not wrap the snippet in a function.\n"
+ " --no-execute Do not execute after compilation.\n"
+ " --test-function-name=foo "
+ "Specify the name of the test function.\n"
+ " --top-level Process top level code, not the top-level function."
+ " --legacy-const Enable legacy_const flag.\n"
+ " --do-expressions Enable harmony_do_expressions flag.\n"
+ " --output=file.name\n"
+ " Specify the output file. If not specified, output goes to "
+ "stdout.\n"
+ " --pool-type=(number|string|mixed)\n"
+ " Specify the type of the entries in the constant pool "
+ "(default: mixed).\n"
+ "\n"
+ "When using --rebaseline, flags --no-wrap, --no-execute, "
+ "--test-function-name\nand --pool-type will be overridden by the "
+ "options specified in the input file\nheader.\n\n"
+ "Each raw JavaScript file is interpreted as a single snippet.\n\n"
+ "This tool is intended as a help in writing tests.\n"
+ "Please, DO NOT blindly copy and paste the output "
+ "into the test suite.\n";
+}
+
+} // namespace
+
+int main(int argc, char** argv) {
+ ProgramOptions options = ProgramOptions::FromCommandLine(argc, argv);
+
+ if (!options.Validate() || options.print_help()) {
+ PrintUsage(argv[0]);
+ return options.print_help() ? 0 : 1;
+ }
+
+ std::ifstream input_file_handle;
+ if (!options.read_from_stdin()) {
+ input_file_handle.open(options.input_filename().c_str());
+ if (!input_file_handle.is_open()) {
+ std::cerr << "ERROR: Could not open '" << options.input_filename()
+ << "' for reading.\n";
+ return 2;
+ }
+ }
+ std::istream& input_stream =
+ options.read_from_stdin() ? std::cin : input_file_handle;
+
+ if (options.rebaseline()) {
+ options.UpdateFromHeader(input_stream);
+ CHECK(options.Validate());
+ }
+
+ std::vector<std::string> snippet_list;
+ ExtractSnippets(&snippet_list, input_stream, options.read_raw_js_snippet());
+
+ std::ofstream output_file_handle;
+ if (!options.write_to_stdout()) {
+ output_file_handle.open(options.rebaseline()
+ ? options.input_filename().c_str()
+ : options.output_filename().c_str());
+ if (!output_file_handle.is_open()) {
+ std::cerr << "ERROR: Could not open '" << options.output_filename()
+ << "' for writing.\n";
+ return 3;
+ }
+ }
+ std::ostream& output_stream =
+ options.write_to_stdout() ? std::cout : output_file_handle;
+
+ GenerateExpectationsFile(output_stream, snippet_list, options, argv[0]);
+}
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index 2c06da26a1..73767eb3c6 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -15,6 +15,8 @@ namespace v8 {
namespace internal {
namespace interpreter {
+static const InstanceType kInstanceTypeDontCare = static_cast<InstanceType>(-1);
+
class BytecodeGeneratorHelper {
public:
const char* kFunctionName = "f";
@@ -24,12 +26,9 @@ class BytecodeGeneratorHelper {
BytecodeGeneratorHelper() {
i::FLAG_ignition = true;
- i::FLAG_ignition_fake_try_catch = true;
- i::FLAG_ignition_fallback_on_eval_and_catch = false;
i::FLAG_ignition_filter = StrDup(kFunctionName);
i::FLAG_always_opt = false;
i::FLAG_allow_natives_syntax = true;
- i::FLAG_legacy_const = true;
CcTest::i_isolate()->interpreter()->Initialize();
}
@@ -94,6 +93,7 @@ class BytecodeGeneratorHelper {
#define B(x) static_cast<uint8_t>(Bytecode::k##x)
#define U8(x) static_cast<uint8_t>((x) & 0xff)
#define R(x) static_cast<uint8_t>(-(x) & 0xff)
+#define R16(x) U16(-(x))
#define A(x, n) R(helper.kLastParamIndex - (n) + 1 + (x))
#define THIS(n) A(0, n)
#if defined(V8_TARGET_LITTLE_ENDIAN)
@@ -160,6 +160,12 @@ struct ExpectedSnippet {
const uint8_t bytecode[2048];
int constant_count;
T constants[C];
+ int handler_count;
+ struct {
+ int start;
+ int end;
+ int handler;
+ } handlers[C];
};
@@ -186,7 +192,9 @@ static void CheckConstant(Handle<Object> expected, Object* actual) {
static void CheckConstant(InstanceType expected, Object* actual) {
- CHECK_EQ(expected, HeapObject::cast(actual)->map()->instance_type());
+ if (expected != kInstanceTypeDontCare) {
+ CHECK_EQ(expected, HeapObject::cast(actual)->map()->instance_type());
+ }
}
@@ -204,6 +212,17 @@ static void CheckBytecodeArrayEqual(const ExpectedSnippet<T, C>& expected,
CheckConstant(expected.constants[i], actual->constant_pool()->get(i));
}
}
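+ // Each expected handler entry is a [start, end, handler] triple (cf. the
+ // ranges printed by bytecode-expectations-printer.cc).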
+ if (expected.handler_count == 0) {
+ CHECK_EQ(CcTest::heap()->empty_fixed_array(), actual->handler_table());
+ } else {
+ HandlerTable* table = HandlerTable::cast(actual->handler_table());
+ CHECK_EQ(expected.handler_count, table->NumberOfRangeEntries());
+ for (int i = 0; i < expected.handler_count; i++) {
+ CHECK_EQ(expected.handlers[i].start, table->GetRangeStart(i));
+ CHECK_EQ(expected.handlers[i].end, table->GetRangeEnd(i));
+ CHECK_EQ(expected.handlers[i].handler, table->GetRangeHandler(i));
+ }
+ }
BytecodeArrayIterator iterator(actual);
int i = 0;
@@ -258,18 +277,110 @@ TEST(PrimitiveReturnStatements) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ // clang-format off
ExpectedSnippet<int> snippets[] = {
- {"", 0, 1, 2, {B(LdaUndefined), B(Return)}, 0},
- {"return;", 0, 1, 2, {B(LdaUndefined), B(Return)}, 0},
- {"return null;", 0, 1, 2, {B(LdaNull), B(Return)}, 0},
- {"return true;", 0, 1, 2, {B(LdaTrue), B(Return)}, 0},
- {"return false;", 0, 1, 2, {B(LdaFalse), B(Return)}, 0},
- {"return 0;", 0, 1, 2, {B(LdaZero), B(Return)}, 0},
- {"return +1;", 0, 1, 3, {B(LdaSmi8), U8(1), B(Return)}, 0},
- {"return -1;", 0, 1, 3, {B(LdaSmi8), U8(-1), B(Return)}, 0},
- {"return +127;", 0, 1, 3, {B(LdaSmi8), U8(127), B(Return)}, 0},
- {"return -128;", 0, 1, 3, {B(LdaSmi8), U8(-128), B(Return)}, 0},
+ {"",
+ 0,
+ 1,
+ 3,
+ {
+ B(StackCheck), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 0},
+ {"return;",
+ 0,
+ 1,
+ 3,
+ {
+ B(StackCheck), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 0},
+ {"return null;",
+ 0,
+ 1,
+ 3,
+ {
+ B(StackCheck), //
+ B(LdaNull), //
+ B(Return) //
+ },
+ 0},
+ {"return true;",
+ 0,
+ 1,
+ 3,
+ {
+ B(StackCheck), //
+ B(LdaTrue), //
+ B(Return) //
+ },
+ 0},
+ {"return false;",
+ 0,
+ 1,
+ 3,
+ {
+ B(StackCheck), //
+ B(LdaFalse), //
+ B(Return) //
+ },
+ 0},
+ {"return 0;",
+ 0,
+ 1,
+ 3,
+ {
+ B(StackCheck), //
+ B(LdaZero), //
+ B(Return) //
+ },
+ 0},
+ {"return +1;",
+ 0,
+ 1,
+ 4,
+ {
+ B(StackCheck), //
+ B(LdaSmi8), U8(1), //
+ B(Return) //
+ },
+ 0},
+ {"return -1;",
+ 0,
+ 1,
+ 4,
+ {
+ B(StackCheck), //
+ B(LdaSmi8), U8(-1), //
+ B(Return) //
+ },
+ 0},
+ {"return +127;",
+ 0,
+ 1,
+ 4,
+ {
+ B(StackCheck), //
+ B(LdaSmi8), U8(127), //
+ B(Return) //
+ },
+ 0},
+ {"return -128;",
+ 0,
+ 1,
+ 4,
+ {
+ B(StackCheck), //
+ B(LdaSmi8), U8(-128), //
+ B(Return) //
+ },
+ 0},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -283,20 +394,23 @@ TEST(PrimitiveExpressions) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ // clang-format off
ExpectedSnippet<int> snippets[] = {
{"var x = 0; return x;",
kPointerSize,
1,
- 4,
- {B(LdaZero), //
+ 5,
+ {B(StackCheck), //
+ B(LdaZero), //
B(Star), R(0), //
B(Return)},
0},
{"var x = 0; return x + 3;",
2 * kPointerSize,
1,
- 10,
- {B(LdaZero), //
+ 11,
+ {B(StackCheck), //
+ B(LdaZero), //
B(Star), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(3), //
@@ -306,8 +420,9 @@ TEST(PrimitiveExpressions) {
{"var x = 0; return x - 3;",
2 * kPointerSize,
1,
- 10,
- {B(LdaZero), //
+ 11,
+ {B(StackCheck), //
+ B(LdaZero), //
B(Star), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(3), //
@@ -317,8 +432,9 @@ TEST(PrimitiveExpressions) {
{"var x = 4; return x * 3;",
2 * kPointerSize,
1,
- 11,
- {B(LdaSmi8), U8(4), //
+ 12,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(4), //
B(Star), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(3), //
@@ -328,8 +444,9 @@ TEST(PrimitiveExpressions) {
{"var x = 4; return x / 3;",
2 * kPointerSize,
1,
- 11,
- {B(LdaSmi8), U8(4), //
+ 12,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(4), //
B(Star), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(3), //
@@ -339,8 +456,9 @@ TEST(PrimitiveExpressions) {
{"var x = 4; return x % 3;",
2 * kPointerSize,
1,
- 11,
- {B(LdaSmi8), U8(4), //
+ 12,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(4), //
B(Star), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(3), //
@@ -350,8 +468,9 @@ TEST(PrimitiveExpressions) {
{"var x = 1; return x | 2;",
2 * kPointerSize,
1,
- 11,
- {B(LdaSmi8), U8(1), //
+ 12,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(2), //
@@ -361,8 +480,9 @@ TEST(PrimitiveExpressions) {
{"var x = 1; return x ^ 2;",
2 * kPointerSize,
1,
- 11,
- {B(LdaSmi8), U8(1), //
+ 12,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(2), //
@@ -372,8 +492,9 @@ TEST(PrimitiveExpressions) {
{"var x = 1; return x & 2;",
2 * kPointerSize,
1,
- 11,
- {B(LdaSmi8), U8(1), //
+ 12,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(2), //
@@ -383,8 +504,9 @@ TEST(PrimitiveExpressions) {
{"var x = 10; return x << 3;",
2 * kPointerSize,
1,
- 11,
- {B(LdaSmi8), U8(10), //
+ 12,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(10), //
B(Star), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(3), //
@@ -394,8 +516,9 @@ TEST(PrimitiveExpressions) {
{"var x = 10; return x >> 3;",
2 * kPointerSize,
1,
- 11,
- {B(LdaSmi8), U8(10), //
+ 12,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(10), //
B(Star), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(3), //
@@ -405,8 +528,9 @@ TEST(PrimitiveExpressions) {
{"var x = 10; return x >>> 3;",
2 * kPointerSize,
1,
- 11,
- {B(LdaSmi8), U8(10), //
+ 12,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(10), //
B(Star), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(3), //
@@ -416,12 +540,15 @@ TEST(PrimitiveExpressions) {
{"var x = 0; return (x, 3);",
1 * kPointerSize,
1,
- 6,
- {B(LdaZero), //
+ 7,
+ {B(StackCheck), //
+ B(LdaZero), //
B(Star), R(0), //
B(LdaSmi8), U8(3), //
B(Return)},
- 0}};
+ 0},
+ };
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -435,12 +562,14 @@ TEST(LogicalExpressions) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ // clang-format off
ExpectedSnippet<int> snippets[] = {
{"var x = 0; return x || 3;",
1 * kPointerSize,
1,
- 8,
- {B(LdaZero), //
+ 9,
+ {B(StackCheck), //
+ B(LdaZero), //
B(Star), R(0), //
B(JumpIfToBooleanTrue), U8(4), //
B(LdaSmi8), U8(3), //
@@ -449,8 +578,9 @@ TEST(LogicalExpressions) {
{"var x = 0; return (x == 1) || 3;",
2 * kPointerSize,
1,
- 14,
- {B(LdaZero), //
+ 15,
+ {B(StackCheck), //
+ B(LdaZero), //
B(Star), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(1), //
@@ -462,8 +592,9 @@ TEST(LogicalExpressions) {
{"var x = 0; return x && 3;",
1 * kPointerSize,
1,
- 8,
- {B(LdaZero), //
+ 9,
+ {B(StackCheck), //
+ B(LdaZero), //
B(Star), R(0), //
B(JumpIfToBooleanFalse), U8(4), //
B(LdaSmi8), U8(3), //
@@ -472,8 +603,9 @@ TEST(LogicalExpressions) {
{"var x = 0; return (x == 0) && 3;",
2 * kPointerSize,
1,
- 13,
- {B(LdaZero), //
+ 14,
+ {B(StackCheck), //
+ B(LdaZero), //
B(Star), R(0), //
B(Star), R(1), //
B(LdaZero), //
@@ -485,8 +617,9 @@ TEST(LogicalExpressions) {
{"var x = 0; return x || (1, 2, 3);",
1 * kPointerSize,
1,
- 8,
- {B(LdaZero), //
+ 9,
+ {B(StackCheck), //
+ B(LdaZero), //
B(Star), R(0), //
B(JumpIfToBooleanTrue), U8(4), //
B(LdaSmi8), U8(3), //
@@ -495,8 +628,9 @@ TEST(LogicalExpressions) {
{"var a = 2, b = 3, c = 4; return a || (a, b, a, b, c = 5, 3);",
3 * kPointerSize,
1,
- 31,
- {B(LdaSmi8), U8(2), //
+ 32,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(2), //
B(Star), R(0), //
B(LdaSmi8), U8(3), //
B(Star), R(1), //
@@ -518,8 +652,9 @@ TEST(LogicalExpressions) {
"3);",
3 * kPointerSize,
1,
- 275,
- {B(LdaSmi8), U8(1), //
+ 276,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(LdaSmi8), U8(2), //
B(Star), R(1), //
@@ -541,8 +676,9 @@ TEST(LogicalExpressions) {
"3);",
3 * kPointerSize,
1,
- 274,
- {B(LdaZero), //
+ 275,
+ {B(StackCheck), //
+ B(LdaZero), //
B(Star), R(0), //
B(LdaSmi8), U8(2), //
B(Star), R(1), //
@@ -564,8 +700,9 @@ TEST(LogicalExpressions) {
"3);",
4 * kPointerSize,
1,
- 281,
- {B(LdaSmi8), U8(1), //
+ 282,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(LdaSmi8), U8(2), //
B(Star), R(1), //
@@ -590,8 +727,9 @@ TEST(LogicalExpressions) {
"3);",
4 * kPointerSize,
1,
- 280,
- {B(LdaZero), //
+ 281,
+ {B(StackCheck), //
+ B(LdaZero), //
B(Star), R(0), //
B(LdaSmi8), U8(2), //
B(Star), R(1), //
@@ -614,22 +752,25 @@ TEST(LogicalExpressions) {
{"return 0 && 3;",
0 * kPointerSize,
1,
- 2,
- {B(LdaZero), //
+ 3,
+ {B(StackCheck), //
+ B(LdaZero), //
B(Return)},
0},
{"return 1 || 3;",
0 * kPointerSize,
1,
- 3,
- {B(LdaSmi8), U8(1), //
+ 4,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(1), //
B(Return)},
0},
{"var x = 1; return x && 3 || 0, 1;",
1 * kPointerSize,
1,
- 14,
- {B(LdaSmi8), U8(1), //
+ 15,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(JumpIfToBooleanFalse), U8(4), //
B(LdaSmi8), U8(3), //
@@ -637,7 +778,9 @@ TEST(LogicalExpressions) {
B(LdaZero), //
B(LdaSmi8), U8(1), //
B(Return)},
- 0}};
+ 0}
+ };
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -651,42 +794,54 @@ TEST(Parameters) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ // clang-format off
ExpectedSnippet<int> snippets[] = {
{"function f() { return this; }",
0,
1,
- 3,
- {B(Ldar), THIS(1), B(Return)},
+ 4,
+ {B(StackCheck), //
+ B(Ldar), THIS(1), //
+ B(Return)},
0},
{"function f(arg1) { return arg1; }",
0,
2,
- 3,
- {B(Ldar), A(1, 2), B(Return)},
+ 4,
+ {B(StackCheck), //
+ B(Ldar), A(1, 2), //
+ B(Return)},
0},
{"function f(arg1) { return this; }",
0,
2,
- 3,
- {B(Ldar), THIS(2), B(Return)},
+ 4,
+ {B(StackCheck), //
+ B(Ldar), THIS(2), //
+ B(Return)},
0},
{"function f(arg1, arg2, arg3, arg4, arg5, arg6, arg7) { return arg4; }",
0,
8,
- 3,
- {B(Ldar), A(4, 8), B(Return)},
+ 4,
+ {B(StackCheck), //
+ B(Ldar), A(4, 8), //
+ B(Return)},
0},
{"function f(arg1, arg2, arg3, arg4, arg5, arg6, arg7) { return this; }",
0,
8,
- 3,
- {B(Ldar), THIS(8), B(Return)},
+ 4,
+ {B(StackCheck), //
+ B(Ldar), THIS(8), //
+ B(Return)},
0},
{"function f(arg1) { arg1 = 1; }",
0,
2,
- 6,
- {B(LdaSmi8), U8(1), //
+ 7,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(1), //
B(Star), A(1, 2), //
B(LdaUndefined), //
B(Return)},
@@ -694,13 +849,15 @@ TEST(Parameters) {
{"function f(arg1, arg2, arg3, arg4) { arg2 = 1; }",
0,
5,
- 6,
- {B(LdaSmi8), U8(1), //
+ 7,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(1), //
B(Star), A(2, 5), //
B(LdaUndefined), //
B(Return)},
0},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -714,12 +871,14 @@ TEST(IntegerConstants) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ // clang-format off
ExpectedSnippet<int> snippets[] = {
{"return 12345678;",
0,
1,
- 3,
+ 4,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Return) //
},
@@ -728,8 +887,9 @@ TEST(IntegerConstants) {
{"var a = 1234; return 5678;",
1 * kPointerSize,
1,
- 7,
+ 8,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Star), R(0), //
B(LdaConstant), U8(1), //
@@ -740,15 +900,18 @@ TEST(IntegerConstants) {
{"var a = 1234; return 1234;",
1 * kPointerSize,
1,
- 7,
+ 8,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Star), R(0), //
B(LdaConstant), U8(0), //
B(Return) //
},
1,
- {1234}}};
+ {1234}}
+ };
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -764,12 +927,14 @@ TEST(HeapNumberConstants) {
int wide_idx = 0;
+ // clang-format off
ExpectedSnippet<double, 257> snippets[] = {
{"return 1.2;",
0,
1,
- 3,
+ 4,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Return) //
},
@@ -778,8 +943,9 @@ TEST(HeapNumberConstants) {
{"var a = 1.2; return 2.6;",
1 * kPointerSize,
1,
- 7,
+ 8,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Star), R(0), //
B(LdaConstant), U8(1), //
@@ -790,8 +956,9 @@ TEST(HeapNumberConstants) {
{"var a = 3.14; return 3.14;",
1 * kPointerSize,
1,
- 7,
+ 8,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Star), R(0), //
B(LdaConstant), U8(1), //
@@ -804,8 +971,9 @@ TEST(HeapNumberConstants) {
" a = 3.14;",
1 * kPointerSize,
1,
- 1031,
+ 1032,
{
+ B(StackCheck), //
REPEAT_256(COMMA, //
B(LdaConstant), U8(wide_idx++), //
B(Star), R(0)), //
@@ -818,6 +986,8 @@ TEST(HeapNumberConstants) {
{REPEAT_256(COMMA, 1.414),
3.14}}
};
+ // clang-format on
+
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
@@ -830,12 +1000,14 @@ TEST(StringConstants) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"return \"This is a string\";",
0,
1,
- 3,
+ 4,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Return) //
},
@@ -844,8 +1016,9 @@ TEST(StringConstants) {
{"var a = \"First string\"; return \"Second string\";",
1 * kPointerSize,
1,
- 7,
+ 8,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Star), R(0), //
B(LdaConstant), U8(1), //
@@ -856,15 +1029,18 @@ TEST(StringConstants) {
{"var a = \"Same string\"; return \"Same string\";",
1 * kPointerSize,
1,
- 7,
+ 8,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Star), R(0), //
B(LdaConstant), U8(0), //
B(Return) //
},
1,
- {"Same string"}}};
+ {"Same string"}}
+ };
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -889,18 +1065,18 @@ TEST(PropertyLoads) {
// These are a hack used by the LoadICXXXWide tests below.
int wide_idx_1 = vector->GetIndex(slot1) - 2;
int wide_idx_2 = vector->GetIndex(slot1) - 2;
- int wide_idx_3 = vector->GetIndex(slot1) - 2;
- int wide_idx_4 = vector->GetIndex(slot1) - 2;
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"function f(a) { return a.name; }\nf({name : \"test\"})",
1 * kPointerSize,
2,
- 9,
+ 10,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
- B(LoadICSloppy), R(0), U8(0), U8(vector->GetIndex(slot1)), //
+ B(LoadIC), R(0), U8(0), U8(vector->GetIndex(slot1)), //
B(Return), //
},
1,
@@ -908,11 +1084,12 @@ TEST(PropertyLoads) {
{"function f(a) { return a[\"key\"]; }\nf({key : \"test\"})",
1 * kPointerSize,
2,
- 9,
+ 10,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
- B(LoadICSloppy), R(0), U8(0), U8(vector->GetIndex(slot1)), //
+ B(LoadIC), R(0), U8(0), U8(vector->GetIndex(slot1)), //
B(Return) //
},
1,
@@ -920,24 +1097,26 @@ TEST(PropertyLoads) {
{"function f(a) { return a[100]; }\nf({100 : \"test\"})",
1 * kPointerSize,
2,
- 10,
+ 11,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
B(LdaSmi8), U8(100), //
- B(KeyedLoadICSloppy), R(0), U8(vector->GetIndex(slot1)), //
+ B(KeyedLoadIC), R(0), U8(vector->GetIndex(slot1)), //
B(Return) //
},
0},
{"function f(a, b) { return a[b]; }\nf({arg : \"test\"}, \"arg\")",
1 * kPointerSize,
3,
- 10,
+ 11,
{
+ B(StackCheck), //
B(Ldar), A(1, 3), //
B(Star), R(0), //
B(Ldar), A(1, 2), //
- B(KeyedLoadICSloppy), R(0), U8(vector->GetIndex(slot1)), //
+ B(KeyedLoadIC), R(0), U8(vector->GetIndex(slot1)), //
B(Return) //
},
0},
@@ -945,45 +1124,21 @@ TEST(PropertyLoads) {
"f({\"-124\" : \"test\", name : 123 })",
2 * kPointerSize,
2,
- 20,
+ 21,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(1), //
- B(LoadICSloppy), R(1), U8(0), U8(vector->GetIndex(slot1)), //
+ B(LoadIC), R(1), U8(0), U8(vector->GetIndex(slot1)), //
B(Star), R(0), //
B(Ldar), A(1, 2), //
B(Star), R(1), //
B(LdaSmi8), U8(-124), //
- B(KeyedLoadICSloppy), R(1), U8(vector->GetIndex(slot2)), //
+ B(KeyedLoadIC), R(1), U8(vector->GetIndex(slot2)), //
B(Return), //
},
1,
{"name"}},
- {"function f(a) { \"use strict\"; return a.name; }\nf({name : \"test\"})",
- 1 * kPointerSize,
- 2,
- 9,
- {
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LoadICStrict), R(0), U8(0), U8(vector->GetIndex(slot1)), //
- B(Return), //
- },
- 1,
- {"name"}},
- {"function f(a, b) { \"use strict\"; return a[b]; }\n"
- "f({arg : \"test\"}, \"arg\")",
- 1 * kPointerSize,
- 3,
- 10,
- {
- B(Ldar), A(1, 3), //
- B(Star), R(0), //
- B(Ldar), A(2, 3), //
- B(KeyedLoadICStrict), R(0), U8(vector->GetIndex(slot1)), //
- B(Return), //
- },
- 0},
{"function f(a) {\n"
" var b;\n"
"b = a.name;"
@@ -992,48 +1147,22 @@ TEST(PropertyLoads) {
"f({name : \"test\"})\n",
2 * kPointerSize,
2,
- 1291,
+ 1292,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(1), //
- B(LoadICSloppy), R(1), U8(0), U8(wide_idx_1 += 2), //
+ B(LoadIC), R(1), U8(0), U8(wide_idx_1 += 2), //
B(Star), R(0), //
REPEAT_127(COMMA, //
B(Ldar), A(1, 2), //
B(Star), R(1), //
- B(LoadICSloppy), R(1), U8(0), //
+ B(LoadIC), R(1), U8(0), //
U8((wide_idx_1 += 2)), //
B(Star), R(0)), //
B(Ldar), A(1, 2), //
B(Star), R(1), //
- B(LoadICSloppyWide), R(1), U16(0), U16(wide_idx_1 + 2), //
- B(Return), //
- },
- 1,
- {"name"}},
- {"function f(a) {\n"
- " 'use strict'; var b;\n"
- " b = a.name;\n"
- REPEAT_127(SPACE, " b = a.name; ")
- " return a.name; }\n"
- "f({name : \"test\"})\n",
- 2 * kPointerSize,
- 2,
- 1291,
- {
- B(Ldar), A(1, 2), //
- B(Star), R(1), //
- B(LoadICStrict), R(1), U8(0), U8((wide_idx_2 += 2)), //
- B(Star), R(0), //
- REPEAT_127(COMMA, //
- B(Ldar), A(1, 2), //
- B(Star), R(1), //
- B(LoadICStrict), R(1), U8(0), //
- U8((wide_idx_2 += 2)), //
- B(Star), R(0)), //
- B(Ldar), A(1, 2), //
- B(Star), R(1), //
- B(LoadICStrictWide), R(1), U16(0), U16(wide_idx_2 + 2), //
+ B(LoadICWide), R(1), U16(0), U16(wide_idx_1 + 2), //
B(Return), //
},
1,
@@ -1046,53 +1175,29 @@ TEST(PropertyLoads) {
"f({name : \"test\"}, \"name\")\n",
2 * kPointerSize,
3,
- 1419,
- {
- B(Ldar), A(1, 3), //
- B(Star), R(1), //
- B(Ldar), A(2, 3), //
- B(KeyedLoadICSloppy), R(1), U8((wide_idx_3 += 2)), //
- B(Star), R(0), //
- REPEAT_127(COMMA, //
- B(Ldar), A(1, 3), //
- B(Star), R(1), //
- B(Ldar), A(2, 3), //
- B(KeyedLoadICSloppy), R(1), U8((wide_idx_3 += 2)), //
- B(Star), R(0)), //
- B(Ldar), A(1, 3), //
- B(Star), R(1), //
- B(Ldar), A(2, 3), //
- B(KeyedLoadICSloppyWide), R(1), U16(wide_idx_3 + 2), //
- B(Return), //
- }},
- {"function f(a, b) {\n"
- " 'use strict'; var c;\n"
- " c = a[b];"
- REPEAT_127(SPACE, " c = a[b]; ")
- " return a[b]; }\n"
- "f({name : \"test\"}, \"name\")\n",
- 2 * kPointerSize,
- 3,
- 1419,
+ 1420,
{
+ B(StackCheck), //
B(Ldar), A(1, 3), //
B(Star), R(1), //
B(Ldar), A(2, 3), //
- B(KeyedLoadICStrict), R(1), U8((wide_idx_4 += 2)), //
+ B(KeyedLoadIC), R(1), U8((wide_idx_2 += 2)), //
B(Star), R(0), //
REPEAT_127(COMMA, //
B(Ldar), A(1, 3), //
B(Star), R(1), //
B(Ldar), A(2, 3), //
- B(KeyedLoadICStrict), R(1), U8((wide_idx_4 += 2)), //
+ B(KeyedLoadIC), R(1), U8((wide_idx_2 += 2)), //
B(Star), R(0)), //
B(Ldar), A(1, 3), //
B(Star), R(1), //
B(Ldar), A(2, 3), //
- B(KeyedLoadICStrictWide), R(1), U16(wide_idx_4 + 2), //
+ B(KeyedLoadICWide), R(1), U16(wide_idx_2 + 2), //
B(Return), //
}},
};
+ // clang-format on
+
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
helper.MakeBytecode(snippets[i].code_snippet, helper.kFunctionName);
@@ -1119,12 +1224,14 @@ TEST(PropertyStores) {
int wide_idx_3 = vector->GetIndex(slot1) - 2;
int wide_idx_4 = vector->GetIndex(slot1) - 2;
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"function f(a) { a.name = \"val\"; }\nf({name : \"test\"})",
kPointerSize,
2,
- 12,
+ 13,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
B(LdaConstant), U8(0), //
@@ -1137,8 +1244,9 @@ TEST(PropertyStores) {
{"function f(a) { a[\"key\"] = \"val\"; }\nf({key : \"test\"})",
kPointerSize,
2,
- 12,
+ 13,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
B(LdaConstant), U8(0), //
@@ -1151,8 +1259,9 @@ TEST(PropertyStores) {
{"function f(a) { a[100] = \"val\"; }\nf({100 : \"test\"})",
2 * kPointerSize,
2,
- 16,
+ 17,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
B(LdaSmi8), U8(100), //
@@ -1168,8 +1277,9 @@ TEST(PropertyStores) {
{"function f(a, b) { a[b] = \"val\"; }\nf({arg : \"test\"}, \"arg\")",
2 * kPointerSize,
3,
- 16,
+ 17,
{
+ B(StackCheck), //
B(Ldar), A(1, 3), //
B(Star), R(0), //
B(Ldar), A(2, 3), //
@@ -1186,14 +1296,15 @@ TEST(PropertyStores) {
"f({\"-124\" : \"test\", name : 123 })",
2 * kPointerSize,
2,
- 19,
+ 20,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
B(Ldar), A(1, 2), //
B(Star), R(1), //
B(LdaSmi8), U8(-124), //
- B(KeyedLoadICSloppy), R(1), U8(vector->GetIndex(slot1)), //
+ B(KeyedLoadIC), R(1), U8(vector->GetIndex(slot1)), //
B(StoreICSloppy), R(0), U8(0), U8(vector->GetIndex(slot2)), //
B(LdaUndefined), //
B(Return), //
@@ -1204,8 +1315,9 @@ TEST(PropertyStores) {
"f({name : \"test\"})",
kPointerSize,
2,
- 12,
+ 13,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
B(LdaConstant), U8(0), //
@@ -1219,8 +1331,9 @@ TEST(PropertyStores) {
"f({arg : \"test\"}, \"arg\")",
2 * kPointerSize,
3,
- 16,
+ 17,
{
+ B(StackCheck), //
B(Ldar), A(1, 3), //
B(Star), R(0), //
B(Ldar), A(2, 3), //
@@ -1239,8 +1352,9 @@ TEST(PropertyStores) {
"f({name : \"test\"})\n",
kPointerSize,
2,
- 1294,
+ 1295,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
@@ -1268,8 +1382,9 @@ TEST(PropertyStores) {
"f({name : \"test\"})\n",
kPointerSize,
2,
- 1294,
+ 1295,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
@@ -1296,8 +1411,9 @@ TEST(PropertyStores) {
"f({name : \"test\"})\n",
2 * kPointerSize,
3,
- 1809,
+ 1810,
{
+ B(StackCheck), //
B(Ldar), A(1, 3), //
B(Star), R(0), //
B(Ldar), A(2, 3), //
@@ -1329,8 +1445,9 @@ TEST(PropertyStores) {
"f({name : \"test\"})\n",
2 * kPointerSize,
3,
- 1809,
+ 1810,
{
+ B(StackCheck), //
B(Ldar), A(1, 3), //
B(Star), R(0), //
B(Ldar), A(2, 3), //
@@ -1353,7 +1470,10 @@ TEST(PropertyStores) {
B(KeyedStoreICStrictWide), R(0), R(1), U16(wide_idx_4 + 2), //
B(LdaUndefined), //
B(Return), //
- }}};
+ }}
+ };
+ // clang-format on
+
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
helper.MakeBytecode(snippets[i].code_snippet, helper.kFunctionName);
@@ -1380,17 +1500,19 @@ TEST(PropertyCall) {
// These are a hack used by the CallWide test below.
int wide_idx = vector->GetIndex(slot1) - 2;
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"function f(a) { return a.func(); }\nf(" FUNC_ARG ")",
2 * kPointerSize,
2,
- 16,
+ 17,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(1), //
- B(LoadICSloppy), R(1), U8(0), U8(vector->GetIndex(slot2)), //
+ B(LoadIC), R(1), U8(0), U8(vector->GetIndex(slot2)), //
B(Star), R(0), //
- B(Call), R(0), R(1), U8(0), U8(vector->GetIndex(slot1)), //
+ B(Call), R(0), R(1), U8(1), U8(vector->GetIndex(slot1)), //
B(Return), //
},
1,
@@ -1398,17 +1520,18 @@ TEST(PropertyCall) {
{"function f(a, b, c) { return a.func(b, c); }\nf(" FUNC_ARG ", 1, 2)",
4 * kPointerSize,
4,
- 24,
+ 25,
{
+ B(StackCheck), //
B(Ldar), A(1, 4), //
B(Star), R(1), //
- B(LoadICSloppy), R(1), U8(0), U8(vector->GetIndex(slot2)), //
+ B(LoadIC), R(1), U8(0), U8(vector->GetIndex(slot2)), //
B(Star), R(0), //
B(Ldar), A(2, 4), //
B(Star), R(2), //
B(Ldar), A(3, 4), //
B(Star), R(3), //
- B(Call), R(0), R(1), U8(2), U8(vector->GetIndex(slot1)), //
+ B(Call), R(0), R(1), U8(3), U8(vector->GetIndex(slot1)), //
B(Return) //
},
1,
@@ -1416,11 +1539,12 @@ TEST(PropertyCall) {
{"function f(a, b) { return a.func(b + b, b); }\nf(" FUNC_ARG ", 1)",
4 * kPointerSize,
3,
- 30,
+ 31,
{
+ B(StackCheck), //
B(Ldar), A(1, 3), //
B(Star), R(1), //
- B(LoadICSloppy), R(1), U8(0), U8(vector->GetIndex(slot2)), //
+ B(LoadIC), R(1), U8(0), U8(vector->GetIndex(slot2)), //
B(Star), R(0), //
B(Ldar), A(2, 3), //
B(Star), R(3), //
@@ -1429,36 +1553,38 @@ TEST(PropertyCall) {
B(Star), R(2), //
B(Ldar), A(2, 3), //
B(Star), R(3), //
- B(Call), R(0), R(1), U8(2), U8(vector->GetIndex(slot1)), //
+ B(Call), R(0), R(1), U8(3), U8(vector->GetIndex(slot1)), //
B(Return), //
},
1,
{"func"}},
{"function f(a) {\n"
- " a.func;\n"
- REPEAT_127(SPACE, " a.func;\n")
- " return a.func(); }\nf(" FUNC_ARG ")",
+ " a.func;\n" REPEAT_127(
+ SPACE, " a.func;\n") " return a.func(); }\nf(" FUNC_ARG ")",
2 * kPointerSize,
2,
- 1044,
+ 1047,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
- B(LoadICSloppy), R(0), U8(0), U8(wide_idx += 2), //
+ B(LoadIC), R(0), U8(0), U8(wide_idx += 2), //
REPEAT_127(COMMA, //
B(Ldar), A(1, 2), //
B(Star), R(0), //
- B(LoadICSloppy), R(0), U8(0), U8((wide_idx += 2))), //
+ B(LoadIC), R(0), U8(0), U8((wide_idx += 2))), //
B(Ldar), A(1, 2), //
B(Star), R(1), //
- B(LoadICSloppyWide), R(1), U16(0), U16(wide_idx + 4), //
+ B(LoadICWide), R(1), U16(0), U16(wide_idx + 4), //
B(Star), R(0), //
- B(CallWide), R(0), R(1), U16(0), U16(wide_idx + 2), //
+ B(CallWide), R16(0), R16(1), U16(1), U16(wide_idx + 2), //
B(Return), //
},
1,
{"func"}},
};
+ // clang-format on
+
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
helper.MakeBytecode(snippets[i].code_snippet, helper.kFunctionName);
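
The U8(0) -> U8(1) style bumps on the Call operands in this hunk are independent of the StackCheck insertions: the count operand that previously held only the explicit arguments now also counts the implicit receiver. (The CallWide row additionally switches to explicit 16-bit register operands, R16.) A quick sanity check of the new counting convention against the snippets above:

    // The Call count operand now includes the receiver, so it is always one
    // more than the number of explicit arguments at the call site.
    #include <cassert>

    int EncodedArgCount(int explicit_args) { return explicit_args + 1; }

    int main() {
      assert(EncodedArgCount(0) == 1);  // a.func()         -> U8(1)
      assert(EncodedArgCount(2) == 3);  // a.func(b, c)     -> U8(3)
      assert(EncodedArgCount(2) == 3);  // a.func(b + b, b) -> U8(3)
    }
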
@@ -1480,15 +1606,16 @@ TEST(LoadGlobal) {
// These are a hack used by the LdaGlobalXXXWide tests below.
int wide_idx_1 = vector->GetIndex(slot) - 2;
- int wide_idx_2 = vector->GetIndex(slot) - 2;
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"var a = 1;\nfunction f() { return a; }\nf()",
0,
1,
- 4,
+ 5,
{
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot)), //
+ B(StackCheck), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot)), //
B(Return) //
},
1,
@@ -1496,29 +1623,21 @@ TEST(LoadGlobal) {
{"function t() { }\nfunction f() { return t; }\nf()",
0,
1,
- 4,
+ 5,
{
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot)), //
+ B(StackCheck), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot)), //
B(Return) //
},
1,
{"t"}},
- {"'use strict'; var a = 1;\nfunction f() { return a; }\nf()",
- 0,
- 1,
- 4,
- {
- B(LdaGlobalStrict), U8(0), U8(vector->GetIndex(slot)), //
- B(Return) //
- },
- 1,
- {"a"}},
{"a = 1;\nfunction f() { return a; }\nf()",
0,
1,
- 4,
+ 5,
{
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot)), //
+ B(StackCheck), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot)), //
B(Return) //
},
1,
@@ -1531,44 +1650,23 @@ TEST(LoadGlobal) {
"}\nf({name: 1});",
kPointerSize,
2,
- 1030,
- {
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LoadICSloppy), R(0), U8(0), U8(wide_idx_1 += 2), //
- REPEAT_127(COMMA, //
- B(Ldar), A(1, 2), //
- B(Star), R(0), //
- B(LoadICSloppy), R(0), U8(0), U8(wide_idx_1 += 2)), //
- B(LdaGlobalSloppyWide), U16(1), U16(wide_idx_1 + 2), //
- B(Return), //
- },
- 2,
- {"name", "a"}},
- {"a = 1;"
- "function f(b) {\n"
- " 'use strict';\n"
- " b.name\n"
- REPEAT_127(SPACE, "b.name; ")
- " return a;"
- "}\nf({name: 1});",
- kPointerSize,
- 2,
- 1030,
+ 1031,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
- B(LoadICStrict), R(0), U8(0), U8(wide_idx_2 += 2), //
+ B(LoadIC), R(0), U8(0), U8(wide_idx_1 += 2), //
REPEAT_127(COMMA, //
B(Ldar), A(1, 2), //
B(Star), R(0), //
- B(LoadICStrict), R(0), U8(0), U8(wide_idx_2 += 2)), //
- B(LdaGlobalStrictWide), U16(1), U16(wide_idx_2 + 2), //
+ B(LoadIC), R(0), U8(0), U8(wide_idx_1 += 2)), //
+ B(LdaGlobalWide), U16(1), U16(wide_idx_1 + 2), //
B(Return), //
},
2,
{"name", "a"}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
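
This hunk also shows the second systematic rewrite in the patch: the language-mode-specialized load bytecodes collapse into single variants (LdaGlobalSloppy/LdaGlobalStrict -> LdaGlobal here, and LoadIC, KeyedLoadIC, LdaGlobalInsideTypeof elsewhere in the file), which is why the duplicated 'use strict' snippets are deleted outright. A summary of the renames visible in this diff, written as a lookup sketch rather than interpreter code:

    // Lookup sketch summarizing the renames visible in this diff; a reading
    // aid, not code from src/interpreter.
    #include <string>

    std::string MergedLoadBytecode(const std::string& old_name) {
      if (old_name == "LoadICSloppy" || old_name == "LoadICStrict")
        return "LoadIC";
      if (old_name == "KeyedLoadICSloppy") return "KeyedLoadIC";
      if (old_name == "LdaGlobalSloppy" || old_name == "LdaGlobalStrict")
        return "LdaGlobal";
      if (old_name == "LdaGlobalInsideTypeofSloppy" ||
          old_name == "LdaGlobalInsideTypeofStrict")
        return "LdaGlobalInsideTypeof";
      return old_name;  // stores remain mode-specific: StaGlobalSloppy/Strict,
                        // StoreICSloppy, DeletePropertySloppy/Strict, ...
    }

    int main() {
      return MergedLoadBytecode("LdaGlobalStrict") == "LdaGlobal" ? 0 : 1;
    }
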
@@ -1593,12 +1691,14 @@ TEST(StoreGlobal) {
int wide_idx_1 = vector->GetIndex(slot) - 2;
int wide_idx_2 = vector->GetIndex(slot) - 2;
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"var a = 1;\nfunction f() { a = 2; }\nf()",
0,
1,
- 7,
+ 8,
{
+ B(StackCheck), //
B(LdaSmi8), U8(2), //
B(StaGlobalSloppy), U8(0), U8(vector->GetIndex(slot)), //
B(LdaUndefined), //
@@ -1609,8 +1709,9 @@ TEST(StoreGlobal) {
{"var a = \"test\"; function f(b) { a = b; }\nf(\"global\")",
0,
2,
- 7,
+ 8,
{
+ B(StackCheck), //
B(Ldar), R(helper.kLastParamIndex), //
B(StaGlobalSloppy), U8(0), U8(vector->GetIndex(slot)), //
B(LdaUndefined), //
@@ -1621,8 +1722,9 @@ TEST(StoreGlobal) {
{"'use strict'; var a = 1;\nfunction f() { a = 2; }\nf()",
0,
1,
- 7,
+ 8,
{
+ B(StackCheck), //
B(LdaSmi8), U8(2), //
B(StaGlobalStrict), U8(0), U8(vector->GetIndex(slot)), //
B(LdaUndefined), //
@@ -1633,8 +1735,9 @@ TEST(StoreGlobal) {
{"a = 1;\nfunction f() { a = 2; }\nf()",
0,
1,
- 7,
+ 8,
{
+ B(StackCheck), //
B(LdaSmi8), U8(2), //
B(StaGlobalSloppy), U8(0), U8(vector->GetIndex(slot)), //
B(LdaUndefined), //
@@ -1650,15 +1753,16 @@ TEST(StoreGlobal) {
"f({name: 1});",
kPointerSize,
2,
- 1033,
+ 1034,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
- B(LoadICSloppy), R(0), U8(0), U8(wide_idx_1 += 2), //
+ B(LoadIC), R(0), U8(0), U8(wide_idx_1 += 2), //
REPEAT_127(COMMA, //
B(Ldar), A(1, 2), //
B(Star), R(0), //
- B(LoadICSloppy), R(0), U8(0), U8(wide_idx_1 += 2)), //
+ B(LoadIC), R(0), U8(0), U8(wide_idx_1 += 2)), //
B(LdaSmi8), U8(2), //
B(StaGlobalSloppyWide), U16(1), U16(wide_idx_1 + 2), //
B(LdaUndefined), //
@@ -1675,15 +1779,16 @@ TEST(StoreGlobal) {
"f({name: 1});",
kPointerSize,
2,
- 1033,
+ 1034,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
- B(LoadICStrict), R(0), U8(0), U8(wide_idx_2 += 2), //
+ B(LoadIC), R(0), U8(0), U8(wide_idx_2 += 2), //
REPEAT_127(COMMA, //
B(Ldar), A(1, 2), //
B(Star), R(0), //
- B(LoadICStrict), R(0), U8(0), U8(wide_idx_2 += 2)), //
+ B(LoadIC), R(0), U8(0), U8(wide_idx_2 += 2)), //
B(LdaSmi8), U8(2), //
B(StaGlobalStrictWide), U16(1), U16(wide_idx_2 + 2), //
B(LdaUndefined), //
@@ -1692,6 +1797,7 @@ TEST(StoreGlobal) {
2,
{"name", "a"}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -1713,17 +1819,19 @@ TEST(CallGlobal) {
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"function t() { }\nfunction f() { return t(); }\nf()",
2 * kPointerSize,
1,
- 14,
+ 15,
{
+ B(StackCheck), //
B(LdaUndefined), //
B(Star), R(1), //
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot2)), //
B(Star), R(0), //
- B(Call), R(0), R(1), U8(0), U8(vector->GetIndex(slot1)), //
+ B(Call), R(0), R(1), U8(1), U8(vector->GetIndex(slot1)), //
B(Return) //
},
1,
@@ -1731,11 +1839,12 @@ TEST(CallGlobal) {
{"function t(a, b, c) { }\nfunction f() { return t(1, 2, 3); }\nf()",
5 * kPointerSize,
1,
- 26,
+ 27,
{
+ B(StackCheck), //
B(LdaUndefined), //
B(Star), R(1), //
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot2)), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(Star), R(2), //
@@ -1743,12 +1852,13 @@ TEST(CallGlobal) {
B(Star), R(3), //
B(LdaSmi8), U8(3), //
B(Star), R(4), //
- B(Call), R(0), R(1), U8(3), U8(vector->GetIndex(slot1)), //
+ B(Call), R(0), R(1), U8(4), U8(vector->GetIndex(slot1)), //
B(Return) //
},
1,
{"t"}},
};
+ // clang-format on
size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
for (size_t i = 0; i < num_snippets; i++) {
@@ -1763,13 +1873,15 @@ TEST(CallRuntime) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{
"function f() { %TheHole() }\nf()",
0,
1,
- 7,
+ 8,
{
+ B(StackCheck), //
B(CallRuntime), U16(Runtime::kTheHole), R(0), U8(0), //
B(LdaUndefined), //
B(Return) //
@@ -1779,8 +1891,9 @@ TEST(CallRuntime) {
"function f(a) { return %IsArray(a) }\nf(undefined)",
1 * kPointerSize,
2,
- 10,
+ 11,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
B(CallRuntime), U16(Runtime::kIsArray), R(0), U8(1), //
@@ -1791,8 +1904,9 @@ TEST(CallRuntime) {
"function f() { return %Add(1, 2) }\nf()",
2 * kPointerSize,
1,
- 14,
+ 15,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(LdaSmi8), U8(2), //
@@ -1805,20 +1919,22 @@ TEST(CallRuntime) {
"function f() { return %spread_iterable([1]) }\nf()",
2 * kPointerSize,
1,
- 15,
+ 16,
{
+ B(StackCheck), //
B(LdaUndefined), //
B(Star), R(0), //
B(CreateArrayLiteral), U8(0), U8(0), U8(3), //
B(Star), R(1), //
B(CallJSRuntime), U16(Context::SPREAD_ITERABLE_INDEX), R(0), //
- U8(1), //
+ /* */ U8(2), //
B(Return), //
},
1,
{InstanceType::FIXED_ARRAY_TYPE},
},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
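
The adjusted lengths are easy to cross-check from operand widths: one byte per opcode and per U8/R operand, two bytes per U16 operand. For the "%TheHole()" snippet above that gives 1 + 5 + 1 + 1 = 8, matching the new expectation:

    // Cross-checking the "%TheHole()" expectation: 1 byte per opcode and per
    // U8/R operand, 2 bytes per U16 operand.
    #include <cassert>

    int main() {
      int stack_check   = 1;              // B(StackCheck)
      int call_runtime  = 1 + 2 + 1 + 1;  // opcode + U16 id + R + U8 count
      int lda_undefined = 1;              // B(LdaUndefined)
      int ret           = 1;              // B(Return)
      assert(stack_check + call_runtime + lda_undefined + ret == 8);
    }
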
@@ -1834,12 +1950,14 @@ TEST(IfConditions) {
Handle<Object> unused = helper.factory()->undefined_value();
+ // clang-format off
ExpectedSnippet<Handle<Object>> snippets[] = {
{"function f() { if (0) { return 1; } else { return -1; } } f()",
0,
1,
- 3,
+ 4,
{
+ B(StackCheck), //
B(LdaSmi8), U8(-1), //
B(Return), //
},
@@ -1848,8 +1966,9 @@ TEST(IfConditions) {
{"function f() { if ('lucky') { return 1; } else { return -1; } } f();",
0,
1,
- 3,
+ 4,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Return), //
},
@@ -1858,8 +1977,9 @@ TEST(IfConditions) {
{"function f() { if (false) { return 1; } else { return -1; } } f();",
0,
1,
- 3,
+ 4,
{
+ B(StackCheck), //
B(LdaSmi8), U8(-1), //
B(Return), //
},
@@ -1868,8 +1988,9 @@ TEST(IfConditions) {
{"function f() { if (false) { return 1; } } f();",
0,
1,
- 2,
+ 3,
{
+ B(StackCheck), //
B(LdaUndefined), //
B(Return), //
},
@@ -1878,8 +1999,9 @@ TEST(IfConditions) {
{"function f() { var a = 1; if (a) { a += 1; } else { return 2; } } f();",
2 * kPointerSize,
1,
- 23,
+ 24,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(JumpIfToBooleanFalse), U8(14), //
@@ -1900,8 +2022,9 @@ TEST(IfConditions) {
"f(99);",
kPointerSize,
2,
- 17,
+ 18,
{
+ B(StackCheck), //
B(Ldar), A(1, 2), //
B(Star), R(0), //
B(LdaZero), //
@@ -1922,8 +2045,9 @@ TEST(IfConditions) {
"f('prop', { prop: 'yes'});",
kPointerSize,
3,
- 15,
+ 16,
{
+ B(StackCheck), //
B(Ldar), A(1, 3), //
B(Star), R(0), //
B(Ldar), A(2, 3), //
@@ -1942,8 +2066,9 @@ TEST(IfConditions) {
" return 200; } else { return -200; } } f(0.001)",
3 * kPointerSize,
2,
- 282,
+ 283,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(LdaZero), //
@@ -1973,8 +2098,9 @@ TEST(IfConditions) {
" return 200; } else { return -200; } } f()",
2 * kPointerSize,
1,
- 276,
+ 277,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(LdaZero), //
@@ -2009,7 +2135,7 @@ TEST(IfConditions) {
"} f(1, 1);",
kPointerSize,
3,
- 106,
+ 107,
{
#define IF_CONDITION_RETURN(condition) \
B(Ldar), A(1, 3), \
@@ -2019,6 +2145,7 @@ TEST(IfConditions) {
B(JumpIfFalse), U8(5), \
B(LdaSmi8), U8(1), \
B(Return),
+ B(StackCheck), //
IF_CONDITION_RETURN(TestEqual) //
IF_CONDITION_RETURN(TestEqualStrict) //
IF_CONDITION_RETURN(TestLessThan) //
@@ -2042,8 +2169,9 @@ TEST(IfConditions) {
"f();",
1 * kPointerSize,
1,
- 13,
+ 14,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(JumpIfToBooleanFalse), U8(5), //
@@ -2055,7 +2183,9 @@ TEST(IfConditions) {
B(Return)
},
0,
- {unused, unused, unused, unused, unused, unused}}};
+ {unused, unused, unused, unused, unused, unused}}
+ };
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -2086,17 +2216,19 @@ TEST(DeclareGlobals) {
Handle<i::TypeFeedbackVector> load_vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec_loads);
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"var a = 1;",
4 * kPointerSize,
1,
- 30,
+ 31,
{
B(LdaConstant), U8(0), //
B(Star), R(1), //
B(LdaZero), //
B(Star), R(2), //
B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2), //
+ B(StackCheck), //
B(LdaConstant), U8(1), //
B(Star), R(1), //
B(LdaZero), //
@@ -2113,13 +2245,14 @@ TEST(DeclareGlobals) {
{"function f() {}",
2 * kPointerSize,
1,
- 14,
+ 15,
{
B(LdaConstant), U8(0), //
B(Star), R(0), //
B(LdaZero), //
B(Star), R(1), //
B(CallRuntime), U16(Runtime::kDeclareGlobals), R(0), U8(2), //
+ B(StackCheck), //
B(LdaUndefined), //
B(Return) //
},
@@ -2128,13 +2261,14 @@ TEST(DeclareGlobals) {
{"var a = 1;\na=2;",
4 * kPointerSize,
1,
- 36,
+ 37,
{
B(LdaConstant), U8(0), //
B(Star), R(1), //
B(LdaZero), //
B(Star), R(2), //
B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2), //
+ B(StackCheck), //
B(LdaConstant), U8(1), //
B(Star), R(1), //
B(LdaZero), //
@@ -2144,7 +2278,7 @@ TEST(DeclareGlobals) {
B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3), //
B(LdaSmi8), U8(2), //
B(StaGlobalSloppy), U8(1), //
- U8(store_vector->GetIndex(store_slot_2)), //
+ /* */ U8(store_vector->GetIndex(store_slot_2)), //
B(Star), R(0), //
B(Return) //
},
@@ -2154,20 +2288,20 @@ TEST(DeclareGlobals) {
{"function f() {}\nf();",
3 * kPointerSize,
1,
- 28,
+ 29,
{
B(LdaConstant), U8(0), //
B(Star), R(1), //
B(LdaZero), //
B(Star), R(2), //
B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2), //
+ B(StackCheck), //
B(LdaUndefined), //
B(Star), R(2), //
- B(LdaGlobalSloppy), U8(1), //
- U8(load_vector->GetIndex(load_slot_1)), //
+ B(LdaGlobal), U8(1), U8(load_vector->GetIndex(load_slot_1)), //
B(Star), R(1), //
- B(Call), R(1), R(2), U8(0), //
- U8(load_vector->GetIndex(call_slot_1)), //
+ B(Call), R(1), R(2), U8(1), //
+ /* */ U8(load_vector->GetIndex(call_slot_1)), //
B(Star), R(0), //
B(Return) //
},
@@ -2175,6 +2309,7 @@ TEST(DeclareGlobals) {
{InstanceType::FIXED_ARRAY_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
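
Note where the entry StackCheck lands in these top-level snippets: after the Runtime::kDeclareGlobals prologue rather than at offset zero, i.e. hoisted declarations execute before the check. A sketch of that emission order under assumed, illustrative names (this is not the real bytecode-generator API):

    // Illustrative-only sketch of the emission order implied by the expected
    // bytecode above: declarations prologue first, entry stack check second.
    #include <cstdio>

    struct BytecodeSketch {
      void EmitDeclareGlobals() { std::puts("CallRuntime kDeclareGlobals ..."); }
      void EmitStackCheck()     { std::puts("StackCheck"); }
      void EmitBody()           { std::puts("...statement bytecode..."); }
    };

    int main() {
      BytecodeSketch b;
      b.EmitDeclareGlobals();  // hoisted declarations run first
      b.EmitStackCheck();      // the entry check follows the prologue
      b.EmitBody();
    }
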
@@ -2188,7 +2323,11 @@ TEST(BreakableBlocks) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
- ExpectedSnippet<int> snippets[] = {
+ int closure = Register::function_closure().index();
+ int context = Register::current_context().index();
+
+ // clang-format off
+ ExpectedSnippet<InstanceType> snippets[] = {
{"var x = 0;\n"
"label: {\n"
" x = x + 1;\n"
@@ -2198,8 +2337,9 @@ TEST(BreakableBlocks) {
"return x;",
2 * kPointerSize,
1,
- 16,
+ 17,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(Star), R(1), //
@@ -2222,8 +2362,9 @@ TEST(BreakableBlocks) {
"return sum;",
5 * kPointerSize,
1,
- 72,
+ 75,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(LdaZero), //
@@ -2232,14 +2373,16 @@ TEST(BreakableBlocks) {
B(Star), R(3), //
B(LdaSmi8), U8(10), //
B(TestLessThan), R(3), //
- B(JumpIfFalse), U8(55), //
+ B(JumpIfFalse), U8(57), //
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(2), //
B(Ldar), R(2), //
B(Star), R(3), //
B(LdaSmi8), U8(3), //
B(TestLessThan), R(3), //
- B(JumpIfFalse), U8(34), //
+ B(JumpIfFalse), U8(35), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(ToNumber), //
B(Inc), //
@@ -2257,16 +2400,128 @@ TEST(BreakableBlocks) {
B(ToNumber), //
B(Inc), //
B(Star), R(2), //
- B(Jump), U8(-40), //
+ B(Jump), U8(-41), //
B(Ldar), R(1), //
B(ToNumber), //
B(Inc), //
B(Star), R(1), //
- B(Jump), U8(-61), //
+ B(Jump), U8(-63), //
B(Ldar), R(0), //
B(Return), //
}},
+ {"outer: {\n"
+ " let y = 10;"
+ " function f() { return y; }\n"
+ " break outer;\n"
+ "}\n",
+ 5 * kPointerSize,
+ 1,
+ 51,
+ {
+ B(StackCheck), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(3), //
+ B(Ldar), R(closure), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kPushBlockContext), R(3), U8(2), //
+ B(PushContext), R(2), //
+ B(LdaTheHole), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(CreateClosure), U8(1), U8(0), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(10), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(2), //
+ B(Star), R(3), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1), //
+ B(Star), R(1), //
+ B(Jump), U8(2), //
+ B(PopContext), R(2), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 3,
+ {InstanceType::FIXED_ARRAY_TYPE, InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ {"let x = 1;\n"
+ "outer: {\n"
+ " inner: {\n"
+ " let y = 2;\n"
+ " function f() { return x + y; }\n"
+ " if (y) break outer;\n"
+ " y = 3;\n"
+ " }\n"
+ "}\n"
+ "x = 4;",
+ 6 * kPointerSize,
+ 1,
+ 131,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ U8(1), //
+ B(PushContext), R(2), //
+ B(LdaTheHole), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(1), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(4), //
+ B(Ldar), R(closure), //
+ B(Star), R(5), //
+ B(CallRuntime), U16(Runtime::kPushBlockContext), R(4), U8(2), //
+ B(PushContext), R(3), //
+ B(LdaTheHole), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(CreateClosure), U8(1), U8(0), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(2), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(2), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1), //
+ B(Star), R(1), //
+ B(LdaContextSlot), R(context), U8(4), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(3), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1), //
+ B(JumpIfToBooleanFalse), U8(6), //
+ B(PopContext), R(3), //
+ B(Jump), U8(27), //
+ B(LdaSmi8), U8(3), //
+ B(Star), R(4), //
+ B(LdaContextSlot), R(context), U8(4), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(3), //
+ B(Star), R(5), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1), //
+ B(Ldar), R(4), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(PopContext), R(3), //
+ B(LdaSmi8), U8(4), //
+ B(Star), R(4), //
+ B(LdaContextSlot), R(context), U8(4), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(4), //
+ B(Star), R(5), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1), //
+ B(Ldar), R(4), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 5,
+ {InstanceType::FIXED_ARRAY_TYPE, InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
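
Loops now get a StackCheck at the top of each body in addition to the entry check, so any jump spanning a loop body stretches by one byte per newly inserted check; the nested-loop snippet above shows both directions (JumpIfFalse 55 -> 57 and 34 -> 35, back edges -40 -> -41 and -61 -> -63). The arithmetic, checked:

    // Each StackCheck is one byte, so a jump offset changes by the number of
    // newly inserted checks inside the span the jump crosses.
    #include <cassert>

    int Adjust(int old_offset, int checks_in_span) {
      return old_offset + (old_offset < 0 ? -checks_in_span : checks_in_span);
    }

    int main() {
      assert(Adjust(34, 1) == 35);    // inner JumpIfFalse: one new check
      assert(Adjust(55, 2) == 57);    // outer JumpIfFalse: inner + outer checks
      assert(Adjust(-40, 1) == -41);  // inner back edge
      assert(Adjust(-61, 2) == -63);  // outer back edge
    }
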
@@ -2280,14 +2535,19 @@ TEST(BasicLoops) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
- ExpectedSnippet<int> snippets[] = {
+ int closure = Register::function_closure().index();
+ int context = Register::current_context().index();
+
+ // clang-format off
+ ExpectedSnippet<InstanceType> snippets[] = {
{"var x = 0;\n"
"while (false) { x = 99; break; continue; }\n"
"return x;",
1 * kPointerSize,
1,
- 4,
+ 5,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(Return) //
@@ -2299,8 +2559,9 @@ TEST(BasicLoops) {
"return x;",
1 * kPointerSize,
1,
- 4,
+ 5,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(Return), //
@@ -2317,8 +2578,9 @@ TEST(BasicLoops) {
"return y;",
3 * kPointerSize,
1,
- 64,
+ 66,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
@@ -2327,7 +2589,8 @@ TEST(BasicLoops) {
B(Star), R(2), //
B(LdaSmi8), U8(10), //
B(TestLessThan), R(2), //
- B(JumpIfFalse), U8(46), //
+ B(JumpIfFalse), U8(47), //
+ B(StackCheck), //
B(Ldar), R(1), //
B(Star), R(2), //
B(LdaSmi8), U8(12), //
@@ -2342,14 +2605,14 @@ TEST(BasicLoops) {
B(LdaSmi8), U8(3), //
B(TestEqual), R(2), //
B(JumpIfFalse), U8(4), //
- B(Jump), U8(-38), //
+ B(Jump), U8(-39), //
B(Ldar), R(0), //
B(Star), R(2), //
B(LdaSmi8), U8(4), //
B(TestEqual), R(2), //
B(JumpIfFalse), U8(4), //
B(Jump), U8(4), //
- B(Jump), U8(-52), //
+ B(Jump), U8(-53), //
B(Ldar), R(1), //
B(Return), //
},
@@ -2366,16 +2629,18 @@ TEST(BasicLoops) {
"return i;",
2 * kPointerSize,
1,
- 77,
+ 79,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(Star), R(1), //
B(LdaZero), //
B(TestLessThan), R(1), //
B(JumpIfFalse), U8(4), //
- B(Jump), U8(-9), //
+ B(Jump), U8(-10), //
B(Ldar), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(3), //
@@ -2393,7 +2658,7 @@ TEST(BasicLoops) {
B(LdaSmi8), U8(10), //
B(TestEqual), R(1), //
B(JumpIfFalse), U8(4), //
- B(Jump), U8(-45), //
+ B(Jump), U8(-46), //
B(Ldar), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(5), //
@@ -2405,7 +2670,7 @@ TEST(BasicLoops) {
B(LdaSmi8), U8(1), //
B(Add), R(1), //
B(Star), R(0), //
- B(Jump), U8(-69), //
+ B(Jump), U8(-70), //
B(Ldar), R(0), //
B(Return), //
},
@@ -2422,15 +2687,18 @@ TEST(BasicLoops) {
"return i;",
2 * kPointerSize,
1,
- 54,
+ 57,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(3), //
B(TestLessThan), R(1), //
- B(JumpIfFalse), U8(26), //
+ B(JumpIfFalse), U8(27), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(2), //
@@ -2442,14 +2710,14 @@ TEST(BasicLoops) {
B(LdaSmi8), U8(1), //
B(Add), R(1), //
B(Star), R(0), //
- B(Jump), U8(-32), //
+ B(Jump), U8(-33), //
B(Ldar), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(1), //
B(Add), R(1), //
B(Star), R(0), //
B(Jump), U8(4), //
- B(Jump), U8(-46), //
+ B(Jump), U8(-48), //
B(Ldar), R(0), //
B(Return), //
},
@@ -2463,14 +2731,16 @@ TEST(BasicLoops) {
"return y;",
3 * kPointerSize,
1,
- 37,
+ 39,
{
+ B(StackCheck), //
B(LdaSmi8), U8(10), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
B(Ldar), R(0), //
- B(JumpIfToBooleanFalse), U8(24), //
+ B(JumpIfToBooleanFalse), U8(25), //
+ B(StackCheck), //
B(Ldar), R(1), //
B(Star), R(2), //
B(LdaSmi8), U8(12), //
@@ -2481,7 +2751,7 @@ TEST(BasicLoops) {
B(LdaSmi8), U8(1), //
B(Sub), R(2), //
B(Star), R(0), //
- B(Jump), U8(-24), //
+ B(Jump), U8(-25), //
B(Ldar), R(1), //
B(Return), //
},
@@ -2496,12 +2766,14 @@ TEST(BasicLoops) {
"return y;",
3 * kPointerSize,
1,
- 64,
+ 66,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
+ B(StackCheck), //
B(Ldar), R(1), //
B(Star), R(2), //
B(LdaSmi8), U8(10), //
@@ -2528,7 +2800,7 @@ TEST(BasicLoops) {
B(Star), R(2), //
B(LdaSmi8), U8(10), //
B(TestLessThan), R(2), //
- B(JumpIfTrue), U8(-52), //
+ B(JumpIfTrue), U8(-53), //
B(Ldar), R(1), //
B(Return), //
},
@@ -2542,12 +2814,14 @@ TEST(BasicLoops) {
"return y;",
3 * kPointerSize,
1,
- 35,
+ 37,
{
+ B(StackCheck), //
B(LdaSmi8), U8(10), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
+ B(StackCheck), //
B(Ldar), R(1), //
B(Star), R(2), //
B(LdaSmi8), U8(12), //
@@ -2559,7 +2833,7 @@ TEST(BasicLoops) {
B(Sub), R(2), //
B(Star), R(0), //
B(Ldar), R(0), //
- B(JumpIfToBooleanTrue), U8(-22), //
+ B(JumpIfToBooleanTrue), U8(-23), //
B(Ldar), R(1), //
B(Return), //
},
@@ -2574,12 +2848,14 @@ TEST(BasicLoops) {
"return y;",
3 * kPointerSize,
1,
- 52,
+ 54,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
+ B(StackCheck), //
B(Ldar), R(1), //
B(Star), R(2), //
B(LdaSmi8), U8(10), //
@@ -2615,12 +2891,14 @@ TEST(BasicLoops) {
"return y;",
3 * kPointerSize,
1,
- 54,
+ 56,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
+ B(StackCheck), //
B(Ldar), R(1), //
B(Star), R(2), //
B(LdaSmi8), U8(10), //
@@ -2641,8 +2919,8 @@ TEST(BasicLoops) {
B(LdaSmi8), U8(6), //
B(TestEqual), R(2), //
B(JumpIfFalse), U8(4), //
- B(Jump), U8(-40), //
- B(Jump), U8(-42), //
+ B(Jump), U8(-41), //
+ B(Jump), U8(-43), //
B(Ldar), R(1), //
B(Return), //
},
@@ -2655,10 +2933,12 @@ TEST(BasicLoops) {
"}",
2 * kPointerSize,
1,
- 41,
+ 43,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(1), //
@@ -2670,13 +2950,13 @@ TEST(BasicLoops) {
B(LdaSmi8), U8(2), //
B(TestEqual), R(1), //
B(JumpIfFalse), U8(4), //
- B(Jump), U8(-22), //
+ B(Jump), U8(-23), //
B(Ldar), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(1), //
B(Add), R(1), //
B(Star), R(0), //
- B(Jump), U8(-34), //
+ B(Jump), U8(-35), //
B(LdaUndefined), //
B(Return), //
},
@@ -2688,10 +2968,12 @@ TEST(BasicLoops) {
"}",
2 * kPointerSize,
1,
- 41,
+ 43,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(1), //
@@ -2703,13 +2985,13 @@ TEST(BasicLoops) {
B(LdaSmi8), U8(2), //
B(TestEqual), R(1), //
B(JumpIfFalse), U8(4), //
- B(Jump), U8(-22), //
+ B(Jump), U8(-23), //
B(Ldar), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(1), //
B(Add), R(1), //
B(Star), R(0), //
- B(Jump), U8(-34), //
+ B(Jump), U8(-35), //
B(LdaUndefined), //
B(Return), //
},
@@ -2721,10 +3003,12 @@ TEST(BasicLoops) {
"}",
2 * kPointerSize,
1,
- 41,
+ 43,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(1), //
@@ -2742,7 +3026,7 @@ TEST(BasicLoops) {
B(LdaSmi8), U8(1), //
B(Add), R(1), //
B(Star), R(0), //
- B(Jump), U8(-34), //
+ B(Jump), U8(-35), //
B(LdaUndefined), //
B(Return), //
},
@@ -2753,10 +3037,12 @@ TEST(BasicLoops) {
"}",
2 * kPointerSize,
1,
- 41,
+ 43,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(1), //
@@ -2774,7 +3060,7 @@ TEST(BasicLoops) {
B(LdaSmi8), U8(1), //
B(Add), R(1), //
B(Star), R(0), //
- B(Jump), U8(-34), //
+ B(Jump), U8(-35), //
B(LdaUndefined), //
B(Return), //
},
@@ -2786,8 +3072,9 @@ TEST(BasicLoops) {
"}",
3 * kPointerSize,
1,
- 42,
+ 44,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(LdaZero), //
@@ -2796,7 +3083,8 @@ TEST(BasicLoops) {
B(Star), R(2), //
B(LdaSmi8), U8(100), //
B(TestLessThan), R(2), //
- B(JumpIfFalse), U8(26), //
+ B(JumpIfFalse), U8(27), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(Star), R(2), //
B(LdaSmi8), U8(1), //
@@ -2808,7 +3096,7 @@ TEST(BasicLoops) {
B(LdaSmi8), U8(1), //
B(Add), R(2), //
B(Star), R(1), //
- B(Jump), U8(-32), //
+ B(Jump), U8(-33), //
B(LdaUndefined), //
B(Return), //
},
@@ -2820,14 +3108,16 @@ TEST(BasicLoops) {
"return y;",
3 * kPointerSize,
1,
- 33,
+ 35,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(LdaSmi8), U8(10), //
B(Star), R(1), //
B(Ldar), R(1), //
- B(JumpIfToBooleanFalse), U8(20), //
+ B(JumpIfToBooleanFalse), U8(21), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(Star), R(2), //
B(LdaSmi8), U8(12), //
@@ -2837,7 +3127,7 @@ TEST(BasicLoops) {
B(ToNumber), //
B(Dec), //
B(Star), R(1), //
- B(Jump), U8(-20), //
+ B(Jump), U8(-21), //
B(Ldar), R(0), //
B(Return), //
},
@@ -2849,8 +3139,9 @@ TEST(BasicLoops) {
"return x;",
2 * kPointerSize,
1,
- 9,
+ 10,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(LdaZero), //
@@ -2867,12 +3158,14 @@ TEST(BasicLoops) {
"return x;",
3 * kPointerSize,
1,
- 37,
+ 39,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(LdaZero), //
B(Star), R(1), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(Star), R(2), //
B(LdaSmi8), U8(1), //
@@ -2887,12 +3180,83 @@ TEST(BasicLoops) {
B(ToNumber), //
B(Inc), //
B(Star), R(1), //
- B(Jump), U8(-26), //
+ B(Jump), U8(-27), //
B(Ldar), R(0), //
B(Return), //
},
0},
+ {"var a = 0;\n"
+ "while (a) {\n"
+ " { \n"
+ " let z = 1;\n"
+ " function f() { z = 2; }\n"
+ " if (z) continue;\n"
+ " z++;\n"
+ " }\n"
+ "}\n",
+ 7 * kPointerSize,
+ 1,
+ 118,
+ {
+ B(StackCheck), //
+ B(LdaZero), //
+ B(Star), R(1), //
+ B(Ldar), R(1), //
+ B(JumpIfToBooleanFalse), U8(110), //
+ B(StackCheck), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(4), //
+ B(Ldar), R(closure), //
+ B(Star), R(5), //
+ B(CallRuntime), U16(Runtime::kPushBlockContext), R(4), U8(2), //
+ B(PushContext), R(3), //
+ B(LdaTheHole), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(CreateClosure), U8(1), U8(0), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(1), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(2), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1), //
+ B(Star), R(2), //
+ B(LdaContextSlot), R(context), U8(4), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(3), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1), //
+ B(JumpIfToBooleanFalse), U8(6), //
+ B(PopContext), R(3), //
+ B(Jump), U8(-67), //
+ B(LdaContextSlot), R(context), U8(4), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(3), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1), //
+ B(ToNumber), //
+ B(Star), R(4), //
+ B(Inc), //
+ B(Star), R(5), //
+ B(LdaContextSlot), R(context), U8(4), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(3), //
+ B(Star), R(6), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(6), U8(1), //
+ B(Ldar), R(5), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(PopContext), R(3), //
+ B(Jump), U8(-110), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 4,
+ {InstanceType::FIXED_ARRAY_TYPE, InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
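
The new let/function-in-block snippets are dominated by JumpIfNotHole / Runtime::kThrowReferenceError pairs: a lexical slot is pre-initialized with the hole sentinel and every read re-checks it before use. A conceptual model of that check (an illustration only, not V8's actual representation):

    // Conceptual model of the hole check, not V8's representation: an
    // uninitialized lexical binding holds a sentinel and reads must throw.
    #include <optional>
    #include <stdexcept>
    #include <string>

    int LoadLexicalSlot(const std::optional<int>& slot, const std::string& name) {
      if (!slot) {  // JumpIfNotHole falls through to the throw sequence
        throw std::runtime_error(name + " is not defined");  // kThrowReferenceError
      }
      return *slot;
    }

    int main() {
      std::optional<int> z;  // LdaTheHole; StaContextSlot
      try { LoadLexicalSlot(z, "z"); } catch (const std::runtime_error&) {}
      z = 1;                 // LdaSmi8 1; StaContextSlot
      return LoadLexicalSlot(z, "z") - 1;
    }
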
@@ -2907,6 +3271,7 @@ TEST(JumpsRequiringConstantWideOperands) {
BytecodeGeneratorHelper helper;
int constant_count = 0;
+ // clang-format off
ExpectedSnippet<Handle<Object>, 316> snippets[] = {
{
REPEAT_256(SPACE, "var x = 0.1;")
@@ -2920,8 +3285,9 @@ TEST(JumpsRequiringConstantWideOperands) {
"return 3;",
kPointerSize * 3,
1,
- 1359,
+ 1361,
{
+ B(StackCheck), //
#define L(c) B(LdaConstant), U8(c), B(Star), R(0)
REPEAT_256(COMMA, L(constant_count++)),
#undef L
@@ -2937,6 +3303,7 @@ TEST(JumpsRequiringConstantWideOperands) {
B(LdaSmi8), U8(3), //
B(TestLessThan), R(2), //
B(JumpIfFalseConstantWide), U16(313), //
+ B(StackCheck), //
B(Ldar), R(1), //
B(Star), R(2), //
B(LdaSmi8), U8(1), //
@@ -2954,7 +3321,7 @@ TEST(JumpsRequiringConstantWideOperands) {
B(Star), R(2), //
B(Inc), //
B(Star), R(1), //
- B(Jump), U8(-47), //
+ B(Jump), U8(-48), //
B(LdaSmi8), U8(3), //
B(Return) //
},
@@ -2967,9 +3334,11 @@ TEST(JumpsRequiringConstantWideOperands) {
REPEAT_8(COMMA, S(0.4)),
#undef S
#define N(x) CcTest::i_isolate()->factory()->NewNumberFromInt(x)
- N(6), N(41), N(13), N(17)
+ N(6), N(42), N(13), N(17)
#undef N
- }}};
+ }}
+ };
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
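
These wide-operand tests lean on the file's REPEAT_127/REPEAT_256 token-pasting macros. A minimal reconstruction of the idiom, scaled down to four repetitions (the real definitions live earlier in this test, so treat this as a sketch):

    // Scaled-down sketch of the REPEAT_* idiom: SEP() is spliced between
    // copies, so SPACE concatenates adjacent string literals and COMMA
    // separates initializer-list entries.
    #define SPACE()
    #define COMMA() ,
    #define REPEAT_2(SEP, ...) __VA_ARGS__ SEP() __VA_ARGS__
    #define REPEAT_4(SEP, ...) \
      REPEAT_2(SEP, __VA_ARGS__) SEP() REPEAT_2(SEP, __VA_ARGS__)

    const char* source = REPEAT_4(SPACE, "var x = 0.1;");  // one long literal
    const int bytes[] = {REPEAT_4(COMMA, 1, 2)};           // {1,2,1,2,1,2,1,2}

    int main() {
      (void)source;
      return sizeof(bytes) / sizeof(bytes[0]) == 8 ? 0 : 1;
    }
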
@@ -2983,6 +3352,7 @@ TEST(UnaryOperators) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ // clang-format off
ExpectedSnippet<int> snippets[] = {
{"var x = 0;"
"while (x != 10) {"
@@ -2991,8 +3361,9 @@ TEST(UnaryOperators) {
"return x;",
2 * kPointerSize,
1,
- 29,
+ 31,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(0), //
B(Ldar), R(0), //
@@ -3000,13 +3371,14 @@ TEST(UnaryOperators) {
B(LdaSmi8), U8(10), //
B(TestEqual), R(1), //
B(LogicalNot), //
- B(JumpIfFalse), U8(14), //
+ B(JumpIfFalse), U8(15), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(10), //
B(Add), R(1), //
B(Star), R(0), //
- B(Jump), U8(-21), //
+ B(Jump), U8(-22), //
B(Ldar), R(0), //
B(Return), //
},
@@ -3018,28 +3390,31 @@ TEST(UnaryOperators) {
"return x;",
2 * kPointerSize,
1,
- 20,
+ 22,
{
- B(LdaFalse), //
- B(Star), R(0), //
- B(Ldar), R(0), //
- B(LogicalNot), //
- B(Star), R(0), //
- B(Ldar), R(0), //
- B(Star), R(1), //
- B(LdaFalse), //
- B(TestEqual), R(1), //
- B(JumpIfTrue), U8(-12), //
- B(Ldar), R(0), //
- B(Return), //
+ B(StackCheck), //
+ B(LdaFalse), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
+ B(LogicalNot), //
+ B(Star), R(0), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaFalse), //
+ B(TestEqual), R(1), //
+ B(JumpIfTrue), U8(-13), //
+ B(Ldar), R(0), //
+ B(Return), //
},
0},
{"var x = 101;"
"return void(x * 3);",
2 * kPointerSize,
1,
- 12,
+ 13,
{
+ B(StackCheck), //
B(LdaSmi8), U8(101), //
B(Star), R(0), //
B(Star), R(1), //
@@ -3054,8 +3429,9 @@ TEST(UnaryOperators) {
"return y;",
4 * kPointerSize,
1,
- 20,
+ 21,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Star), R(0), //
B(Star), R(2), //
@@ -3074,8 +3450,9 @@ TEST(UnaryOperators) {
"return ~x;",
2 * kPointerSize,
1,
- 11,
+ 12,
{
+ B(StackCheck), //
B(LdaSmi8), U8(13), //
B(Star), R(0), //
B(Star), R(1), //
@@ -3088,8 +3465,9 @@ TEST(UnaryOperators) {
"return +x;",
2 * kPointerSize,
1,
- 11,
+ 12,
{
+ B(StackCheck), //
B(LdaSmi8), U8(13), //
B(Star), R(0), //
B(Star), R(1), //
@@ -3102,8 +3480,9 @@ TEST(UnaryOperators) {
"return -x;",
2 * kPointerSize,
1,
- 11,
+ 12,
{
+ B(StackCheck), //
B(LdaSmi8), U8(13), //
B(Star), R(0), //
B(Star), R(1), //
@@ -3111,7 +3490,9 @@ TEST(UnaryOperators) {
B(Mul), R(1), //
B(Return), //
},
- 0}};
+ 0}
+ };
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -3132,6 +3513,7 @@ TEST(Typeof) {
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"function f() {\n"
" var x = 13;\n"
@@ -3139,8 +3521,9 @@ TEST(Typeof) {
"}; f();",
kPointerSize,
1,
- 6,
+ 7,
{
+ B(StackCheck), //
B(LdaSmi8), U8(13), //
B(Star), R(0), //
B(TypeOf), //
@@ -3152,32 +3535,17 @@ TEST(Typeof) {
"}; f();",
0,
1,
- 5,
- {
- B(LdaGlobalInsideTypeofSloppy), U8(0), //
- U8(vector->GetIndex(slot)), //
- B(TypeOf), //
- B(Return), //
- },
- 1,
- {"x"}},
- {"var x = 13;\n"
- "function f() {\n"
- " 'use strict';\n"
- " return typeof(x);\n"
- "}; f();",
- 0,
- 1,
- 5,
+ 6,
{
- B(LdaGlobalInsideTypeofStrict), U8(0), //
- U8(vector->GetIndex(slot)), //
- B(TypeOf), //
- B(Return), //
+ B(StackCheck), //
+ B(LdaGlobalInsideTypeof), U8(0), U8(vector->GetIndex(slot)), //
+ B(TypeOf), //
+ B(Return), //
},
1,
{"x"}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -3194,63 +3562,65 @@ TEST(Delete) {
int deep_elements_flags =
ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
int closure = Register::function_closure().index();
+ int context = Register::current_context().index();
int first_context_slot = Context::MIN_CONTEXT_SLOTS;
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"var a = {x:13, y:14}; return delete a.x;",
2 * kPointerSize,
1,
- 13,
+ 16,
{
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaConstant), U8(1), //
- B(DeletePropertySloppy), R(1), //
- B(Return)
- },
+ B(StackCheck), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(1), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(1), //
+ B(DeletePropertySloppy), R(1), //
+ B(Return)},
2,
{InstanceType::FIXED_ARRAY_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"'use strict'; var a = {x:13, y:14}; return delete a.x;",
2 * kPointerSize,
1,
- 13,
- {
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaConstant), U8(1), //
- B(DeletePropertyStrict), R(1), //
- B(Return)
- },
+ 16,
+ {B(StackCheck), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(1), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(1), //
+ B(DeletePropertyStrict), R(1), //
+ B(Return)},
2,
{InstanceType::FIXED_ARRAY_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"var a = {1:13, 2:14}; return delete a[2];",
2 * kPointerSize,
1,
- 13,
- {
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(Star), R(0), //
- B(Star), R(1), //
- B(LdaSmi8), U8(2), //
- B(DeletePropertySloppy), R(1), //
- B(Return)
- },
+ 16,
+ {B(StackCheck), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(1), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(2), //
+ B(DeletePropertySloppy), R(1), //
+ B(Return)},
1,
{InstanceType::FIXED_ARRAY_TYPE}},
{"var a = 10; return delete a;",
1 * kPointerSize,
1,
- 6,
- {
- B(LdaSmi8), U8(10), //
- B(Star), R(0), //
- B(LdaFalse), //
- B(Return)
- },
+ 7,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(0), //
+ B(LdaFalse), //
+ B(Return)},
0},
{"'use strict';"
"var a = {1:10};"
@@ -3258,33 +3628,33 @@ TEST(Delete) {
"return delete a[1];",
2 * kPointerSize,
1,
- 27,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- R(closure), U8(1), //
- B(PushContext), R(0), //
- B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
- B(CreateClosure), U8(1), U8(0), //
- B(LdaContextSlot), R(0), U8(first_context_slot), //
- B(Star), R(1), //
- B(LdaSmi8), U8(1), //
- B(DeletePropertyStrict), R(1), //
- B(Return)
- },
+ 30,
+ {B(CallRuntime), U16(Runtime::kNewFunctionContext), //
+ /* */ R(closure), U8(1), //
+ B(PushContext), R(0), //
+ B(StackCheck), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(1), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
+ B(CreateClosure), U8(1), U8(0), //
+ B(LdaContextSlot), R(context), U8(first_context_slot), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(DeletePropertyStrict), R(1), //
+ B(Return)},
2,
{InstanceType::FIXED_ARRAY_TYPE,
InstanceType::SHARED_FUNCTION_INFO_TYPE}},
{"return delete 'test';",
0 * kPointerSize,
1,
- 2,
- {
- B(LdaTrue), //
- B(Return)
- },
+ 3,
+ {B(StackCheck), //
+ B(LdaTrue), //
+ B(Return)},
0},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -3299,7 +3669,7 @@ TEST(GlobalDelete) {
BytecodeGeneratorHelper helper;
Zone zone;
- int context = Register::function_context().index();
+ int context = Register::current_context().index();
int native_context_index = Context::NATIVE_CONTEXT_INDEX;
int global_context_index = Context::EXTENSION_INDEX;
FeedbackVectorSpec feedback_spec(&zone);
@@ -3308,12 +3678,14 @@ TEST(GlobalDelete) {
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"var a = {x:13, y:14};\n function f() { return delete a.x; };\n f();",
1 * kPointerSize,
1,
- 10,
- {B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot)), //
+ 11,
+ {B(StackCheck), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot)), //
B(Star), R(0), //
B(LdaConstant), U8(1), //
B(DeletePropertySloppy), R(0), //
@@ -3325,8 +3697,9 @@ TEST(GlobalDelete) {
"function f() {'use strict'; return delete a[1];};\n f();",
1 * kPointerSize,
1,
- 10,
- {B(LdaGlobalStrict), U8(0), U8(vector->GetIndex(slot)), //
+ 11,
+ {B(StackCheck), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot)), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(DeletePropertyStrict), R(0), //
@@ -3336,8 +3709,9 @@ TEST(GlobalDelete) {
{"var a = {x:13, y:14};\n function f() { return delete a; };\n f();",
2 * kPointerSize,
1,
- 15,
- {B(LdaContextSlot), R(context), U8(native_context_index), //
+ 16,
+ {B(StackCheck), //
+ B(LdaContextSlot), R(context), U8(native_context_index), //
B(Star), R(0), //
B(LdaContextSlot), R(0), U8(global_context_index), //
B(Star), R(1), //
@@ -3349,8 +3723,9 @@ TEST(GlobalDelete) {
{"b = 30;\n function f() { return delete b; };\n f();",
2 * kPointerSize,
1,
- 15,
- {B(LdaContextSlot), R(context), U8(native_context_index), //
+ 16,
+ {B(StackCheck), //
+ B(LdaContextSlot), R(context), U8(native_context_index), //
B(Star), R(0), //
B(LdaContextSlot), R(0), U8(global_context_index), //
B(Star), R(1), //
@@ -3358,7 +3733,9 @@ TEST(GlobalDelete) {
B(DeletePropertySloppy), R(1), //
B(Return)},
1,
- {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}}};
+ {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}}
+ };
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -3379,12 +3756,14 @@ TEST(FunctionLiterals) {
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"return function(){ }",
0,
1,
- 4,
+ 5,
{
+ B(StackCheck), //
B(CreateClosure), U8(0), U8(0), //
B(Return) //
},
@@ -3393,13 +3772,14 @@ TEST(FunctionLiterals) {
{"return (function(){ })()",
2 * kPointerSize,
1,
- 14,
+ 15,
{
+ B(StackCheck), //
B(LdaUndefined), //
B(Star), R(1), //
B(CreateClosure), U8(0), U8(0), //
B(Star), R(0), //
- B(Call), R(0), R(1), U8(0), U8(vector->GetIndex(slot)), //
+ B(Call), R(0), R(1), U8(1), U8(vector->GetIndex(slot)), //
B(Return) //
},
1,
@@ -3407,20 +3787,22 @@ TEST(FunctionLiterals) {
{"return (function(x){ return x; })(1)",
3 * kPointerSize,
1,
- 18,
+ 19,
{
+ B(StackCheck), //
B(LdaUndefined), //
B(Star), R(1), //
B(CreateClosure), U8(0), U8(0), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(Star), R(2), //
- B(Call), R(0), R(1), U8(1), U8(vector->GetIndex(slot)), //
+ B(Call), R(0), R(1), U8(2), U8(vector->GetIndex(slot)), //
B(Return) //
},
1,
{InstanceType::SHARED_FUNCTION_INFO_TYPE}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -3443,12 +3825,14 @@ TEST(RegExpLiterals) {
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"return /ab+d/;",
0 * kPointerSize,
1,
- 5,
+ 6,
{
+ B(StackCheck), //
B(CreateRegExpLiteral), U8(0), U8(0), U8(0), //
B(Return), //
},
@@ -3457,8 +3841,9 @@ TEST(RegExpLiterals) {
{"return /(\\w+)\\s(\\w+)/i;",
0 * kPointerSize,
1,
- 5,
+ 6,
{
+ B(StackCheck), //
B(CreateRegExpLiteral), U8(0), U8(0), U8(i_flags), //
B(Return), //
},
@@ -3467,20 +3852,22 @@ TEST(RegExpLiterals) {
{"return /ab+d/.exec('abdd');",
3 * kPointerSize,
1,
- 22,
+ 23,
{
+ B(StackCheck), //
B(CreateRegExpLiteral), U8(0), U8(0), U8(0), //
B(Star), R(1), //
- B(LoadICSloppy), R(1), U8(1), U8(vector->GetIndex(slot2)), //
+ B(LoadIC), R(1), U8(1), U8(vector->GetIndex(slot2)), //
B(Star), R(0), //
B(LdaConstant), U8(2), //
B(Star), R(2), //
- B(Call), R(0), R(1), U8(1), U8(vector->GetIndex(slot1)), //
+ B(Call), R(0), R(1), U8(2), U8(vector->GetIndex(slot1)), //
B(Return), //
},
3,
{"ab+d", "exec", "abdd"}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -3497,12 +3884,14 @@ TEST(RegExpLiteralsWide) {
int wide_idx = 0;
+ // clang-format off
ExpectedSnippet<InstanceType, 257> snippets[] = {
{"var a;" REPEAT_256(SPACE, "a = 1.23;") "return /ab+d/;",
1 * kPointerSize,
1,
- 1031,
+ 1032,
{
+ B(StackCheck), //
REPEAT_256(COMMA, //
B(LdaConstant), U8(wide_idx++), //
B(Star), R(0)), //
@@ -3513,6 +3902,7 @@ TEST(RegExpLiteralsWide) {
{REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -3538,12 +3928,14 @@ TEST(ArrayLiterals) {
int simple_flags =
ArrayLiteral::kDisableMementos | ArrayLiteral::kShallowElements;
int deep_elements_flags = ArrayLiteral::kDisableMementos;
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"return [ 1, 2 ];",
0,
1,
- 5,
+ 6,
{
+ B(StackCheck), //
B(CreateArrayLiteral), U8(0), U8(0), U8(simple_flags), //
B(Return) //
},
@@ -3552,8 +3944,9 @@ TEST(ArrayLiterals) {
{"var a = 1; return [ a, a + 1 ];",
4 * kPointerSize,
1,
- 38,
+ 39,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(CreateArrayLiteral), U8(0), U8(0), U8(3), //
@@ -3577,8 +3970,9 @@ TEST(ArrayLiterals) {
{"return [ [ 1, 2 ], [ 3 ] ];",
0,
1,
- 5,
+ 6,
{
+ B(StackCheck), //
B(CreateArrayLiteral), U8(0), U8(2), U8(deep_elements_flags), //
B(Return) //
},
@@ -3587,8 +3981,9 @@ TEST(ArrayLiterals) {
{"var a = 1; return [ [ a, 2 ], [ a + 2 ] ];",
6 * kPointerSize,
1,
- 68,
+ 69,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(CreateArrayLiteral), U8(0), U8(2), U8(deep_elements_flags), //
@@ -3623,6 +4018,7 @@ TEST(ArrayLiterals) {
{InstanceType::FIXED_ARRAY_TYPE, InstanceType::FIXED_ARRAY_TYPE,
InstanceType::FIXED_ARRAY_TYPE}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -3641,12 +4037,14 @@ TEST(ArrayLiteralsWide) {
int simple_flags =
ArrayLiteral::kDisableMementos | ArrayLiteral::kShallowElements;
+ // clang-format off
ExpectedSnippet<InstanceType, 257> snippets[] = {
{"var a;" REPEAT_256(SPACE, "a = 1.23;") "return [ 1 , 2 ];",
1 * kPointerSize,
1,
- 1031,
+ 1032,
{
+ B(StackCheck), //
REPEAT_256(COMMA, //
B(LdaConstant), U8(wide_idx++), //
B(Star), R(0)), //
@@ -3657,6 +4055,7 @@ TEST(ArrayLiteralsWide) {
{REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
InstanceType::FIXED_ARRAY_TYPE}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -3682,23 +4081,29 @@ TEST(ObjectLiterals) {
ObjectLiteral::kDisableMementos;
int deep_elements_flags =
ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
+
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"return { };",
- 0,
+ kPointerSize,
1,
- 5,
+ 8,
{
+ B(StackCheck), //
B(CreateObjectLiteral), U8(0), U8(0), U8(simple_flags), //
+ B(Star), R(0), //
B(Return) //
},
1,
{InstanceType::FIXED_ARRAY_TYPE}},
{"return { name: 'string', val: 9.2 };",
- 0,
+ kPointerSize,
1,
- 5,
+ 8,
{
+ B(StackCheck), //
B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(0), //
B(Return) //
},
1,
@@ -3706,8 +4111,9 @@ TEST(ObjectLiterals) {
{"var a = 1; return { name: 'string', val: a };",
2 * kPointerSize,
1,
- 19,
+ 20,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
@@ -3723,8 +4129,9 @@ TEST(ObjectLiterals) {
{"var a = 1; return { val: a, val: a + 1 };",
3 * kPointerSize,
1,
- 25,
+ 26,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
@@ -3743,8 +4150,9 @@ TEST(ObjectLiterals) {
{"return { func: function() { } };",
1 * kPointerSize,
1,
- 16,
+ 17,
{
+ B(StackCheck), //
B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
B(Star), R(0), //
B(CreateClosure), U8(1), U8(0), //
@@ -3753,14 +4161,14 @@ TEST(ObjectLiterals) {
B(Return), //
},
3,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ {InstanceType::FIXED_ARRAY_TYPE, InstanceType::SHARED_FUNCTION_INFO_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"return { func(a) { return a; } };",
1 * kPointerSize,
1,
- 16,
+ 17,
{
+ B(StackCheck), //
B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
B(Star), R(0), //
B(CreateClosure), U8(1), U8(0), //
@@ -3769,26 +4177,27 @@ TEST(ObjectLiterals) {
B(Return), //
},
3,
- {InstanceType::FIXED_ARRAY_TYPE,
- InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ {InstanceType::FIXED_ARRAY_TYPE, InstanceType::SHARED_FUNCTION_INFO_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"return { get a() { return 2; } };",
- 5 * kPointerSize,
+ 6 * kPointerSize,
1,
- 29,
+ 33,
{
+ B(StackCheck), //
B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
B(Star), R(0), //
+ B(Mov), R(0), R(1), //
B(LdaConstant), U8(1), //
- B(Star), R(1), //
- B(CreateClosure), U8(2), U8(0), //
B(Star), R(2), //
- B(LdaNull), //
+ B(CreateClosure), U8(2), U8(0), //
B(Star), R(3), //
- B(LdaZero), //
+ B(LdaNull), //
B(Star), R(4), //
+ B(LdaZero), //
+ B(Star), R(5), //
B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), //
- R(0), U8(5), //
+ /* */ R(1), U8(5), //
B(Ldar), R(0), //
B(Return), //
},
@@ -3797,22 +4206,24 @@ TEST(ObjectLiterals) {
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::SHARED_FUNCTION_INFO_TYPE}},
{"return { get a() { return this.x; }, set a(val) { this.x = val } };",
- 5 * kPointerSize,
+ 6 * kPointerSize,
1,
- 31,
+ 35,
{
+ B(StackCheck), //
B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
B(Star), R(0), //
+ B(Mov), R(0), R(1), //
B(LdaConstant), U8(1), //
- B(Star), R(1), //
- B(CreateClosure), U8(2), U8(0), //
B(Star), R(2), //
- B(CreateClosure), U8(3), U8(0), //
+ B(CreateClosure), U8(2), U8(0), //
B(Star), R(3), //
- B(LdaZero), //
+ B(CreateClosure), U8(3), U8(0), //
B(Star), R(4), //
+ B(LdaZero), //
+ B(Star), R(5), //
B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), //
- R(0), U8(5), //
+ /* */ R(1), U8(5), //
B(Ldar), R(0), //
B(Return), //
},
@@ -3822,22 +4233,24 @@ TEST(ObjectLiterals) {
InstanceType::SHARED_FUNCTION_INFO_TYPE,
InstanceType::SHARED_FUNCTION_INFO_TYPE}},
{"return { set b(val) { this.y = val } };",
- 5 * kPointerSize,
+ 6 * kPointerSize,
1,
- 29,
+ 33,
{
+ B(StackCheck), //
B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
B(Star), R(0), //
+ B(Mov), R(0), R(1), //
B(LdaConstant), U8(1), //
- B(Star), R(1), //
- B(LdaNull), //
B(Star), R(2), //
- B(CreateClosure), U8(2), U8(0), //
+ B(LdaNull), //
B(Star), R(3), //
- B(LdaZero), //
+ B(CreateClosure), U8(2), U8(0), //
B(Star), R(4), //
+ B(LdaZero), //
+ B(Star), R(5), //
B(CallRuntime), U16(Runtime::kDefineAccessorPropertyUnchecked), //
- R(0), U8(5), //
+ /* */ R(1), U8(5), //
B(Ldar), R(0), //
B(Return), //
},
@@ -3846,58 +4259,66 @@ TEST(ObjectLiterals) {
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::SHARED_FUNCTION_INFO_TYPE}},
{"var a = 1; return { 1: a };",
- 5 * kPointerSize,
+ 6 * kPointerSize,
1,
- 29,
+ 33,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
B(Star), R(1), //
+ B(Mov), R(1), R(2), //
B(LdaSmi8), U8(1), //
- B(Star), R(2), //
- B(Ldar), R(0), //
B(Star), R(3), //
- B(LdaZero), //
+ B(Ldar), R(0), //
B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kSetProperty), R(1), U8(4), //
+ B(LdaZero), //
+ B(Star), R(5), //
+ B(CallRuntime), U16(Runtime::kSetProperty), R(2), U8(4), //
B(Ldar), R(1), //
B(Return), //
},
1,
{InstanceType::FIXED_ARRAY_TYPE}},
{"return { __proto__: null }",
- 2 * kPointerSize,
+ 3 * kPointerSize,
1,
- 17,
+ 21,
{
+ B(StackCheck), //
B(CreateObjectLiteral), U8(0), U8(0), U8(simple_flags), //
B(Star), R(0), //
- B(LdaNull), B(Star), R(1), //
- B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(0), U8(2), //
+ B(Mov), R(0), R(1), //
+ B(LdaNull), B(Star), R(2), //
+ B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(1), U8(2), //
B(Ldar), R(0), //
B(Return), //
},
1,
{InstanceType::FIXED_ARRAY_TYPE}},
{"var a = 'test'; return { [a]: 1 }",
- 5 * kPointerSize,
+ 7 * kPointerSize,
1,
- 30,
+ 37,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Star), R(0), //
B(CreateObjectLiteral), U8(1), U8(0), U8(simple_flags), //
B(Star), R(1), //
+ B(Mov), R(1), R(2), //
B(Ldar), R(0), //
B(ToName), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
B(Star), R(3), //
- B(LdaZero), //
+ B(LdaSmi8), U8(1), //
B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kDefineDataPropertyUnchecked), R(1), //
- U8(4), //
+ B(LdaZero), //
+ B(Star), R(5), //
+ B(LdaZero), //
+ B(Star), R(6), //
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), //
+ /* */ U8(5), //
B(Ldar), R(1), //
B(Return), //
},
@@ -3905,25 +4326,29 @@ TEST(ObjectLiterals) {
{InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::FIXED_ARRAY_TYPE}},
{"var a = 'test'; return { val: a, [a]: 1 }",
- 5 * kPointerSize,
+ 7 * kPointerSize,
1,
- 36,
+ 43,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Star), R(0), //
B(CreateObjectLiteral), U8(1), U8(0), U8(deep_elements_flags), //
B(Star), R(1), //
B(Ldar), R(0), //
B(StoreICSloppy), R(1), U8(2), U8(vector->GetIndex(slot1)), //
+ B(Mov), R(1), R(2), //
B(Ldar), R(0), //
B(ToName), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
B(Star), R(3), //
- B(LdaZero), //
+ B(LdaSmi8), U8(1), //
B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kDefineDataPropertyUnchecked), R(1), //
- U8(4), //
+ B(LdaZero), //
+ B(Star), R(5), //
+ B(LdaZero), //
+ B(Star), R(6), //
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), //
+ /* */ U8(5), //
B(Ldar), R(1), //
B(Return), //
},
@@ -3932,26 +4357,32 @@ TEST(ObjectLiterals) {
InstanceType::FIXED_ARRAY_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"var a = 'test'; return { [a]: 1, __proto__: {} }",
- 5 * kPointerSize,
+ 7 * kPointerSize,
1,
- 41,
+ 53,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Star), R(0), //
B(CreateObjectLiteral), U8(1), U8(1), U8(simple_flags), //
B(Star), R(1), //
+ B(Mov), R(1), R(2), //
B(Ldar), R(0), //
B(ToName), //
- B(Star), R(2), //
- B(LdaSmi8), U8(1), //
B(Star), R(3), //
- B(LdaZero), //
+ B(LdaSmi8), U8(1), //
B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kDefineDataPropertyUnchecked), R(1), //
- U8(4), //
+ B(LdaZero), //
+ B(Star), R(5), //
+ B(LdaZero), //
+ B(Star), R(6), //
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), //
+ /* */ U8(5), //
+ B(Mov), R(1), R(2), //
B(CreateObjectLiteral), U8(1), U8(0), U8(13), //
- B(Star), R(2), //
- B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(1), U8(2), //
+ B(Star), R(4), //
+ B(Star), R(3), //
+ B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(2), U8(2), //
B(Ldar), R(1), //
B(Return), //
},
@@ -3959,39 +4390,45 @@ TEST(ObjectLiterals) {
{InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::FIXED_ARRAY_TYPE}},
{"var n = 'name'; return { [n]: 'val', get a() { }, set a(b) {} };",
- 5 * kPointerSize,
+ 7 * kPointerSize,
1,
- 64,
+ 77,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Star), R(0), //
B(CreateObjectLiteral), U8(1), U8(0), U8(simple_flags), //
B(Star), R(1), //
+ B(Mov), R(1), R(2), //
B(Ldar), R(0), //
B(ToName), //
- B(Star), R(2), //
- B(LdaConstant), U8(2), //
B(Star), R(3), //
- B(LdaZero), //
+ B(LdaConstant), U8(2), //
B(Star), R(4), //
- B(CallRuntime), U16(Runtime::kDefineDataPropertyUnchecked), R(1), //
- U8(4), //
+ B(LdaZero), //
+ B(Star), R(5), //
+ B(LdaZero), //
+ B(Star), R(6), //
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(2), //
+ /* */ U8(5), //
+ B(Mov), R(1), R(2), //
B(LdaConstant), U8(3), //
- B(Star), R(2), //
- B(CreateClosure), U8(4), U8(0), //
B(Star), R(3), //
- B(LdaZero), //
+ B(CreateClosure), U8(4), U8(0), //
B(Star), R(4), //
+ B(LdaZero), //
+ B(Star), R(5), //
B(CallRuntime), U16(Runtime::kDefineGetterPropertyUnchecked), //
- R(1), U8(4), //
+ /* */ R(2), U8(4), //
+ B(Mov), R(1), R(2), //
B(LdaConstant), U8(3), //
- B(Star), R(2), //
- B(CreateClosure), U8(5), U8(0), //
B(Star), R(3), //
- B(LdaZero), //
+ B(CreateClosure), U8(5), U8(0), //
B(Star), R(4), //
+ B(LdaZero), //
+ B(Star), R(5), //
B(CallRuntime), U16(Runtime::kDefineSetterPropertyUnchecked), //
- R(1), U8(4), //
+ /* */ R(2), U8(4), //
B(Ldar), R(1), //
B(Return), //
},
@@ -4003,6 +4440,7 @@ TEST(ObjectLiterals) {
InstanceType::SHARED_FUNCTION_INFO_TYPE,
InstanceType::SHARED_FUNCTION_INFO_TYPE}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
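
Computed properties in object literals now go through Runtime::kDefineDataPropertyInLiteral with five operands where kDefineDataPropertyUnchecked took four; the extra trailing LdaZero/Star pair supplies the added operand. The stand-in below mirrors only the operand-count change; the parameter names, and what the fifth operand means, are guesses:

    // Stand-in mirroring only the widened argument list; the parameter names
    // and the purpose of |flags| are assumptions, not the runtime's contract.
    #include <cstdio>

    void DefineDataPropertyInLiteral(int literal_reg, int key_reg, int value_reg,
                                     int attributes, int flags /* new 5th arg */) {
      std::printf("define r%d[r%d] = r%d (attrs=%d, flags=%d)\n",
                  literal_reg, key_reg, value_reg, attributes, flags);
    }

    int main() {
      // mirrors: B(CallRuntime), U16(kDefineDataPropertyInLiteral), R(2), U8(5)
      DefineDataPropertyInLiteral(2, 3, 4, 0, 0);
    }
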
@@ -4021,24 +4459,28 @@ TEST(ObjectLiteralsWide) {
ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
int wide_idx = 0;
+ // clang-format off
ExpectedSnippet<InstanceType, 257> snippets[] = {
{"var a;" REPEAT_256(SPACE,
"a = 1.23;") "return { name: 'string', val: 9.2 };",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 1031,
+ 1034,
{
+ B(StackCheck), //
REPEAT_256(COMMA, //
- B(LdaConstant), U8(wide_idx++), //
- B(Star), R(0)), //
+ B(LdaConstant), U8(wide_idx++), //
+ B(Star), R(0)), //
B(CreateObjectLiteralWide), U16(256), U16(0), //
- U8(deep_elements_flags), //
+ /* */ U8(deep_elements_flags), //
+ B(Star), R(1), //
B(Return) //
},
257,
{REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
InstanceType::FIXED_ARRAY_TYPE}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -4055,17 +4497,19 @@ TEST(TopLevelObjectLiterals) {
int has_function_flags = ObjectLiteral::kFastElements |
ObjectLiteral::kHasFunction |
ObjectLiteral::kDisableMementos;
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"var a = { func: function() { } };",
5 * kPointerSize,
1,
- 48,
+ 49,
{
B(LdaConstant), U8(0), //
B(Star), R(1), //
B(LdaZero), //
B(Star), R(2), //
B(CallRuntime), U16(Runtime::kDeclareGlobals), R(1), U8(2), //
+ B(StackCheck), //
B(LdaConstant), U8(1), //
B(Star), R(1), //
B(LdaZero), //
@@ -4088,6 +4532,7 @@ TEST(TopLevelObjectLiterals) {
InstanceType::SHARED_FUNCTION_INFO_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -4101,18 +4546,91 @@ TEST(TryCatch) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
- // TODO(rmcilroy): modify tests when we have real try catch support.
- ExpectedSnippet<int> snippets[] = {
+ int closure = Register::function_closure().index();
+ int context = Register::current_context().index();
+
+ // clang-format off
+ ExpectedSnippet<const char*> snippets[] = {
{"try { return 1; } catch(e) { return 2; }",
- kPointerSize,
+ 5 * kPointerSize,
1,
- 3,
+ 40,
{
- B(LdaSmi8), U8(1), //
- B(Return), //
+ B(StackCheck), //
+ B(Mov), R(context), R(1), //
+ B(LdaSmi8), U8(1), //
+ B(Return), //
+ B(Star), R(3), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(2), //
+ B(Ldar), R(closure), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(2), U8(3), //
+ B(Star), R(1), //
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
+ /* */ R(0), U8(0), //
+ B(Ldar), R(1), //
+ B(PushContext), R(0), //
+ B(LdaSmi8), U8(2), //
+ B(PopContext), R(0), //
+ B(Return), //
+ // TODO(mstarzinger): Potential optimization, elide next bytes.
+ B(LdaUndefined), //
+ B(Return), //
},
- 0},
+ 1,
+ {"e"},
+ 1,
+ {{4, 7, 7}}},
+ {"var a; try { a = 1 } catch(e1) {}; try { a = 2 } catch(e2) { a = 3 }",
+ 6 * kPointerSize,
+ 1,
+ 81,
+ {
+ B(StackCheck), //
+ B(Mov), R(context), R(2), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(0), //
+ B(Jump), U8(30), //
+ B(Star), R(4), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(3), //
+ B(Ldar), R(closure), //
+ B(Star), R(5), //
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(3), U8(3), //
+ B(Star), R(2), //
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
+ /* */ R(0), U8(0), //
+ B(Ldar), R(2), //
+ B(PushContext), R(1), //
+ B(PopContext), R(1), //
+ B(Mov), R(context), R(2), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(0), //
+ B(Jump), U8(34), //
+ B(Star), R(4), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(3), //
+ B(Ldar), R(closure), //
+ B(Star), R(5), //
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(3), U8(3), //
+ B(Star), R(2), //
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
+ /* */ R(0), U8(0), //
+ B(Ldar), R(2), //
+ B(PushContext), R(1), //
+ B(LdaSmi8), U8(3), //
+ B(Star), R(0), //
+ B(PopContext), R(1), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 2,
+ {"e1", "e2"},
+ 2,
+ {{4, 8, 10}, {41, 45, 47}}},
};
+ // clang-format on
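The TryCatch expectations now carry real handler-table entries, the trailing {{4, 7, 7}} and {{4, 8, 10}, {41, 45, 47}} triples, each naming a try-region start, end, and handler offset in the bytecode. Dispatch is a range lookup on the faulting program counter. A minimal sketch of such a lookup (illustrative; whether the end offset is inclusive here is an assumption):

    #include <cstdio>

    struct HandlerEntry { int start; int end; int handler; };

    // Returns the handler offset whose [start, end) range covers pc, or -1.
    int LookupHandler(const HandlerEntry* table, int size, int pc) {
      for (int i = 0; i < size; ++i) {
        if (pc >= table[i].start && pc < table[i].end) return table[i].handler;
      }
      return -1;
    }

    int main() {
      HandlerEntry table[] = {{4, 7, 7}};                // from the test above
      std::printf("%d\n", LookupHandler(table, 1, 5));   // 7: inside the try
      std::printf("%d\n", LookupHandler(table, 1, 20));  // -1: unhandled
    }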
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -4126,39 +4644,172 @@ TEST(TryFinally) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
- // TODO(rmcilroy): modify tests when we have real try finally support.
- ExpectedSnippet<int> snippets[] = {
+ int closure = Register::function_closure().index();
+ int context = Register::current_context().index();
+
+ // clang-format off
+ ExpectedSnippet<const char*> snippets[] = {
{"var a = 1; try { a = 2; } finally { a = 3; }",
- kPointerSize,
+ 4 * kPointerSize,
1,
- 14,
+ 51,
{
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(LdaSmi8), U8(2), //
- B(Star), R(0), //
- B(LdaSmi8), U8(3), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(0), //
+ B(Mov), R(context), R(3), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(-1), //
+ B(Star), R(1), //
+ B(Jump), U8(7), //
+ B(Star), R(2), //
+ B(LdaZero), //
+ B(Star), R(1), //
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
+ /* */ R(0), U8(0), //
+ B(Star), R(3), //
+ B(LdaSmi8), U8(3), //
+ B(Star), R(0), //
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), //
+ /* */ R(3), U8(1), //
+ B(LdaZero), //
+ B(TestEqualStrict), R(1), //
+ B(JumpIfTrue), U8(4), //
+ B(Jump), U8(5), //
+ B(Ldar), R(2), //
+ B(ReThrow), //
+ B(LdaUndefined), //
+ B(Return), //
},
- 0},
+ 0,
+ {},
+ 1,
+ {{8, 12, 18}}},
{"var a = 1; try { a = 2; } catch(e) { a = 20 } finally { a = 3; }",
- 2 * kPointerSize,
+ 9 * kPointerSize,
1,
- 14,
+ 88,
{
- B(LdaSmi8), U8(1), //
- B(Star), R(0), //
- B(LdaSmi8), U8(2), //
- B(Star), R(0), //
- B(LdaSmi8), U8(3), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(0), //
+ B(Mov), R(context), R(4), //
+ B(Mov), R(context), R(5), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(0), //
+ B(Jump), U8(34), //
+ B(Star), R(7), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(6), //
+ B(Ldar), R(closure), //
+ B(Star), R(8), //
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(6), U8(3), //
+ B(Star), R(5), //
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
+ /* */ R(0), U8(0), //
+ B(Ldar), R(5), //
+ B(PushContext), R(1), //
+ B(LdaSmi8), U8(20), //
+ B(Star), R(0), //
+ B(PopContext), R(1), //
+ B(LdaSmi8), U8(-1), //
+ B(Star), R(2), //
+ B(Jump), U8(7), //
+ B(Star), R(3), //
+ B(LdaZero), //
+ B(Star), R(2), //
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
+ /* */ R(0), U8(0), //
+ B(Star), R(4), //
+ B(LdaSmi8), U8(3), //
+ B(Star), R(0), //
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), //
+ /* */ R(4), U8(1), //
+ B(LdaZero), //
+ B(TestEqualStrict), R(2), //
+ B(JumpIfTrue), U8(4), //
+ B(Jump), U8(5), //
+ B(Ldar), R(3), //
+ B(ReThrow), //
+ B(LdaUndefined), //
+ B(Return), //
},
- 0},
+ 1,
+ {"e"},
+ 2,
+ {{8, 49, 55}, {11, 15, 17}}},
+ {"var a; try {"
+ " try { a = 1 } catch(e) { a = 2 }"
+ "} catch(e) { a = 20 } finally { a = 3; }",
+ 10 * kPointerSize,
+ 1,
+ 121,
+ {
+ B(StackCheck), //
+ B(Mov), R(context), R(4), //
+ B(Mov), R(context), R(5), //
+ B(Mov), R(context), R(6), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(0), //
+ B(Jump), U8(34), //
+ B(Star), R(8), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(7), //
+ B(Ldar), R(closure), //
+ B(Star), R(9), //
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(7), U8(3), //
+ B(Star), R(6), //
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
+ /* */ R(0), U8(0), //
+ B(Ldar), R(6), //
+ B(PushContext), R(1), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(0), //
+ B(PopContext), R(1), //
+ B(Jump), U8(34), //
+ B(Star), R(7), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(6), //
+ B(Ldar), R(closure), //
+ B(Star), R(8), //
+ B(CallRuntime), U16(Runtime::kPushCatchContext), R(6), U8(3), //
+ B(Star), R(5), //
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
+ /* */ R(0), U8(0), //
+ B(Ldar), R(5), //
+ B(PushContext), R(1), //
+ B(LdaSmi8), U8(20), //
+ B(Star), R(0), //
+ B(PopContext), R(1), //
+ B(LdaSmi8), U8(-1), //
+ B(Star), R(2), //
+ B(Jump), U8(7), //
+ B(Star), R(3), //
+ B(LdaZero), //
+ B(Star), R(2), //
+ B(CallRuntime), U16(Runtime::kInterpreterClearPendingMessage), //
+ /* */ R(0), U8(0), //
+ B(Star), R(4), //
+ B(LdaSmi8), U8(3), //
+ B(Star), R(0), //
+ B(CallRuntime), U16(Runtime::kInterpreterSetPendingMessage), //
+ /* */ R(4), U8(1), //
+ B(LdaZero), //
+ B(TestEqualStrict), R(2), //
+ B(JumpIfTrue), U8(4), //
+ B(Jump), U8(5), //
+ B(Ldar), R(3), //
+ B(ReThrow), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 1,
+ {"e"},
+ 3,
+ {{4, 82, 88}, {7, 48, 50}, {10, 14, 16}}},
};
+ // clang-format on
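The try/finally expectations show the deferred-command pattern: a token register records how the try block was left (LdaSmi8 -1 for normal fall-through, LdaZero when the handler ran), the finally body executes unconditionally, and an epilogue tests the token to decide whether to ReThrow the stashed exception. A behavioral sketch of that dispatch (simplified; real code also threads returns and breaks through the same token):

    #include <cstdio>

    enum Token { kFallthrough = -1, kRethrow = 0 };

    int RunTryFinally(bool throws) {
      Token token = kFallthrough;       // B(LdaSmi8), U8(-1); B(Star), R(1)
      int pending = 0;
      if (throws) {                     // handler entry
        pending = 42;                   // B(Star), R(2): stash the exception
        token = kRethrow;               // B(LdaZero); B(Star), R(1)
      }
      std::puts("finally body runs");   // a = 3
      if (token == kRethrow) {          // B(TestEqualStrict); B(JumpIfTrue)
        std::printf("rethrow %d\n", pending);  // B(Ldar), R(2); B(ReThrow)
        return pending;
      }
      return 0;
    }

    int main() { RunTryFinally(false); RunTryFinally(true); }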
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -4172,13 +4823,14 @@ TEST(Throw) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
- // TODO(rmcilroy): modify tests when we have real try catch support.
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"throw 1;",
0,
1,
- 3,
+ 4,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Throw), //
},
@@ -4186,8 +4838,9 @@ TEST(Throw) {
{"throw 'Error';",
0,
1,
- 3,
+ 4,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Throw), //
},
@@ -4196,8 +4849,9 @@ TEST(Throw) {
{"var a = 1; if (a) { throw 'Error'; };",
1 * kPointerSize,
1,
- 11,
+ 12,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(JumpIfToBooleanFalse), U8(5), //
@@ -4209,6 +4863,7 @@ TEST(Throw) {
1,
{"Error"}},
};
+ // clang-format on
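Most hunks in this region grow each expected bytecode by exactly one: a B(StackCheck) is now emitted at every function entry (and, as later snippets show, at loop back edges), which accounts for the size bumps from 3 to 4 and so on. Conceptually a stack check compares the stack pointer against a limit and calls into the runtime on overflow. A portable approximation of the idea (illustrative only; the real check runs in generated code against an isolate-owned limit):

    #include <cstdint>
    #include <cstdio>

    static uintptr_t stack_limit;

    // Returns true while there is headroom; the stack grows downward, so
    // an address above the limit means we are still inside the budget.
    bool StackCheck() {
      int probe;
      return reinterpret_cast<uintptr_t>(&probe) > stack_limit;
    }

    int main() {
      int anchor;
      stack_limit = reinterpret_cast<uintptr_t>(&anchor) - (512 << 10);
      std::puts(StackCheck() ? "ok" : "overflow");
    }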
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -4231,15 +4886,17 @@ TEST(CallNew) {
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"function bar() { this.value = 0; }\n"
"function f() { return new bar(); }\n"
"f()",
1 * kPointerSize,
1,
- 10,
+ 11,
{
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
+ B(StackCheck), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot2)), //
B(Star), R(0), //
B(New), R(0), R(0), U8(0), //
B(Return), //
@@ -4251,12 +4908,14 @@ TEST(CallNew) {
"f()",
2 * kPointerSize,
1,
- 14,
+ 17,
{
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
+ B(StackCheck), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot2)), //
B(Star), R(0), //
B(LdaSmi8), U8(3), //
B(Star), R(1), //
+ B(Ldar), R(0), //
B(New), R(0), R(1), U8(1), //
B(Return), //
},
@@ -4272,9 +4931,10 @@ TEST(CallNew) {
"f()",
4 * kPointerSize,
1,
- 22,
+ 25,
{
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
+ B(StackCheck), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot2)), //
B(Star), R(0), //
B(LdaSmi8), U8(3), //
B(Star), R(1), //
@@ -4282,12 +4942,14 @@ TEST(CallNew) {
B(Star), R(2), //
B(LdaSmi8), U8(5), //
B(Star), R(3), //
+ B(Ldar), R(0), //
B(New), R(0), R(1), U8(3), //
B(Return), //
},
1,
{InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
};
+ // clang-format on
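Two changes are visible in the CallNew expectations: LdaGlobalSloppy becomes the unified LdaGlobal, and a B(Ldar), R(0) is inserted before each B(New), reloading the constructor into the accumulator. The natural reading is that New now takes new.target from the accumulator while the callee and argument span come from register operands, though that is inferred from the hunk rather than stated in it. A toy rendering of the operand flow:

    #include <cstdio>
    #include <vector>

    struct Frame {
      std::vector<int> regs = std::vector<int>(8, 0);
      int acc = 0;  // the interpreter accumulator
    };

    void DoNew(Frame& f, int callee_reg, int first_arg_reg, int count) {
      std::printf("construct r%d, args r%d..+%d, new.target from acc=%d\n",
                  callee_reg, first_arg_reg, count, f.acc);
    }

    int main() {
      Frame f;
      f.regs[0] = 7;      // B(LdaGlobal) ...; B(Star), R(0)
      f.regs[1] = 3;      // B(LdaSmi8), U8(3); B(Star), R(1)
      f.acc = f.regs[0];  // B(Ldar), R(0): the new instruction in this diff
      DoNew(f, 0, 1, 1);  // B(New), R(0), R(1), U8(1)
    }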
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -4309,6 +4971,7 @@ TEST(ContextVariables) {
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
int closure = Register::function_closure().index();
+ int context = Register::current_context().index();
int new_target = Register::new_target().index();
int first_context_slot = Context::MIN_CONTEXT_SLOTS;
@@ -4318,15 +4981,17 @@ TEST(ContextVariables) {
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS + 3 + 249 == 256);
int wide_slot = first_context_slot + 3;
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"var a; return function() { a = 1; };",
1 * kPointerSize,
1,
- 11,
+ 12,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- R(closure), U8(1), //
+ /* */ R(closure), U8(1), //
B(PushContext), R(0), //
+ B(StackCheck), //
B(CreateClosure), U8(0), U8(0), //
B(Return), //
},
@@ -4335,49 +5000,52 @@ TEST(ContextVariables) {
{"var a = 1; return function() { a = 2; };",
1 * kPointerSize,
1,
- 16,
+ 17,
{
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- R(closure), U8(1), //
- B(PushContext), R(0), //
- B(LdaSmi8), U8(1), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
- B(CreateClosure), U8(0), U8(0), //
- B(Return), //
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), //
+ /* */ R(closure), U8(1), //
+ B(PushContext), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(1), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Return), //
},
1,
{InstanceType::SHARED_FUNCTION_INFO_TYPE}},
{"var a = 1; var b = 2; return function() { a = 2; b = 3 };",
1 * kPointerSize,
1,
- 21,
+ 22,
{
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- R(closure), U8(1), //
- B(PushContext), R(0), //
- B(LdaSmi8), U8(1), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
- B(LdaSmi8), U8(2), //
- B(StaContextSlot), R(0), U8(first_context_slot + 1), //
- B(CreateClosure), U8(0), U8(0), //
- B(Return), //
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), //
+ /* */ R(closure), U8(1), //
+ B(PushContext), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(1), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
+ B(LdaSmi8), U8(2), //
+ B(StaContextSlot), R(context), U8(first_context_slot + 1), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Return), //
},
1,
{InstanceType::SHARED_FUNCTION_INFO_TYPE}},
{"var a; (function() { a = 2; })(); return a;",
3 * kPointerSize,
1,
- 24,
+ 25,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- R(closure), U8(1), //
+ /* */ R(closure), U8(1), //
B(PushContext), R(0), //
+ B(StackCheck), //
B(LdaUndefined), //
B(Star), R(2), //
B(CreateClosure), U8(0), U8(0), //
B(Star), R(1), //
- B(Call), R(1), R(2), U8(0), U8(vector->GetIndex(slot)), //
- B(LdaContextSlot), R(0), U8(first_context_slot), //
+ B(Call), R(1), R(2), U8(1), U8(vector->GetIndex(slot)), //
+ B(LdaContextSlot), R(context), U8(first_context_slot), //
B(Return), //
},
1,
@@ -4385,15 +5053,16 @@ TEST(ContextVariables) {
{"'use strict'; let a = 1; { let b = 2; return function() { a + b; }; }",
4 * kPointerSize,
1,
- 44,
+ 47,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- R(closure), U8(1), //
+ /* */ R(closure), U8(1), //
B(PushContext), R(0), //
B(LdaTheHole), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
B(LdaConstant), U8(0), //
B(Star), R(2), //
B(Ldar), R(closure), //
@@ -4401,10 +5070,11 @@ TEST(ContextVariables) {
B(CallRuntime), U16(Runtime::kPushBlockContext), R(2), U8(2), //
B(PushContext), R(1), //
B(LdaTheHole), //
- B(StaContextSlot), R(1), U8(first_context_slot), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
B(LdaSmi8), U8(2), //
- B(StaContextSlot), R(1), U8(first_context_slot), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
B(CreateClosure), U8(1), U8(0), //
+ B(PopContext), R(0), //
B(Return), //
},
2,
@@ -4417,33 +5087,35 @@ TEST(ContextVariables) {
"return b",
3 * kPointerSize,
1,
- 1041,
+ 1042,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
+ /* */ U8(1), //
B(PushContext), R(0), //
B(Ldar), THIS(1), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
B(CreateUnmappedArguments), //
- B(StaContextSlot), R(0), U8(first_context_slot + 1), //
+ B(StaContextSlot), R(context), U8(first_context_slot + 1), //
B(Ldar), R(new_target), //
- B(StaContextSlot), R(0), U8(first_context_slot + 2), //
+ B(StaContextSlot), R(context), U8(first_context_slot + 2), //
+ B(StackCheck), //
REPEAT_249(COMMA, //
B(LdaZero), //
- B(StaContextSlot), R(0), U8(wide_slot++)), //
+ B(StaContextSlot), R(context), U8(wide_slot++)), //
B(LdaUndefined), //
B(Star), R(2), //
- B(LdaGlobalStrict), U8(0), U8(1), //
+ B(LdaGlobal), U8(0), U8(1), //
B(Star), R(1), //
- B(Call), R(1), R(2), U8(0), U8(0), //
+ B(Call), R(1), R(2), U8(1), U8(0), //
B(LdaSmi8), U8(100), //
- B(StaContextSlotWide), R(0), U16(256), //
- B(LdaContextSlotWide), R(0), U16(256), //
+ B(StaContextSlotWide), R(context), U16(256), //
+ B(LdaContextSlotWide), R(context), U16(256), //
B(Return), //
},
1,
{InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
};
+ // clang-format on
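Throughout ContextVariables, StaContextSlot and LdaContextSlot now name R(context), the dedicated current-context register fetched at the top of the test, instead of the local register the pushed context happened to occupy. A context is conceptually a heap object whose first Context::MIN_CONTEXT_SLOTS slots are reserved for the header, so the first user variable lives at first_context_slot. A minimal model (illustrative, not V8's actual layout):

    #include <cstdio>
    #include <vector>

    struct Context {
      Context* previous;       // the Context::PREVIOUS_INDEX slot
      std::vector<int> slots;  // numbered variable slots
    };

    int main() {
      const int kMinContextSlots = 4;  // reserved header slots
      Context function_context{nullptr, std::vector<int>(8, 0)};
      int first_context_slot = kMinContextSlots;
      function_context.slots[first_context_slot] = 1;   // StaContextSlot
      std::printf("%d\n",
                  function_context.slots[first_context_slot]);  // Lda...
    }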
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -4458,74 +5130,81 @@ TEST(ContextParameters) {
BytecodeGeneratorHelper helper;
int closure = Register::function_closure().index();
+ int context = Register::current_context().index();
int first_context_slot = Context::MIN_CONTEXT_SLOTS;
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"function f(arg1) { return function() { arg1 = 2; }; }",
1 * kPointerSize,
2,
- 16,
+ 17,
{
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- R(closure), U8(1), //
- B(PushContext), R(0), //
- B(Ldar), R(helper.kLastParamIndex), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
- B(CreateClosure), U8(0), U8(0), //
- B(Return), //
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), //
+ /* */ R(closure), U8(1), //
+ B(PushContext), R(0), //
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
+ B(StackCheck), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Return), //
},
1,
{InstanceType::SHARED_FUNCTION_INFO_TYPE}},
{"function f(arg1) { var a = function() { arg1 = 2; }; return arg1; }",
2 * kPointerSize,
2,
- 21,
+ 22,
{
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- R(closure), U8(1), //
- B(PushContext), R(1), //
- B(Ldar), R(helper.kLastParamIndex), //
- B(StaContextSlot), R(1), U8(first_context_slot), //
- B(CreateClosure), U8(0), U8(0), //
- B(Star), R(0), //
- B(LdaContextSlot), R(1), U8(first_context_slot), //
- B(Return), //
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), //
+ /* */ R(closure), U8(1), //
+ B(PushContext), R(1), //
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
+ B(StackCheck), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(0), //
+ B(LdaContextSlot), R(context), U8(first_context_slot), //
+ B(Return), //
},
1,
{InstanceType::SHARED_FUNCTION_INFO_TYPE}},
{"function f(a1, a2, a3, a4) { return function() { a1 = a3; }; }",
1 * kPointerSize,
5,
- 21,
+ 22,
{
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- R(closure), U8(1), //
- B(PushContext), R(0), //
- B(Ldar), R(helper.kLastParamIndex - 3), //
- B(StaContextSlot), R(0), U8(first_context_slot + 1), //
- B(Ldar), R(helper.kLastParamIndex -1), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
- B(CreateClosure), U8(0), U8(0), //
- B(Return), //
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), //
+ /* */ R(closure), U8(1), //
+ B(PushContext), R(0), //
+ B(Ldar), R(helper.kLastParamIndex - 3), //
+ B(StaContextSlot), R(context), U8(first_context_slot + 1), //
+             B(Ldar), R(helper.kLastParamIndex - 1),                        //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
+ B(StackCheck), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Return), //
},
1,
{InstanceType::SHARED_FUNCTION_INFO_TYPE}},
{"function f() { var self = this; return function() { self = 2; }; }",
1 * kPointerSize,
1,
- 16,
+ 17,
{
- B(CallRuntime), U16(Runtime::kNewFunctionContext), //
- R(closure), U8(1), //
- B(PushContext), R(0), //
- B(Ldar), R(helper.kLastParamIndex), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
- B(CreateClosure), U8(0), U8(0), //
- B(Return), //
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), //
+ /* */ R(closure), U8(1), //
+ B(PushContext), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Return), //
},
1,
{InstanceType::SHARED_FUNCTION_INFO_TYPE}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -4539,9 +5218,10 @@ TEST(OuterContextVariables) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
- int context = Register::function_context().index();
+ int context = Register::current_context().index();
int first_context_slot = Context::MIN_CONTEXT_SLOTS;
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"function Outer() {"
" var outerVar = 1;"
@@ -4553,8 +5233,9 @@ TEST(OuterContextVariables) {
"var f = new Outer().getInnerFunc();",
2 * kPointerSize,
1,
- 20,
+ 21,
{
+ B(StackCheck), //
B(Ldar), R(context), //
B(Star), R(0), //
B(LdaContextSlot), R(0), U8(Context::PREVIOUS_INDEX), //
@@ -4575,8 +5256,9 @@ TEST(OuterContextVariables) {
"var f = new Outer().getInnerFunc();",
2 * kPointerSize,
1,
- 21,
+ 22,
{
+ B(StackCheck), //
B(LdaContextSlot), R(context), U8(first_context_slot), //
B(Star), R(0), //
B(Ldar), R(context), //
@@ -4589,6 +5271,7 @@ TEST(OuterContextVariables) {
B(Return), //
}},
};
+ // clang-format on
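The OuterContextVariables expectations read a variable from an enclosing function by hopping the context chain: LdaContextSlot R(0), U8(Context::PREVIOUS_INDEX) loads the parent context, and the slot read happens there. A pointer-chasing sketch of the same idea (hypothetical names):

    #include <cstdio>

    struct Context { Context* previous; int slot; };

    // Walk `depth` PREVIOUS_INDEX hops, then read the slot.
    int LoadAtDepth(Context* current, int depth) {
      while (depth-- > 0) current = current->previous;
      return current->slot;
    }

    int main() {
      Context outer{nullptr, 1};  // outerVar = 1 in Outer()
      Context inner{&outer, 0};   // the inner function's own context
      std::printf("%d\n", LoadAtDepth(&inner, 1));  // prints 1
    }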
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -4615,6 +5298,7 @@ TEST(CountOperators) {
i::NewTypeFeedbackVector(helper.isolate(), &store_feedback_spec);
int closure = Register::function_closure().index();
+ int context = Register::current_context().index();
int first_context_slot = Context::MIN_CONTEXT_SLOTS;
int object_literal_flags =
@@ -4622,12 +5306,14 @@ TEST(CountOperators) {
int array_literal_flags =
ArrayLiteral::kDisableMementos | ArrayLiteral::kShallowElements;
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"var a = 1; return ++a;",
1 * kPointerSize,
1,
- 9,
+ 10,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(ToNumber), //
@@ -4638,8 +5324,9 @@ TEST(CountOperators) {
{"var a = 1; return a++;",
2 * kPointerSize,
1,
- 13,
+ 14,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(ToNumber), //
@@ -4652,8 +5339,9 @@ TEST(CountOperators) {
{"var a = 1; return --a;",
1 * kPointerSize,
1,
- 9,
+ 10,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(ToNumber), //
@@ -4664,8 +5352,9 @@ TEST(CountOperators) {
{"var a = 1; return a--;",
2 * kPointerSize,
1,
- 13,
+ 14,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(ToNumber), //
@@ -4678,12 +5367,14 @@ TEST(CountOperators) {
{"var a = { val: 1 }; return a.val++;",
3 * kPointerSize,
1,
- 23,
+ 26,
{
+ B(StackCheck), //
B(CreateObjectLiteral), U8(0), U8(0), U8(object_literal_flags), //
+ B(Star), R(1), //
B(Star), R(0), //
B(Star), R(1), //
- B(LoadICSloppy), R(1), U8(1), U8(vector->GetIndex(slot1)), //
+ B(LoadIC), R(1), U8(1), U8(vector->GetIndex(slot1)), //
B(ToNumber), //
B(Star), R(2), //
B(Inc), //
@@ -4697,12 +5388,14 @@ TEST(CountOperators) {
{"var a = { val: 1 }; return --a.val;",
2 * kPointerSize,
1,
- 19,
+ 22,
{
+ B(StackCheck), //
B(CreateObjectLiteral), U8(0), U8(0), U8(object_literal_flags), //
+ B(Star), R(1), //
B(Star), R(0), //
B(Star), R(1), //
- B(LoadICSloppy), R(1), U8(1), U8(vector->GetIndex(slot1)), //
+ B(LoadIC), R(1), U8(1), U8(vector->GetIndex(slot1)), //
B(ToNumber), //
B(Dec), //
B(StoreICSloppy), R(1), U8(1), U8(vector->GetIndex(slot2)), //
@@ -4714,16 +5407,18 @@ TEST(CountOperators) {
{"var name = 'var'; var a = { val: 1 }; return a[name]--;",
5 * kPointerSize,
1,
- 30,
+ 33,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Star), R(0), //
B(CreateObjectLiteral), U8(1), U8(0), U8(object_literal_flags), //
+ B(Star), R(2), //
B(Star), R(1), //
B(Star), R(2), //
B(Ldar), R(0), //
B(Star), R(3), //
- B(KeyedLoadICSloppy), R(2), U8(vector->GetIndex(slot1)), //
+ B(KeyedLoadIC), R(2), U8(vector->GetIndex(slot1)), //
B(ToNumber), //
B(Star), R(4), //
B(Dec), //
@@ -4737,16 +5432,18 @@ TEST(CountOperators) {
{"var name = 'var'; var a = { val: 1 }; return ++a[name];",
4 * kPointerSize,
1,
- 26,
+ 29,
{
+ B(StackCheck), //
B(LdaConstant), U8(0), //
B(Star), R(0), //
B(CreateObjectLiteral), U8(1), U8(0), U8(object_literal_flags), //
+ B(Star), R(2), //
B(Star), R(1), //
B(Star), R(2), //
B(Ldar), R(0), //
B(Star), R(3), //
- B(KeyedLoadICSloppy), R(2), U8(vector->GetIndex(slot1)), //
+ B(KeyedLoadIC), R(2), U8(vector->GetIndex(slot1)), //
B(ToNumber), //
B(Inc), //
B(KeyedStoreICSloppy), R(2), R(3), U8(vector->GetIndex(slot2)), //
@@ -4758,19 +5455,20 @@ TEST(CountOperators) {
{"var a = 1; var b = function() { return a }; return ++a;",
2 * kPointerSize,
1,
- 26,
+ 27,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
+ /* */ U8(1), //
B(PushContext), R(1), //
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
- B(StaContextSlot), R(1), U8(first_context_slot), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
B(CreateClosure), U8(0), U8(0), //
B(Star), R(0), //
- B(LdaContextSlot), R(1), U8(first_context_slot), //
+ B(LdaContextSlot), R(context), U8(first_context_slot), //
B(ToNumber), //
B(Inc), //
- B(StaContextSlot), R(1), U8(first_context_slot), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
B(Return), //
},
1,
@@ -4778,20 +5476,21 @@ TEST(CountOperators) {
{"var a = 1; var b = function() { return a }; return a--;",
3 * kPointerSize,
1,
- 30,
+ 31,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
+ /* */ U8(1), //
B(PushContext), R(1), //
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
- B(StaContextSlot), R(1), U8(first_context_slot), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
B(CreateClosure), U8(0), U8(0), //
B(Star), R(0), //
- B(LdaContextSlot), R(1), U8(first_context_slot), //
+ B(LdaContextSlot), R(context), U8(first_context_slot), //
B(ToNumber), //
B(Star), R(2), //
B(Dec), //
- B(StaContextSlot), R(1), U8(first_context_slot), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
B(Ldar), R(2), //
B(Return), //
},
@@ -4800,8 +5499,9 @@ TEST(CountOperators) {
{"var idx = 1; var a = [1, 2]; return a[idx++] = 2;",
4 * kPointerSize,
1,
- 27,
+ 28,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(CreateArrayLiteral), U8(0), U8(0), U8(array_literal_flags), //
@@ -4814,12 +5514,13 @@ TEST(CountOperators) {
B(Star), R(0), //
B(LdaSmi8), U8(2), //
B(KeyedStoreICSloppy), R(2), R(3), //
- U8(store_vector->GetIndex(store_slot)), //
+ /* */ U8(store_vector->GetIndex(store_slot)), //
B(Return), //
},
1,
{InstanceType::FIXED_ARRAY_TYPE}},
};
+ // clang-format on
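The CountOperators expectations make the prefix/postfix asymmetry easy to see: both forms do ToNumber, Inc or Dec, and a store back, but the postfix variants stash the pre-increment value in an extra temporary and return that, which is why "return a++" needs one more register and four more bytes than "return ++a". In plain C++ terms:

    #include <cstdio>

    int main() {
      double a = 1;
      // postfix a++: B(ToNumber); B(Star), R(temp); B(Inc); B(Star), R(a)
      double old_value = a;  // the temporary the bytecode allocates
      a = old_value + 1;
      std::printf("a++ yields %g, a is now %g\n", old_value, a);

      // prefix ++a: B(ToNumber); B(Inc); B(Star), R(a); no temporary
      a = a + 1;
      std::printf("++a yields %g\n", a);
    }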
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -4841,13 +5542,15 @@ TEST(GlobalCountOperators) {
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"var global = 1;\nfunction f() { return ++global; }\nf()",
0,
1,
- 9,
+ 10,
{
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot1)), //
+ B(StackCheck), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot1)), //
B(ToNumber), //
B(Inc), //
B(StaGlobalSloppy), U8(0), U8(vector->GetIndex(slot2)), //
@@ -4858,9 +5561,10 @@ TEST(GlobalCountOperators) {
{"var global = 1;\nfunction f() { return global--; }\nf()",
1 * kPointerSize,
1,
- 13,
+ 14,
{
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot1)), //
+ B(StackCheck), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot1)), //
B(ToNumber), //
B(Star), R(0), //
B(Dec), //
@@ -4874,9 +5578,10 @@ TEST(GlobalCountOperators) {
"f()",
0,
1,
- 9,
+ 10,
{
- B(LdaGlobalStrict), U8(0), U8(vector->GetIndex(slot1)), //
+ B(StackCheck), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot1)), //
B(ToNumber), //
B(Dec), //
B(StaGlobalStrict), U8(0), U8(vector->GetIndex(slot2)), //
@@ -4887,9 +5592,10 @@ TEST(GlobalCountOperators) {
{"unallocated = 1;\nfunction f() { return unallocated++; }\nf()",
1 * kPointerSize,
1,
- 13,
+ 14,
{
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot1)), //
+ B(StackCheck), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot1)), //
B(ToNumber), //
B(Star), R(0), //
B(Inc), //
@@ -4900,6 +5606,7 @@ TEST(GlobalCountOperators) {
1,
{"unallocated"}},
};
+ // clang-format on
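Here and elsewhere in the diff, LdaGlobalSloppy and LdaGlobalStrict collapse into a single LdaGlobal while the stores keep their StaGlobalSloppy/StaGlobalStrict split; the plausible rationale is that reading a global is mode-independent, whereas a store to a missing global must throw only in strict mode. A sketch of that asymmetry (hypothetical API, not V8's):

    #include <cstdio>

    enum class LanguageMode { kSloppy, kStrict };

    int LoadGlobal(const int* slot) { return *slot; }  // one opcode suffices

    // Returns false to model the strict-mode ReferenceError on a store
    // to a variable that was never declared.
    bool StoreGlobal(int* slot, int value, bool exists, LanguageMode mode) {
      if (!exists && mode == LanguageMode::kStrict) return false;
      *slot = value;
      return true;
    }

    int main() {
      int global = 1;
      std::printf("%d\n", LoadGlobal(&global));
      StoreGlobal(&global, 2, true, LanguageMode::kSloppy);
      std::printf("%d\n", global);
    }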
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -4915,6 +5622,7 @@ TEST(CompoundExpressions) {
Zone zone;
int closure = Register::function_closure().index();
+ int context = Register::current_context().index();
int first_context_slot = Context::MIN_CONTEXT_SLOTS;
FeedbackVectorSpec feedback_spec(&zone);
@@ -4926,12 +5634,15 @@ TEST(CompoundExpressions) {
int object_literal_flags =
ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
+
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"var a = 1; a += 2;",
2 * kPointerSize,
1,
- 14,
+ 15,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(Star), R(1), //
@@ -4944,8 +5655,9 @@ TEST(CompoundExpressions) {
{"var a = 1; a /= 2;",
2 * kPointerSize,
1,
- 14,
+ 15,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(Star), R(1), //
@@ -4958,12 +5670,14 @@ TEST(CompoundExpressions) {
{"var a = { val: 2 }; a.name *= 2;",
3 * kPointerSize,
1,
- 24,
+ 27,
{
+ B(StackCheck), //
B(CreateObjectLiteral), U8(0), U8(0), U8(object_literal_flags), //
+ B(Star), R(1), //
B(Star), R(0), //
B(Star), R(1), //
- B(LoadICSloppy), R(1), U8(1), U8(vector->GetIndex(slot1)), //
+ B(LoadIC), R(1), U8(1), U8(vector->GetIndex(slot1)), //
B(Star), R(2), //
B(LdaSmi8), U8(2), //
B(Mul), R(2), //
@@ -4977,14 +5691,16 @@ TEST(CompoundExpressions) {
{"var a = { 1: 2 }; a[1] ^= 2;",
4 * kPointerSize,
1,
- 27,
+ 30,
{
+ B(StackCheck), //
B(CreateObjectLiteral), U8(0), U8(0), U8(object_literal_flags), //
+ B(Star), R(1), //
B(Star), R(0), //
B(Star), R(1), //
B(LdaSmi8), U8(1), //
B(Star), R(2), //
- B(KeyedLoadICSloppy), R(1), U8(vector->GetIndex(slot1)), //
+ B(KeyedLoadIC), R(1), U8(vector->GetIndex(slot1)), //
B(Star), R(3), //
B(LdaSmi8), U8(2), //
B(BitwiseXor), R(3), //
@@ -4997,25 +5713,27 @@ TEST(CompoundExpressions) {
{"var a = 1; (function f() { return a; }); a |= 24;",
2 * kPointerSize,
1,
- 29,
+ 30,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
+ /* */ U8(1), //
B(PushContext), R(0), //
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
B(CreateClosure), U8(0), U8(0), //
- B(LdaContextSlot), R(0), U8(first_context_slot), //
+ B(LdaContextSlot), R(context), U8(first_context_slot), //
B(Star), R(1), //
B(LdaSmi8), U8(24), //
B(BitwiseOr), R(1), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
B(LdaUndefined), //
B(Return), //
},
1,
{InstanceType::SHARED_FUNCTION_INFO_TYPE}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -5037,13 +5755,15 @@ TEST(GlobalCompoundExpressions) {
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"var global = 1;\nfunction f() { return global &= 1; }\nf()",
1 * kPointerSize,
1,
- 13,
+ 14,
{
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot1)), //
+ B(StackCheck), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot1)), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(BitwiseAnd), R(0), //
@@ -5055,9 +5775,10 @@ TEST(GlobalCompoundExpressions) {
{"unallocated = 1;\nfunction f() { return unallocated += 1; }\nf()",
1 * kPointerSize,
1,
- 13,
+ 14,
{
- B(LdaGlobalSloppy), U8(0), U8(vector->GetIndex(slot1)), //
+ B(StackCheck), //
+ B(LdaGlobal), U8(0), U8(vector->GetIndex(slot1)), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(Add), R(0), //
@@ -5067,6 +5788,7 @@ TEST(GlobalCompoundExpressions) {
1,
{"unallocated"}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -5082,6 +5804,7 @@ TEST(CreateArguments) {
Zone zone;
int closure = Register::function_closure().index();
+ int context = Register::current_context().index();
int first_context_slot = Context::MIN_CONTEXT_SLOTS;
FeedbackVectorSpec feedback_spec(&zone);
@@ -5090,82 +5813,96 @@ TEST(CreateArguments) {
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"function f() { return arguments; }",
1 * kPointerSize,
1,
- 4,
+ 7,
{
B(CreateMappedArguments), //
B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
B(Return), //
}},
{"function f() { return arguments[0]; }",
2 * kPointerSize,
1,
- 10,
+ 13,
{
B(CreateMappedArguments), //
B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
B(Star), R(1), //
B(LdaZero), //
- B(KeyedLoadICSloppy), R(1), U8(vector->GetIndex(slot)), //
+ B(KeyedLoadIC), R(1), U8(vector->GetIndex(slot)), //
B(Return), //
}},
{"function f() { 'use strict'; return arguments; }",
1 * kPointerSize,
1,
- 4,
+ 7,
{
B(CreateUnmappedArguments), //
B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
B(Return), //
}},
{"function f(a) { return arguments[0]; }",
3 * kPointerSize,
2,
- 22,
+ 25,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
+ /* */ U8(1), //
B(PushContext), R(1), //
B(Ldar), R(BytecodeGeneratorHelper::kLastParamIndex), //
- B(StaContextSlot), R(1), U8(first_context_slot), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
B(CreateMappedArguments), //
B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
B(Star), R(2), //
B(LdaZero), //
- B(KeyedLoadICSloppy), R(2), U8(vector->GetIndex(slot)), //
+ B(KeyedLoadIC), R(2), U8(vector->GetIndex(slot)), //
B(Return), //
}},
{"function f(a, b, c) { return arguments; }",
2 * kPointerSize,
4,
- 26,
+ 29,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
+ /* */ U8(1), //
B(PushContext), R(1), //
B(Ldar), R(BytecodeGeneratorHelper::kLastParamIndex - 2), //
- B(StaContextSlot), R(1), U8(first_context_slot + 2), //
+ B(StaContextSlot), R(context), U8(first_context_slot + 2), //
B(Ldar), R(BytecodeGeneratorHelper::kLastParamIndex - 1), //
- B(StaContextSlot), R(1), U8(first_context_slot + 1), //
+ B(StaContextSlot), R(context), U8(first_context_slot + 1), //
B(Ldar), R(BytecodeGeneratorHelper::kLastParamIndex), //
- B(StaContextSlot), R(1), U8(first_context_slot), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
B(CreateMappedArguments), //
B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
B(Return), //
}},
{"function f(a, b, c) { 'use strict'; return arguments; }",
1 * kPointerSize,
4,
- 4,
+ 7,
{
B(CreateUnmappedArguments), //
B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
B(Return), //
}},
};
+ // clang-format on
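CreateArguments covers the two flavors the generator emits: CreateMappedArguments for sloppy functions, where the object aliases the formal parameters, and CreateUnmappedArguments under 'use strict', a plain snapshot. The added B(Ldar), R(0) after the StackCheck simply reloads the already-materialized object. The unmapped flavor is the easy one to model:

    #include <cstdio>
    #include <vector>

    // Strict-mode arguments: a one-time copy, no aliasing with parameters.
    std::vector<int> CreateUnmappedArguments(const int* argv, int argc) {
      return std::vector<int>(argv, argv + argc);
    }

    int main() {
      int argv[] = {10, 20, 30};
      std::vector<int> args = CreateUnmappedArguments(argv, 3);
      argv[0] = 99;                  // does not affect the snapshot
      std::printf("%d\n", args[0]);  // prints 10
    }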
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -5174,14 +5911,119 @@ TEST(CreateArguments) {
}
}
+TEST(CreateRestParameter) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+ Zone zone;
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot = feedback_spec.AddKeyedLoadICSlot();
+ FeedbackVectorSlot slot1 = feedback_spec.AddKeyedLoadICSlot();
+
+ Handle<i::TypeFeedbackVector> vector =
+ i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+
+ // clang-format off
+ ExpectedSnippet<int> snippets[] = {
+ {"function f(...restArgs) { return restArgs; }",
+ 1 * kPointerSize,
+ 1,
+ 7,
+ {
+ B(CreateRestParameter), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
+ B(Return), //
+ },
+ 0,
+ {}},
+ {"function f(a, ...restArgs) { return restArgs; }",
+ 2 * kPointerSize,
+ 2,
+ 14,
+ {
+ B(CreateRestParameter), //
+ B(Star), R(0), //
+ B(LdaTheHole), //
+ B(Star), R(1), //
+ B(StackCheck), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Return), //
+ },
+ 0,
+ {}},
+ {"function f(a, ...restArgs) { return restArgs[0]; }",
+ 3 * kPointerSize,
+ 2,
+ 20,
+ {
+ B(CreateRestParameter), //
+ B(Star), R(0), //
+ B(LdaTheHole), //
+ B(Star), R(1), //
+ B(StackCheck), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(LdaZero), //
+ B(KeyedLoadIC), R(2), U8(vector->GetIndex(slot)), //
+ B(Return), //
+ },
+ 0,
+ {}},
+ {"function f(a, ...restArgs) { return restArgs[0] + arguments[0]; }",
+ 5 * kPointerSize,
+ 2,
+ 35,
+ {
+ B(CreateUnmappedArguments), //
+ B(Star), R(0), //
+ B(CreateRestParameter), //
+ B(Star), R(1), //
+ B(LdaTheHole), //
+ B(Star), R(2), //
+ B(StackCheck), //
+ B(Ldar), A(1, 2), //
+ B(Star), R(2), //
+ B(Ldar), R(1), //
+ B(Star), R(3), //
+ B(LdaZero), //
+ B(KeyedLoadIC), R(3), U8(vector->GetIndex(slot)), //
+ B(Star), R(4), //
+ B(Ldar), R(0), //
+ B(Star), R(3), //
+ B(LdaZero), //
+ B(KeyedLoadIC), R(3), U8(vector->GetIndex(slot1)), //
+ B(Add), R(4), //
+ B(Return), //
+ },
+ 0,
+ {}},
+ };
+ // clang-format on
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunction(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
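The new CreateRestParameter test pins down the expected semantics: the bytecode materializes the arguments beyond the declared formals into a fresh array, so for f(a, ...restArgs) the rest array starts at the second actual argument. A direct translation:

    #include <cstdio>
    #include <vector>

    // Arguments past `formal_count` become the rest array.
    std::vector<int> CreateRestParameter(const int* argv, int argc,
                                         int formal_count) {
      if (argc <= formal_count) return {};
      return std::vector<int>(argv + formal_count, argv + argc);
    }

    int main() {
      int argv[] = {1, 2, 3};  // f(1, 2, 3) with f(a, ...restArgs)
      std::vector<int> rest = CreateRestParameter(argv, 3, 1);
      std::printf("size=%zu first=%d\n", rest.size(), rest[0]);  // 2, 2
    }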
TEST(IllegalRedeclaration) {
+ bool old_legacy_const_flag = FLAG_legacy_const;
+ FLAG_legacy_const = true;
+
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
CHECK_GE(MessageTemplate::kVarRedeclaration, 128);
// Must adapt bytecode if this changes.
+ // clang-format off
ExpectedSnippet<Handle<Object>, 2> snippets[] = {
{"const a = 1; { var a = 2; }",
3 * kPointerSize,
@@ -5199,12 +6041,15 @@ TEST(IllegalRedeclaration) {
{helper.factory()->NewNumberFromInt(MessageTemplate::kVarRedeclaration),
helper.factory()->NewStringFromAsciiChecked("a")}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
CheckBytecodeArrayEqual(snippets[i], bytecode_array);
}
+
+ FLAG_legacy_const = old_legacy_const_flag;
}
@@ -5226,53 +6071,69 @@ TEST(ForIn) {
Handle<i::TypeFeedbackVector> vector =
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"for (var p in null) {}",
2 * kPointerSize,
1,
- 2,
- {B(LdaUndefined), B(Return)},
+ 3,
+ {
+ B(StackCheck), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
0},
{"for (var p in undefined) {}",
2 * kPointerSize,
1,
- 2,
- {B(LdaUndefined), B(Return)},
+ 3,
+ {
+ B(StackCheck), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
0},
{"for (var p in undefined) {}",
2 * kPointerSize,
1,
- 2,
- {B(LdaUndefined), B(Return)},
+ 3,
+ {
+ B(StackCheck), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
0},
{"var x = 'potatoes';\n"
"for (var p in x) { return p; }",
8 * kPointerSize,
1,
- 45,
+ 46,
{
- B(LdaConstant), U8(0), //
- B(Star), R(1), //
- B(JumpIfUndefined), U8(39), //
- B(JumpIfNull), U8(37), //
- B(ToObject), //
- B(JumpIfNull), U8(34), //
- B(Star), R(3), //
- B(ForInPrepare), R(4), R(5), R(6), //
- B(LdaZero), //
- B(Star), R(7), //
- B(ForInDone), R(7), R(6), //
- B(JumpIfTrue), U8(20), //
- B(ForInNext), R(3), R(4), R(5), R(7), //
- B(JumpIfUndefined), U8(7), //
- B(Star), R(0), //
- B(Star), R(2), //
- B(Return), //
- B(ForInStep), R(7), //
- B(Star), R(7), //
- B(Jump), U8(-21), //
- B(LdaUndefined), //
- B(Return), //
+ B(StackCheck), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(1), //
+ B(JumpIfUndefined), U8(39), //
+ B(JumpIfNull), U8(37), //
+ B(ToObject), //
+ B(JumpIfNull), U8(34), //
+ B(Star), R(3), //
+ B(ForInPrepare), R(4), //
+ B(LdaZero), //
+ B(Star), R(7), //
+ B(ForInDone), R(7), R(6), //
+ B(JumpIfTrue), U8(22), //
+ B(ForInNext), R(3), R(7), R(4), //
+ B(JumpIfUndefined), U8(10), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(Return), //
+ B(ForInStep), R(7), //
+ B(Star), R(7), //
+ B(Jump), U8(-23), //
+ B(LdaUndefined), //
+ B(Return), //
},
1,
{InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
@@ -5280,8 +6141,9 @@ TEST(ForIn) {
"for (var p in [1,2,3]) { x += p; }",
9 * kPointerSize,
1,
- 57,
+ 58,
{
+ B(StackCheck), //
B(LdaZero), //
B(Star), R(1), //
B(CreateArrayLiteral), U8(0), U8(0), U8(3), //
@@ -5290,14 +6152,16 @@ TEST(ForIn) {
B(ToObject), //
B(JumpIfNull), U8(43), //
B(Star), R(3), //
- B(ForInPrepare), R(4), R(5), R(6), //
+ B(ForInPrepare), R(4), //
B(LdaZero), //
B(Star), R(7), //
B(ForInDone), R(7), R(6), //
- B(JumpIfTrue), U8(29), //
- B(ForInNext), R(3), R(4), R(5), R(7), //
- B(JumpIfUndefined), U8(16), //
+ B(JumpIfTrue), U8(31), //
+ B(ForInNext), R(3), R(7), R(4), //
+ B(JumpIfUndefined), U8(19), //
B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
B(Star), R(2), //
B(Ldar), R(1), //
B(Star), R(8), //
@@ -5306,7 +6170,7 @@ TEST(ForIn) {
B(Star), R(1), //
B(ForInStep), R(7), //
B(Star), R(7), //
- B(Jump), U8(-30), //
+ B(Jump), U8(-32), //
B(LdaUndefined), //
B(Return), //
},
@@ -5319,31 +6183,34 @@ TEST(ForIn) {
"}",
8 * kPointerSize,
1,
- 94,
+ 95,
{
+ B(StackCheck), //
B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(1), //
B(Star), R(0), //
B(CreateArrayLiteral), U8(1), U8(1), U8(simple_flags), //
- B(JumpIfUndefined), U8(82), //
- B(JumpIfNull), U8(80), //
+ B(JumpIfUndefined), U8(80), //
+ B(JumpIfNull), U8(78), //
B(ToObject), //
- B(JumpIfNull), U8(77), //
+ B(JumpIfNull), U8(75), //
B(Star), R(1), //
- B(ForInPrepare), R(2), R(3), R(4), //
+ B(ForInPrepare), R(2), //
B(LdaZero), //
B(Star), R(5), //
B(ForInDone), R(5), R(4), //
B(JumpIfTrue), U8(63), //
- B(ForInNext), R(1), R(2), R(3), R(5), //
- B(JumpIfUndefined), U8(50), //
+ B(ForInNext), R(1), R(5), R(2), //
+ B(JumpIfUndefined), U8(51), //
B(Star), R(6), //
B(Ldar), R(0), //
B(Star), R(7), //
B(Ldar), R(6), //
B(StoreICSloppy), R(7), U8(2), U8(vector->GetIndex(slot4)), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(Star), R(6), //
- B(LoadICSloppy), R(6), U8(2), U8(vector->GetIndex(slot2)), //
+ B(LoadIC), R(6), U8(2), U8(vector->GetIndex(slot2)), //
B(Star), R(7), //
B(LdaSmi8), U8(10), //
B(TestEqual), R(7), //
@@ -5351,7 +6218,7 @@ TEST(ForIn) {
B(Jump), U8(20), //
B(Ldar), R(0), //
B(Star), R(6), //
- B(LoadICSloppy), R(6), U8(2), U8(vector->GetIndex(slot3)), //
+ B(LoadIC), R(6), U8(2), U8(vector->GetIndex(slot3)), //
B(Star), R(7), //
B(LdaSmi8), U8(20), //
B(TestEqual), R(7), //
@@ -5370,23 +6237,24 @@ TEST(ForIn) {
"for (x[0] in [1,2,3]) { return x[3]; }",
9 * kPointerSize,
1,
- 71,
+ 70,
{
+ B(StackCheck), //
B(CreateArrayLiteral), U8(0), U8(0), U8(simple_flags), //
B(Star), R(0), //
B(CreateArrayLiteral), U8(1), U8(1), U8(simple_flags), //
- B(JumpIfUndefined), U8(59), //
- B(JumpIfNull), U8(57), //
+ B(JumpIfUndefined), U8(57), //
+ B(JumpIfNull), U8(55), //
B(ToObject), //
- B(JumpIfNull), U8(54), //
+ B(JumpIfNull), U8(52), //
B(Star), R(1), //
- B(ForInPrepare), R(2), R(3), R(4), //
+ B(ForInPrepare), R(2), //
B(LdaZero), //
B(Star), R(5), //
B(ForInDone), R(5), R(4), //
B(JumpIfTrue), U8(40), //
- B(ForInNext), R(1), R(2), R(3), R(5), //
- B(JumpIfUndefined), U8(27), //
+ B(ForInNext), R(1), R(5), R(2), //
+ B(JumpIfUndefined), U8(28), //
B(Star), R(6), //
B(Ldar), R(0), //
B(Star), R(7), //
@@ -5394,10 +6262,11 @@ TEST(ForIn) {
B(Star), R(8), //
B(Ldar), R(6), //
B(KeyedStoreICSloppy), R(7), R(8), U8(vector->GetIndex(slot3)), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(Star), R(6), //
B(LdaSmi8), U8(3), //
- B(KeyedLoadICSloppy), R(6), U8(vector->GetIndex(slot2)), //
+ B(KeyedLoadIC), R(6), U8(vector->GetIndex(slot2)), //
B(Return), //
B(ForInStep), R(5), //
B(Star), R(5), //
@@ -5408,6 +6277,259 @@ TEST(ForIn) {
2,
{InstanceType::FIXED_ARRAY_TYPE, InstanceType::FIXED_ARRAY_TYPE}},
};
+ // clang-format on
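Two interpreter changes drive most of the ForIn churn: ForInPrepare and ForInNext now address the (cache type, cache array, cache length) state as a single register triplet, so ForInPrepare R(4) replaces ForInPrepare R(4), R(5), R(6), and a StackCheck lands on the loop back edge, shifting every jump offset. The loop protocol itself is unchanged and maps onto an index walk over the enumerated keys:

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      // ForInPrepare: snapshot the enumerable keys into a cache.
      std::vector<std::string> keys = {"name", "val"};
      for (std::size_t index = 0;
           index < keys.size();               // B(ForInDone) test
           ++index) {                         // B(ForInStep)
        const std::string& p = keys[index];   // B(ForInNext)
        std::printf("%s\n", p.c_str());       // loop body (after StackCheck)
      }
    }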
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+// TODO(rmcilroy): Do something about this; new bytecode is too large
+// (150+ instructions) to adapt manually.
+DISABLED_TEST(ForOf) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+ Zone zone;
+
+ int array_literal_flags =
+ ArrayLiteral::kDisableMementos | ArrayLiteral::kShallowElements;
+ int object_literal_flags =
+ ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
+
+ FeedbackVectorSpec feedback_spec(&zone);
+ FeedbackVectorSlot slot1 = feedback_spec.AddCallICSlot();
+ FeedbackVectorSlot slot2 = feedback_spec.AddKeyedLoadICSlot();
+ FeedbackVectorSlot slot3 = feedback_spec.AddCallICSlot();
+ FeedbackVectorSlot slot4 = feedback_spec.AddLoadICSlot();
+ FeedbackVectorSlot slot5 = feedback_spec.AddLoadICSlot();
+ FeedbackVectorSlot slot6 = feedback_spec.AddLoadICSlot();
+ FeedbackVectorSlot slot7 = feedback_spec.AddStoreICSlot();
+ FeedbackVectorSlot slot8 = feedback_spec.AddLoadICSlot();
+ Handle<i::TypeFeedbackVector> vector =
+ i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
+
+ // clang-format off
+ ExpectedSnippet<InstanceType, 8> snippets[] = {
+ {"for (var p of [0, 1, 2]) {}",
+ 7 * kPointerSize,
+ 1,
+ 86,
+ {
+ B(StackCheck), //
+ B(CreateArrayLiteral), U8(0), U8(0), U8(array_literal_flags), //
+ B(Star), R(5), //
+ B(LdaConstant), U8(1), //
+ B(KeyedLoadIC), R(5), U8(vector->GetIndex(slot2)), //
+ B(Star), R(4), //
+ B(Call), R(4), R(5), U8(1), U8(vector->GetIndex(slot1)), //
+ B(Star), R(1), //
+ B(Ldar), R(1), //
+ B(Star), R(6), //
+ B(LoadIC), R(6), U8(2), U8(vector->GetIndex(slot4)), //
+ B(Star), R(5), //
+ B(Call), R(5), R(6), U8(1), U8(vector->GetIndex(slot3)), //
+ B(Star), R(2), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kInlineIsJSReceiver), R(4), U8(1), //
+ B(LogicalNot), //
+ B(JumpIfFalse), U8(11), //
+ B(Ldar), R(2), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), //
+ /* */ R(4), U8(1), //
+ B(Ldar), R(2), //
+ B(Star), R(4), //
+ B(LoadIC), R(4), U8(3), U8(vector->GetIndex(slot5)), //
+ B(JumpIfToBooleanTrue), U8(19), //
+ B(Ldar), R(2), //
+ B(Star), R(4), //
+ B(LoadIC), R(4), U8(4), U8(vector->GetIndex(slot6)), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
+ B(Star), R(3), //
+ B(Jump), U8(-61), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 5,
+ {InstanceType::FIXED_ARRAY_TYPE, InstanceType::SYMBOL_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ {"var x = 'potatoes';\n"
+ "for (var p of x) { return p; }",
+ 8 * kPointerSize,
+ 1,
+ 85,
+ {
+ B(StackCheck), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(3), //
+ B(Star), R(6), //
+ B(LdaConstant), U8(1), //
+ B(KeyedLoadIC), R(6), U8(vector->GetIndex(slot2)), //
+ B(Star), R(5), //
+ B(Call), R(5), R(6), U8(1), U8(vector->GetIndex(slot1)), //
+ B(Star), R(1), //
+ B(Ldar), R(1), //
+ B(Star), R(7), //
+ B(LoadIC), R(7), U8(2), U8(vector->GetIndex(slot4)), //
+ B(Star), R(6), //
+ B(Call), R(6), R(7), U8(1), U8(vector->GetIndex(slot3)), //
+ B(Star), R(2), //
+ B(Star), R(5), //
+ B(CallRuntime), U16(Runtime::kInlineIsJSReceiver), R(5), U8(1), //
+ B(LogicalNot), //
+ B(JumpIfFalse), U8(11), //
+ B(Ldar), R(2), //
+ B(Star), R(5), //
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), //
+ /* */ R(5), U8(1), //
+ B(Ldar), R(2), //
+ B(Star), R(5), //
+ B(LoadIC), R(5), U8(3), U8(vector->GetIndex(slot5)), //
+ B(JumpIfToBooleanTrue), U8(18), //
+ B(Ldar), R(2), //
+ B(Star), R(5), //
+ B(LoadIC), R(5), U8(4), U8(vector->GetIndex(slot6)), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
+ B(Star), R(4), //
+ B(Return), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 5,
+ {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SYMBOL_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ {"for (var x of [10, 20, 30]) {\n"
+ " if (x == 10) continue;\n"
+ " if (x == 20) break;\n"
+ "}",
+ 7 * kPointerSize,
+ 1,
+ 108,
+ {
+ B(StackCheck), //
+ B(CreateArrayLiteral), U8(0), U8(0), U8(array_literal_flags), //
+ B(Star), R(5), //
+ B(LdaConstant), U8(1), //
+ B(KeyedLoadIC), R(5), U8(vector->GetIndex(slot2)), //
+ B(Star), R(4), //
+ B(Call), R(4), R(5), U8(1), U8(vector->GetIndex(slot1)), //
+ B(Star), R(1), //
+ B(Ldar), R(1), //
+ B(Star), R(6), //
+ B(LoadIC), R(6), U8(2), U8(vector->GetIndex(slot4)), //
+ B(Star), R(5), //
+ B(Call), R(5), R(6), U8(1), U8(vector->GetIndex(slot3)), //
+ B(Star), R(2), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kInlineIsJSReceiver), R(4), U8(1), //
+ B(LogicalNot), //
+ B(JumpIfFalse), U8(11), //
+ B(Ldar), R(2), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), //
+ /* */ R(4), U8(1), //
+ B(Ldar), R(2), //
+ B(Star), R(4), //
+ B(LoadIC), R(4), U8(3), U8(vector->GetIndex(slot5)), //
+ B(JumpIfToBooleanTrue), U8(41), //
+ B(Ldar), R(2), //
+ B(Star), R(4), //
+ B(LoadIC), R(4), U8(4), U8(vector->GetIndex(slot6)), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
+ B(Star), R(3), //
+ B(Star), R(4), //
+ B(LdaSmi8), U8(10), //
+ B(TestEqual), R(4), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(-69), //
+ B(Ldar), R(3), //
+ B(Star), R(4), //
+ B(LdaSmi8), U8(20), //
+ B(TestEqual), R(4), //
+ B(JumpIfFalse), U8(4), //
+ B(Jump), U8(4), //
+ B(Jump), U8(-83), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 5,
+ {InstanceType::FIXED_ARRAY_TYPE, InstanceType::SYMBOL_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ {"var x = { 'a': 1, 'b': 2 };\n"
+ "for (x['a'] of [1,2,3]) { return x['a']; }",
+ 6 * kPointerSize,
+ 1,
+ 103,
+ {
+ B(StackCheck), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(object_literal_flags), //
+ B(Star), R(3), //
+ B(Star), R(2), //
+ B(CreateArrayLiteral), U8(1), U8(1), U8(array_literal_flags), //
+ B(Star), R(4), //
+ B(LdaConstant), U8(2), //
+ B(KeyedLoadIC), R(4), U8(vector->GetIndex(slot2)), //
+ B(Star), R(3), //
+ B(Call), R(3), R(4), U8(1), U8(vector->GetIndex(slot1)), //
+ B(Star), R(0), //
+ B(Ldar), R(0), //
+ B(Star), R(5), //
+ B(LoadIC), R(5), U8(3), U8(vector->GetIndex(slot4)), //
+ B(Star), R(4), //
+ B(Call), R(4), R(5), U8(1), U8(vector->GetIndex(slot3)), //
+ B(Star), R(1), //
+ B(Star), R(3), //
+ B(CallRuntime), U16(Runtime::kInlineIsJSReceiver), R(3), U8(1), //
+ B(LogicalNot), //
+ B(JumpIfFalse), U8(11), //
+ B(Ldar), R(1), //
+ B(Star), R(3), //
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), //
+ /* */ R(3), U8(1), //
+ B(Ldar), R(1), //
+ B(Star), R(3), //
+ B(LoadIC), R(3), U8(4), U8(vector->GetIndex(slot5)), //
+ B(JumpIfToBooleanTrue), U8(28), //
+ B(Ldar), R(2), //
+ B(Star), R(3), //
+ B(Ldar), R(1), //
+ B(Star), R(4), //
+ B(LoadIC), R(4), U8(5), U8(vector->GetIndex(slot6)), //
+ B(StoreICSloppy), R(3), U8(6), U8(vector->GetIndex(slot7)), //
+ B(StackCheck), //
+ B(Ldar), R(2), //
+ B(Star), R(3), //
+ B(LoadIC), R(3), U8(6), U8(vector->GetIndex(slot8)), //
+ B(Return), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 7,
+ {InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::SYMBOL_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ };
+ // clang-format on
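Although the ForOf test is disabled pending manual re-adaptation, its expectations document the iterator-protocol desugaring step by step: fetch x[Symbol.iterator] and call it, then per iteration call next(), throw via Runtime::kThrowIteratorResultNotAnObject if the result is not an object, exit when result.done is truthy, and otherwise bind result.value. A toy iterator making that shape concrete (illustrative types, not V8's):

    #include <cstdio>

    struct Result { int value; bool done; };

    struct RangeIterator {  // stands in for x[Symbol.iterator]()
      int next_value, limit;
      Result next() {
        Result r{next_value, next_value >= limit};
        ++next_value;
        return r;
      }
    };

    int main() {
      RangeIterator it{0, 3};
      for (Result r = it.next(); !r.done; r = it.next())  // done check
        std::printf("%d\n", r.value);                     // bind value
    }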
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -5421,12 +6543,14 @@ TEST(Conditional) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ // clang-format off
ExpectedSnippet<int> snippets[] = {
{"return 1 ? 2 : 3;",
0,
1,
- 11,
+ 12,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(JumpIfToBooleanFalse), U8(6), //
B(LdaSmi8), U8(2), //
@@ -5437,8 +6561,9 @@ TEST(Conditional) {
{"return 1 ? 2 ? 3 : 4 : 5;",
0,
1,
- 19,
+ 20,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(JumpIfToBooleanFalse), U8(14), //
B(LdaSmi8), U8(2), //
@@ -5451,6 +6576,7 @@ TEST(Conditional) {
B(Return), //
}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -5464,6 +6590,7 @@ TEST(Switch) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ // clang-format off
ExpectedSnippet<int> snippets[] = {
{"var a = 1;\n"
"switch(a) {\n"
@@ -5472,8 +6599,9 @@ TEST(Switch) {
"}\n",
3 * kPointerSize,
1,
- 30,
+ 31,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(1), // The tag variable is allocated as a
B(Star), R(0), // local by the parser, hence the store
@@ -5499,8 +6627,9 @@ TEST(Switch) {
"}\n",
3 * kPointerSize,
1,
- 36,
+ 37,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
B(Star), R(0), //
@@ -5528,8 +6657,9 @@ TEST(Switch) {
"}\n",
3 * kPointerSize,
1,
- 34,
+ 35,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
B(Star), R(0), //
@@ -5557,8 +6687,9 @@ TEST(Switch) {
"}\n",
3 * kPointerSize,
1,
- 34,
+ 35,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
B(Star), R(0), //
@@ -5586,8 +6717,9 @@ TEST(Switch) {
"}\n",
3 * kPointerSize,
1,
- 43,
+ 44,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
B(TypeOf), //
@@ -5619,8 +6751,9 @@ TEST(Switch) {
"}\n",
3 * kPointerSize,
1,
- 31,
+ 32,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
B(Star), R(0), //
@@ -5647,8 +6780,9 @@ TEST(Switch) {
"}\n",
3 * kPointerSize,
1,
- 288,
+ 289,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
B(Star), R(0), //
@@ -5683,8 +6817,9 @@ TEST(Switch) {
"}\n",
5 * kPointerSize,
1,
- 60,
+ 61,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(2), //
B(Star), R(0), //
@@ -5718,6 +6853,7 @@ TEST(Switch) {
B(Return), //
}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -5733,12 +6869,14 @@ TEST(BasicBlockToBoolean) {
// Check that we generate JumpIfToBoolean if they are at the start of basic
// blocks.
+ // clang-format off
ExpectedSnippet<int> snippets[] = {
{"var a = 1; if (a || a < 0) { return 1; }",
2 * kPointerSize,
1,
- 20,
+ 21,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(JumpIfToBooleanTrue), U8(9), //
@@ -5755,8 +6893,9 @@ TEST(BasicBlockToBoolean) {
{"var a = 1; if (a && a < 0) { return 1; }",
2 * kPointerSize,
1,
- 20,
+ 21,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(JumpIfToBooleanFalse), U8(9), //
@@ -5773,8 +6912,9 @@ TEST(BasicBlockToBoolean) {
{"var a = 1; a = (a || a < 0) ? 2 : 3;",
2 * kPointerSize,
1,
- 25,
+ 26,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(JumpIfToBooleanTrue), U8(9), //
@@ -5791,6 +6931,7 @@ TEST(BasicBlockToBoolean) {
B(Return), //
}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -5804,20 +6945,23 @@ TEST(DeadCodeRemoval) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ // clang-format off
ExpectedSnippet<int> snippets[] = {
{"return; var a = 1; a();",
1 * kPointerSize,
1,
- 2,
+ 3,
{
+ B(StackCheck), //
B(LdaUndefined), //
B(Return), //
}},
{"if (false) { return; }; var a = 1;",
1 * kPointerSize,
1,
- 6,
+ 7,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(LdaUndefined), //
@@ -5826,16 +6970,18 @@ TEST(DeadCodeRemoval) {
{"if (true) { return 1; } else { return 2; };",
0,
1,
- 3,
+ 4,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Return), //
}},
{"var a = 1; if (a) { return 1; }; return 2;",
1 * kPointerSize,
1,
- 12,
+ 13,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(JumpIfToBooleanFalse), U8(5), //
@@ -5845,6 +6991,7 @@ TEST(DeadCodeRemoval) {
B(Return), //
}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -5860,31 +7007,46 @@ TEST(ThisFunction) {
int closure = Register::function_closure().index();
+ // clang-format off
ExpectedSnippet<int> snippets[] = {
{"var f;\n f = function f() { }",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 9,
+ 19,
{
- B(LdaTheHole), //
- B(Star), R(0), //
- B(Ldar), R(closure), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return), //
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(closure), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(5), //
+ B(Mov), R(1), R(0), //
+ B(Ldar), R(1), //
+ B(LdaUndefined), //
+ B(Return), //
}},
{"var f;\n f = function f() { return f; }",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 8,
+ 23,
{
- B(LdaTheHole), //
- B(Star), R(0), //
- B(Ldar), R(closure), //
- B(Star), R(0), //
- B(Return), //
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(closure), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(5), //
+ B(Mov), R(1), R(0), //
+ B(Ldar), R(1), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(3), //
+ B(LdaUndefined), //
+ B(Return), //
}},
};
+ // clang-format on
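ThisFunction grows because the binding for a named function expression is now hole-checked: the slot starts as TheHole, and B(JumpIfNotHole), U8(5) skips the B(Mov) that installs the closure if the name was already bound, with a second hole check guarding the read in the "return f" case. In rough terms:

    #include <cstdio>
    #include <optional>

    int main() {
      std::optional<int> f;             // B(LdaTheHole); B(Star), R(0)
      int closure = 42;                 // B(Ldar), R(closure)
      if (!f.has_value()) f = closure;  // B(JumpIfNotHole); B(Mov), R(1), R(0)
      std::printf("%d\n", *f);          // the read is hole-checked likewise
    }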
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -5900,27 +7062,44 @@ TEST(NewTarget) {
int new_target = Register::new_target().index();
- ExpectedSnippet<int> snippets[] = {
+ // clang-format off
+ ExpectedSnippet<InstanceType> snippets[] = {
{"return new.target;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 5,
+ 19,
{
- B(Ldar), R(new_target), //
- B(Star), R(0), //
- B(Return), //
- }},
+ B(Ldar), R(new_target), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(1), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1), //
+ B(Return), //
+ },
+ 1,
+ {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
{"new.target;",
- 1 * kPointerSize,
+ 2 * kPointerSize,
1,
- 6,
+ 20,
{
- B(Ldar), R(new_target), //
- B(Star), R(0), //
- B(LdaUndefined), //
- B(Return), //
- }},
- };
+ B(Ldar), R(new_target), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(1), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 1,
+ {InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}}};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
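
The NewTarget expectations grow for a second reason as well: reads of new.target are now routed through a hole check, raising a ReferenceError when the value is still uninitialized. The same check recurs for lexical variables later in this file. Its shape, copied from the snippet above (operand values are those of that snippet):

  B(Ldar), R(0),             // the stored new.target
  B(JumpIfNotHole), U8(11),  // initialized: skip the throw
  B(LdaConstant), U8(0),     // variable name for the error message
  B(Star), R(1),             //
  B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1),  //
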
@@ -5934,6 +7113,7 @@ TEST(RemoveRedundantLdar) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ // clang-format off
ExpectedSnippet<int> snippets[] = {
{"var ld_a = 1;\n" // This test is to check Ldar does not
"while(true) {\n" // get removed if the preceding Star is
@@ -5943,9 +7123,11 @@ TEST(RemoveRedundantLdar) {
"return ld_a;",
2 * kPointerSize,
1,
- 29,
- {B(LdaSmi8), U8(1), //
+ 31,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(1), //
B(Star), R(0), //
+ B(StackCheck), //
B(Ldar), R(0), // This load should not be removed as it
B(Star), R(1), // is the target of the branch.
B(Ldar), R(0), //
@@ -5956,7 +7138,7 @@ TEST(RemoveRedundantLdar) {
B(TestGreaterThan), R(1), //
B(JumpIfFalse), U8(4), //
B(Jump), U8(4), //
- B(Jump), U8(-20), //
+ B(Jump), U8(-21), //
B(Ldar), R(0), //
B(Return)}},
{"var ld_a = 1;\n"
@@ -5967,9 +7149,11 @@ TEST(RemoveRedundantLdar) {
"return ld_a;",
2 * kPointerSize,
1,
- 27,
- {B(LdaSmi8), U8(1), //
+ 29,
+ {B(StackCheck), //
+ B(LdaSmi8), U8(1), //
B(Star), R(0), //
+ B(StackCheck), //
B(Ldar), R(0), //
B(Star), R(1), //
B(Ldar), R(0), //
@@ -5987,8 +7171,9 @@ TEST(RemoveRedundantLdar) {
" return ld_a;",
2 * kPointerSize,
1,
- 13,
+ 14,
{
+ B(StackCheck), //
B(LdaSmi8), U8(1), //
B(Star), R(0), //
B(Star), R(1), //
@@ -5998,6 +7183,7 @@ TEST(RemoveRedundantLdar) {
B(Return) //
}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -6011,13 +7197,15 @@ TEST(AssignmentsInBinaryExpression) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"var x = 0, y = 1;\n"
"return (x = 2, y = 3, x = 4, y = 5)",
2 * kPointerSize,
1,
- 24,
+ 25,
{
+ B(StackCheck), //
B(LdaZero), B(Star), R(0), //
B(LdaSmi8), U8(1), //
B(Star), R(1), //
@@ -6037,8 +7225,9 @@ TEST(AssignmentsInBinaryExpression) {
"return y",
2 * kPointerSize,
1,
- 11,
+ 12,
{
+ B(StackCheck), //
B(LdaSmi8), U8(55), //
B(Star), R(0), //
B(LdaSmi8), U8(100), //
@@ -6052,8 +7241,9 @@ TEST(AssignmentsInBinaryExpression) {
"return x;",
3 * kPointerSize,
1,
- 23,
+ 24,
{
+ B(StackCheck), //
B(LdaSmi8), U8(55), //
B(Star), R(0), //
B(Star), R(1), //
@@ -6074,8 +7264,9 @@ TEST(AssignmentsInBinaryExpression) {
"return x;",
3 * kPointerSize,
1,
- 31,
+ 32,
{
+ B(StackCheck), //
B(LdaSmi8), U8(55), //
B(Star), R(0), //
B(LdaSmi8), U8(56), //
@@ -6100,8 +7291,9 @@ TEST(AssignmentsInBinaryExpression) {
"return y;",
4 * kPointerSize,
1,
- 31,
+ 32,
{
+ B(StackCheck), //
B(LdaSmi8), U8(55), //
B(Star), R(0), //
B(Star), R(2), //
@@ -6125,8 +7317,9 @@ TEST(AssignmentsInBinaryExpression) {
"return x;",
3 * kPointerSize,
1,
- 31,
+ 32,
{
+ B(StackCheck), //
B(LdaSmi8), U8(55), //
B(Star), R(0), //
B(Star), R(1), //
@@ -6150,8 +7343,9 @@ TEST(AssignmentsInBinaryExpression) {
"y;\n",
5 * kPointerSize,
1,
- 69,
+ 70,
{
+ B(StackCheck), //
B(LdaSmi8), U8(10), //
B(Star), R(0), //
B(LdaSmi8), U8(20), //
@@ -6193,8 +7387,9 @@ TEST(AssignmentsInBinaryExpression) {
"return 1 + x + (x++) + (++x);\n",
4 * kPointerSize,
1,
- 37,
+ 38,
{
+ B(StackCheck), //
B(LdaSmi8), U8(17), //
B(Star), R(0), //
B(LdaSmi8), U8(1), //
@@ -6217,7 +7412,9 @@ TEST(AssignmentsInBinaryExpression) {
B(Add), R(3), //
B(Return), //
},
- 0}};
+ 0}
+ };
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -6233,49 +7430,51 @@ TEST(Eval) {
Zone zone;
int closure = Register::function_closure().index();
- int context = Register::function_context().index();
+ int context = Register::current_context().index();
int new_target = Register::new_target().index();
int first_context_slot = Context::MIN_CONTEXT_SLOTS;
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"return eval('1;');",
9 * kPointerSize,
1,
- 67,
+ 65,
{
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
- B(PushContext), R(0), //
- B(Ldar), THIS(1), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
- B(CreateMappedArguments), //
- B(StaContextSlot), R(0), U8(first_context_slot + 1), //
- B(Ldar), R(new_target), //
- B(StaContextSlot), R(0), U8(first_context_slot + 2), //
- B(Mov), R(context), R(3), //
- B(LdaConstant), U8(0), //
- B(Star), R(4), //
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlot), //
- R(3), U8(2), R(1), //
- B(LdaConstant), U8(1), //
- B(Star), R(3), //
- B(Mov), R(1), R(4), //
- B(Mov), R(3), R(5), //
- B(Mov), R(closure), R(6), //
- B(LdaZero), //
- B(Star), R(7), //
- B(LdaSmi8), U8(10), //
- B(Star), R(8), //
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
- U8(5), //
- B(Star), R(1), //
- B(Call), R(1), R(2), U8(1), U8(0), //
- B(Return), //
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ /* */ U8(1), //
+ B(PushContext), R(0), //
+ B(Ldar), THIS(1), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
+ B(CreateMappedArguments), //
+ B(StaContextSlot), R(context), U8(first_context_slot + 1), //
+ B(Ldar), R(new_target), //
+ B(StaContextSlot), R(context), U8(first_context_slot + 2), //
+ B(StackCheck), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(3), //
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), //
+ /* */ R(3), U8(1), R(1), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(3), //
+ B(Mov), R(1), R(4), //
+ B(Mov), R(3), R(5), //
+ B(Mov), R(closure), R(6), //
+ B(LdaZero), //
+ B(Star), R(7), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(8), //
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
+ /* */ U8(5), //
+ B(Star), R(1), //
+ B(Call), R(1), R(2), U8(2), U8(0), //
+ B(Return), //
},
2,
{"eval", "1;"}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
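
Two coordinated changes drive the Eval expectations. First, the slot lookup switches from Runtime::kLoadLookupSlot, which took a context register plus the name (two arguments), to Runtime::kLoadLookupSlotForCall, which takes only the name, so the Mov of the context register disappears. Second, the Call bytecode's count operand now appears to include the receiver, hence U8(1) becoming U8(2). The new call setup, condensed from the hunk above:

  B(LdaConstant), U8(0),                                        // the name
  B(Star), R(3),                                                //
  B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall),  //
  /*                  */ R(3), U8(1), R(1),      // one arg in, pair out
  // ... argument registers set up as before ...
  B(Call), R(1), R(2), U8(2), U8(0),  // count now includes the receiver
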
@@ -6290,125 +7489,127 @@ TEST(LookupSlot) {
BytecodeGeneratorHelper helper;
int closure = Register::function_closure().index();
+ int context = Register::current_context().index();
int first_context_slot = Context::MIN_CONTEXT_SLOTS;
- int context = Register::function_context().index();
int new_target = Register::new_target().index();
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"eval('var x = 10;'); return x;",
9 * kPointerSize,
1,
- 69,
+ 67,
{
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
- B(PushContext), R(0), //
- B(Ldar), THIS(1), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
- B(CreateMappedArguments), //
- B(StaContextSlot), R(0), U8(first_context_slot + 1), //
- B(Ldar), R(new_target), //
- B(StaContextSlot), R(0), U8(first_context_slot + 2), //
- B(Mov), R(context), R(3), //
- B(LdaConstant), U8(0), //
- B(Star), R(4), //
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlot), //
- R(3), U8(2), R(1), //
- B(LdaConstant), U8(1), //
- B(Star), R(3), //
- B(Mov), R(1), R(4), //
- B(Mov), R(3), R(5), //
- B(Mov), R(closure), R(6), //
- B(LdaZero), //
- B(Star), R(7), //
- B(LdaSmi8), U8(10), //
- B(Star), R(8), //
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
- U8(5), //
- B(Star), R(1), //
- B(Call), R(1), R(2), U8(1), U8(0), //
- B(LdaLookupSlot), U8(2), //
- B(Return), //
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ /* */ U8(1), //
+ B(PushContext), R(0), //
+ B(Ldar), THIS(1), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
+ B(CreateMappedArguments), //
+ B(StaContextSlot), R(context), U8(first_context_slot + 1), //
+ B(Ldar), R(new_target), //
+ B(StaContextSlot), R(context), U8(first_context_slot + 2), //
+ B(StackCheck), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(3), //
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), //
+ R(3), U8(1), R(1), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(3), //
+ B(Mov), R(1), R(4), //
+ B(Mov), R(3), R(5), //
+ B(Mov), R(closure), R(6), //
+ B(LdaZero), //
+ B(Star), R(7), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(8), //
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
+ U8(5), //
+ B(Star), R(1), //
+ B(Call), R(1), R(2), U8(2), U8(0), //
+ B(LdaLookupSlot), U8(2), //
+ B(Return), //
},
3,
{"eval", "var x = 10;", "x"}},
{"eval('var x = 10;'); return typeof x;",
- 9 * kPointerSize,
- 1,
- 70,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
- B(PushContext), R(0), //
- B(Ldar), THIS(1), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
- B(CreateMappedArguments), //
- B(StaContextSlot), R(0), U8(first_context_slot + 1), //
- B(Ldar), R(new_target), //
- B(StaContextSlot), R(0), U8(first_context_slot + 2), //
- B(Mov), R(context), R(3), //
- B(LdaConstant), U8(0), //
- B(Star), R(4), //
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlot), //
- R(3), U8(2), R(1), //
- B(LdaConstant), U8(1), //
- B(Star), R(3), //
- B(Mov), R(1), R(4), //
- B(Mov), R(3), R(5), //
- B(Mov), R(closure), R(6), //
- B(LdaZero), //
- B(Star), R(7), //
- B(LdaSmi8), U8(10), //
- B(Star), R(8), //
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
- U8(5), //
- B(Star), R(1), //
- B(Call), R(1), R(2), U8(1), U8(0), //
- B(LdaLookupSlotInsideTypeof), U8(2), //
- B(TypeOf), //
- B(Return), //
+ 9 * kPointerSize,
+ 1,
+ 68,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ /* */ U8(1), //
+ B(PushContext), R(0), //
+ B(Ldar), THIS(1), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
+ B(CreateMappedArguments), //
+ B(StaContextSlot), R(context), U8(first_context_slot + 1), //
+ B(Ldar), R(new_target), //
+ B(StaContextSlot), R(context), U8(first_context_slot + 2), //
+ B(StackCheck), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(3), //
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), //
+ /* */ R(3), U8(1), R(1), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(3), //
+ B(Mov), R(1), R(4), //
+ B(Mov), R(3), R(5), //
+ B(Mov), R(closure), R(6), //
+ B(LdaZero), //
+ B(Star), R(7), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(8), //
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
+ /* */ U8(5), //
+ B(Star), R(1), //
+ B(Call), R(1), R(2), U8(2), U8(0), //
+ B(LdaLookupSlotInsideTypeof), U8(2), //
+ B(TypeOf), //
+ B(Return), //
},
3,
{"eval", "var x = 10;", "x"}},
{"x = 20; return eval('');",
9 * kPointerSize,
1,
- 71,
- {
- B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
- B(PushContext), R(0), //
- B(Ldar), THIS(1), //
- B(StaContextSlot), R(0), U8(first_context_slot), //
- B(CreateMappedArguments), //
- B(StaContextSlot), R(0), U8(first_context_slot + 1), //
- B(Ldar), R(new_target), //
- B(StaContextSlot), R(0), U8(first_context_slot + 2), //
- B(LdaSmi8), U8(20), //
- B(StaLookupSlotSloppy), U8(0), //
- B(Mov), R(context), R(3), //
- B(LdaConstant), U8(1), //
- B(Star), R(4), //
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlot), //
- R(3), U8(2), R(1), //
- B(LdaConstant), U8(2), //
- B(Star), R(3), //
- B(Mov), R(1), R(4), //
- B(Mov), R(3), R(5), //
- B(Mov), R(closure), R(6), //
- B(LdaZero), //
- B(Star), R(7), //
- B(LdaSmi8), U8(10), //
- B(Star), R(8), //
- B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
- U8(5), //
- B(Star), R(1), //
- B(Call), R(1), R(2), U8(1), U8(0), //
- B(Return), //
+ 69,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ U8(1), //
+ B(PushContext), R(0), //
+ B(Ldar), THIS(1), //
+ B(StaContextSlot), R(context), U8(first_context_slot), //
+ B(CreateMappedArguments), //
+ B(StaContextSlot), R(context), U8(first_context_slot + 1), //
+ B(Ldar), R(new_target), //
+ B(StaContextSlot), R(context), U8(first_context_slot + 2), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(20), //
+ B(StaLookupSlotSloppy), U8(0), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(3), //
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), //
+ /* */ R(3), U8(1), R(1), //
+ B(LdaConstant), U8(2), //
+ B(Star), R(3), //
+ B(Mov), R(1), R(4), //
+ B(Mov), R(3), R(5), //
+ B(Mov), R(closure), R(6), //
+ B(LdaZero), //
+ B(Star), R(7), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(8), //
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
+ /* */ U8(5), //
+ B(Star), R(1), //
+ B(Call), R(1), R(2), U8(2), U8(0), //
+ B(Return), //
},
3,
{"x", "eval", ""}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -6432,31 +7633,32 @@ TEST(CallLookupSlot) {
i::NewTypeFeedbackVector(helper.isolate(), &feedback_spec);
int closure = Register::function_closure().index();
- int context = Register::function_context().index();
+ int context = Register::current_context().index();
int new_target = Register::new_target().index();
+ // clang-format off
ExpectedSnippet<InstanceType> snippets[] = {
{"g = function(){}; eval(''); return g();",
9 * kPointerSize,
1,
- 90,
+ 85,
{
B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
- U8(1), //
+ /* */ U8(1), //
B(PushContext), R(0), //
B(Ldar), THIS(1), //
- B(StaContextSlot), R(0), U8(4), //
+ B(StaContextSlot), R(context), U8(4), //
B(CreateMappedArguments), //
- B(StaContextSlot), R(0), U8(5), //
+ B(StaContextSlot), R(context), U8(5), //
B(Ldar), R(new_target), //
- B(StaContextSlot), R(0), U8(6), //
+ B(StaContextSlot), R(context), U8(6), //
+ B(StackCheck), //
B(CreateClosure), U8(0), U8(0), //
B(StaLookupSlotSloppy), U8(1), //
- B(Mov), R(context), R(3), //
B(LdaConstant), U8(2), //
- B(Star), R(4), //
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlot), //
- R(3), U8(2), R(1), //
+ B(Star), R(3), //
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), //
+ R(3), U8(1), R(1), //
B(LdaConstant), U8(3), //
B(Star), R(3), //
B(Mov), R(1), R(4), //
@@ -6469,13 +7671,12 @@ TEST(CallLookupSlot) {
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(4), //
U8(5), //
B(Star), R(1), //
- B(Call), R(1), R(2), U8(1), U8(0), //
- B(Mov), R(context), R(3), //
+ B(Call), R(1), R(2), U8(2), U8(0), //
B(LdaConstant), U8(1), //
- B(Star), R(4), //
- B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlot), //
- R(3), U8(2), R(1), //
- B(Call), R(1), R(2), U8(0), U8(vector->GetIndex(slot2)), //
+ B(Star), R(3), //
+ B(CallRuntimeForPair), U16(Runtime::kLoadLookupSlotForCall), //
+ R(3), U8(1), R(1), //
+ B(Call), R(1), R(2), U8(1), U8(vector->GetIndex(slot2)), //
B(Return), //
},
4,
@@ -6484,6 +7685,7 @@ TEST(CallLookupSlot) {
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
Handle<BytecodeArray> bytecode_array =
@@ -6493,6 +7695,8 @@ TEST(CallLookupSlot) {
}
+// TODO(mythria): Add tests for variable/function declarations in lookup slots.
+
TEST(LookupSlotInEval) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
@@ -6505,12 +7709,14 @@ TEST(LookupSlotInEval) {
"}"
"f1();";
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"return x;",
0 * kPointerSize,
1,
- 3,
+ 4,
{
+ B(StackCheck), //
B(LdaLookupSlot), U8(0), //
B(Return) //
},
@@ -6519,8 +7725,9 @@ TEST(LookupSlotInEval) {
{"x = 10;",
0 * kPointerSize,
1,
- 6,
+ 7,
{
+ B(StackCheck), //
B(LdaSmi8), U8(10), //
B(StaLookupSlotSloppy), U8(0), //
B(LdaUndefined), //
@@ -6531,8 +7738,9 @@ TEST(LookupSlotInEval) {
{"'use strict'; x = 10;",
0 * kPointerSize,
1,
- 6,
+ 7,
{
+ B(StackCheck), //
B(LdaSmi8), U8(10), //
B(StaLookupSlotStrict), U8(0), //
B(LdaUndefined), //
@@ -6543,8 +7751,9 @@ TEST(LookupSlotInEval) {
{"return typeof x;",
0 * kPointerSize,
1,
- 4,
+ 5,
{
+ B(StackCheck), //
B(LdaLookupSlotInsideTypeof), U8(0), //
B(TypeOf), //
B(Return), //
@@ -6552,15 +7761,14 @@ TEST(LookupSlotInEval) {
1,
{"x"}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
std::string script = std::string(function_prologue) +
std::string(snippets[i].code_snippet) +
std::string(function_epilogue);
- // TODO(mythria): use * as filter when function declarations are supported
- // inside eval.
Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(script.c_str(), "t", "f");
+ helper.MakeBytecode(script.c_str(), "*", "f");
CheckBytecodeArrayEqual(snippets[i], bytecode_array);
}
}
@@ -6581,13 +7789,15 @@ TEST(LookupSlotWideInEval) {
"f1();";
int const_count[] = {0, 0, 0, 0};
+ // clang-format off
ExpectedSnippet<InstanceType, 257> snippets[] = {
{REPEAT_256(SPACE, "var y = 2.3;")
"return x;",
1 * kPointerSize,
1,
- 1028,
+ 1029,
{
+ B(StackCheck), //
REPEAT_256(SPACE, //
B(LdaConstant), U8(const_count[0]++), //
B(Star), R(0), ) //
@@ -6601,8 +7811,9 @@ TEST(LookupSlotWideInEval) {
"return typeof x;",
1 * kPointerSize,
1,
- 1029,
+ 1030,
{
+ B(StackCheck), //
REPEAT_256(SPACE, //
B(LdaConstant), U8(const_count[1]++), //
B(Star), R(0), ) //
@@ -6617,8 +7828,9 @@ TEST(LookupSlotWideInEval) {
"x = 10;",
1 * kPointerSize,
1,
- 1031,
+ 1032,
{
+ B(StackCheck), //
REPEAT_256(SPACE, //
B(LdaConstant), U8(const_count[2]++), //
B(Star), R(0), ) //
@@ -6635,9 +7847,10 @@ TEST(LookupSlotWideInEval) {
"x = 10;",
1 * kPointerSize,
1,
- 1031,
+ 1032,
{
- REPEAT_256(SPACE,
+ B(StackCheck), //
+ REPEAT_256(SPACE, //
B(LdaConstant), U8(const_count[3]++), //
B(Star), R(0), ) //
B(LdaSmi8), U8(10), //
@@ -6649,15 +7862,14 @@ TEST(LookupSlotWideInEval) {
{REPEAT_256(COMMA, InstanceType::HEAP_NUMBER_TYPE),
InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
std::string script = std::string(function_prologue) +
std::string(snippets[i].code_snippet) +
std::string(function_epilogue);
- // TODO(mythria): use * as filter when function declarations are supported
- // inside eval.
Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(script.c_str(), "t", "f");
+ helper.MakeBytecode(script.c_str(), "*", "f");
CheckBytecodeArrayEqual(snippets[i], bytecode_array);
}
}
@@ -6677,51 +7889,1215 @@ TEST(DeleteLookupSlotInEval) {
"}"
"f1();";
+ // clang-format off
ExpectedSnippet<const char*> snippets[] = {
{"delete x;",
- 0 * kPointerSize,
+ 1 * kPointerSize,
1,
- 5,
+ 12,
{
- B(LdaConstant), U8(0), //
- B(DeleteLookupSlot), //
- B(LdaUndefined), //
- B(Return) //
+ B(StackCheck), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(0), //
+ B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1), //
+ B(LdaUndefined), //
+ B(Return) //
},
1,
{"x"}},
{"return delete y;",
0 * kPointerSize,
1,
- 2,
+ 3,
{
+ B(StackCheck), //
B(LdaFalse), //
B(Return) //
},
0},
{"return delete z;",
- 0 * kPointerSize,
+ 1 * kPointerSize,
1,
- 4,
+ 11,
{
- B(LdaConstant), U8(0), //
- B(DeleteLookupSlot), //
- B(Return) //
+ B(StackCheck), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(0), //
+ B(CallRuntime), U16(Runtime::kDeleteLookupSlot), R(0), U8(1), //
+ B(Return) //
},
1,
{"z"}},
};
+ // clang-format on
for (size_t i = 0; i < arraysize(snippets); i++) {
std::string script = std::string(function_prologue) +
std::string(snippets[i].code_snippet) +
std::string(function_epilogue);
Handle<BytecodeArray> bytecode_array =
- helper.MakeBytecode(script.c_str(), "t", "f");
+ helper.MakeBytecode(script.c_str(), "*", "f");
CheckBytecodeArrayEqual(snippets[i], bytecode_array);
}
}
+TEST(WideRegisters) {
+ // Prepare prologue that creates frame for lots of registers.
+  // Prepare a prologue that creates a frame with lots of registers.
+ for (size_t i = 0; i < 157; ++i) {
+ os << "var x" << i << ";\n";
+ }
+ std::string prologue(os.str());
+
+ // clang-format off
+ ExpectedSnippet<int> snippets[] = {
+ {"x0 = x127;\n"
+ "return x0;\n",
+ 161 * kPointerSize,
+ 1,
+ 11,
+ {
+ B(StackCheck), //
+ B(MovWide), R16(131), R16(125), //
+ B(Ldar), R(125), //
+ B(Star), R(0), //
+ B(Return), //
+ }},
+ {"x127 = x126;\n"
+ "return x127;\n",
+ 161 * kPointerSize,
+ 1,
+ 23,
+ {
+ B(StackCheck), //
+ B(MovWide), R16(130), R16(125), //
+ B(Ldar), R(125), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(131), //
+ B(MovWide), R16(131), R16(125), //
+ B(Ldar), R(125), //
+ B(Return), //
+ }},
+ {"if (x2 > 3) { return x129; }\n"
+ "return x128;\n",
+ 162 * kPointerSize,
+ 1,
+ 37,
+ {
+ B(StackCheck), //
+ B(Ldar), R(2), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(161), //
+ B(LdaSmi8), U8(3), //
+ B(MovWide), R16(161), R16(125), //
+ B(TestGreaterThan), R(125), //
+ B(JumpIfFalse), U8(10), //
+ B(MovWide), R16(133), R16(125), //
+ B(Ldar), R(125), //
+ B(Return), //
+ B(MovWide), R16(132), R16(125), //
+ B(Ldar), R(125), //
+ B(Return), //
+ }},
+ {"var x0 = 0;\n"
+ "if (x129 == 3) { var x129 = x0; }\n"
+ "if (x2 > 3) { return x0; }\n"
+ "return x129;\n",
+ 162 * kPointerSize,
+ 1,
+ 69,
+ {
+ B(StackCheck), //
+ B(LdaZero), //
+ B(Star), R(0), //
+ B(MovWide), R16(133), R16(125), //
+ B(Ldar), R(125), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(161), //
+ B(LdaSmi8), U8(3), //
+ B(MovWide), R16(161), R16(125), //
+ B(TestEqual), R(125), //
+ B(JumpIfFalse), U8(11), //
+ B(Ldar), R(0), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(133), //
+ B(Ldar), R(2), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(161), //
+ B(LdaSmi8), U8(3), //
+ B(MovWide), R16(161), R16(125), //
+ B(TestGreaterThan), R(125), //
+ B(JumpIfFalse), U8(5), //
+ B(Ldar), R(0), //
+ B(Return), //
+ B(MovWide), R16(133), R16(125), //
+ B(Ldar), R(125), //
+ B(Return), //
+ }},
+ {"var x0 = 0;\n"
+ "var x1 = 0;\n"
+ "for (x128 = 0; x128 < 64; x128++) {"
+ " x1 += x128;"
+ "}"
+ "return x128;\n",
+ 162 * kPointerSize,
+ 1,
+ 99,
+ {
+ B(StackCheck), //
+ B(LdaZero), //
+ B(Star), R(0), //
+ B(LdaZero), //
+ B(Star), R(1), //
+ B(LdaZero), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(132), //
+ B(MovWide), R16(132), R16(125), //
+ B(Ldar), R(125), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(161), //
+ B(LdaSmi8), U8(64), //
+ B(MovWide), R16(161), R16(125), //
+ B(TestLessThan), R(125), //
+ B(JumpIfFalse), U8(53), //
+ B(StackCheck), //
+ B(Ldar), R(1), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(161), //
+ B(MovWide), R16(132), R16(125), //
+ B(Ldar), R(125), //
+ B(MovWide), R16(161), R16(125), //
+ B(Add), R(125), //
+ B(Star), R(1), //
+ B(MovWide), R16(132), R16(125), //
+ B(Ldar), R(125), //
+ B(ToNumber), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(161), //
+ B(Inc), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(132), //
+ B(Jump), U8(-74), //
+ B(MovWide), R16(132), R16(125), //
+ B(Ldar), R(125), //
+ B(Return), //
+ }},
+ {"var x0 = 1234;\n"
+ "var x1 = 0;\n"
+ "for (x128 in x0) {"
+ " x1 += x128;"
+ "}"
+ "return x1;\n",
+ 167 * kPointerSize,
+ 1,
+ 111,
+ {
+ B(StackCheck), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(0), //
+ B(LdaZero), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(JumpIfUndefined), U8(98), //
+ B(JumpIfNull), U8(96), //
+ B(ToObject), //
+ B(JumpIfNull), U8(93), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(161), //
+ B(ForInPrepareWide), R16(162), //
+ B(LdaZero), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(165), //
+ B(MovWide), R16(165), R16(125), //
+ B(MovWide), R16(164), R16(126), //
+ B(ForInDone), R(125), R(126), //
+ B(JumpIfTrue), U8(60), //
+ B(ForInNextWide), R16(161), R16(165), R16(162), //
+ B(JumpIfUndefined), U8(35), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(132), //
+ B(StackCheck), //
+ B(Ldar), R(1), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(166), //
+ B(MovWide), R16(132), R16(125), //
+ B(Ldar), R(125), //
+ B(MovWide), R16(166), R16(125), //
+ B(Add), R(125), //
+ B(Star), R(1), //
+ B(MovWide), R16(165), R16(125), //
+ B(ForInStep), R(125), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(165), //
+ B(Jump), U8(-71), //
+ B(Ldar), R(1), //
+ B(Return), //
+ },
+ 1,
+ {1234}},
+ {"x0 = %Add(x64, x63);\n"
+ "x1 = %Add(x27, x143);\n"
+ "%TheHole();\n"
+ "return x1;\n",
+ 163 * kPointerSize,
+ 1,
+ 66,
+ {
+ B(StackCheck), //
+ B(Ldar), R(64), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(161), //
+ B(Ldar), R(63), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(162), //
+ B(CallRuntimeWide), U16(Runtime::kAdd), R16(161), U8(2), //
+ B(Star), R(0), //
+ B(Ldar), R(27), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(161), //
+ B(MovWide), R16(147), R16(125), //
+ B(Ldar), R(125), //
+ B(Star), R(125), //
+ B(MovWide), R16(125), R16(162), //
+ B(CallRuntimeWide), U16(Runtime::kAdd), R16(161), U8(2), //
+ B(Star), R(1), //
+ B(CallRuntime), U16(Runtime::kTheHole), R(0), U8(0), //
+ B(Ldar), R(1), //
+ B(Return), //
+ }}
+ };
+ // clang-format on
+
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ for (size_t i = 0; i < arraysize(snippets); ++i) {
+ std::string body = prologue + snippets[i].code_snippet;
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(body.c_str());
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
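
TEST(WideRegisters) above pins down code generation for frames whose registers overflow the single-byte operand range. Values in high registers are shuffled through a low window register (R(125) in these frames) using MovWide with 16-bit R16 operands, and the ordinary bytecodes then operate on the window. Condensed from the first snippet:

  // x0 = x127: x127 lives at wide index 131, out of 8-bit operand range.
  B(MovWide), R16(131), R16(125),  // wide source -> narrow window register
  B(Ldar), R(125),                 //
  B(Star), R(0),                   // x0 is a low register; plain operand
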
+TEST(ConstVariable) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+ // clang-format off
+ ExpectedSnippet<const char*> snippets[] = {
+ {"const x = 10;",
+ 1 * kPointerSize,
+ 1,
+ 10,
+ {
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(0), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 0},
+ {"const x = 10; return x;",
+ 2 * kPointerSize,
+ 1,
+ 20,
+ {
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(0), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(1), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1), //
+ B(Return) //
+ },
+ 1,
+ {"x"}},
+ {"const x = ( x = 20);",
+ 3 * kPointerSize,
+ 1,
+ 32,
+ {
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(20), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(2), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1), //
+ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), //
+ /* */ U8(0), //
+ B(Ldar), R(1), //
+ B(Star), R(0), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 1,
+ {"x"}},
+ {"const x = 10; x = 20;",
+ 3 * kPointerSize,
+ 1,
+ 36,
+ {
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(20), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(2), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1), //
+ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), //
+ /* */ U8(0), //
+ B(Ldar), R(1), //
+ B(Star), R(0), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 1,
+ {"x"}},
+ };
+ // clang-format on
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
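
TEST(ConstVariable) encodes the two ES2015 const checks: a read of an uninitialized binding raises a ReferenceError through the hole check shown earlier, and an assignment to a const raises Runtime::kThrowConstAssignError even when the binding is initialized. The assignment path, condensed from the `const x = 10; x = 20;` snippet above:

  B(LdaSmi8), U8(20),                                               //
  B(Star), R(1),             // RHS value, kept as the expression value
  B(Ldar), R(0),             // hole check of x (TDZ)
  B(JumpIfNotHole), U8(11),                                         //
  B(LdaConstant), U8(0),                                            //
  B(Star), R(2),                                                    //
  B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),  //
  B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0),       //
  /*           */ U8(0),     // reached whenever x was initialized
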
+TEST(LetVariable) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ // clang-format off
+ ExpectedSnippet<const char*> snippets[] = {
+ {"let x = 10;",
+ 1 * kPointerSize,
+ 1,
+ 10,
+ {
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(0), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 0},
+ {"let x = 10; return x;",
+ 2 * kPointerSize,
+ 1,
+ 20,
+ {
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(0), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(1), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(1), U8(1), //
+ B(Return) //
+ },
+ 1,
+ {"x"}},
+ {"let x = (x = 20);",
+ 3 * kPointerSize,
+ 1,
+ 27,
+ {
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(20), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(2), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1), //
+ B(Ldar), R(1), //
+ B(Star), R(0), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 1,
+ {"x"}},
+ {"let x = 10; x = 20;",
+ 3 * kPointerSize,
+ 1,
+ 31,
+ {
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(20), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(2), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1), //
+ B(Ldar), R(1), //
+ B(Star), R(0), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 1,
+ {"x"}},
+ };
+ // clang-format on
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+TEST(LegacyConstVariable) {
+ bool old_legacy_const_flag = FLAG_legacy_const;
+ FLAG_legacy_const = true;
+
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ // clang-format off
+ ExpectedSnippet<const char*> snippets[] = {
+ {"const x = 10;",
+ 2 * kPointerSize,
+ 1,
+ 19,
+ {
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(5), //
+ B(Mov), R(1), R(0), //
+ B(Ldar), R(1), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 0},
+ {"const x = 10; return x;",
+ 2 * kPointerSize,
+ 1,
+ 23,
+ {
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(5), //
+ B(Mov), R(1), R(0), //
+ B(Ldar), R(1), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(3), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 0},
+ {"const x = ( x = 20);",
+ 2 * kPointerSize,
+ 1,
+ 23,
+ {
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(20), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Ldar), R(1), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(5), //
+ B(Mov), R(1), R(0), //
+ B(Ldar), R(1), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 0},
+ {"const x = 10; x = 20;",
+ 2 * kPointerSize,
+ 1,
+ 27,
+ {
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(JumpIfNotHole), U8(5), //
+ B(Mov), R(1), R(0), //
+ B(Ldar), R(1), //
+ B(LdaSmi8), U8(20), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Ldar), R(1), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 0},
+ };
+ // clang-format on
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+
+ FLAG_legacy_const = old_legacy_const_flag;
+}
+
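
TEST(LegacyConstVariable) covers sloppy-mode legacy const behind FLAG_legacy_const, where reassignment neither throws nor takes effect. That is why its expectations contain no kThrowConstAssignError call: the new value is parked in a temporary and discarded. From the `const x = 10; x = 20;` snippet above:

  B(LdaSmi8), U8(20),  //
  B(Star), R(1),       // RHS kept only as the value of the expression
  B(Ldar), R(0),       //
  B(Ldar), R(1),       // x itself (R(0)) is never overwritten
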
+TEST(ConstVariableContextSlot) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ int closure = Register::function_closure().index();
+ int context = Register::current_context().index();
+
+ // TODO(mythria): Add tests for initialization of this via super calls.
+ // TODO(mythria): Add tests that walk the context chain.
+ // clang-format off
+ ExpectedSnippet<InstanceType> snippets[] = {
+ {"const x = 10; function f1() {return x;}",
+ 2 * kPointerSize,
+ 1,
+ 24,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ U8(1), //
+ B(PushContext), R(1), //
+ B(LdaTheHole), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 1,
+ {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
+ {"const x = 10; function f1() {return x;} return x;",
+ 3 * kPointerSize,
+ 1,
+ 37,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ U8(1), //
+ B(PushContext), R(1), //
+ B(LdaTheHole), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(LdaContextSlot), R(context), U8(4), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(2), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1), //
+ B(Return) //
+ },
+ 2,
+ {InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ {"const x = (x = 20); function f1() {return x;}",
+ 4 * kPointerSize,
+ 1,
+ 50,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ /* */ U8(1), //
+ B(PushContext), R(1), //
+ B(LdaTheHole), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(20), //
+ B(Star), R(2), //
+ B(LdaContextSlot), R(context), U8(4), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(3), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1), //
+ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), //
+ U8(0), //
+ B(Ldar), R(2), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 2,
+ {InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ {"const x = 10; x = 20; function f1() {return x;}",
+ 4 * kPointerSize,
+ 1,
+ 52,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ /* */ U8(1), //
+ B(PushContext), R(1), //
+ B(LdaTheHole), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(LdaSmi8), U8(20), //
+ B(Star), R(2), //
+ B(LdaContextSlot), R(context), U8(4), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(3), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1), //
+ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), //
+ U8(0), //
+ B(Ldar), R(2), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 2,
+ {InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ };
+ // clang-format on
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+TEST(LetVariableContextSlot) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ int closure = Register::function_closure().index();
+ int context = Register::current_context().index();
+
+ // clang-format off
+ ExpectedSnippet<InstanceType> snippets[] = {
+ {"let x = 10; function f1() {return x;}",
+ 2 * kPointerSize,
+ 1,
+ 24,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ /* */ U8(1), //
+ B(PushContext), R(1), //
+ B(LdaTheHole), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 1,
+ {InstanceType::SHARED_FUNCTION_INFO_TYPE}},
+ {"let x = 10; function f1() {return x;} return x;",
+ 3 * kPointerSize,
+ 1,
+ 37,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ /* */ U8(1), //
+ B(PushContext), R(1), //
+ B(LdaTheHole), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(LdaContextSlot), R(context), U8(4), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(2), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1), //
+ B(Return) //
+ },
+ 2,
+ {InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ {"let x = (x = 20); function f1() {return x;}",
+ 4 * kPointerSize,
+ 1,
+ 45,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ /* */ U8(1), //
+ B(PushContext), R(1), //
+ B(LdaTheHole), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(20), //
+ B(Star), R(2), //
+ B(LdaContextSlot), R(context), U8(4), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(3), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1), //
+ B(Ldar), R(2), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 2,
+ {InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ {"let x = 10; x = 20; function f1() {return x;}",
+ 4 * kPointerSize,
+ 1,
+ 47,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ /* */ U8(1), //
+ B(PushContext), R(1), //
+ B(LdaTheHole), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(0), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(LdaSmi8), U8(20), //
+ B(Star), R(2), //
+ B(LdaContextSlot), R(context), U8(4), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(3), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(3), U8(1), //
+ B(Ldar), R(2), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 2,
+ {InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ };
+ // clang-format on
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+TEST(DoExpression) {
+ bool old_flag = FLAG_harmony_do_expressions;
+ FLAG_harmony_do_expressions = true;
+
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ // clang-format off
+ ExpectedSnippet<const char*> snippets[] = {
+ {"var a = do { }; return a;",
+ 2 * kPointerSize,
+ 1,
+ 6,
+ {
+ B(StackCheck), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(Return) //
+ },
+ 0},
+ {"var a = do { var x = 100; }; return a;",
+ 3 * kPointerSize,
+ 1,
+ 11,
+ {
+ B(StackCheck), //
+ B(LdaSmi8), U8(100), //
+ B(Star), R(1), //
+ B(LdaUndefined), //
+ B(Star), R(0), //
+ B(Star), R(2), //
+ B(Return) //
+ },
+ 0},
+ {"while(true) { var a = 10; a = do { ++a; break; }; a = 20; }",
+ 2 * kPointerSize,
+ 1,
+ 26,
+ {
+ B(StackCheck), //
+ B(StackCheck), //
+ B(LdaSmi8), U8(10), //
+ B(Star), R(1), //
+ B(ToNumber), //
+ B(Inc), //
+ B(Star), R(1), //
+ B(Star), R(0), //
+ B(Jump), U8(12), //
+ B(Ldar), R(0), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(20), //
+ B(Star), R(1), //
+ B(Jump), U8(-21), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 0},
+ };
+ // clang-format on
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+ FLAG_harmony_do_expressions = old_flag;
+}
+
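
TEST(DoExpression) also shows the convention for testing staged language features: save the flag, force it on for the test body, and restore it afterwards so the setting cannot leak into other tests in the same process. The skeleton, as used above:

  bool old_flag = FLAG_harmony_do_expressions;
  FLAG_harmony_do_expressions = true;
  // ... build snippets and run CheckBytecodeArrayEqual(...) as usual ...
  FLAG_harmony_do_expressions = old_flag;  // restore for subsequent tests
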
+TEST(WithStatement) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ int deep_elements_flags =
+ ObjectLiteral::kFastElements | ObjectLiteral::kDisableMementos;
+ int context = Register::current_context().index();
+ int closure = Register::function_closure().index();
+ int new_target = Register::new_target().index();
+
+ // clang-format off
+ ExpectedSnippet<InstanceType> snippets[] = {
+ {"with ({x:42}) { return x; }",
+ 5 * kPointerSize,
+ 1,
+ 47,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ /* */ U8(1), //
+ B(PushContext), R(0), //
+ B(Ldar), THIS(1), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(CreateMappedArguments), //
+ B(StaContextSlot), R(context), U8(5), //
+ B(Ldar), R(new_target), //
+ B(StaContextSlot), R(context), U8(6), //
+ B(StackCheck), //
+ B(CreateObjectLiteral), U8(0), U8(0), U8(deep_elements_flags), //
+ B(Star), R(2), //
+ B(ToObject), //
+ B(Star), R(3), //
+ B(Ldar), R(closure), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kPushWithContext), R(3), U8(2), //
+ B(PushContext), R(1), //
+ B(LdaLookupSlot), U8(1), //
+ B(PopContext), R(0), //
+ B(Return), //
+ },
+ 2,
+ {InstanceType::FIXED_ARRAY_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ };
+ // clang-format on
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
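
TEST(WithStatement) captures the context dance for `with`: the subject is converted with ToObject, a with-context is allocated via Runtime::kPushWithContext and pushed for the body, names in the body resolve dynamically through LdaLookupSlot, and the context is popped on the way out. Condensed from the expectations above:

  B(ToObject),                                                  // with subject
  B(Star), R(3),                                                //
  B(Ldar), R(closure),                                          //
  B(Star), R(4),                                                //
  B(CallRuntime), U16(Runtime::kPushWithContext), R(3), U8(2),  //
  B(PushContext), R(1),                                         //
  B(LdaLookupSlot), U8(1),   // dynamic lookup of 'x' in the with scope
  B(PopContext), R(0),                                          //
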
+TEST(DoDebugger) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ // clang-format off
+ ExpectedSnippet<const char*> snippet = {
+ "debugger;",
+ 0,
+ 1,
+ 4,
+ {
+ B(StackCheck), //
+ B(Debugger), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 0
+ };
+ // clang-format on
+
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippet.code_snippet);
+ CheckBytecodeArrayEqual(snippet, bytecode_array);
+}
+
+// TODO(rmcilroy): Update expectations after the switch to
+// Runtime::kDefineDataPropertyInLiteral.
+TEST(ClassDeclarations) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ int closure = Register::function_closure().index();
+ int context = Register::current_context().index();
+
+ // clang-format off
+ ExpectedSnippet<InstanceType, 12> snippets[] = {
+ {"class Person {\n"
+ " constructor(name) { this.name = name; }\n"
+ " speak() { console.log(this.name + ' is speaking.'); }\n"
+ "}\n",
+ 9 * kPointerSize,
+ 1,
+ 71,
+ {
+ B(LdaTheHole), //
+ B(Star), R(1), //
+ B(StackCheck), //
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(LdaTheHole), //
+ B(Star), R(2), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(3), //
+ B(LdaSmi8), U8(15), //
+ B(Star), R(4), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(5), //
+ B(CallRuntime), U16(Runtime::kDefineClass), R(2), U8(4), //
+ B(Star), R(2), //
+ B(LoadIC), R(2), U8(2), U8(1), //
+ B(Star), R(3), //
+ B(Mov), R(3), R(4), //
+ B(LdaConstant), U8(3), //
+ B(Star), R(5), //
+ B(CreateClosure), U8(4), U8(0), //
+ B(Star), R(6), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(7), //
+ B(LdaZero), //
+ B(Star), R(8), //
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(4), U8(5),
+ B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(2), U8(2), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 5,
+ { InstanceType::SHARED_FUNCTION_INFO_TYPE, kInstanceTypeDontCare,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE}},
+ {"class person {\n"
+ " constructor(name) { this.name = name; }\n"
+ " speak() { console.log(this.name + ' is speaking.'); }\n"
+ "}\n",
+ 9 * kPointerSize,
+ 1,
+ 71,
+ {
+ B(LdaTheHole), //
+ B(Star), R(1), //
+ B(StackCheck), //
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(LdaTheHole), //
+ B(Star), R(2), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(3), //
+ B(LdaSmi8), U8(15), //
+ B(Star), R(4), //
+ B(LdaConstant), U8(1), //
+ B(Star), R(5), //
+ B(CallRuntime), U16(Runtime::kDefineClass), R(2), U8(4), //
+ B(Star), R(2), //
+ B(LoadIC), R(2), U8(2), U8(1), //
+ B(Star), R(3), //
+ B(Mov), R(3), R(4), //
+ B(LdaConstant), U8(3), //
+ B(Star), R(5), //
+ B(CreateClosure), U8(4), U8(0), //
+ B(Star), R(6), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(7), //
+ B(LdaZero), //
+ B(Star), R(8), //
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(4), U8(5),
+ B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(2), U8(2), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 5,
+ { InstanceType::SHARED_FUNCTION_INFO_TYPE, kInstanceTypeDontCare,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE}},
+ {"var n0 = 'a';"
+ "var n1 = 'b';"
+ "class N {\n"
+ " [n0]() { return n0; }\n"
+ " static [n1]() { return n1; }\n"
+ "}\n",
+ 10 * kPointerSize,
+ 1,
+ 125,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), //
+ /* */ U8(1), //
+ B(PushContext), R(2), //
+ B(LdaTheHole), //
+ B(Star), R(1), //
+ B(StackCheck), //
+ B(LdaConstant), U8(0), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(LdaConstant), U8(1), //
+ B(StaContextSlot), R(context), U8(5), //
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(LdaTheHole), //
+ B(Star), R(3), //
+ B(CreateClosure), U8(2), U8(0), //
+ B(Star), R(4), //
+ B(LdaSmi8), U8(41), //
+ B(Star), R(5), //
+ B(LdaSmi8), U8(107), //
+ B(Star), R(6), //
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4), //
+ B(Star), R(3), //
+ B(LoadIC), R(3), U8(3), U8(1), //
+ B(Star), R(4), //
+ B(Mov), R(4), R(5), //
+ B(LdaContextSlot), R(context), U8(4), //
+ B(ToName), //
+ B(Star), R(6), //
+ B(CreateClosure), U8(4), U8(0), //
+ B(Star), R(7), //
+ B(LdaSmi8), U8(2), //
+ B(Star), R(8), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(9), //
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(5), U8(5),
+ B(Mov), R(3), R(5), //
+ B(LdaContextSlot), R(context), U8(5), //
+ B(ToName), //
+ B(Star), R(6), //
+ B(LdaConstant), U8(3), //
+ B(TestEqualStrict), R(6), //
+ B(JumpIfFalse), U8(7), //
+ B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), //
+ /* */ R(0), U8(0), //
+ B(CreateClosure), U8(5), U8(0), //
+ B(Star), R(7), //
+ B(LdaSmi8), U8(1), //
+ B(Star), R(9), //
+ B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(5), U8(5),
+ B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(3), U8(2), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(LdaUndefined), //
+ B(Return), //
+ },
+ 6,
+ { InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::SHARED_FUNCTION_INFO_TYPE}},
+ {"var count = 0;\n"
+ "class C { constructor() { count++; }}\n"
+ "return new C();\n",
+ 10 * kPointerSize,
+ 1,
+ 74,
+ {
+ B(CallRuntime), U16(Runtime::kNewFunctionContext), R(closure), U8(1), //
+ B(PushContext), R(2), //
+ B(LdaTheHole), //
+ B(Star), R(1), //
+ B(StackCheck), //
+ B(LdaZero), //
+ B(StaContextSlot), R(context), U8(4), //
+ B(LdaTheHole), //
+ B(Star), R(0), //
+ B(LdaTheHole), //
+ B(Star), R(3), //
+ B(CreateClosure), U8(0), U8(0), //
+ B(Star), R(4), //
+ B(LdaSmi8), U8(30), //
+ B(Star), R(5), //
+ B(LdaSmi8), U8(67), //
+ B(Star), R(6), //
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4), //
+ B(Star), R(3), //
+ B(LoadIC), R(3), U8(1), U8(1), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(3), U8(2), //
+ B(Star), R(0), //
+ B(Star), R(1), //
+ B(JumpIfNotHole), U8(11), //
+ B(LdaConstant), U8(2), //
+ B(Star), R(4), //
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1), //
+ B(Star), R(3), //
+ B(New), R(3), R(0), U8(0), //
+ B(Return), //
+ },
+ 3,
+ { InstanceType::SHARED_FUNCTION_INFO_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE,
+ InstanceType::ONE_BYTE_INTERNALIZED_STRING_TYPE}},
+ };
+ // clang-format on
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
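
The ClassDeclarations expectations all follow one sequence: Runtime::kDefineClass builds the class from the constructor closure plus source positions, the prototype is fetched with a LoadIC, each method is installed with Runtime::kDefineDataPropertyInLiteral, and Runtime::kFinalizeClassDefinition seals the result. A condensed sketch, with operand indices as in the first snippet above:

  B(CreateClosure), U8(0), U8(0),                           // the constructor
  B(Star), R(3),                                            //
  // ... source positions and remaining kDefineClass arguments elided ...
  B(CallRuntime), U16(Runtime::kDefineClass), R(2), U8(4),  //
  B(Star), R(2),                                            //
  B(LoadIC), R(2), U8(2), U8(1),                            // class .prototype
  // per method: name, closure and attributes in consecutive registers, then:
  B(CallRuntime), U16(Runtime::kDefineDataPropertyInLiteral), R(4), U8(5),
  B(CallRuntime), U16(Runtime::kFinalizeClassDefinition), R(2), U8(2),
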
+// TODO(oth): Add tests for the super keyword.
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 506cf00cd0..69cf0e18bd 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -65,8 +65,6 @@ class InterpreterTester {
bytecode_(bytecode),
feedback_vector_(feedback_vector) {
i::FLAG_ignition = true;
- i::FLAG_ignition_fake_try_catch = true;
- i::FLAG_ignition_fallback_on_eval_and_catch = false;
i::FLAG_always_opt = false;
     // Set the Ignition filter flag via SetFlagsFromString to avoid a
     // double-free (or a potential leak, since the StrDup()-based ownership
     // handoff is easy to get wrong).
@@ -98,6 +96,18 @@ class InterpreterTester {
return InterpreterCallable<A...>(isolate_, GetBytecodeFunction<A...>());
}
+ Local<Message> CheckThrowsReturnMessage() {
+ TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate_));
+ auto callable = GetCallable<>();
+ MaybeHandle<Object> no_result = callable();
+ CHECK(isolate_->has_pending_exception());
+ CHECK(try_catch.HasCaught());
+ CHECK(no_result.is_null());
+ isolate_->OptionalRescheduleException(true);
+ CHECK(!try_catch.Message().IsEmpty());
+ return try_catch.Message();
+ }
+
static Handle<Object> NewObject(const char* script) {
return v8::Utils::OpenHandle(*CompileRun(script));
}
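
The new CheckThrowsReturnMessage helper bundles the boilerplate for negative tests: install a TryCatch, invoke the compiled bytecode, assert that an exception is pending and was caught, reschedule it so the isolate is left in a clean state, and hand back the v8::Message. A usage sketch, assuming a `handles` scope and a `bytecode_array` as in the neighbouring tests:

  InterpreterTester tester(handles.main_isolate(), bytecode_array);
  Local<Message> message = tester.CheckThrowsReturnMessage();
  // Callers can now assert on the thrown message, e.g. compare
  // message->Get() against the expected error text.
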
@@ -165,10 +175,8 @@ TEST(InterpreterReturn) {
Handle<Object> undefined_value =
handles.main_isolate()->factory()->undefined_value();
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 0);
builder.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
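
The mechanical change repeated through the rest of this file: BytecodeArrayBuilder no longer exposes set_parameter_count/set_context_count/set_locals_count setters; the counts are constructor arguments, in the order parameter count, context count, locals count (judging by the call sites below). Before and after:

  // Before: default-construct, then three setters.
  BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
  builder.set_parameter_count(1);
  builder.set_context_count(0);
  builder.set_locals_count(0);

  // After: counts fixed at construction time.
  BytecodeArrayBuilder builder2(handles.main_isolate(), handles.main_zone(),
                                1,   // parameters (the receiver)
                                0,   // contexts
                                0);  // locals
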
@@ -184,10 +192,8 @@ TEST(InterpreterLoadUndefined) {
Handle<Object> undefined_value =
handles.main_isolate()->factory()->undefined_value();
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 0);
builder.LoadUndefined().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -202,10 +208,8 @@ TEST(InterpreterLoadNull) {
HandleAndZoneScope handles;
Handle<Object> null_value = handles.main_isolate()->factory()->null_value();
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 0);
builder.LoadNull().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -221,10 +225,8 @@ TEST(InterpreterLoadTheHole) {
Handle<Object> the_hole_value =
handles.main_isolate()->factory()->the_hole_value();
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 0);
builder.LoadTheHole().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -239,10 +241,8 @@ TEST(InterpreterLoadTrue) {
HandleAndZoneScope handles;
Handle<Object> true_value = handles.main_isolate()->factory()->true_value();
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 0);
builder.LoadTrue().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -257,10 +257,8 @@ TEST(InterpreterLoadFalse) {
HandleAndZoneScope handles;
Handle<Object> false_value = handles.main_isolate()->factory()->false_value();
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 0);
builder.LoadFalse().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -277,10 +275,8 @@ TEST(InterpreterLoadLiteral) {
// Small Smis.
for (int i = -128; i < 128; i++) {
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 0);
builder.LoadLiteral(Smi::FromInt(i)).Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -292,10 +288,8 @@ TEST(InterpreterLoadLiteral) {
// Large Smis.
{
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 0);
builder.LoadLiteral(Smi::FromInt(0x12345678)).Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -307,10 +301,8 @@ TEST(InterpreterLoadLiteral) {
// Heap numbers.
{
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 0);
builder.LoadLiteral(factory->NewHeapNumber(-2.1e19)).Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -322,10 +314,8 @@ TEST(InterpreterLoadLiteral) {
// Strings.
{
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 0);
Handle<i::String> string = factory->NewStringFromAsciiChecked("String");
builder.LoadLiteral(string).Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -342,10 +332,8 @@ TEST(InterpreterLoadStoreRegisters) {
HandleAndZoneScope handles;
Handle<Object> true_value = handles.main_isolate()->factory()->true_value();
for (int i = 0; i <= kMaxInt8; i++) {
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(i + 1);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, i + 1);
Register reg(i);
builder.LoadTrue()
.StoreAccumulatorInRegister(reg)
@@ -362,117 +350,6 @@ TEST(InterpreterLoadStoreRegisters) {
}
-TEST(InterpreterExchangeRegisters) {
- for (int locals_count = 2; locals_count < 300; locals_count += 126) {
- HandleAndZoneScope handles;
- for (int exchanges = 1; exchanges < 4; exchanges++) {
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(locals_count);
- builder.set_context_count(0);
- builder.set_parameter_count(0);
-
- Register r0(0);
- Register r1(locals_count - 1);
- builder.LoadTrue();
- builder.StoreAccumulatorInRegister(r0);
- builder.ExchangeRegisters(r0, r1);
- builder.LoadFalse();
- builder.StoreAccumulatorInRegister(r0);
-
- bool expected = false;
- for (int i = 0; i < exchanges; i++) {
- builder.ExchangeRegisters(r0, r1);
- expected = !expected;
- }
- builder.LoadAccumulatorWithRegister(r0);
- builder.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
- auto callable = tester.GetCallable<>();
- Handle<Object> return_val = callable().ToHandleChecked();
- Handle<Object> expected_val =
- handles.main_isolate()->factory()->ToBoolean(expected);
- CHECK(return_val.is_identical_to(expected_val));
- }
- }
-}
-
-
-TEST(InterpreterExchangeRegistersWithParameter) {
- for (int locals_count = 2; locals_count < 300; locals_count += 126) {
- HandleAndZoneScope handles;
- for (int exchanges = 1; exchanges < 4; exchanges++) {
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(locals_count);
- builder.set_context_count(0);
- builder.set_parameter_count(3);
-
- Register r0 = Register::FromParameterIndex(2, 3);
- Register r1(locals_count - 1);
- builder.LoadTrue();
- builder.StoreAccumulatorInRegister(r0);
- builder.ExchangeRegisters(r0, r1);
- builder.LoadFalse();
- builder.StoreAccumulatorInRegister(r0);
-
- bool expected = false;
- for (int i = 0; i < exchanges; i++) {
- builder.ExchangeRegisters(r0, r1);
- expected = !expected;
- }
- builder.LoadAccumulatorWithRegister(r0);
- builder.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
- auto callable = tester.GetCallable<>();
- Handle<Object> return_val = callable().ToHandleChecked();
- Handle<Object> expected_val =
- handles.main_isolate()->factory()->ToBoolean(expected);
- CHECK(return_val.is_identical_to(expected_val));
- }
- }
-}
-
-
-TEST(InterpreterExchangeWideRegisters) {
- for (int locals_count = 3; locals_count < 300; locals_count += 126) {
- HandleAndZoneScope handles;
- for (int exchanges = 0; exchanges < 7; exchanges++) {
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(locals_count);
- builder.set_context_count(0);
- builder.set_parameter_count(0);
-
- Register r0(0);
- Register r1(locals_count - 1);
- Register r2(locals_count - 2);
- builder.LoadLiteral(Smi::FromInt(200));
- builder.StoreAccumulatorInRegister(r0);
- builder.ExchangeRegisters(r0, r1);
- builder.LoadLiteral(Smi::FromInt(100));
- builder.StoreAccumulatorInRegister(r0);
- builder.ExchangeRegisters(r0, r2);
- builder.LoadLiteral(Smi::FromInt(0));
- builder.StoreAccumulatorInRegister(r0);
- for (int i = 0; i < exchanges; i++) {
- builder.ExchangeRegisters(r1, r2);
- builder.ExchangeRegisters(r0, r1);
- }
- builder.LoadAccumulatorWithRegister(r0);
- builder.Return();
- Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
- InterpreterTester tester(handles.main_isolate(), bytecode_array);
- auto callable = tester.GetCallable<>();
- Handle<Object> return_val = callable().ToHandleChecked();
- Handle<Object> expected_val =
- handles.main_isolate()->factory()->NewNumberFromInt(100 *
- (exchanges % 3));
- CHECK(return_val.is_identical_to(expected_val));
- }
- }
-}
-
-
static const Token::Value kShiftOperators[] = {
Token::Value::SHL, Token::Value::SAR, Token::Value::SHR};
@@ -539,17 +416,14 @@ TEST(InterpreterShiftOpsSmi) {
HandleAndZoneScope handles;
i::Factory* factory = handles.main_isolate()->factory();
BytecodeArrayBuilder builder(handles.main_isolate(),
- handles.main_zone());
- builder.set_locals_count(1);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ handles.main_zone(), 1, 0, 1);
Register reg(0);
int lhs = lhs_inputs[l];
int rhs = rhs_inputs[r];
builder.LoadLiteral(Smi::FromInt(lhs))
.StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(rhs))
- .BinaryOperation(kShiftOperators[o], reg, Strength::WEAK)
+ .BinaryOperation(kShiftOperators[o], reg)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
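A second recurring change: BinaryOperation and CompareOperation drop their trailing Strength::WEAK argument; the strength parameter disappearing across the board is consistent with strong mode being retired in this V8 release (an inference from the diff, not stated in it). Before and after, with an illustrative comparison token:

    // old: builder.BinaryOperation(Token::Value::ADD, reg, Strength::WEAK);
    builder.BinaryOperation(Token::Value::ADD, reg);
    // old: builder.CompareOperation(Token::Value::LT, reg, Strength::WEAK);
    builder.CompareOperation(Token::Value::LT, reg);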
@@ -574,17 +448,14 @@ TEST(InterpreterBinaryOpsSmi) {
HandleAndZoneScope handles;
i::Factory* factory = handles.main_isolate()->factory();
BytecodeArrayBuilder builder(handles.main_isolate(),
- handles.main_zone());
- builder.set_locals_count(1);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ handles.main_zone(), 1, 0, 1);
Register reg(0);
int lhs = lhs_inputs[l];
int rhs = rhs_inputs[r];
builder.LoadLiteral(Smi::FromInt(lhs))
.StoreAccumulatorInRegister(reg)
.LoadLiteral(Smi::FromInt(rhs))
- .BinaryOperation(kArithmeticOperators[o], reg, Strength::WEAK)
+ .BinaryOperation(kArithmeticOperators[o], reg)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -610,17 +481,14 @@ TEST(InterpreterBinaryOpsHeapNumber) {
HandleAndZoneScope handles;
i::Factory* factory = handles.main_isolate()->factory();
BytecodeArrayBuilder builder(handles.main_isolate(),
- handles.main_zone());
- builder.set_locals_count(1);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ handles.main_zone(), 1, 0, 1);
Register reg(0);
double lhs = lhs_inputs[l];
double rhs = rhs_inputs[r];
builder.LoadLiteral(factory->NewNumber(lhs))
.StoreAccumulatorInRegister(reg)
.LoadLiteral(factory->NewNumber(rhs))
- .BinaryOperation(kArithmeticOperators[o], reg, Strength::WEAK)
+ .BinaryOperation(kArithmeticOperators[o], reg)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -669,15 +537,13 @@ TEST(InterpreterStringAdd) {
};
for (size_t i = 0; i < arraysize(test_cases); i++) {
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(1);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 1);
Register reg(0);
builder.LoadLiteral(test_cases[i].lhs)
.StoreAccumulatorInRegister(reg)
.LoadLiteral(test_cases[i].rhs)
- .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::ADD, reg)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -691,10 +557,8 @@ TEST(InterpreterStringAdd) {
TEST(InterpreterParameter1) {
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 0);
builder.LoadAccumulatorWithRegister(builder.Parameter(0)).Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -715,18 +579,16 @@ TEST(InterpreterParameter1) {
TEST(InterpreterParameter8) {
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(8);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 8,
+ 0, 0);
builder.LoadAccumulatorWithRegister(builder.Parameter(0))
- .BinaryOperation(Token::Value::ADD, builder.Parameter(1), Strength::WEAK)
- .BinaryOperation(Token::Value::ADD, builder.Parameter(2), Strength::WEAK)
- .BinaryOperation(Token::Value::ADD, builder.Parameter(3), Strength::WEAK)
- .BinaryOperation(Token::Value::ADD, builder.Parameter(4), Strength::WEAK)
- .BinaryOperation(Token::Value::ADD, builder.Parameter(5), Strength::WEAK)
- .BinaryOperation(Token::Value::ADD, builder.Parameter(6), Strength::WEAK)
- .BinaryOperation(Token::Value::ADD, builder.Parameter(7), Strength::WEAK)
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(1))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(2))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(3))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(4))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(5))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(6))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(7))
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -752,10 +614,8 @@ TEST(InterpreterParameter8) {
TEST(InterpreterParameter1Assign) {
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 0);
builder.LoadLiteral(Smi::FromInt(5))
.StoreAccumulatorInRegister(builder.Parameter(0))
.LoadAccumulatorWithRegister(builder.Parameter(0))
@@ -882,12 +742,9 @@ TEST(InterpreterLoadNamedProperty) {
Handle<i::String> name = factory->NewStringFromAsciiChecked("val");
name = factory->string_table()->LookupString(isolate, name);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
- builder.LoadNamedProperty(builder.Parameter(0), name, vector->GetIndex(slot),
- i::SLOPPY)
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 0);
+ builder.LoadNamedProperty(builder.Parameter(0), name, vector->GetIndex(slot))
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -938,13 +795,10 @@ TEST(InterpreterLoadKeyedProperty) {
Handle<i::String> key = factory->NewStringFromAsciiChecked("key");
key = factory->string_table()->LookupString(isolate, key);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(1);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 1);
builder.LoadLiteral(key)
- .LoadKeyedProperty(builder.Parameter(0), vector->GetIndex(slot),
- i::STRICT)
+ .LoadKeyedProperty(builder.Parameter(0), vector->GetIndex(slot))
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -983,10 +837,8 @@ TEST(InterpreterStoreNamedProperty) {
Handle<i::String> name = factory->NewStringFromAsciiChecked("val");
name = factory->string_table()->LookupString(isolate, name);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 0);
builder.LoadLiteral(Smi::FromInt(999))
.StoreNamedProperty(builder.Parameter(0), name, vector->GetIndex(slot),
i::STRICT)
@@ -1044,10 +896,8 @@ TEST(InterpreterStoreKeyedProperty) {
Handle<i::String> name = factory->NewStringFromAsciiChecked("val");
name = factory->string_table()->LookupString(isolate, name);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(1);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 1);
builder.LoadLiteral(name)
.StoreAccumulatorInRegister(Register(0))
.LoadLiteral(Smi::FromInt(999))
@@ -1078,8 +928,7 @@ TEST(InterpreterStoreKeyedProperty) {
CHECK_EQ(Smi::cast(*result), Smi::FromInt(999));
}
-
-TEST(InterpreterCall) {
+static void TestInterpreterCall(TailCallMode tail_call_mode) {
HandleAndZoneScope handles;
i::Isolate* isolate = handles.main_isolate();
i::Factory* factory = isolate->factory();
@@ -1097,13 +946,11 @@ TEST(InterpreterCall) {
// Check with no args.
{
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(1);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
- builder.LoadNamedProperty(builder.Parameter(0), name, slot_index, i::SLOPPY)
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 1);
+ builder.LoadNamedProperty(builder.Parameter(0), name, slot_index)
.StoreAccumulatorInRegister(Register(0))
- .Call(Register(0), builder.Parameter(0), 0, 0)
+ .Call(Register(0), builder.Parameter(0), 1, 0, tail_call_mode)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -1118,13 +965,11 @@ TEST(InterpreterCall) {
// Check that receiver is passed properly.
{
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(1);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
- builder.LoadNamedProperty(builder.Parameter(0), name, slot_index, i::SLOPPY)
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 1);
+ builder.LoadNamedProperty(builder.Parameter(0), name, slot_index)
.StoreAccumulatorInRegister(Register(0))
- .Call(Register(0), builder.Parameter(0), 0, 0)
+ .Call(Register(0), builder.Parameter(0), 1, 0, tail_call_mode)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -1142,11 +987,9 @@ TEST(InterpreterCall) {
// Check with two parameters (+ receiver).
{
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(4);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
- builder.LoadNamedProperty(builder.Parameter(0), name, slot_index, i::SLOPPY)
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 4);
+ builder.LoadNamedProperty(builder.Parameter(0), name, slot_index)
.StoreAccumulatorInRegister(Register(0))
.LoadAccumulatorWithRegister(builder.Parameter(0))
.StoreAccumulatorInRegister(Register(1))
@@ -1154,7 +997,7 @@ TEST(InterpreterCall) {
.StoreAccumulatorInRegister(Register(2))
.LoadLiteral(Smi::FromInt(11))
.StoreAccumulatorInRegister(Register(3))
- .Call(Register(0), Register(1), 2, 0)
+ .Call(Register(0), Register(1), 3, 0, tail_call_mode)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -1171,11 +1014,9 @@ TEST(InterpreterCall) {
// Check with 10 parameters (+ receiver).
{
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(12);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
- builder.LoadNamedProperty(builder.Parameter(0), name, slot_index, i::SLOPPY)
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 12);
+ builder.LoadNamedProperty(builder.Parameter(0), name, slot_index)
.StoreAccumulatorInRegister(Register(0))
.LoadAccumulatorWithRegister(builder.Parameter(0))
.StoreAccumulatorInRegister(Register(1))
@@ -1199,7 +1040,7 @@ TEST(InterpreterCall) {
.StoreAccumulatorInRegister(Register(10))
.LoadLiteral(factory->NewStringFromAsciiChecked("j"))
.StoreAccumulatorInRegister(Register(11))
- .Call(Register(0), Register(1), 10, 0)
+ .Call(Register(0), Register(1), 11, 0, tail_call_mode)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -1220,6 +1061,9 @@ TEST(InterpreterCall) {
}
}
+TEST(InterpreterCall) { TestInterpreterCall(TailCallMode::kDisallow); }
+
+TEST(InterpreterTailCall) { TestInterpreterCall(TailCallMode::kAllow); }
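The Call sites change in two ways: the argument count now includes the receiver (0 becomes 1, 2 becomes 3, 10 becomes 11), and an explicit TailCallMode is threaded through so one body serves both TEST variants above. A sketch of the updated call, the registers being placeholders:

    builder.Call(Register(0) /* callee */,
                 builder.Parameter(0) /* receiver */,
                 1 /* arg count, receiver included */,
                 0 /* feedback slot */, TailCallMode::kDisallow);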
static BytecodeArrayBuilder& SetRegister(BytecodeArrayBuilder& builder,
Register reg, int value,
@@ -1236,7 +1080,7 @@ static BytecodeArrayBuilder& IncrementRegister(BytecodeArrayBuilder& builder,
Register scratch) {
return builder.StoreAccumulatorInRegister(scratch)
.LoadLiteral(Smi::FromInt(value))
- .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::ADD, reg)
.StoreAccumulatorInRegister(reg)
.LoadAccumulatorWithRegister(scratch);
}
@@ -1244,10 +1088,8 @@ static BytecodeArrayBuilder& IncrementRegister(BytecodeArrayBuilder& builder,
TEST(InterpreterJumps) {
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(2);
- builder.set_context_count(0);
- builder.set_parameter_count(0);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 0,
+ 0, 2);
Register reg(0), scratch(1);
BytecodeLabel label[3];
@@ -1273,10 +1115,8 @@ TEST(InterpreterJumps) {
TEST(InterpreterConditionalJumps) {
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(2);
- builder.set_context_count(0);
- builder.set_parameter_count(0);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 0,
+ 0, 2);
Register reg(0), scratch(1);
BytecodeLabel label[2];
BytecodeLabel done, done1;
@@ -1309,10 +1149,8 @@ TEST(InterpreterConditionalJumps) {
TEST(InterpreterConditionalJumps2) {
// TODO(oth): Add tests for all conditional jumps near and far.
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(2);
- builder.set_context_count(0);
- builder.set_parameter_count(0);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 0,
+ 0, 2);
Register reg(0), scratch(1);
BytecodeLabel label[2];
BytecodeLabel done, done1;
@@ -1397,15 +1235,12 @@ TEST(InterpreterSmiComparisons) {
for (size_t j = 0; j < arraysize(inputs); j++) {
HandleAndZoneScope handles;
BytecodeArrayBuilder builder(handles.main_isolate(),
- handles.main_zone());
+ handles.main_zone(), 0, 0, 1);
Register r0(0);
- builder.set_locals_count(1);
- builder.set_context_count(0);
- builder.set_parameter_count(0);
builder.LoadLiteral(Smi::FromInt(inputs[i]))
.StoreAccumulatorInRegister(r0)
.LoadLiteral(Smi::FromInt(inputs[j]))
- .CompareOperation(comparison, r0, Strength::WEAK)
+ .CompareOperation(comparison, r0)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -1436,15 +1271,12 @@ TEST(InterpreterHeapNumberComparisons) {
HandleAndZoneScope handles;
i::Factory* factory = handles.main_isolate()->factory();
BytecodeArrayBuilder builder(handles.main_isolate(),
- handles.main_zone());
+ handles.main_zone(), 0, 0, 1);
Register r0(0);
- builder.set_locals_count(1);
- builder.set_context_count(0);
- builder.set_parameter_count(0);
builder.LoadLiteral(factory->NewHeapNumber(inputs[i]))
.StoreAccumulatorInRegister(r0)
.LoadLiteral(factory->NewHeapNumber(inputs[j]))
- .CompareOperation(comparison, r0, Strength::WEAK)
+ .CompareOperation(comparison, r0)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -1472,15 +1304,12 @@ TEST(InterpreterStringComparisons) {
HandleAndZoneScope handles;
i::Factory* factory = handles.main_isolate()->factory();
BytecodeArrayBuilder builder(handles.main_isolate(),
- handles.main_zone());
+ handles.main_zone(), 0, 0, 1);
Register r0(0);
- builder.set_locals_count(1);
- builder.set_context_count(0);
- builder.set_parameter_count(0);
builder.LoadLiteral(factory->NewStringFromAsciiChecked(lhs))
.StoreAccumulatorInRegister(r0)
.LoadLiteral(factory->NewStringFromAsciiChecked(rhs))
- .CompareOperation(comparison, r0, Strength::WEAK)
+ .CompareOperation(comparison, r0)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -1519,24 +1348,21 @@ TEST(InterpreterMixedComparisons) {
HandleAndZoneScope handles;
i::Factory* factory = handles.main_isolate()->factory();
BytecodeArrayBuilder builder(handles.main_isolate(),
- handles.main_zone());
+ handles.main_zone(), 0, 0, 1);
Register r0(0);
- builder.set_locals_count(1);
- builder.set_context_count(0);
- builder.set_parameter_count(0);
if (pass == 0) {
// Comparison with HeapNumber on the lhs and String on the rhs
builder.LoadLiteral(factory->NewNumber(lhs))
.StoreAccumulatorInRegister(r0)
.LoadLiteral(factory->NewStringFromAsciiChecked(rhs_cstr))
- .CompareOperation(comparison, r0, Strength::WEAK)
+ .CompareOperation(comparison, r0)
.Return();
} else {
// Comparison with HeapNumber on the rhs and String on the lhs
builder.LoadLiteral(factory->NewStringFromAsciiChecked(lhs_cstr))
.StoreAccumulatorInRegister(r0)
.LoadLiteral(factory->NewNumber(rhs))
- .CompareOperation(comparison, r0, Strength::WEAK)
+ .CompareOperation(comparison, r0)
.Return();
}
@@ -1564,15 +1390,13 @@ TEST(InterpreterInstanceOf) {
Handle<i::Object> cases[] = {Handle<i::Object>::cast(instance), other};
for (size_t i = 0; i < arraysize(cases); i++) {
bool expected_value = (i == 0);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 0,
+ 0, 1);
Register r0(0);
- builder.set_locals_count(1);
- builder.set_context_count(0);
- builder.set_parameter_count(0);
builder.LoadLiteral(cases[i]);
builder.StoreAccumulatorInRegister(r0)
.LoadLiteral(func)
- .CompareOperation(Token::Value::INSTANCEOF, r0, Strength::WEAK)
+ .CompareOperation(Token::Value::INSTANCEOF, r0)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -1590,20 +1414,18 @@ TEST(InterpreterTestIn) {
i::Factory* factory = handles.main_isolate()->factory();
// Allocate an array
Handle<i::JSArray> array =
- factory->NewJSArray(i::ElementsKind::FAST_SMI_ELEMENTS);
+ factory->NewJSArray(0, i::ElementsKind::FAST_SMI_ELEMENTS);
// Check for these properties on the array object
const char* properties[] = {"length", "fuzzle", "x", "0"};
for (size_t i = 0; i < arraysize(properties); i++) {
bool expected_value = (i == 0);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 0,
+ 0, 1);
Register r0(0);
- builder.set_locals_count(1);
- builder.set_context_count(0);
- builder.set_parameter_count(0);
builder.LoadLiteral(factory->NewStringFromAsciiChecked(properties[i]))
.StoreAccumulatorInRegister(r0)
.LoadLiteral(Handle<Object>::cast(array))
- .CompareOperation(Token::Value::IN, r0, Strength::WEAK)
+ .CompareOperation(Token::Value::IN, r0)
.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
@@ -1620,11 +1442,9 @@ TEST(InterpreterUnaryNot) {
HandleAndZoneScope handles;
for (size_t i = 1; i < 10; i++) {
bool expected_value = ((i & 1) == 1);
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 0,
+ 0, 0);
Register r0(0);
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(0);
builder.LoadFalse();
for (size_t j = 0; j < i; j++) {
builder.LogicalNot();
@@ -1683,11 +1503,9 @@ TEST(InterpreterUnaryNotNonBoolean) {
};
for (size_t i = 0; i < arraysize(object_type_tuples); i++) {
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 0,
+ 0, 0);
Register r0(0);
- builder.set_locals_count(0);
- builder.set_context_count(0);
- builder.set_parameter_count(0);
LoadAny(&builder, factory, object_type_tuples[i].first);
builder.LogicalNot();
builder.Return();
@@ -1731,10 +1549,8 @@ TEST(InterpreterTypeof) {
TEST(InterpreterCallRuntime) {
HandleAndZoneScope handles;
- BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
- builder.set_locals_count(2);
- builder.set_context_count(0);
- builder.set_parameter_count(1);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone(), 1,
+ 0, 2);
builder.LoadLiteral(Smi::FromInt(15))
.StoreAccumulatorInRegister(Register(0))
.LoadLiteral(Smi::FromInt(40))
@@ -2133,29 +1949,76 @@ TEST(InterpreterLogicalAnd) {
TEST(InterpreterTryCatch) {
HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
- // TODO(rmcilroy): modify tests when we have real try catch support.
- std::string source(InterpreterTester::SourceForBody(
- "var a = 1; try { a = a + 1; } catch(e) { a = a + 2; }; return a;"));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
- auto callable = tester.GetCallable<>();
+ std::pair<const char*, Handle<Object>> catches[] = {
+ std::make_pair("var a = 1; try { a = 2 } catch(e) { a = 3 }; return a;",
+ handle(Smi::FromInt(2), isolate)),
+ std::make_pair("var a; try { undef.x } catch(e) { a = 2 }; return a;",
+ handle(Smi::FromInt(2), isolate)),
+ std::make_pair("var a; try { throw 1 } catch(e) { a = e + 2 }; return a;",
+ handle(Smi::FromInt(3), isolate)),
+ std::make_pair("var a; try { throw 1 } catch(e) { a = e + 2 };"
+ " try { throw a } catch(e) { a = e + 3 }; return a;",
+ handle(Smi::FromInt(6), isolate)),
+ };
- Handle<Object> return_val = callable().ToHandleChecked();
- CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(2));
+ for (size_t i = 0; i < arraysize(catches); i++) {
+ std::string source(InterpreterTester::SourceForBody(catches[i].first));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*catches[i].second));
+ }
}
TEST(InterpreterTryFinally) {
HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
- // TODO(rmcilroy): modify tests when we have real try finally support.
- std::string source(InterpreterTester::SourceForBody(
- "var a = 1; try { a = a + 1; } finally { a = a + 2; }; return a;"));
- InterpreterTester tester(handles.main_isolate(), source.c_str());
- auto callable = tester.GetCallable<>();
+ std::pair<const char*, Handle<Object>> finallies[] = {
+ std::make_pair(
+ "var a = 1; try { a = a + 1; } finally { a = a + 2; }; return a;",
+ factory->NewStringFromStaticChars("R4")),
+ std::make_pair(
+ "var a = 1; try { a = 2; return 23; } finally { a = 3 }; return a;",
+ factory->NewStringFromStaticChars("R23")),
+ std::make_pair(
+ "var a = 1; try { a = 2; throw 23; } finally { a = 3 }; return a;",
+ factory->NewStringFromStaticChars("E23")),
+ std::make_pair(
+ "var a = 1; try { a = 2; throw 23; } finally { return a; };",
+ factory->NewStringFromStaticChars("R2")),
+ std::make_pair(
+ "var a = 1; try { a = 2; throw 23; } finally { throw 42; };",
+ factory->NewStringFromStaticChars("E42")),
+ std::make_pair("var a = 1; for (var i = 10; i < 20; i += 5) {"
+ " try { a = 2; break; } finally { a = 3; }"
+ "} return a + i;",
+ factory->NewStringFromStaticChars("R13")),
+ std::make_pair("var a = 1; for (var i = 10; i < 20; i += 5) {"
+ " try { a = 2; continue; } finally { a = 3; }"
+ "} return a + i;",
+ factory->NewStringFromStaticChars("R23")),
+ std::make_pair("var a = 1; try { a = 2;"
+ " try { a = 3; throw 23; } finally { a = 4; }"
+ "} catch(e) { a = a + e; } return a;",
+ factory->NewStringFromStaticChars("R27")),
+ };
- Handle<Object> return_val = callable().ToHandleChecked();
- CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(4));
+ const char* try_wrapper =
+ "(function() { try { return 'R' + f() } catch(e) { return 'E' + e }})()";
+
+ for (size_t i = 0; i < arraysize(finallies); i++) {
+ std::string source(InterpreterTester::SourceForBody(finallies[i].first));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ tester.GetCallable<>();
+ Handle<Object> wrapped = v8::Utils::OpenHandle(*CompileRun(try_wrapper));
+ CHECK(wrapped->SameValue(*finallies[i].second));
+ }
}
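The expected strings encode the completion path as well as the value: try_wrapper returns 'R' plus f()'s return value on normal completion and 'E' plus the thrown value otherwise. So "E42" records that the finally block's throw 42 replaced the in-flight throw 23, and "R13" is a + i (3 + 10) after break ran the finally block once.

    // "var a = 1; try { a = 2; throw 23; } finally { return a; };"
    // -> the finally's return wins over the pending exception: "R2".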
@@ -2164,7 +2027,6 @@ TEST(InterpreterThrow) {
i::Isolate* isolate = handles.main_isolate();
i::Factory* factory = isolate->factory();
- // TODO(rmcilroy): modify tests when we have real try catch support.
std::pair<const char*, Handle<Object>> throws[] = {
std::make_pair("throw undefined;\n",
factory->undefined_value()),
@@ -2364,6 +2226,15 @@ TEST(InterpreterCreateArguments) {
std::make_pair("function f(a, b, c, d) {"
" 'use strict'; c = b; return arguments[2]; }",
2),
+ // check rest parameters
+ std::make_pair("function f(...restArray) { return restArray[0]; }", 0),
+ std::make_pair("function f(a, ...restArray) { return restArray[0]; }", 1),
+ std::make_pair("function f(a, ...restArray) { return arguments[0]; }", 0),
+ std::make_pair("function f(a, ...restArray) { return arguments[1]; }", 1),
+ std::make_pair("function f(a, ...restArray) { return restArray[1]; }", 2),
+ std::make_pair("function f(a, ...arguments) { return arguments[0]; }", 1),
+ std::make_pair("function f(a, b, ...restArray) { return restArray[0]; }",
+ 2),
};
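In the new rows the integer is, as in the existing rows, the index of the actual argument the function is expected to return: a rest parameter starts collecting after the named parameters, while the arguments object still sees every actual. Assuming the harness calls f with three values:

    // f(30, 40, 50) with f(a, ...restArray):
    //   restArray[0] == 40  -> actual argument index 1
    //   arguments[0] == 30  -> actual argument index 0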
// Test passing no arguments.
@@ -2698,211 +2569,301 @@ TEST(InterpreterBasicLoops) {
TEST(InterpreterForIn) {
- HandleAndZoneScope handles;
-
std::pair<const char*, int> for_in_samples[] = {
- {"function f() {\n"
- " var r = -1;\n"
- " for (var a in null) { r = a; }\n"
- " return r;\n"
- "}",
+ {"var r = -1;\n"
+ "for (var a in null) { r = a; }\n"
+ "return r;\n",
-1},
- {"function f() {\n"
- " var r = -1;\n"
- " for (var a in undefined) { r = a; }\n"
- " return r;\n"
- "}",
+ {"var r = -1;\n"
+ "for (var a in undefined) { r = a; }\n"
+ "return r;\n",
-1},
- {"function f() {\n"
- " var r = 0;\n"
- " for (var a in [0,6,7,9]) { r = r + (1 << a); }\n"
- " return r;\n"
- "}",
+ {"var r = 0;\n"
+ "for (var a in [0,6,7,9]) { r = r + (1 << a); }\n"
+ "return r;\n",
0xf},
+ {"var r = 0;\n"
+ "for (var a in [0,6,7,9]) { r = r + (1 << a); }\n"
+ "var r = 0;\n"
+ "for (var a in [0,6,7,9]) { r = r + (1 << a); }\n"
+ "return r;\n",
+ 0xf},
+ {"var r = 0;\n"
+ "for (var a in 'foobar') { r = r + (1 << a); }\n"
+ "return r;\n",
+ 0x3f},
+ {"var r = 0;\n"
+ "for (var a in {1:0, 10:1, 100:2, 1000:3}) {\n"
+ " r = r + Number(a);\n"
+ " }\n"
+ " return r;\n",
+ 1111},
+ {"var r = 0;\n"
+ "var data = {1:0, 10:1, 100:2, 1000:3};\n"
+ "for (var a in data) {\n"
+ " if (a == 1) delete data[1];\n"
+ " r = r + Number(a);\n"
+ " }\n"
+ " return r;\n",
+ 1111},
+ {"var r = 0;\n"
+ "var data = {1:0, 10:1, 100:2, 1000:3};\n"
+ "for (var a in data) {\n"
+ " if (a == 10) delete data[100];\n"
+ " r = r + Number(a);\n"
+ " }\n"
+ " return r;\n",
+ 1011},
+ {"var r = 0;\n"
+ "var data = {1:0, 10:1, 100:2, 1000:3};\n"
+ "for (var a in data) {\n"
+ " if (a == 10) data[10000] = 4;\n"
+ " r = r + Number(a);\n"
+ " }\n"
+ " return r;\n",
+ 1111},
+ {"var r = 0;\n"
+ "var input = 'foobar';\n"
+ "for (var a in input) {\n"
+ " if (input[a] == 'b') break;\n"
+ " r = r + (1 << a);\n"
+ "}\n"
+ "return r;\n",
+ 0x7},
+ {"var r = 0;\n"
+ "var input = 'foobar';\n"
+ "for (var a in input) {\n"
+ " if (input[a] == 'b') continue;\n"
+ " r = r + (1 << a);\n"
+ "}\n"
+ "return r;\n",
+ 0x37},
+ {"var r = 0;\n"
+ "var data = {1:0, 10:1, 100:2, 1000:3};\n"
+ "for (var a in data) {\n"
+ " if (a == 10) {\n"
+ " data[10000] = 4;\n"
+ " }\n"
+ " r = r + Number(a);\n"
+ "}\n"
+ "return r;\n",
+ 1111},
+ {"var r = [ 3 ];\n"
+ "var data = {1:0, 10:1, 100:2, 1000:3};\n"
+ "for (r[10] in data) {\n"
+ "}\n"
+ "return Number(r[10]);\n",
+ 1000},
+ {"var r = [ 3 ];\n"
+ "var data = {1:0, 10:1, 100:2, 1000:3};\n"
+ "for (r['100'] in data) {\n"
+ "}\n"
+ "return Number(r['100']);\n",
+ 1000},
+ {"var obj = {}\n"
+ "var descObj = new Boolean(false);\n"
+ "var accessed = 0;\n"
+ "descObj.enumerable = true;\n"
+ "Object.defineProperties(obj, { prop:descObj });\n"
+ "for (var p in obj) {\n"
+ " if (p === 'prop') { accessed = 1; }\n"
+ "}\n"
+ "return accessed;",
+ 1},
+ {"var appointment = {};\n"
+ "Object.defineProperty(appointment, 'startTime', {\n"
+ " value: 1001,\n"
+ " writable: false,\n"
+ " enumerable: false,\n"
+ " configurable: true\n"
+ "});\n"
+ "Object.defineProperty(appointment, 'name', {\n"
+ " value: 'NAME',\n"
+ " writable: false,\n"
+ " enumerable: false,\n"
+ " configurable: true\n"
+ "});\n"
+ "var meeting = Object.create(appointment);\n"
+ "Object.defineProperty(meeting, 'conferenceCall', {\n"
+ " value: 'In-person meeting',\n"
+ " writable: false,\n"
+ " enumerable: false,\n"
+ " configurable: true\n"
+ "});\n"
+ "\n"
+ "var teamMeeting = Object.create(meeting);\n"
+ "\n"
+ "var flags = 0;\n"
+ "for (var p in teamMeeting) {\n"
+ " if (p === 'startTime') {\n"
+ " flags |= 1;\n"
+ " }\n"
+ " if (p === 'name') {\n"
+ " flags |= 2;\n"
+ " }\n"
+ " if (p === 'conferenceCall') {\n"
+ " flags |= 4;\n"
+ " }\n"
+ "}\n"
+ "\n"
+ "var hasOwnProperty = !teamMeeting.hasOwnProperty('name') &&\n"
+ " !teamMeeting.hasOwnProperty('startTime') &&\n"
+ " !teamMeeting.hasOwnProperty('conferenceCall');\n"
+ "if (!hasOwnProperty) {\n"
+ " flags |= 8;\n"
+ "}\n"
+ "return flags;\n",
+ 0},
+ {"var data = {x:23, y:34};\n"
+ " var result = 0;\n"
+ "var o = {};\n"
+ "var arr = [o];\n"
+ "for (arr[0].p in data)\n" // This is to test if value is loaded
+ " result += data[arr[0].p];\n" // back from accumulator before storing
+ "return result;\n", // named properties.
+ 57},
+ {"var data = {x:23, y:34};\n"
+ "var result = 0;\n"
+ "var o = {};\n"
+ "var i = 0;\n"
+ "for (o[i++] in data)\n" // This is to test if value is loaded
+ " result += data[o[i-1]];\n" // back from accumulator before
+ "return result;\n", // storing keyed properties.
+ 57}};
+
+  // Two passes are made for this test. On the first, 8-bit register
+  // operands are employed, and on the second, 16-bit register operands
+  // are used.
+ for (int pass = 0; pass < 2; pass++) {
+ HandleAndZoneScope handles;
+ std::ostringstream wide_os;
+ if (pass == 1) {
+ for (int i = 0; i < 200; i++) {
+ wide_os << "var local" << i << " = 0;\n";
+ }
+ }
+
+ for (size_t i = 0; i < arraysize(for_in_samples); i++) {
+ std::ostringstream body_os;
+ body_os << wide_os.str() << for_in_samples[i].first;
+ std::string body(body_os.str());
+ std::string function = InterpreterTester::SourceForBody(body.c_str());
+ InterpreterTester tester(handles.main_isolate(), function.c_str());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK_EQ(Handle<Smi>::cast(return_val)->value(),
+ for_in_samples[i].second);
+ }
+ }
+}
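The 200 dummy locals emitted on the second pass push the register indexes used by the for-in machinery past what an 8-bit operand can address, forcing the interpreter onto its wide bytecode variants. A rough model of the threshold (127 is an assumption, mirroring the kMaxInt8 bound used earlier in this file):

    bool NeedsWideOperand(int register_index) {
      return register_index > 127;  // beyond a signed byte -> Wide bytecode
    }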
+
+
+TEST(InterpreterForOf) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ std::pair<const char*, Handle<Object>> for_of[] = {
{"function f() {\n"
" var r = 0;\n"
- " for (var a in [0,6,7,9]) { r = r + (1 << a); }\n"
- " var r = 0;\n"
- " for (var a in [0,6,7,9]) { r = r + (1 << a); }\n"
+ " for (var a of [0,6,7,9]) { r += a; }\n"
" return r;\n"
"}",
- 0xf},
+ handle(Smi::FromInt(22), isolate)},
{"function f() {\n"
- " var r = 0;\n"
- " for (var a in 'foobar') { r = r + (1 << a); }\n"
+ " var r = '';\n"
+ " for (var a of 'foobar') { r = a + r; }\n"
" return r;\n"
"}",
- 0x3f},
+ factory->NewStringFromStaticChars("raboof")},
{"function f() {\n"
+ " var a = [1, 2, 3];\n"
+ " a.name = 4;\n"
" var r = 0;\n"
- " for (var a in {1:0, 10:1, 100:2, 1000:3}) {\n"
- " r = r + Number(a);\n"
- " }\n"
- " return r;\n"
+ " for (var x of a) { r += x; }\n"
+ " return r;\n"
"}",
- 1111},
+ handle(Smi::FromInt(6), isolate)},
{"function f() {\n"
- " var r = 0;\n"
- " var data = {1:0, 10:1, 100:2, 1000:3};\n"
- " for (var a in data) {\n"
- " if (a == 1) delete data[1];\n"
- " r = r + Number(a);\n"
- " }\n"
- " return r;\n"
- "}",
- 1111},
+ " var r = '';\n"
+ " var data = [1, 2, 3]; \n"
+ " for (a of data) { delete data[0]; r += a; } return r; }",
+ factory->NewStringFromStaticChars("123")},
{"function f() {\n"
- " var r = 0;\n"
- " var data = {1:0, 10:1, 100:2, 1000:3};\n"
- " for (var a in data) {\n"
- " if (a == 10) delete data[100];\n"
- " r = r + Number(a);\n"
- " }\n"
- " return r;\n"
- "}",
- 1011},
+ " var r = '';\n"
+ " var data = [1, 2, 3]; \n"
+ " for (a of data) { delete data[2]; r += a; } return r; }",
+ factory->NewStringFromStaticChars("12undefined")},
{"function f() {\n"
- " var r = 0;\n"
- " var data = {1:0, 10:1, 100:2, 1000:3};\n"
- " for (var a in data) {\n"
- " if (a == 10) data[10000] = 4;\n"
- " r = r + Number(a);\n"
- " }\n"
- " return r;\n"
- "}",
- 1111},
+ " var r = '';\n"
+ " var data = [1, 2, 3]; \n"
+ " for (a of data) { delete data; r += a; } return r; }",
+ factory->NewStringFromStaticChars("123")},
{"function f() {\n"
- " var r = 0;\n"
+ " var r = '';\n"
" var input = 'foobar';\n"
- " for (var a in input) {\n"
- " if (input[a] == 'b') break;\n"
- " r = r + (1 << a);\n"
+ " for (var a of input) {\n"
+ " if (a == 'b') break;\n"
+ " r += a;\n"
" }\n"
" return r;\n"
"}",
- 0x7},
+ factory->NewStringFromStaticChars("foo")},
{"function f() {\n"
- "var r = 0;\n"
- "var input = 'foobar';\n"
- "for (var a in input) {\n"
- " if (input[a] == 'b') continue;\n"
- " r = r + (1 << a);\n"
- "}\n"
- "return r;\n"
- "}",
- 0x37},
- {"function f() {\n"
- " var r = 0;\n"
- " var data = {1:0, 10:1, 100:2, 1000:3};\n"
- " for (var a in data) {\n"
- " if (a == 10) {\n"
- " data[10000] = 4;\n"
- " }\n"
- " r = r + Number(a);\n"
+ " var r = '';\n"
+ " var input = 'foobar';\n"
+ " for (var a of input) {\n"
+ " if (a == 'b') continue;\n"
+ " r += a;\n"
" }\n"
" return r;\n"
"}",
- 1111},
+ factory->NewStringFromStaticChars("fooar")},
{"function f() {\n"
- " var r = [ 3 ];\n"
- " var data = {1:0, 10:1, 100:2, 1000:3};\n"
- " for (r[10] in data) {\n"
- " }\n"
- " return Number(r[10]);\n"
+ " var r = '';\n"
+ " var data = [1, 2, 3, 4]; \n"
+ " for (a of data) { data[2] = 567; r += a; }\n"
+ " return r;\n"
"}",
- 1000},
+ factory->NewStringFromStaticChars("125674")},
{"function f() {\n"
- " var r = [ 3 ];\n"
- " var data = {1:0, 10:1, 100:2, 1000:3};\n"
- " for (r['100'] in data) {\n"
- " }\n"
- " return Number(r['100']);\n"
+ " var r = '';\n"
+ " var data = [1, 2, 3, 4]; \n"
+ " for (a of data) { data[4] = 567; r += a; }\n"
+ " return r;\n"
"}",
- 1000},
+ factory->NewStringFromStaticChars("1234567")},
{"function f() {\n"
- " var obj = {}\n"
- " var descObj = new Boolean(false);\n"
- " var accessed = 0;\n"
- " descObj.enumerable = true;\n"
- " Object.defineProperties(obj, { prop:descObj });\n"
- " for (var p in obj) {\n"
- " if (p === 'prop') { accessed = 1; }\n"
- " }\n"
- " return accessed;"
+ " var r = '';\n"
+ " var data = [1, 2, 3, 4]; \n"
+ " for (a of data) { data[5] = 567; r += a; }\n"
+ " return r;\n"
"}",
- 1},
+ factory->NewStringFromStaticChars("1234undefined567")},
{"function f() {\n"
- " var appointment = {};\n"
- " Object.defineProperty(appointment, 'startTime', {\n"
- " value: 1001,\n"
- " writable: false,\n"
- " enumerable: false,\n"
- " configurable: true\n"
- " });\n"
- " Object.defineProperty(appointment, 'name', {\n"
- " value: 'NAME',\n"
- " writable: false,\n"
- " enumerable: false,\n"
- " configurable: true\n"
- " });\n"
- " var meeting = Object.create(appointment);\n"
- " Object.defineProperty(meeting, 'conferenceCall', {\n"
- " value: 'In-person meeting',\n"
- " writable: false,\n"
- " enumerable: false,\n"
- " configurable: true\n"
- " });\n"
- "\n"
- " var teamMeeting = Object.create(meeting);\n"
- "\n"
- " var flags = 0;\n"
- " for (var p in teamMeeting) {\n"
- " if (p === 'startTime') {\n"
- " flags |= 1;\n"
- " }\n"
- " if (p === 'name') {\n"
- " flags |= 2;\n"
- " }\n"
- " if (p === 'conferenceCall') {\n"
- " flags |= 4;\n"
+ " var r = '';\n"
+ " var obj = new Object();\n"
+ " obj[Symbol.iterator] = function() { return {\n"
+ " index: 3,\n"
+ " data: ['a', 'b', 'c', 'd'],"
+ " next: function() {"
+ " return {"
+ " done: this.index == -1,\n"
+ " value: this.index < 0 ? undefined : this.data[this.index--]\n"
" }\n"
- " }\n"
- "\n"
- " var hasOwnProperty = !teamMeeting.hasOwnProperty('name') &&\n"
- " !teamMeeting.hasOwnProperty('startTime') &&\n"
- " !teamMeeting.hasOwnProperty('conferenceCall');\n"
- " if (!hasOwnProperty) {\n"
- " flags |= 8;\n"
- " }\n"
- " return flags;\n"
- " }",
- 0},
- {"function f() {\n"
- " var data = {x:23, y:34};\n"
- " var result = 0;\n"
- " var o = {};\n"
- " var arr = [o];\n"
- " for (arr[0].p in data)\n" // This is to test if value is loaded
- " result += data[arr[0].p];\n" // back from accumulator before storing
- " return result;\n" // named properties.
- "}",
- 57},
- {"function f() {\n"
- " var data = {x:23, y:34};\n"
- " var result = 0;\n"
- " var o = {};\n"
- " var i = 0;\n"
- " for (o[i++] in data)\n" // This is to test if value is loaded
- " result += data[o[i-1]];\n" // back from accumulator before
- " return result;\n" // storing keyed properties.
+ " }\n"
+ " }}\n"
+ " for (a of obj) { r += a }\n"
+ " return r;\n"
"}",
- 57}};
+ factory->NewStringFromStaticChars("dcba")},
+ };
- for (size_t i = 0; i < arraysize(for_in_samples); i++) {
- InterpreterTester tester(handles.main_isolate(), for_in_samples[i].first);
+ for (size_t i = 0; i < arraysize(for_of); i++) {
+ InterpreterTester tester(handles.main_isolate(), for_of[i].first);
auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
- CHECK_EQ(Handle<Smi>::cast(return_val)->value(), for_in_samples[i].second);
+ CHECK(return_val->SameValue(*for_of[i].second));
}
}
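The final row supplies its own iterator: next() only reports done once index has been decremented past zero, and yields data[this.index--] on each call, so the values come out in reverse:

    // index: 3 -> 'd', 2 -> 'c', 1 -> 'b', 0 -> 'a', then done == true,
    // hence the expected string "dcba".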
@@ -3422,10 +3383,10 @@ TEST(InterpreterDeleteLookupSlot) {
TEST(JumpWithConstantsAndWideConstants) {
HandleAndZoneScope handles;
- auto isolate = handles.main_isolate();
- auto factory = isolate->factory();
const int kStep = 13;
- for (int constants = 3; constants < 256 + 3 * kStep; constants += kStep) {
+ for (int constants = 11; constants < 256 + 3 * kStep; constants += kStep) {
+ auto isolate = handles.main_isolate();
+ auto factory = isolate->factory();
std::ostringstream filler_os;
// Generate a string that consumes constant pool entries and
// spread out branch distances in script below.
@@ -3448,8 +3409,8 @@ TEST(JumpWithConstantsAndWideConstants) {
for (int a = 0; a < 3; a++) {
InterpreterTester tester(handles.main_isolate(), script.c_str());
auto callable = tester.GetCallable<Handle<Object>>();
- Handle<Object> return_val =
- callable(factory->NewNumberFromInt(a)).ToHandleChecked();
+ Handle<Object> argument = factory->NewNumberFromInt(a);
+ Handle<Object> return_val = callable(argument).ToHandleChecked();
static const int results[] = {11, 12, 2};
CHECK_EQ(Handle<Smi>::cast(return_val)->value(), results[a]);
}
@@ -3500,7 +3461,6 @@ TEST(InterpreterEval) {
std::string source(InterpreterTester::SourceForBody(eval[i].first));
InterpreterTester tester(handles.main_isolate(), source.c_str());
auto callable = tester.GetCallable<>();
-
Handle<i::Object> return_value = callable().ToHandleChecked();
CHECK(return_value->SameValue(*eval[i].second));
}
@@ -3562,6 +3522,646 @@ TEST(InterpreterEvalGlobal) {
}
}
+
+TEST(InterpreterEvalVariableDecl) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ std::pair<const char*, Handle<Object>> eval_global[] = {
+ {"function f() { eval('var x = 10; x++;'); return x; }",
+ handle(Smi::FromInt(11), isolate)},
+ {"function f() { var x = 20; eval('var x = 10; x++;'); return x; }",
+ handle(Smi::FromInt(11), isolate)},
+ {"function f() {"
+ " var x = 20;"
+ " eval('\"use strict\"; var x = 10; x++;');"
+ " return x; }",
+ handle(Smi::FromInt(20), isolate)},
+ {"function f() {"
+ " var y = 30;"
+ " eval('var x = {1:20}; x[2]=y;');"
+ " return x[2]; }",
+ handle(Smi::FromInt(30), isolate)},
+ {"function f() {"
+ " eval('var x = {name:\"test\"};');"
+ " return x.name; }",
+ factory->NewStringFromStaticChars("test")},
+ {"function f() {"
+ " eval('var x = [{name:\"test\"}, {type:\"cc\"}];');"
+ " return x[1].type+x[0].name; }",
+ factory->NewStringFromStaticChars("cctest")},
+ {"function f() {\n"
+ " var x = 3;\n"
+ " var get_eval_x;\n"
+ " eval('\"use strict\"; "
+ " var x = 20; "
+ " get_eval_x = function func() {return x;};');\n"
+ " return get_eval_x() + x;\n"
+ "}",
+ handle(Smi::FromInt(23), isolate)},
+ // TODO(mythria): Add tests with const declarations.
+ };
+
+ for (size_t i = 0; i < arraysize(eval_global); i++) {
+ InterpreterTester tester(handles.main_isolate(), eval_global[i].first, "*");
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*eval_global[i].second));
+ }
+}
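These rows pin down sloppy versus strict eval scoping: a sloppy eval's var declarations land in the calling function's scope (so the first two rows return 11), while a 'use strict' eval keeps its declarations to itself and the outer x stays 20. The contract, compressed:

    // sloppy: eval('var x = 10; x++;')                 -> mutates caller's x
    // strict: eval('"use strict"; var x = 10; x++;')   -> eval-local x only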
+
+
+TEST(InterpreterEvalFunctionDecl) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+
+ std::pair<const char*, Handle<Object>> eval_func_decl[] = {
+ {"function f() {\n"
+ " var x = 3;\n"
+ " eval('var x = 20;"
+ " function get_x() {return x;};');\n"
+ " return get_x() + x;\n"
+ "}",
+ handle(Smi::FromInt(40), isolate)},
+ };
+
+ for (size_t i = 0; i < arraysize(eval_func_decl); i++) {
+ InterpreterTester tester(handles.main_isolate(), eval_func_decl[i].first,
+ "*");
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*eval_func_decl[i].second));
+ }
+}
+
+TEST(InterpreterWideRegisterArithmetic) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+
+ static const size_t kMaxRegisterForTest = 150;
+ std::ostringstream os;
+ os << "function " << InterpreterTester::function_name() << "(arg) {\n";
+ os << " var retval = -77;\n";
+ for (size_t i = 0; i < kMaxRegisterForTest; i++) {
+ os << " var x" << i << " = " << i << ";\n";
+ }
+ for (size_t i = 0; i < kMaxRegisterForTest / 2; i++) {
+ size_t j = kMaxRegisterForTest - i - 1;
+ os << " var tmp = x" << j << ";\n";
+ os << " var x" << j << " = x" << i << ";\n";
+ os << " var x" << i << " = tmp;\n";
+ }
+ for (size_t i = 0; i < kMaxRegisterForTest / 2; i++) {
+ size_t j = kMaxRegisterForTest - i - 1;
+ os << " var tmp = x" << j << ";\n";
+ os << " var x" << j << " = x" << i << ";\n";
+ os << " var x" << i << " = tmp;\n";
+ }
+ for (size_t i = 0; i < kMaxRegisterForTest; i++) {
+ os << " if (arg == " << i << ") {\n" //
+ << " retval = x" << i << ";\n" //
+ << " }\n"; //
+ }
+ os << " return retval;\n";
+ os << "}\n";
+
+ std::string source = os.str();
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<Handle<Object>>();
+ for (size_t i = 0; i < kMaxRegisterForTest; i++) {
+ Handle<Object> arg = handle(Smi::FromInt(static_cast<int>(i)), isolate);
+ Handle<Object> return_value = callable(arg).ToHandleChecked();
+ CHECK(return_value->SameValue(*arg));
+ }
+}
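The two identical swap loops each reverse x0..x149, and two reversals compose to the identity, so every xi holds i again by the time the selector chain runs; that is why the CHECK expects the function to echo its argument.

    // reverse(reverse(x)) == x, hence f(i) == i for 0 <= i < 150.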
+
+TEST(InterpreterCallWideRegisters) {
+ static const int kPeriod = 25;
+ static const int kLength = 512;
+ static const int kStartChar = 65;
+
+ for (int pass = 0; pass < 3; pass += 1) {
+ std::ostringstream os;
+ for (int i = 0; i < pass * 97; i += 1) {
+ os << "var x" << i << " = " << i << "\n";
+ }
+ os << "return String.fromCharCode(";
+ os << kStartChar;
+ for (int i = 1; i < kLength; i += 1) {
+ os << "," << kStartChar + (i % kPeriod);
+ }
+ os << ");";
+ std::string source = InterpreterTester::SourceForBody(os.str().c_str());
+ HandleAndZoneScope handles;
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable();
+ Handle<Object> return_val = callable().ToHandleChecked();
+ Handle<String> return_string = Handle<String>::cast(return_val);
+ CHECK_EQ(return_string->length(), kLength);
+ for (int i = 0; i < kLength; i += 1) {
+ CHECK_EQ(return_string->Get(i), 65 + (i % kPeriod));
+ }
+ }
+}
+
+TEST(InterpreterWideParametersPickOne) {
+ static const int kParameterCount = 130;
+ for (int parameter = 0; parameter < 10; parameter++) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ std::ostringstream os;
+ os << "function " << InterpreterTester::function_name() << "(arg) {\n";
+ os << " function selector(i";
+ for (int i = 0; i < kParameterCount; i++) {
+ os << ","
+ << "a" << i;
+ }
+ os << ") {\n";
+ os << " return a" << parameter << ";\n";
+ os << " };\n";
+ os << " return selector(arg";
+ for (int i = 0; i < kParameterCount; i++) {
+ os << "," << i;
+ }
+ os << ");";
+ os << "}\n";
+
+ std::string source = os.str();
+ InterpreterTester tester(handles.main_isolate(), source.c_str(), "*");
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> arg = handle(Smi::FromInt(0xaa55), isolate);
+ Handle<Object> return_value = callable(arg).ToHandleChecked();
+ Handle<Smi> actual = Handle<Smi>::cast(return_value);
+ CHECK_EQ(actual->value(), parameter);
+ }
+}
+
+TEST(InterpreterWideParametersSummation) {
+ static int kParameterCount = 200;
+ static int kBaseValue = 17000;
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ std::ostringstream os;
+ os << "function " << InterpreterTester::function_name() << "(arg) {\n";
+ os << " function summation(i";
+ for (int i = 0; i < kParameterCount; i++) {
+ os << ","
+ << "a" << i;
+ }
+ os << ") {\n";
+ os << " var sum = " << kBaseValue << ";\n";
+ os << " switch(i) {\n";
+ for (int i = 0; i < kParameterCount; i++) {
+ int j = kParameterCount - i - 1;
+ os << " case " << j << ": sum += a" << j << ";\n";
+ }
+ os << " }\n";
+ os << " return sum;\n";
+ os << " };\n";
+ os << " return summation(arg";
+ for (int i = 0; i < kParameterCount; i++) {
+ os << "," << i;
+ }
+ os << ");";
+ os << "}\n";
+
+ std::string source = os.str();
+ InterpreterTester tester(handles.main_isolate(), source.c_str(), "*");
+ auto callable = tester.GetCallable<Handle<Object>>();
+ for (int i = 0; i < kParameterCount; i++) {
+ Handle<Object> arg = handle(Smi::FromInt(i), isolate);
+ Handle<Object> return_value = callable(arg).ToHandleChecked();
+ int expected = kBaseValue + i * (i + 1) / 2;
+ Handle<Smi> actual = Handle<Smi>::cast(return_value);
+ CHECK_EQ(actual->value(), expected);
+ }
+}
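The generated switch has no break statements, so hitting case i falls through every lower case and accumulates a_i + ... + a_0; with a_j == j that sum is the triangular number, which is exactly what the loop below recomputes:

    int ExpectedSum(int base, int i) {
      return base + i * (i + 1) / 2;  // kBaseValue + (0 + 1 + ... + i)
    }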
+
+TEST(InterpreterDoExpression) {
+ bool old_flag = FLAG_harmony_do_expressions;
+ FLAG_harmony_do_expressions = true;
+
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ Factory* factory = isolate->factory();
+
+ std::pair<const char*, Handle<Object>> do_expr[] = {
+ {"var a = do {}; return a;", factory->undefined_value()},
+ {"var a = do { var x = 100; }; return a;", factory->undefined_value()},
+ {"var a = do { var x = 100; }; return a;", factory->undefined_value()},
+ {"var a = do { var x = 100; x++; }; return a;",
+ handle(Smi::FromInt(100), isolate)},
+ {"var i = 0; for (; i < 5;) { i = do { if (i == 3) { break; }; i + 1; }};"
+ "return i;",
+ handle(Smi::FromInt(3), isolate)},
+ };
+
+ for (size_t i = 0; i < arraysize(do_expr); i++) {
+ std::string source(InterpreterTester::SourceForBody(do_expr[i].first));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*do_expr[i].second));
+ }
+
+ FLAG_harmony_do_expressions = old_flag;
+}
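The do-expression rows hinge on completion values: a do block yields the completion value of its last statement, so an empty block or a bare declaration yields undefined, and a postfix x++ completes with the old value of x, which is why the x++ row expects 100 rather than 101.

    // var a = do { var x = 100; x++; };  // a == 100: postfix yields old x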
+
+TEST(InterpreterWithStatement) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+
+ std::pair<const char*, Handle<Object>> with_stmt[] = {
+ {"with({x:42}) return x;", handle(Smi::FromInt(42), isolate)},
+ {"with({}) { var y = 10; return y;}", handle(Smi::FromInt(10), isolate)},
+ {"var y = {x:42};"
+ " function inner() {"
+ " var x = 20;"
+ " with(y) return x;"
+ "}"
+ "return inner();",
+ handle(Smi::FromInt(42), isolate)},
+ {"var y = {x:42};"
+ " function inner(o) {"
+ " var x = 20;"
+ " with(o) return x;"
+ "}"
+ "return inner(y);",
+ handle(Smi::FromInt(42), isolate)},
+ };
+
+ for (size_t i = 0; i < arraysize(with_stmt); i++) {
+ std::string source(InterpreterTester::SourceForBody(with_stmt[i].first));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*with_stmt[i].second));
+ }
+}
+
+TEST(InterpreterClassLiterals) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ std::pair<const char*, Handle<Object>> examples[] = {
+ {"class C {\n"
+ " constructor(x) { this.x_ = x; }\n"
+ " method() { return this.x_; }\n"
+ "}\n"
+ "return new C(99).method();",
+ handle(Smi::FromInt(99), isolate)},
+ {"class C {\n"
+ " constructor(x) { this.x_ = x; }\n"
+ " static static_method(x) { return x; }\n"
+ "}\n"
+ "return C.static_method(101);",
+ handle(Smi::FromInt(101), isolate)},
+ {"class C {\n"
+ " get x() { return 102; }\n"
+ "}\n"
+ "return new C().x",
+ handle(Smi::FromInt(102), isolate)},
+ {"class C {\n"
+ " static get x() { return 103; }\n"
+ "}\n"
+ "return C.x",
+ handle(Smi::FromInt(103), isolate)},
+ {"class C {\n"
+ " constructor() { this.x_ = 0; }"
+ " set x(value) { this.x_ = value; }\n"
+ " get x() { return this.x_; }\n"
+ "}\n"
+ "var c = new C();"
+ "c.x = 104;"
+ "return c.x;",
+ handle(Smi::FromInt(104), isolate)},
+ {"var x = 0;"
+ "class C {\n"
+ " static set x(value) { x = value; }\n"
+ " static get x() { return x; }\n"
+ "}\n"
+ "C.x = 105;"
+ "return C.x;",
+ handle(Smi::FromInt(105), isolate)},
+ {"var method = 'f';"
+ "class C {\n"
+ " [method]() { return 106; }\n"
+ "}\n"
+ "return new C().f();",
+ handle(Smi::FromInt(106), isolate)},
+ };
+
+ for (size_t i = 0; i < arraysize(examples); ++i) {
+ std::string source(InterpreterTester::SourceForBody(examples[i].first));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*examples[i].second));
+ }
+}
+
+TEST(InterpreterClassAndSuperClass) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ std::pair<const char*, Handle<Object>> examples[] = {
+ {"class A {\n"
+ " constructor(x) { this.x_ = x; }\n"
+ " method() { return this.x_; }\n"
+ "}\n"
+ "class B extends A {\n"
+ " constructor(x, y) { super(x); this.y_ = y; }\n"
+ " method() { return super.method() + 1; }\n"
+ "}\n"
+ "return new B(998, 0).method();\n",
+ handle(Smi::FromInt(999), isolate)},
+ {"class A {\n"
+ " constructor() { this.x_ = 2; this.y_ = 3; }\n"
+ "}\n"
+ "class B extends A {\n"
+ " constructor() { super(); }"
+ " method() { this.x_++; this.y_++; return this.x_ + this.y_; }\n"
+ "}\n"
+ "return new B().method();\n",
+ handle(Smi::FromInt(7), isolate)},
+ {"var calls = 0;\n"
+ "class B {}\n"
+ "B.prototype.x = 42;\n"
+ "class C extends B {\n"
+ " constructor() {\n"
+ " super();\n"
+ " calls++;\n"
+ " }\n"
+ "}\n"
+ "new C;\n"
+ "return calls;\n",
+ handle(Smi::FromInt(1), isolate)},
+ {"class A {\n"
+ " method() { return 1; }\n"
+ " get x() { return 2; }\n"
+ "}\n"
+ "class B extends A {\n"
+ " method() { return super.x === 2 ? super.method() : -1; }\n"
+ "}\n"
+ "return new B().method();\n",
+ handle(Smi::FromInt(1), isolate)},
+ {"var object = { setY(v) { super.y = v; }};\n"
+ "object.setY(10);\n"
+ "return object.y;\n",
+ handle(Smi::FromInt(10), isolate)},
+ };
+
+ for (size_t i = 0; i < arraysize(examples); ++i) {
+ std::string source(InterpreterTester::SourceForBody(examples[i].first));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*examples[i].second));
+ }
+}
+
+TEST(InterpreterConstDeclaration) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ std::pair<const char*, Handle<Object>> const_decl[] = {
+ {"const x = 3; return x;", handle(Smi::FromInt(3), isolate)},
+ {"let x = 10; x = x + 20; return x;", handle(Smi::FromInt(30), isolate)},
+ {"let x = 10; x = 20; return x;", handle(Smi::FromInt(20), isolate)},
+ {"let x; x = 20; return x;", handle(Smi::FromInt(20), isolate)},
+ {"let x; return x;", factory->undefined_value()},
+ {"var x = 10; { let x = 30; } return x;",
+ handle(Smi::FromInt(10), isolate)},
+ {"let x = 10; { let x = 20; } return x;",
+ handle(Smi::FromInt(10), isolate)},
+ {"var x = 10; eval('let x = 20;'); return x;",
+ handle(Smi::FromInt(10), isolate)},
+ {"var x = 10; eval('const x = 20;'); return x;",
+ handle(Smi::FromInt(10), isolate)},
+ {"var x = 10; { const x = 20; } return x;",
+ handle(Smi::FromInt(10), isolate)},
+ {"var x = 10; { const x = 20; return x;} return -1;",
+ handle(Smi::FromInt(20), isolate)},
+ {"var a = 10;\n"
+ "for (var i = 0; i < 10; ++i) {\n"
+ " const x = i;\n" // const declarations are block scoped.
+ " a = a + x;\n"
+ "}\n"
+ "return a;\n",
+ handle(Smi::FromInt(55), isolate)},
+ };
+
+ // Tests for sloppy mode.
+ for (size_t i = 0; i < arraysize(const_decl); i++) {
+ std::string source(InterpreterTester::SourceForBody(const_decl[i].first));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*const_decl[i].second));
+ }
+
+ // Tests for strict mode.
+ for (size_t i = 0; i < arraysize(const_decl); i++) {
+ std::string strict_body =
+ "'use strict'; " + std::string(const_decl[i].first);
+ std::string source(InterpreterTester::SourceForBody(strict_body.c_str()));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*const_decl[i].second));
+ }
+}
+
+TEST(InterpreterConstDeclarationLookupSlots) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ std::pair<const char*, Handle<Object>> const_decl[] = {
+ {"const x = 3; function f1() {return x;}; return x;",
+ handle(Smi::FromInt(3), isolate)},
+ {"let x = 10; x = x + 20; function f1() {return x;}; return x;",
+ handle(Smi::FromInt(30), isolate)},
+ {"let x; x = 20; function f1() {return x;}; return x;",
+ handle(Smi::FromInt(20), isolate)},
+ {"let x; function f1() {return x;}; return x;",
+ factory->undefined_value()},
+ };
+
+ // Tests for sloppy mode.
+ for (size_t i = 0; i < arraysize(const_decl); i++) {
+ std::string source(InterpreterTester::SourceForBody(const_decl[i].first));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*const_decl[i].second));
+ }
+
+ // Tests for strict mode.
+ for (size_t i = 0; i < arraysize(const_decl); i++) {
+ std::string strict_body =
+ "'use strict'; " + std::string(const_decl[i].first);
+ std::string source(InterpreterTester::SourceForBody(strict_body.c_str()));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*const_decl[i].second));
+ }
+}
+
+TEST(InterpreterConstInLookupContextChain) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+
+ const char* prologue =
+ "function OuterMost() {\n"
+ " const outerConst = 10;\n"
+ " let outerLet = 20;\n"
+ " function Outer() {\n"
+ " function Inner() {\n"
+ " this.innerFunc = function() { ";
+ const char* epilogue =
+ " }\n"
+ " }\n"
+ " this.getInnerFunc ="
+ " function() {return new Inner().innerFunc;}\n"
+ " }\n"
+ " this.getOuterFunc ="
+ " function() {return new Outer().getInnerFunc();}"
+ "}\n"
+ "var f = new OuterMost().getOuterFunc();\n"
+ "f();\n";
+ std::pair<const char*, Handle<Object>> const_decl[] = {
+ {"return outerConst;", handle(Smi::FromInt(10), isolate)},
+ {"return outerLet;", handle(Smi::FromInt(20), isolate)},
+ {"outerLet = 30; return outerLet;", handle(Smi::FromInt(30), isolate)},
+ {"var outerLet = 40; return outerLet;",
+ handle(Smi::FromInt(40), isolate)},
+ {"var outerConst = 50; return outerConst;",
+ handle(Smi::FromInt(50), isolate)},
+ {"try { outerConst = 30 } catch(e) { return -1; }",
+ handle(Smi::FromInt(-1), isolate)}};
+
+ for (size_t i = 0; i < arraysize(const_decl); i++) {
+ std::string script = std::string(prologue) +
+ std::string(const_decl[i].first) +
+ std::string(epilogue);
+ InterpreterTester tester(handles.main_isolate(), script.c_str(), "*");
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*const_decl[i].second));
+ }
+
+  // Tests for legacy const.
+ bool old_flag_legacy_const = FLAG_legacy_const;
+ FLAG_legacy_const = true;
+
+ std::pair<const char*, Handle<Object>> legacy_const_decl[] = {
+ {"return outerConst = 23;", handle(Smi::FromInt(23), isolate)},
+ {"outerConst = 30; return outerConst;",
+ handle(Smi::FromInt(10), isolate)},
+ };
+
+ for (size_t i = 0; i < arraysize(legacy_const_decl); i++) {
+ std::string script = std::string(prologue) +
+ std::string(legacy_const_decl[i].first) +
+ std::string(epilogue);
+ InterpreterTester tester(handles.main_isolate(), script.c_str(), "*");
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*legacy_const_decl[i].second));
+ }
+
+ FLAG_legacy_const = old_flag_legacy_const;
+}
+
+TEST(InterpreterIllegalConstDeclaration) {
+ HandleAndZoneScope handles;
+
+ std::pair<const char*, const char*> const_decl[] = {
+ {"const x = x = 10 + 3; return x;",
+ "Uncaught ReferenceError: x is not defined"},
+ {"const x = 10; x = 20; return x;",
+ "Uncaught TypeError: Assignment to constant variable."},
+ {"const x = 10; { x = 20; } return x;",
+ "Uncaught TypeError: Assignment to constant variable."},
+ {"const x = 10; eval('x = 20;'); return x;",
+ "Uncaught TypeError: Assignment to constant variable."},
+ {"let x = x + 10; return x;",
+ "Uncaught ReferenceError: x is not defined"},
+ {"'use strict'; (function f1() { f1 = 123; })() ",
+ "Uncaught TypeError: Assignment to constant variable."},
+ };
+
+ // Tests for sloppy mode.
+ for (size_t i = 0; i < arraysize(const_decl); i++) {
+ std::string source(InterpreterTester::SourceForBody(const_decl[i].first));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ v8::Local<v8::String> message = tester.CheckThrowsReturnMessage()->Get();
+ v8::Local<v8::String> expected_string = v8_str(const_decl[i].second);
+ CHECK(
+ message->Equals(CcTest::isolate()->GetCurrentContext(), expected_string)
+ .FromJust());
+ }
+
+ // Tests for strict mode.
+ for (size_t i = 0; i < arraysize(const_decl); i++) {
+ std::string strict_body =
+ "'use strict'; " + std::string(const_decl[i].first);
+ std::string source(InterpreterTester::SourceForBody(strict_body.c_str()));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ v8::Local<v8::String> message = tester.CheckThrowsReturnMessage()->Get();
+ v8::Local<v8::String> expected_string = v8_str(const_decl[i].second);
+ CHECK(
+ message->Equals(CcTest::isolate()->GetCurrentContext(), expected_string)
+ .FromJust());
+ }
+}
+
+TEST(InterpreterLegacyConstDeclaration) {
+ bool old_flag_legacy_const = FLAG_legacy_const;
+ FLAG_legacy_const = true;
+
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+
+ std::pair<const char*, Handle<Object>> const_decl[] = {
+ {"const x = (x = 10) + 3; return x;", handle(Smi::FromInt(13), isolate)},
+ {"const x = 10; x = 20; return x;", handle(Smi::FromInt(10), isolate)},
+ {"var a = 10;\n"
+ "for (var i = 0; i < 10; ++i) {\n"
+ " const x = i;\n" // Legacy constants are not block scoped.
+ " a = a + x;\n"
+ "}\n"
+ "return a;\n",
+ handle(Smi::FromInt(10), isolate)},
+ {"const x = 20; eval('x = 10;'); return x;",
+ handle(Smi::FromInt(20), isolate)},
+ };
+
+ for (size_t i = 0; i < arraysize(const_decl); i++) {
+ std::string source(InterpreterTester::SourceForBody(const_decl[i].first));
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<i::Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*const_decl[i].second));
+ }
+
+ FLAG_legacy_const = old_flag_legacy_const;
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
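Every const/let test above shares one harness shape: wrap a JS body into a full script, build an InterpreterTester, fetch a callable, and compare the result via SameValue. A minimal sketch of that shared pattern, assuming the cctest harness and its InterpreterTester helper (both used throughout this file):

    HandleAndZoneScope handles;
    i::Isolate* isolate = handles.main_isolate();
    // Wrap a bare function body into a complete script.
    std::string source(InterpreterTester::SourceForBody("return 7;"));
    InterpreterTester tester(isolate, source.c_str());
    auto callable = tester.GetCallable<>();
    // Execute through the interpreter and check the returned Smi.
    Handle<i::Object> return_value = callable().ToHandleChecked();
    Handle<Object> expected = handle(Smi::FromInt(7), isolate);
    CHECK(return_value->SameValue(*expected));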
diff --git a/deps/v8/test/cctest/profiler-extension.cc b/deps/v8/test/cctest/profiler-extension.cc
index a917932978..024cc9c635 100644
--- a/deps/v8/test/cctest/profiler-extension.cc
+++ b/deps/v8/test/cctest/profiler-extension.cc
@@ -27,41 +27,35 @@
//
// Tests of the profile generator and utilities.
-#include "src/base/logging.h"
#include "test/cctest/profiler-extension.h"
+#include "test/cctest/cctest.h"
namespace v8 {
namespace internal {
-
v8::CpuProfile* ProfilerExtension::last_profile = NULL;
const char* ProfilerExtension::kSource =
"native function startProfiling();"
- "native function stopProfiling();";
+ "native function stopProfiling();"
+ "native function collectSample();";
v8::Local<v8::FunctionTemplate> ProfilerExtension::GetNativeFunctionTemplate(
v8::Isolate* isolate, v8::Local<v8::String> name) {
v8::Local<v8::Context> context = isolate->GetCurrentContext();
- if (name->Equals(context, v8::String::NewFromUtf8(isolate, "startProfiling",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
+ if (name->Equals(context, v8_str(isolate, "startProfiling")).FromJust()) {
return v8::FunctionTemplate::New(isolate,
ProfilerExtension::StartProfiling);
- } else if (name->Equals(context,
- v8::String::NewFromUtf8(isolate, "stopProfiling",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate,
- ProfilerExtension::StopProfiling);
- } else {
- CHECK(false);
- return v8::Local<v8::FunctionTemplate>();
}
+ if (name->Equals(context, v8_str(isolate, "stopProfiling")).FromJust()) {
+ return v8::FunctionTemplate::New(isolate, ProfilerExtension::StopProfiling);
+ }
+ if (name->Equals(context, v8_str(isolate, "collectSample")).FromJust()) {
+ return v8::FunctionTemplate::New(isolate, ProfilerExtension::CollectSample);
+ }
+ CHECK(false);
+ return v8::Local<v8::FunctionTemplate>();
}
-
void ProfilerExtension::StartProfiling(
const v8::FunctionCallbackInfo<v8::Value>& args) {
last_profile = NULL;
@@ -71,7 +65,6 @@ void ProfilerExtension::StartProfiling(
: v8::String::Empty(args.GetIsolate()));
}
-
void ProfilerExtension::StopProfiling(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::CpuProfiler* cpu_profiler = args.GetIsolate()->GetCpuProfiler();
@@ -80,5 +73,10 @@ void ProfilerExtension::StopProfiling(
: v8::String::Empty(args.GetIsolate()));
}
+void ProfilerExtension::CollectSample(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ args.GetIsolate()->GetCpuProfiler()->CollectSample();
+}
+
} // namespace internal
} // namespace v8
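The new collectSample() native lets a test take a profiler sample at a deterministic point instead of waiting for a sampler tick. A hedged sketch of the intended script-side usage, assuming the v8/profiler extension is registered for the test context (the profile name and CompileRun call are illustrative):

    // Inside a cctest body with the extension enabled:
    CompileRun(
        "startProfiling('p');"
        "collectSample();"  // forwards to CpuProfiler::CollectSample()
        "stopProfiling('p');");
    CHECK(ProfilerExtension::last_profile != NULL);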
diff --git a/deps/v8/test/cctest/profiler-extension.h b/deps/v8/test/cctest/profiler-extension.h
index e63d374fb5..00f9a5a808 100644
--- a/deps/v8/test/cctest/profiler-extension.h
+++ b/deps/v8/test/cctest/profiler-extension.h
@@ -40,10 +40,13 @@ class ProfilerExtension : public v8::Extension {
ProfilerExtension() : v8::Extension("v8/profiler", kSource) { }
virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
v8::Isolate* isolate, v8::Local<v8::String> name);
- static void StartProfiling(const v8::FunctionCallbackInfo<v8::Value>& args);
- static void StopProfiling(const v8::FunctionCallbackInfo<v8::Value>& args);
static v8::CpuProfile* last_profile;
+
private:
+ static void StartProfiling(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void StopProfiling(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void CollectSample(const v8::FunctionCallbackInfo<v8::Value>& args);
+
static const char* kSource;
};
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index 0687c33500..90ea08144d 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -686,9 +686,9 @@ THREADED_TEST(Regress433458) {
static bool security_check_value = false;
-
static bool SecurityTestCallback(Local<v8::Context> accessing_context,
- Local<v8::Object> accessed_object) {
+ Local<v8::Object> accessed_object,
+ Local<v8::Value> data) {
return security_check_value;
}
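The access-check callback grows a third |data| parameter in this and the following files. The value is supplied as the second argument of SetAccessCheckCallback, as the SecurityHandler hunk further below shows with v8_num(42). A minimal wiring sketch (callback and variable names are illustrative):

    static bool AllowAccess(Local<v8::Context> accessing_context,
                            Local<v8::Object> accessed_object,
                            Local<v8::Value> data) {
      // |data| is whatever was handed to SetAccessCheckCallback.
      return true;
    }

    v8::Local<v8::ObjectTemplate> global_template =
        v8::ObjectTemplate::New(isolate);
    global_template->SetAccessCheckCallback(AllowAccess, v8_num(42));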
diff --git a/deps/v8/test/cctest/test-api-fast-accessor-builder.cc b/deps/v8/test/cctest/test-api-fast-accessor-builder.cc
index 1e1c972694..eeb6b96fbc 100644
--- a/deps/v8/test/cctest/test-api-fast-accessor-builder.cc
+++ b/deps/v8/test/cctest/test-api-fast-accessor-builder.cc
@@ -51,9 +51,9 @@ namespace {
"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" \
"*/ " // 16 lines * 64 'X' =~ 1024 character comment.
-#define FN_WARMUP(name, src) \
- "function " name "() { " src INLINE_SPOILER \
- " }; for(i = 0; i < 2; i++) { " name "() } "
+#define FN(name, src) "function " name "() { " src INLINE_SPOILER " }"
+#define WARMUP(name, count) "for(i = 0; i < " count "; i++) { " name "() } "
+#define FN_WARMUP(name, src) FN(name, src) "; " WARMUP(name, "2")
static void NativePropertyAccessor(
const v8::FunctionCallbackInfo<v8::Value>& info) {
@@ -112,6 +112,9 @@ void AddInternalFieldAccessor(v8::Isolate* isolate,
// "Fast" accessor that accesses an internal field.
TEST(FastAccessorWithInternalField) {
+ // Crankshaft support for fast accessors is not implemented; crankshafted
+  // code uses the slow accessor, which breaks this test's expectations.
+ v8::internal::FLAG_always_opt = false;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -142,6 +145,9 @@ TEST(FastAccessorWithInternalField) {
// "Fast" accessor with control flow via ...OrReturnNull methods.
TEST(FastAccessorOrReturnNull) {
+ // Crankshaft support for fast accessors is not implemented; crankshafted
+  // code uses the slow accessor, which breaks this test's expectations.
+ v8::internal::FLAG_always_opt = false;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -191,6 +197,9 @@ TEST(FastAccessorOrReturnNull) {
// "Fast" accessor with simple control flow via explicit labels.
TEST(FastAccessorControlFlowWithLabels) {
+ // Crankshaft support for fast accessors is not implemented; crankshafted
+  // code uses the slow accessor, which breaks this test's expectations.
+ v8::internal::FLAG_always_opt = false;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -226,6 +235,9 @@ TEST(FastAccessorControlFlowWithLabels) {
// "Fast" accessor, loading things.
TEST(FastAccessorLoad) {
+ // Crankshaft support for fast accessors is not implemented; crankshafted
+  // code uses the slow accessor, which breaks this test's expectations.
+ v8::internal::FLAG_always_opt = false;
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
@@ -286,3 +298,67 @@ TEST(FastAccessorLoad) {
CompileRun(FN_WARMUP("loadval", "return obj.loadval"));
ExpectString("loadval()", "Hello");
}
+
+void ApiCallbackInt(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(12345);
+}
+
+const char* kApiCallbackStringValue =
+ "Hello World! Bizarro C++ world, actually.";
+void ApiCallbackString(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(v8_str(kApiCallbackStringValue));
+}
+
+void ApiCallbackParam(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ CHECK_EQ(1, info.Length());
+ CHECK(info[0]->IsNumber());
+ info.GetReturnValue().Set(info[0]);
+}
+
+// "Fast" accessor, callback to embedder
+TEST(FastAccessorCallback) {
+ // Crankshaft support for fast accessors is not implemented; crankshafted
+  // code uses the slow accessor, which breaks this test's expectations.
+ v8::internal::FLAG_always_opt = false;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::ObjectTemplate> foo = v8::ObjectTemplate::New(isolate);
+ {
+ auto builder = v8::experimental::FastAccessorBuilder::New(isolate);
+ builder->ReturnValue(
+ builder->Call(&ApiCallbackInt, builder->IntegerConstant(999)));
+ foo->SetAccessorProperty(v8_str("int"),
+ v8::FunctionTemplate::NewWithFastHandler(
+ isolate, NativePropertyAccessor, builder));
+
+ builder = v8::experimental::FastAccessorBuilder::New(isolate);
+ builder->ReturnValue(
+ builder->Call(&ApiCallbackString, builder->IntegerConstant(0)));
+ foo->SetAccessorProperty(v8_str("str"),
+ v8::FunctionTemplate::NewWithFastHandler(
+ isolate, NativePropertyAccessor, builder));
+
+ builder = v8::experimental::FastAccessorBuilder::New(isolate);
+ builder->ReturnValue(
+ builder->Call(&ApiCallbackParam, builder->IntegerConstant(1000)));
+ foo->SetAccessorProperty(v8_str("param"),
+ v8::FunctionTemplate::NewWithFastHandler(
+ isolate, NativePropertyAccessor, builder));
+ }
+
+ // Create an instance.
+ v8::Local<v8::Object> obj = foo->NewInstance(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("obj"), obj).FromJust());
+
+ // Callbacks:
+ CompileRun(FN_WARMUP("callbackint", "return obj.int"));
+ ExpectInt32("callbackint()", 12345);
+
+ CompileRun(FN_WARMUP("callbackstr", "return obj.str"));
+ ExpectString("callbackstr()", kApiCallbackStringValue);
+
+ CompileRun(FN_WARMUP("callbackparam", "return obj.param"));
+ ExpectInt32("callbackparam()", 1000);
+}
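Distilled, the builder pattern this new test exercises is: construct a FastAccessorBuilder, describe the value to return, then install it alongside a slow C++ accessor via NewWithFastHandler. A compact sketch using only calls that appear in the test (the property name is illustrative; the API lives under v8::experimental and may change):

    auto builder = v8::experimental::FastAccessorBuilder::New(isolate);
    // Call a plain FunctionCallback with one integer argument and return
    // its result from the fast path.
    builder->ReturnValue(
        builder->Call(&ApiCallbackInt, builder->IntegerConstant(999)));
    foo->SetAccessorProperty(v8_str("prop"),
                             v8::FunctionTemplate::NewWithFastHandler(
                                 isolate, NativePropertyAccessor, builder));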
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index 9f5eb21954..ac54ededca 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -1879,9 +1879,9 @@ THREADED_TEST(IndexedInterceptorWithNoSetter) {
ExpectString(code, "PASSED");
}
-
static bool AccessAlwaysBlocked(Local<v8::Context> accessing_context,
- Local<v8::Object> accessed_object) {
+ Local<v8::Object> accessed_object,
+ Local<v8::Value> data) {
return false;
}
@@ -3475,9 +3475,9 @@ struct AccessCheckData {
AccessCheckData* g_access_check_data = nullptr;
-
bool SimpleAccessChecker(Local<v8::Context> accessing_context,
- Local<v8::Object> access_object) {
+ Local<v8::Object> access_object,
+ Local<v8::Value> data) {
g_access_check_data->count++;
return g_access_check_data->result;
}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index b78d450fcb..f4e8fe8e4b 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -1889,59 +1889,258 @@ THREADED_TEST(GlobalPrototype) {
THREADED_TEST(ObjectTemplate) {
+ LocalContext env;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
+ Local<v8::FunctionTemplate> acc =
+ v8::FunctionTemplate::New(isolate, Returns42);
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("acc"),
+ acc->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
+
Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(isolate);
v8::Local<v8::String> class_name = v8_str("the_class_name");
fun->SetClassName(class_name);
Local<ObjectTemplate> templ1 = ObjectTemplate::New(isolate, fun);
templ1->Set(isolate, "x", v8_num(10));
templ1->Set(isolate, "y", v8_num(13));
- LocalContext env;
+ templ1->Set(v8_str("foo"), acc);
Local<v8::Object> instance1 =
templ1->NewInstance(env.local()).ToLocalChecked();
CHECK(class_name->StrictEquals(instance1->GetConstructorName()));
CHECK(env->Global()->Set(env.local(), v8_str("p"), instance1).FromJust());
- CHECK(v8_compile("(p.x == 10)")
- ->Run(env.local())
- .ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
- CHECK(v8_compile("(p.y == 13)")
- ->Run(env.local())
- .ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
+ CHECK(CompileRun("(p.x == 10)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(p.y == 13)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(p.foo() == 42)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(p.foo == acc)")->BooleanValue(env.local()).FromJust());
+  // Ensure that foo becomes a data field.
+ CompileRun("p.foo = function() {}");
Local<v8::FunctionTemplate> fun2 = v8::FunctionTemplate::New(isolate);
fun2->PrototypeTemplate()->Set(isolate, "nirk", v8_num(123));
Local<ObjectTemplate> templ2 = fun2->InstanceTemplate();
templ2->Set(isolate, "a", v8_num(12));
templ2->Set(isolate, "b", templ1);
+ templ2->Set(v8_str("bar"), acc);
+ templ2->SetAccessorProperty(v8_str("acc"), acc);
Local<v8::Object> instance2 =
templ2->NewInstance(env.local()).ToLocalChecked();
CHECK(env->Global()->Set(env.local(), v8_str("q"), instance2).FromJust());
- CHECK(v8_compile("(q.nirk == 123)")
- ->Run(env.local())
- .ToLocalChecked()
+ CHECK(CompileRun("(q.nirk == 123)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q.a == 12)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q.b.x == 10)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q.b.y == 13)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q.b.foo() == 42)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q.b.foo === acc)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q.b !== p)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q.acc == 42)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q.bar() == 42)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q.bar == acc)")->BooleanValue(env.local()).FromJust());
+
+ instance2 = templ2->NewInstance(env.local()).ToLocalChecked();
+ CHECK(env->Global()->Set(env.local(), v8_str("q2"), instance2).FromJust());
+ CHECK(CompileRun("(q2.nirk == 123)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q2.a == 12)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q2.b.x == 10)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q2.b.y == 13)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q2.b.foo() == 42)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q2.b.foo === acc)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q2.acc == 42)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q2.bar() == 42)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("(q2.bar === acc)")->BooleanValue(env.local()).FromJust());
+
+ CHECK(CompileRun("(q.b !== q2.b)")->BooleanValue(env.local()).FromJust());
+ CHECK(CompileRun("q.b.x = 17; (q2.b.x == 10)")
->BooleanValue(env.local())
.FromJust());
- CHECK(v8_compile("(q.a == 12)")
- ->Run(env.local())
- .ToLocalChecked()
+ CHECK(CompileRun("desc1 = Object.getOwnPropertyDescriptor(q, 'acc');"
+ "(desc1.get === acc)")
->BooleanValue(env.local())
.FromJust());
- CHECK(v8_compile("(q.b.x == 10)")
- ->Run(env.local())
- .ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
- CHECK(v8_compile("(q.b.y == 13)")
- ->Run(env.local())
- .ToLocalChecked()
+ CHECK(CompileRun("desc2 = Object.getOwnPropertyDescriptor(q2, 'acc');"
+ "(desc2.get === acc)")
->BooleanValue(env.local())
.FromJust());
}
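The mechanical change running through this hunk swaps the verbose compile-then-run chain for cctest's CompileRun helper. Roughly, hedged as a sketch of the equivalence rather than the helper's exact definition:

    // v8_compile(src)->Run(env.local()).ToLocalChecked() collapses to:
    v8::Local<v8::Value> result = CompileRun("(p.x == 10)");
    CHECK(result->BooleanValue(env.local()).FromJust());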
+THREADED_TEST(IntegerValue) {
+ LocalContext env;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ CHECK_EQ(0, CompileRun("undefined")->IntegerValue(env.local()).FromJust());
+}
+
+static void GetNirk(Local<String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ info.GetReturnValue().Set(v8_num(900));
+}
+
+static void GetRino(Local<String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ info.GetReturnValue().Set(v8_num(560));
+}
+
+enum ObjectInstantiationMode {
+ // Create object using ObjectTemplate::NewInstance.
+ ObjectTemplate_NewInstance,
+ // Create object using FunctionTemplate::NewInstance on constructor.
+ Constructor_GetFunction_NewInstance,
+ // Create object using new operator on constructor.
+ Constructor_GetFunction_New
+};
+
+// Test object instance creation using a function template whose instance
+// template inherits from another function template that has accessors and
+// data properties in its prototype template.
+static void TestObjectTemplateInheritedWithPrototype(
+ ObjectInstantiationMode mode) {
+ LocalContext env;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ Local<v8::FunctionTemplate> fun_A = v8::FunctionTemplate::New(isolate);
+ fun_A->SetClassName(v8_str("A"));
+ v8::Local<v8::ObjectTemplate> prototype_templ = fun_A->PrototypeTemplate();
+ prototype_templ->Set(isolate, "a", v8_num(113));
+ prototype_templ->SetNativeDataProperty(v8_str("nirk"), GetNirk);
+ prototype_templ->Set(isolate, "b", v8_num(153));
+
+ Local<v8::FunctionTemplate> fun_B = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::String> class_name = v8_str("B");
+ fun_B->SetClassName(class_name);
+ fun_B->Inherit(fun_A);
+ prototype_templ = fun_B->PrototypeTemplate();
+ prototype_templ->Set(isolate, "c", v8_num(713));
+ prototype_templ->SetNativeDataProperty(v8_str("rino"), GetRino);
+ prototype_templ->Set(isolate, "d", v8_num(753));
+
+ Local<ObjectTemplate> templ = fun_B->InstanceTemplate();
+ templ->Set(isolate, "x", v8_num(10));
+ templ->Set(isolate, "y", v8_num(13));
+
+ // Perform several iterations to trigger creation from cached boilerplate.
+ for (int i = 0; i < 3; i++) {
+ Local<v8::Object> instance;
+ switch (mode) {
+ case ObjectTemplate_NewInstance:
+ instance = templ->NewInstance(env.local()).ToLocalChecked();
+ break;
+
+ case Constructor_GetFunction_NewInstance: {
+ Local<v8::Function> function_B =
+ fun_B->GetFunction(env.local()).ToLocalChecked();
+ instance = function_B->NewInstance(env.local()).ToLocalChecked();
+ break;
+ }
+ case Constructor_GetFunction_New: {
+ Local<v8::Function> function_B =
+ fun_B->GetFunction(env.local()).ToLocalChecked();
+ if (i == 0) {
+ CHECK(env->Global()
+ ->Set(env.local(), class_name, function_B)
+ .FromJust());
+ }
+ instance =
+ CompileRun("new B()")->ToObject(env.local()).ToLocalChecked();
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ CHECK(class_name->StrictEquals(instance->GetConstructorName()));
+ CHECK(env->Global()->Set(env.local(), v8_str("o"), instance).FromJust());
+
+ CHECK_EQ(10, CompileRun("o.x")->IntegerValue(env.local()).FromJust());
+ CHECK_EQ(13, CompileRun("o.y")->IntegerValue(env.local()).FromJust());
+
+ CHECK_EQ(113, CompileRun("o.a")->IntegerValue(env.local()).FromJust());
+ CHECK_EQ(900, CompileRun("o.nirk")->IntegerValue(env.local()).FromJust());
+ CHECK_EQ(153, CompileRun("o.b")->IntegerValue(env.local()).FromJust());
+ CHECK_EQ(713, CompileRun("o.c")->IntegerValue(env.local()).FromJust());
+ CHECK_EQ(560, CompileRun("o.rino")->IntegerValue(env.local()).FromJust());
+ CHECK_EQ(753, CompileRun("o.d")->IntegerValue(env.local()).FromJust());
+ }
+}
+
+THREADED_TEST(TestObjectTemplateInheritedWithAccessorsInPrototype1) {
+ TestObjectTemplateInheritedWithPrototype(ObjectTemplate_NewInstance);
+}
+
+THREADED_TEST(TestObjectTemplateInheritedWithAccessorsInPrototype2) {
+ TestObjectTemplateInheritedWithPrototype(Constructor_GetFunction_NewInstance);
+}
+
+THREADED_TEST(TestObjectTemplateInheritedWithAccessorsInPrototype3) {
+ TestObjectTemplateInheritedWithPrototype(Constructor_GetFunction_New);
+}
+
+// Test object instance creation using a function template that has no
+// instance template of its own and inherits from another function template.
+static void TestObjectTemplateInheritedWithoutInstanceTemplate(
+ ObjectInstantiationMode mode) {
+ LocalContext env;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+
+ Local<v8::FunctionTemplate> fun_A = v8::FunctionTemplate::New(isolate);
+ fun_A->SetClassName(v8_str("A"));
+
+ Local<ObjectTemplate> templ_A = fun_A->InstanceTemplate();
+ templ_A->SetNativeDataProperty(v8_str("nirk"), GetNirk);
+ templ_A->SetNativeDataProperty(v8_str("rino"), GetRino);
+
+ Local<v8::FunctionTemplate> fun_B = v8::FunctionTemplate::New(isolate);
+ v8::Local<v8::String> class_name = v8_str("B");
+ fun_B->SetClassName(class_name);
+ fun_B->Inherit(fun_A);
+
+ // Perform several iterations to trigger creation from cached boilerplate.
+ for (int i = 0; i < 3; i++) {
+ Local<v8::Object> instance;
+ switch (mode) {
+ case Constructor_GetFunction_NewInstance: {
+ Local<v8::Function> function_B =
+ fun_B->GetFunction(env.local()).ToLocalChecked();
+ instance = function_B->NewInstance(env.local()).ToLocalChecked();
+ break;
+ }
+ case Constructor_GetFunction_New: {
+ Local<v8::Function> function_B =
+ fun_B->GetFunction(env.local()).ToLocalChecked();
+ if (i == 0) {
+ CHECK(env->Global()
+ ->Set(env.local(), class_name, function_B)
+ .FromJust());
+ }
+ instance =
+ CompileRun("new B()")->ToObject(env.local()).ToLocalChecked();
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+
+ CHECK(class_name->StrictEquals(instance->GetConstructorName()));
+ CHECK(env->Global()->Set(env.local(), v8_str("o"), instance).FromJust());
+
+ CHECK_EQ(900, CompileRun("o.nirk")->IntegerValue(env.local()).FromJust());
+ CHECK_EQ(560, CompileRun("o.rino")->IntegerValue(env.local()).FromJust());
+ }
+}
+
+THREADED_TEST(TestObjectTemplateInheritedWithPrototype1) {
+ TestObjectTemplateInheritedWithoutInstanceTemplate(
+ Constructor_GetFunction_NewInstance);
+}
+
+THREADED_TEST(TestObjectTemplateInheritedWithPrototype2) {
+ TestObjectTemplateInheritedWithoutInstanceTemplate(
+ Constructor_GetFunction_New);
+}
static void GetFlabby(const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
@@ -2020,31 +2219,12 @@ THREADED_TEST(DescriptorInheritance) {
->NewInstance(env.local())
.ToLocalChecked())
.FromJust());
- CHECK_EQ(17.2, v8_compile("obj.flabby()")
- ->Run(env.local())
- .ToLocalChecked()
- ->NumberValue(env.local())
- .FromJust());
- CHECK(v8_compile("'flabby' in obj")
- ->Run(env.local())
- .ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
- CHECK_EQ(15.2, v8_compile("obj.knurd")
- ->Run(env.local())
- .ToLocalChecked()
- ->NumberValue(env.local())
- .FromJust());
- CHECK(v8_compile("'knurd' in obj")
- ->Run(env.local())
- .ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
- CHECK_EQ(20.1, v8_compile("obj.v1")
- ->Run(env.local())
- .ToLocalChecked()
- ->NumberValue(env.local())
- .FromJust());
+ CHECK_EQ(17.2,
+ CompileRun("obj.flabby()")->NumberValue(env.local()).FromJust());
+ CHECK(CompileRun("'flabby' in obj")->BooleanValue(env.local()).FromJust());
+ CHECK_EQ(15.2, CompileRun("obj.knurd")->NumberValue(env.local()).FromJust());
+ CHECK(CompileRun("'knurd' in obj")->BooleanValue(env.local()).FromJust());
+ CHECK_EQ(20.1, CompileRun("obj.v1")->NumberValue(env.local()).FromJust());
CHECK(env->Global()
->Set(env.local(), v8_str("obj2"), base2->GetFunction(env.local())
@@ -2052,36 +2232,106 @@ THREADED_TEST(DescriptorInheritance) {
->NewInstance(env.local())
.ToLocalChecked())
.FromJust());
- CHECK_EQ(17.2, v8_compile("obj2.flabby()")
- ->Run(env.local())
- .ToLocalChecked()
- ->NumberValue(env.local())
- .FromJust());
- CHECK(v8_compile("'flabby' in obj2")
- ->Run(env.local())
- .ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
- CHECK_EQ(15.2, v8_compile("obj2.knurd")
- ->Run(env.local())
- .ToLocalChecked()
- ->NumberValue(env.local())
- .FromJust());
- CHECK(v8_compile("'knurd' in obj2")
- ->Run(env.local())
- .ToLocalChecked()
- ->BooleanValue(env.local())
- .FromJust());
- CHECK_EQ(10.1, v8_compile("obj2.v2")
- ->Run(env.local())
- .ToLocalChecked()
- ->NumberValue(env.local())
- .FromJust());
+ CHECK_EQ(17.2,
+ CompileRun("obj2.flabby()")->NumberValue(env.local()).FromJust());
+ CHECK(CompileRun("'flabby' in obj2")->BooleanValue(env.local()).FromJust());
+ CHECK_EQ(15.2, CompileRun("obj2.knurd")->NumberValue(env.local()).FromJust());
+ CHECK(CompileRun("'knurd' in obj2")->BooleanValue(env.local()).FromJust());
+ CHECK_EQ(10.1, CompileRun("obj2.v2")->NumberValue(env.local()).FromJust());
// base1 and base2 cannot cross-reference each other's prototype.
- CHECK(v8_compile("obj.v2")->Run(env.local()).ToLocalChecked()->IsUndefined());
- CHECK(
- v8_compile("obj2.v1")->Run(env.local()).ToLocalChecked()->IsUndefined());
+ CHECK(CompileRun("obj.v2")->IsUndefined());
+ CHECK(CompileRun("obj2.v1")->IsUndefined());
+}
+
+THREADED_TEST(DescriptorInheritance2) {
+ LocalContext env;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Local<v8::FunctionTemplate> fun_A = v8::FunctionTemplate::New(isolate);
+ fun_A->SetClassName(v8_str("A"));
+ fun_A->InstanceTemplate()->SetNativeDataProperty(v8_str("knurd1"), GetKnurd);
+ fun_A->InstanceTemplate()->SetNativeDataProperty(v8_str("nirk1"), GetNirk);
+ fun_A->InstanceTemplate()->SetNativeDataProperty(v8_str("rino1"), GetRino);
+
+ v8::Local<v8::FunctionTemplate> fun_B = v8::FunctionTemplate::New(isolate);
+ fun_B->SetClassName(v8_str("B"));
+ fun_B->Inherit(fun_A);
+
+ v8::Local<v8::FunctionTemplate> fun_C = v8::FunctionTemplate::New(isolate);
+ fun_C->SetClassName(v8_str("C"));
+ fun_C->Inherit(fun_B);
+ fun_C->InstanceTemplate()->SetNativeDataProperty(v8_str("knurd2"), GetKnurd);
+ fun_C->InstanceTemplate()->SetNativeDataProperty(v8_str("nirk2"), GetNirk);
+ fun_C->InstanceTemplate()->SetNativeDataProperty(v8_str("rino2"), GetRino);
+
+ v8::Local<v8::FunctionTemplate> fun_D = v8::FunctionTemplate::New(isolate);
+ fun_D->SetClassName(v8_str("D"));
+ fun_D->Inherit(fun_C);
+
+ v8::Local<v8::FunctionTemplate> fun_E = v8::FunctionTemplate::New(isolate);
+ fun_E->SetClassName(v8_str("E"));
+ fun_E->Inherit(fun_D);
+ fun_E->InstanceTemplate()->SetNativeDataProperty(v8_str("knurd3"), GetKnurd);
+ fun_E->InstanceTemplate()->SetNativeDataProperty(v8_str("nirk3"), GetNirk);
+ fun_E->InstanceTemplate()->SetNativeDataProperty(v8_str("rino3"), GetRino);
+
+ v8::Local<v8::FunctionTemplate> fun_F = v8::FunctionTemplate::New(isolate);
+ fun_F->SetClassName(v8_str("F"));
+ fun_F->Inherit(fun_E);
+ v8::Local<v8::ObjectTemplate> templ = fun_F->InstanceTemplate();
+ const int kDataPropertiesNumber = 100;
+ for (int i = 0; i < kDataPropertiesNumber; i++) {
+ v8::Local<v8::Value> val = v8_num(i);
+ v8::Local<v8::String> val_str = val->ToString(env.local()).ToLocalChecked();
+ v8::Local<v8::String> name = String::Concat(v8_str("p"), val_str);
+
+ templ->Set(name, val);
+ templ->Set(val_str, val);
+ }
+
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("F"),
+ fun_F->GetFunction(env.local()).ToLocalChecked())
+ .FromJust());
+
+ v8::Local<v8::Script> script = v8_compile("o = new F()");
+
+ for (int i = 0; i < 100; i++) {
+ v8::HandleScope scope(isolate);
+ script->Run(env.local()).ToLocalChecked();
+ }
+ v8::Local<v8::Object> object = script->Run(env.local())
+ .ToLocalChecked()
+ ->ToObject(env.local())
+ .ToLocalChecked();
+
+ CHECK_EQ(15.2, CompileRun("o.knurd1")->NumberValue(env.local()).FromJust());
+ CHECK_EQ(15.2, CompileRun("o.knurd2")->NumberValue(env.local()).FromJust());
+ CHECK_EQ(15.2, CompileRun("o.knurd3")->NumberValue(env.local()).FromJust());
+
+ CHECK_EQ(900, CompileRun("o.nirk1")->IntegerValue(env.local()).FromJust());
+ CHECK_EQ(900, CompileRun("o.nirk2")->IntegerValue(env.local()).FromJust());
+ CHECK_EQ(900, CompileRun("o.nirk3")->IntegerValue(env.local()).FromJust());
+
+ CHECK_EQ(560, CompileRun("o.rino1")->IntegerValue(env.local()).FromJust());
+ CHECK_EQ(560, CompileRun("o.rino2")->IntegerValue(env.local()).FromJust());
+ CHECK_EQ(560, CompileRun("o.rino3")->IntegerValue(env.local()).FromJust());
+
+ for (int i = 0; i < kDataPropertiesNumber; i++) {
+ v8::Local<v8::Value> val = v8_num(i);
+ v8::Local<v8::String> val_str = val->ToString(env.local()).ToLocalChecked();
+ v8::Local<v8::String> name = String::Concat(v8_str("p"), val_str);
+
+ CHECK_EQ(i, object->Get(env.local(), name)
+ .ToLocalChecked()
+ ->IntegerValue(env.local())
+ .FromJust());
+ CHECK_EQ(i, object->Get(env.local(), val)
+ .ToLocalChecked()
+ ->IntegerValue(env.local())
+ .FromJust());
+ }
}
@@ -2135,7 +2385,7 @@ static void ThrowingSymbolAccessorGetter(
}
-THREADED_TEST(ExecutableAccessorIsPreservedOnAttributeChange) {
+THREADED_TEST(AccessorIsPreservedOnAttributeChange) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
LocalContext env;
@@ -2145,11 +2395,11 @@ THREADED_TEST(ExecutableAccessorIsPreservedOnAttributeChange) {
CHECK_GT(i::FixedArray::cast(a->map()->instance_descriptors())->length(), 0);
CompileRun("Object.defineProperty(a, 'length', { writable: false });");
CHECK_EQ(i::FixedArray::cast(a->map()->instance_descriptors())->length(), 0);
- // But we should still have an ExecutableAccessorInfo.
+ // But we should still have an AccessorInfo.
i::Handle<i::String> name(v8::Utils::OpenHandle(*v8_str("length")));
i::LookupIterator it(a, name, i::LookupIterator::OWN_SKIP_INTERCEPTOR);
CHECK_EQ(i::LookupIterator::ACCESSOR, it.state());
- CHECK(it.GetAccessors()->IsExecutableAccessorInfo());
+ CHECK(it.GetAccessors()->IsAccessorInfo());
}
@@ -7779,7 +8029,6 @@ static void ArgumentsTestCallback(
ApiTestFuzzer::Fuzz();
v8::Isolate* isolate = args.GetIsolate();
Local<Context> context = isolate->GetCurrentContext();
- CHECK(args_fun->Equals(context, args.Callee()).FromJust());
CHECK_EQ(3, args.Length());
CHECK(v8::Integer::New(isolate, 1)->Equals(context, args[0]).FromJust());
CHECK(v8::Integer::New(isolate, 2)->Equals(context, args[1]).FromJust());
@@ -8940,8 +9189,11 @@ TEST(TryCatchFinallyStoresMessageUsingTryCatchHandler) {
// For use within the TestSecurityHandler() test.
static bool g_security_callback_result = false;
static bool SecurityTestCallback(Local<v8::Context> accessing_context,
- Local<v8::Object> accessed_object) {
+ Local<v8::Object> accessed_object,
+ Local<v8::Value> data) {
printf("a\n");
+ CHECK(!data.IsEmpty() && data->IsInt32());
+ CHECK_EQ(42, data->Int32Value(accessing_context).FromJust());
return g_security_callback_result;
}
@@ -8952,7 +9204,7 @@ TEST(SecurityHandler) {
v8::HandleScope scope0(isolate);
v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
- global_template->SetAccessCheckCallback(SecurityTestCallback);
+ global_template->SetAccessCheckCallback(SecurityTestCallback, v8_num(42));
// Create an environment
v8::Local<Context> context0 = Context::New(isolate, NULL, global_template);
context0->Enter();
@@ -9138,7 +9390,8 @@ THREADED_TEST(SecurityChecksForPrototypeChain) {
static bool security_check_with_gc_called;
static bool SecurityTestCallbackWithGC(Local<v8::Context> accessing_context,
- Local<v8::Object> accessed_object) {
+ Local<v8::Object> accessed_object,
+ Local<v8::Value> data) {
CcTest::heap()->CollectAllGarbage();
security_check_with_gc_called = true;
return true;
@@ -9625,7 +9878,8 @@ TEST(DetachedAccesses) {
static bool allowed_access = false;
static bool AccessBlocker(Local<v8::Context> accessing_context,
- Local<v8::Object> accessed_object) {
+ Local<v8::Object> accessed_object,
+ Local<v8::Value> data) {
v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
return context->Global()->Equals(context, accessed_object).FromJust() ||
allowed_access;
@@ -9919,9 +10173,9 @@ TEST(AccessControlES5) {
CHECK_EQ(42, g_echo_value); // Make sure we didn't call the setter.
}
-
static bool AccessAlwaysBlocked(Local<v8::Context> accessing_context,
- Local<v8::Object> global) {
+ Local<v8::Object> global,
+ Local<v8::Value> data) {
i::PrintF("Access blocked.\n");
return false;
}
@@ -10065,7 +10319,8 @@ THREADED_TEST(CrossDomainAccessors) {
static int access_count = 0;
static bool AccessCounter(Local<v8::Context> accessing_context,
- Local<v8::Object> accessed_object) {
+ Local<v8::Object> accessed_object,
+ Local<v8::Value> data) {
access_count++;
return true;
}
@@ -10724,7 +10979,8 @@ THREADED_TEST(Regress91517) {
Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New(isolate);
t2->SetHiddenPrototype(true);
t2->InstanceTemplate()->Set(v8_str("fuz1"), v8_num(2));
- t2->InstanceTemplate()->Set(v8_str("objects"), v8::Object::New(isolate));
+ t2->InstanceTemplate()->Set(v8_str("objects"),
+ v8::ObjectTemplate::New(isolate));
t2->InstanceTemplate()->Set(v8_str("fuz2"), v8_num(2));
Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New(isolate);
t3->SetHiddenPrototype(true);
@@ -10772,6 +11028,7 @@ THREADED_TEST(Regress91517) {
ExpectTrue("names.indexOf(\"boo\") >= 0");
ExpectTrue("names.indexOf(\"foo\") >= 0");
ExpectTrue("names.indexOf(\"fuz1\") >= 0");
+ ExpectTrue("names.indexOf(\"objects\") >= 0");
ExpectTrue("names.indexOf(\"fuz2\") >= 0");
ExpectFalse("names[1005] == undefined");
}
@@ -12793,6 +13050,203 @@ THREADED_TEST(Overriding) {
}
+static void ShouldThrowOnErrorGetter(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Isolate* isolate = info.GetIsolate();
+ Local<Boolean> should_throw_on_error =
+ Boolean::New(isolate, info.ShouldThrowOnError());
+ info.GetReturnValue().Set(should_throw_on_error);
+}
+
+
+template <typename T>
+static void ShouldThrowOnErrorSetter(Local<Name> name, Local<v8::Value> value,
+ const v8::PropertyCallbackInfo<T>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Isolate* isolate = info.GetIsolate();
+ auto context = isolate->GetCurrentContext();
+ Local<Boolean> should_throw_on_error_value =
+ Boolean::New(isolate, info.ShouldThrowOnError());
+ CHECK(context->Global()
+ ->Set(isolate->GetCurrentContext(), v8_str("should_throw_setter"),
+ should_throw_on_error_value)
+ .FromJust());
+}
+
+
+THREADED_TEST(AccessorShouldThrowOnError) {
+ i::FLAG_strong_mode = true;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<Object> global = context->Global();
+
+ Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+ Local<ObjectTemplate> instance_templ = templ->InstanceTemplate();
+ instance_templ->SetAccessor(v8_str("f"), ShouldThrowOnErrorGetter,
+ ShouldThrowOnErrorSetter<void>);
+
+ Local<v8::Object> instance = templ->GetFunction(context.local())
+ .ToLocalChecked()
+ ->NewInstance(context.local())
+ .ToLocalChecked();
+
+ CHECK(global->Set(context.local(), v8_str("o"), instance).FromJust());
+
+ // SLOPPY mode
+ Local<Value> value = v8_compile("o.f")->Run(context.local()).ToLocalChecked();
+ CHECK(value->IsFalse());
+ v8_compile("o.f = 153")->Run(context.local()).ToLocalChecked();
+ value = global->Get(context.local(), v8_str("should_throw_setter"))
+ .ToLocalChecked();
+ CHECK(value->IsFalse());
+
+ // STRICT mode
+ value = v8_compile("'use strict';o.f")->Run(context.local()).ToLocalChecked();
+ CHECK(value->IsFalse());
+ v8_compile("'use strict'; o.f = 153")->Run(context.local()).ToLocalChecked();
+ value = global->Get(context.local(), v8_str("should_throw_setter"))
+ .ToLocalChecked();
+ CHECK(value->IsTrue());
+
+ // STRONG mode
+ value = v8_compile("'use strong';o.f")->Run(context.local()).ToLocalChecked();
+ CHECK(value->IsFalse());
+ v8_compile("'use strong'; o.f = 153")->Run(context.local()).ToLocalChecked();
+ value = global->Get(context.local(), v8_str("should_throw_setter"))
+ .ToLocalChecked();
+ CHECK(value->IsTrue());
+}
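Read together, the checks above pin down ShouldThrowOnError(): property reads report false in every mode, sloppy-mode writes report false, and strict or strong writes report true, matching where a failed assignment must raise a TypeError. As a worked summary of the expectations:

    // o.f                       getter sees ShouldThrowOnError() == false
    // o.f = 153                 sloppy setter sees false
    // 'use strict'; o.f = 153   strict setter sees true
    // 'use strong'; o.f = 153   strong setter sees true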
+
+
+static void ShouldThrowOnErrorQuery(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Integer>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Isolate* isolate = info.GetIsolate();
+ info.GetReturnValue().Set(v8::None);
+
+ auto context = isolate->GetCurrentContext();
+ Local<Boolean> should_throw_on_error_value =
+ Boolean::New(isolate, info.ShouldThrowOnError());
+ CHECK(context->Global()
+ ->Set(isolate->GetCurrentContext(), v8_str("should_throw_query"),
+ should_throw_on_error_value)
+ .FromJust());
+}
+
+
+static void ShouldThrowOnErrorDeleter(
+ Local<Name> name, const v8::PropertyCallbackInfo<v8::Boolean>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Isolate* isolate = info.GetIsolate();
+ info.GetReturnValue().Set(v8::True(isolate));
+
+ auto context = isolate->GetCurrentContext();
+ Local<Boolean> should_throw_on_error_value =
+ Boolean::New(isolate, info.ShouldThrowOnError());
+ CHECK(context->Global()
+ ->Set(isolate->GetCurrentContext(), v8_str("should_throw_deleter"),
+ should_throw_on_error_value)
+ .FromJust());
+}
+
+
+static void ShouldThrowOnErrorPropertyEnumerator(
+ const v8::PropertyCallbackInfo<v8::Array>& info) {
+ ApiTestFuzzer::Fuzz();
+ v8::Isolate* isolate = info.GetIsolate();
+ Local<v8::Array> names = v8::Array::New(isolate, 1);
+ CHECK(names->Set(isolate->GetCurrentContext(), names, v8_num(1)).FromJust());
+ info.GetReturnValue().Set(names);
+
+ auto context = isolate->GetCurrentContext();
+ Local<Boolean> should_throw_on_error_value =
+ Boolean::New(isolate, info.ShouldThrowOnError());
+ CHECK(context->Global()
+ ->Set(isolate->GetCurrentContext(),
+ v8_str("should_throw_enumerator"),
+ should_throw_on_error_value)
+ .FromJust());
+}
+
+
+THREADED_TEST(InterceptorShouldThrowOnError) {
+ i::FLAG_strong_mode = true;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+ Local<Object> global = context->Global();
+
+ auto interceptor_templ = v8::ObjectTemplate::New(isolate);
+ v8::NamedPropertyHandlerConfiguration handler(
+ ShouldThrowOnErrorGetter, ShouldThrowOnErrorSetter<Value>,
+ ShouldThrowOnErrorQuery, ShouldThrowOnErrorDeleter,
+ ShouldThrowOnErrorPropertyEnumerator);
+ interceptor_templ->SetHandler(handler);
+
+ Local<v8::Object> instance =
+ interceptor_templ->NewInstance(context.local()).ToLocalChecked();
+
+ CHECK(global->Set(context.local(), v8_str("o"), instance).FromJust());
+
+ // SLOPPY mode
+ Local<Value> value = v8_compile("o.f")->Run(context.local()).ToLocalChecked();
+ CHECK(value->IsFalse());
+ v8_compile("o.f = 153")->Run(context.local()).ToLocalChecked();
+ value = global->Get(context.local(), v8_str("should_throw_setter"))
+ .ToLocalChecked();
+ CHECK(value->IsFalse());
+
+ v8_compile("delete o.f")->Run(context.local()).ToLocalChecked();
+ value = global->Get(context.local(), v8_str("should_throw_deleter"))
+ .ToLocalChecked();
+ CHECK(value->IsFalse());
+
+ v8_compile("Object.getOwnPropertyNames(o)")
+ ->Run(context.local())
+ .ToLocalChecked();
+ value = global->Get(context.local(), v8_str("should_throw_enumerator"))
+ .ToLocalChecked();
+ CHECK(value->IsFalse());
+
+ // STRICT mode
+ value = v8_compile("'use strict';o.f")->Run(context.local()).ToLocalChecked();
+ CHECK(value->IsFalse());
+ v8_compile("'use strict'; o.f = 153")->Run(context.local()).ToLocalChecked();
+ value = global->Get(context.local(), v8_str("should_throw_setter"))
+ .ToLocalChecked();
+ CHECK(value->IsTrue());
+
+ v8_compile("'use strict'; delete o.f")->Run(context.local()).ToLocalChecked();
+ value = global->Get(context.local(), v8_str("should_throw_deleter"))
+ .ToLocalChecked();
+ CHECK(value->IsTrue());
+
+ v8_compile("'use strict'; Object.getOwnPropertyNames(o)")
+ ->Run(context.local())
+ .ToLocalChecked();
+ value = global->Get(context.local(), v8_str("should_throw_enumerator"))
+ .ToLocalChecked();
+ CHECK(value->IsFalse());
+
+ // STRONG mode
+ value = v8_compile("'use strong';o.f")->Run(context.local()).ToLocalChecked();
+ CHECK(value->IsFalse());
+ v8_compile("'use strong'; o.f = 153")->Run(context.local()).ToLocalChecked();
+ value = global->Get(context.local(), v8_str("should_throw_setter"))
+ .ToLocalChecked();
+ CHECK(value->IsTrue());
+
+ v8_compile("'use strong'; Object.getOwnPropertyNames(o)")
+ ->Run(context.local())
+ .ToLocalChecked();
+ value = global->Get(context.local(), v8_str("should_throw_enumerator"))
+ .ToLocalChecked();
+ CHECK(value->IsFalse());
+}
+
+
static void IsConstructHandler(
const v8::FunctionCallbackInfo<v8::Value>& args) {
ApiTestFuzzer::Fuzz();
@@ -19780,7 +20234,7 @@ THREADED_TEST(CreationContextOfJsBoundFunction) {
Context::Scope scope(other_context);
CHECK(bound_function1->CreationContext() == context1);
CheckContextId(bound_function1, 1);
- CHECK(bound_function2->CreationContext() == context2);
+ CHECK(bound_function2->CreationContext() == context1);
CheckContextId(bound_function2, 1);
}
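The flipped expectation is worth a note: presumably a bound function now reports the creation context of its bind target rather than the context where bind() was called, so both bound functions resolve to context1. That reading is an inference from this hunk, but it agrees with the unchanged CheckContextId(bound_function2, 1) line:

    // bind() performed in context2 over a target created in context1:
    // bound_function2->CreationContext() == context1  (the target's context)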
@@ -20171,16 +20625,20 @@ THREADED_TEST(Regress93759) {
context->Exit();
- // Template for object for second context. Values to test are put on it as
- // properties.
- Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
- global_template->Set(v8_str("simple"), simple_object);
- global_template->Set(v8_str("protected"), protected_object);
- global_template->Set(v8_str("global"), global_object);
- global_template->Set(v8_str("proxy"), proxy_object);
- global_template->Set(v8_str("hidden"), object_with_hidden);
+ LocalContext context2;
+ v8::Local<v8::Object> global = context2->Global();
- LocalContext context2(NULL, global_template);
+  // Set up global variables.
+ CHECK(global->Set(context2.local(), v8_str("simple"), simple_object)
+ .FromJust());
+ CHECK(global->Set(context2.local(), v8_str("protected"), protected_object)
+ .FromJust());
+ CHECK(global->Set(context2.local(), v8_str("global"), global_object)
+ .FromJust());
+ CHECK(
+ global->Set(context2.local(), v8_str("proxy"), proxy_object).FromJust());
+ CHECK(global->Set(context2.local(), v8_str("hidden"), object_with_hidden)
+ .FromJust());
Local<Value> result1 = CompileRun("Object.getPrototypeOf(simple)");
CHECK(result1->Equals(context2.local(), simple_object->GetPrototype())
@@ -20332,20 +20790,34 @@ THREADED_TEST(ForeignFunctionReceiver) {
uint8_t callback_fired = 0;
+uint8_t before_call_entered_callback_count1 = 0;
+uint8_t before_call_entered_callback_count2 = 0;
-void CallCompletedCallback1() {
+void CallCompletedCallback1(v8::Isolate*) {
v8::base::OS::Print("Firing callback 1.\n");
callback_fired ^= 1; // Toggle first bit.
}
-void CallCompletedCallback2() {
+void CallCompletedCallback2(v8::Isolate*) {
v8::base::OS::Print("Firing callback 2.\n");
callback_fired ^= 2; // Toggle second bit.
}
+void BeforeCallEnteredCallback1(v8::Isolate*) {
+ v8::base::OS::Print("Firing before call entered callback 1.\n");
+ before_call_entered_callback_count1++;
+}
+
+
+void BeforeCallEnteredCallback2(v8::Isolate*) {
+ v8::base::OS::Print("Firing before call entered callback 2.\n");
+ before_call_entered_callback_count2++;
+}
+
+
void RecursiveCall(const v8::FunctionCallbackInfo<v8::Value>& args) {
int32_t level =
args[0]->Int32Value(args.GetIsolate()->GetCurrentContext()).FromJust();
@@ -20378,36 +20850,54 @@ TEST(CallCompletedCallback) {
env->GetIsolate()->AddCallCompletedCallback(CallCompletedCallback1);
env->GetIsolate()->AddCallCompletedCallback(CallCompletedCallback1);
env->GetIsolate()->AddCallCompletedCallback(CallCompletedCallback2);
+ env->GetIsolate()->AddBeforeCallEnteredCallback(BeforeCallEnteredCallback1);
+ env->GetIsolate()->AddBeforeCallEnteredCallback(BeforeCallEnteredCallback2);
+ env->GetIsolate()->AddBeforeCallEnteredCallback(BeforeCallEnteredCallback1);
v8::base::OS::Print("--- Script (1) ---\n");
+ callback_fired = 0;
+ before_call_entered_callback_count1 = 0;
+ before_call_entered_callback_count2 = 0;
Local<Script> script =
v8::Script::Compile(env.local(), v8_str("recursion(0)")).ToLocalChecked();
script->Run(env.local()).ToLocalChecked();
CHECK_EQ(3, callback_fired);
+ CHECK_EQ(4, before_call_entered_callback_count1);
+ CHECK_EQ(4, before_call_entered_callback_count2);
v8::base::OS::Print("\n--- Script (2) ---\n");
callback_fired = 0;
+ before_call_entered_callback_count1 = 0;
+ before_call_entered_callback_count2 = 0;
env->GetIsolate()->RemoveCallCompletedCallback(CallCompletedCallback1);
+ env->GetIsolate()->RemoveBeforeCallEnteredCallback(
+ BeforeCallEnteredCallback1);
script->Run(env.local()).ToLocalChecked();
CHECK_EQ(2, callback_fired);
+ CHECK_EQ(0, before_call_entered_callback_count1);
+ CHECK_EQ(4, before_call_entered_callback_count2);
v8::base::OS::Print("\n--- Function ---\n");
callback_fired = 0;
+ before_call_entered_callback_count1 = 0;
+ before_call_entered_callback_count2 = 0;
Local<Function> recursive_function = Local<Function>::Cast(
env->Global()->Get(env.local(), v8_str("recursion")).ToLocalChecked());
v8::Local<Value> args[] = {v8_num(0)};
recursive_function->Call(env.local(), env->Global(), 1, args)
.ToLocalChecked();
CHECK_EQ(2, callback_fired);
+ CHECK_EQ(0, before_call_entered_callback_count1);
+ CHECK_EQ(4, before_call_entered_callback_count2);
}
-void CallCompletedCallbackNoException() {
+void CallCompletedCallbackNoException(v8::Isolate*) {
v8::HandleScope scope(CcTest::isolate());
CompileRun("1+1;");
}
-void CallCompletedCallbackException() {
+void CallCompletedCallbackException(v8::Isolate*) {
v8::HandleScope scope(CcTest::isolate());
CompileRun("throw 'second exception';");
}
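This hunk also documents the new callback shapes: both call-completed and before-call-entered callbacks now receive the v8::Isolate* they fire on, registering the same function pointer twice is a no-op (CallCompletedCallback1 and BeforeCallEnteredCallback1 are each added twice yet fire once per call), and removal is by function pointer. A registration sketch under those observations:

    void OnBeforeCall(v8::Isolate* isolate) { /* entering a JS call */ }
    void OnCallCompleted(v8::Isolate* isolate) { /* back in the embedder */ }

    isolate->AddBeforeCallEnteredCallback(OnBeforeCall);
    isolate->AddCallCompletedCallback(OnCallCompleted);
    // ... run scripts ...
    isolate->RemoveBeforeCallEnteredCallback(OnBeforeCall);
    isolate->RemoveCallCompletedCallback(OnCallCompleted);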
@@ -21676,9 +22166,8 @@ class RequestInterruptTestWithMethodCall
v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate_);
v8::Local<v8::Template> proto = t->PrototypeTemplate();
proto->Set(v8_str("shouldContinue"),
- Function::New(env_.local(), ShouldContinueCallback,
- v8::External::New(isolate_, this))
- .ToLocalChecked());
+ FunctionTemplate::New(isolate_, ShouldContinueCallback,
+ v8::External::New(isolate_, this)));
CHECK(env_->Global()
->Set(env_.local(), v8_str("Klass"),
t->GetFunction(env_.local()).ToLocalChecked())
@@ -21744,9 +22233,8 @@ class RequestInterruptTestWithMethodCallAndInterceptor
v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate_);
v8::Local<v8::Template> proto = t->PrototypeTemplate();
proto->Set(v8_str("shouldContinue"),
- Function::New(env_.local(), ShouldContinueCallback,
- v8::External::New(isolate_, this))
- .ToLocalChecked());
+ FunctionTemplate::New(isolate_, ShouldContinueCallback,
+ v8::External::New(isolate_, this)));
v8::Local<v8::ObjectTemplate> instance_template = t->InstanceTemplate();
instance_template->SetHandler(
v8::NamedPropertyHandlerConfiguration(EmptyInterceptor));
@@ -21958,7 +22446,7 @@ THREADED_TEST(FunctionNew) {
->get_api_func_data()
->serial_number()),
i_isolate);
- auto cache = i_isolate->function_cache();
+ auto cache = i_isolate->template_instantiations_cache();
CHECK(cache->Lookup(serial_number)->IsTheHole());
// Verify that each Function::New creates a new function instance
Local<Object> data2 = v8::Object::New(isolate);
@@ -22034,7 +22522,6 @@ class ApiCallOptimizationChecker {
static void OptimizationCallback(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- CHECK(callee == info.Callee());
CHECK(data == info.Data());
CHECK(receiver == info.This());
if (info.Length() == 1) {
@@ -23699,16 +24186,16 @@ TEST(SealHandleScopeNested) {
static bool access_was_called = false;
-
static bool AccessAlwaysAllowedWithFlag(Local<v8::Context> accessing_context,
- Local<v8::Object> accessed_object) {
+ Local<v8::Object> accessed_object,
+ Local<v8::Value> data) {
access_was_called = true;
return true;
}
-
static bool AccessAlwaysBlockedWithFlag(Local<v8::Context> accessing_context,
- Local<v8::Object> accessed_object) {
+ Local<v8::Object> accessed_object,
+ Local<v8::Value> data) {
access_was_called = true;
return false;
}
@@ -23970,6 +24457,37 @@ static void ExtrasBindingTestRuntimeFunction(
args.GetReturnValue().Set(v8_num(7));
}
+TEST(ExtrasFunctionSource) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ LocalContext env;
+
+ v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
+
+ // Functions defined in extras do not expose source code.
+ auto func = binding->Get(env.local(), v8_str("testFunctionToString"))
+ .ToLocalChecked()
+ .As<v8::Function>();
+ auto undefined = v8::Undefined(isolate);
+ auto result = func->Call(env.local(), undefined, 0, {})
+ .ToLocalChecked()
+ .As<v8::String>();
+ CHECK(result->StrictEquals(v8_str("function foo() { [native code] }")));
+
+ // Functions defined in extras do not show up in the stack trace.
+ auto wrapper = binding->Get(env.local(), v8_str("testStackTrace"))
+ .ToLocalChecked()
+ .As<v8::Function>();
+ CHECK(env->Global()->Set(env.local(), v8_str("wrapper"), wrapper).FromJust());
+ ExpectString(
+ "function f(x) { return wrapper(x) }"
+ "function g() { return new Error().stack; }"
+ "f(g)",
+ "Error\n"
+ " at g (<anonymous>:1:58)\n"
+ " at f (<anonymous>:1:24)\n"
+ " at <anonymous>:1:78");
+}
TEST(ExtrasBindingObject) {
v8::Isolate* isolate = CcTest::isolate();
@@ -24318,7 +24836,6 @@ TEST(AbortOnUncaughtExceptionNoAbort) {
TEST(AccessCheckedIsConcatSpreadable) {
- i::FLAG_harmony_concat_spreadable = true;
v8::Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
LocalContext env;
diff --git a/deps/v8/test/cctest/test-asm-validator.cc b/deps/v8/test/cctest/test-asm-validator.cc
index fae75008c7..207b915378 100644
--- a/deps/v8/test/cctest/test-asm-validator.cc
+++ b/deps/v8/test/cctest/test-asm-validator.cc
@@ -16,14 +16,15 @@
#include "test/cctest/expression-type-collector-macros.h"
// Macros for function types.
-#define FUNC_V_TYPE Bounds(Type::Function(Type::Undefined(zone), zone))
+#define FUNC_FOREIGN_TYPE Bounds(Type::Function(Type::Any(), zone))
+#define FUNC_V_TYPE Bounds(Type::Function(Type::Undefined(), zone))
#define FUNC_I_TYPE Bounds(Type::Function(cache.kAsmSigned, zone))
#define FUNC_F_TYPE Bounds(Type::Function(cache.kAsmFloat, zone))
#define FUNC_D_TYPE Bounds(Type::Function(cache.kAsmDouble, zone))
#define FUNC_D2D_TYPE \
Bounds(Type::Function(cache.kAsmDouble, cache.kAsmDouble, zone))
#define FUNC_N2F_TYPE \
- Bounds(Type::Function(cache.kAsmFloat, Type::Number(zone), zone))
+ Bounds(Type::Function(cache.kAsmFloat, Type::Number(), zone))
#define FUNC_I2I_TYPE \
Bounds(Type::Function(cache.kAsmSigned, cache.kAsmInt, zone))
#define FUNC_II2D_TYPE \
@@ -33,11 +34,10 @@
#define FUNC_DD2D_TYPE \
Bounds(Type::Function(cache.kAsmDouble, cache.kAsmDouble, cache.kAsmDouble, \
zone))
-#define FUNC_NN2N_TYPE \
- Bounds(Type::Function(Type::Number(zone), Type::Number(zone), \
- Type::Number(zone), zone))
+#define FUNC_NN2N_TYPE \
+ Bounds(Type::Function(Type::Number(), Type::Number(), Type::Number(), zone))
#define FUNC_N2N_TYPE \
- Bounds(Type::Function(Type::Number(zone), Type::Number(zone), zone))
+ Bounds(Type::Function(Type::Number(), Type::Number(), zone))
// Macros for array types.
#define FLOAT64_ARRAY_TYPE Bounds(Type::Array(cache.kAsmDouble, zone))
@@ -265,7 +265,7 @@ TEST(ValidateMinimum) {
}
}
// "use asm";
- CHECK_EXPR(Literal, Bounds(Type::String(zone)));
+ CHECK_EXPR(Literal, Bounds(Type::String()));
// var exp = stdlib.Math.exp;
CHECK_EXPR(Assignment, FUNC_D2D_TYPE) {
CHECK_VAR(exp, FUNC_D2D_TYPE);
@@ -518,12 +518,11 @@ void CheckStdlibShortcuts2(Zone* zone, ZoneVector<ExpressionTypeEntry>& types,
CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
#define CHECK_FUNC_TYPES_END_1() \
/* "use asm"; */ \
- CHECK_EXPR(Literal, Bounds(Type::String(zone))); \
+ CHECK_EXPR(Literal, Bounds(Type::String())); \
/* stdlib shortcuts. */ \
CheckStdlibShortcuts1(zone, types, index, depth, cache); \
CheckStdlibShortcuts2(zone, types, index, depth, cache);
-
#define CHECK_FUNC_TYPES_END_2() \
/* return { foo: foo }; */ \
CHECK_EXPR(ObjectLiteral, Bounds::Unbounded()) { \
@@ -564,10 +563,10 @@ TEST(ReturnVoid) {
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
// return undefined;
- CHECK_EXPR(Literal, Bounds(Type::Undefined(zone)));
+ CHECK_EXPR(Literal, Bounds(Type::Undefined()));
}
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(Type::Undefined(zone))) {
+ CHECK_EXPR(Call, Bounds(Type::Undefined())) {
CHECK_VAR(bar, FUNC_V_TYPE);
}
}
@@ -582,7 +581,7 @@ TEST(EmptyBody) {
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE);
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(Type::Undefined(zone))) {
+ CHECK_EXPR(Call, Bounds(Type::Undefined())) {
CHECK_VAR(bar, FUNC_V_TYPE);
}
}
@@ -602,7 +601,7 @@ TEST(DoesNothing) {
}
}
CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
- CHECK_EXPR(Call, Bounds(Type::Undefined(zone))) {
+ CHECK_EXPR(Call, Bounds(Type::Undefined())) {
CHECK_VAR(bar, FUNC_V_TYPE);
}
}
@@ -1066,7 +1065,7 @@ TEST(UnsignedDivide) {
CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(BinaryOperation, Bounds(Type::None(zone), Type::Any(zone))) {
+ CHECK_EXPR(BinaryOperation, Bounds(Type::None(), Type::Any())) {
CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmUnsigned)) {
CHECK_VAR(x, Bounds(cache.kAsmInt));
CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
@@ -1327,7 +1326,7 @@ TEST(Load1) {
CHECK_EXPR(Property, Bounds(cache.kAsmInt)) {
CHECK_VAR(i8, Bounds(cache.kInt8Array));
CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmSigned));
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
@@ -1387,7 +1386,7 @@ TEST(Store1) {
CHECK_EXPR(Property, Bounds::Unbounded()) {
CHECK_VAR(i8, Bounds(cache.kInt8Array));
CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmSigned));
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
@@ -1435,6 +1434,71 @@ TEST(StoreFloat) {
CHECK_FUNC_TYPES_END
}
+TEST(StoreIntish) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1; var y = 1; i32[0] = x + y; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_VAR(i32, Bounds(cache.kInt32Array));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_VAR(y, Bounds(cache.kAsmInt));
+ }
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+TEST(StoreFloatish) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { "
+ "var x = fround(1.0); "
+ "var y = fround(1.0); f32[0] = x + y; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(x, Bounds(cache.kAsmFloat));
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(fround, FUNC_N2F_TYPE);
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(y, Bounds(cache.kAsmFloat));
+ CHECK_EXPR(Call, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(fround, FUNC_N2F_TYPE);
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmFloat)) {
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_VAR(f32, Bounds(cache.kFloat32Array));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmFloat)) {
+ CHECK_VAR(x, Bounds(cache.kAsmFloat));
+ CHECK_VAR(y, Bounds(cache.kAsmFloat));
+ }
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
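
Note: the two tests above pin down the asm.js "intish"/"floatish" rule: an
additive expression over ints yields intish, over floats yields floatish,
and either may be stored directly into a heap view of matching width. A
self-contained sketch of the classification (simplified lattice with
invented names, not V8's TypeCache):

    enum class AsmType { kInt, kIntish, kFloat, kFloatish, kInvalid };

    // Result type of `a + b` under the simplified rules these tests exercise.
    static AsmType AddResult(AsmType a, AsmType b) {
      if (a == AsmType::kInt && b == AsmType::kInt) return AsmType::kIntish;
      if (a == AsmType::kFloat && b == AsmType::kFloat)
        return AsmType::kFloatish;
      return AsmType::kInvalid;
    }

An intish value may be assigned to i32[...] without an explicit |0 and a
floatish value to f32[...]; storing floatish into f64[...] is rejected,
which is what the reworded AssignToFloatishToF64 error below checks.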
TEST(Load1Constant) {
CHECK_FUNC_TYPES_BEGIN(
@@ -1522,7 +1586,9 @@ TEST(FunctionTables) {
CHECK_EXPR(Property, FUNC_I2I_TYPE) {
CHECK_VAR(table1, FUNC_I2I_ARRAY_TYPE);
CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_VAR(x, Bounds(cache.kAsmSigned));
+ // TODO(bradnelson): revert this
+ // CHECK_VAR(x, Bounds(cache.kAsmSigned));
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
@@ -1711,9 +1777,9 @@ TEST(MismatchedReturnTypeExpression) {
TEST(AssignToFloatishToF64) {
CHECK_FUNC_ERROR(
- "function bar() { var v = fround(1.0); f32[0] = v + fround(1.0); }\n"
+ "function bar() { var v = fround(1.0); f64[0] = v + fround(1.0); }\n"
"function foo() { bar(); }",
- "asm: line 39: intish or floatish assignment\n");
+ "asm: line 39: floatish assignment to double array\n");
}
@@ -1724,8 +1790,8 @@ TEST(ForeignFunction) {
"function foo() { bar(); }") {
CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(Call, Bounds(Type::Number(zone))) {
- CHECK_VAR(baz, Bounds(Type::Any(zone)));
+ CHECK_EXPR(Call, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(baz, FUNC_FOREIGN_TYPE);
CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
@@ -1739,9 +1805,9 @@ TEST(ForeignFunction) {
}
}
CHECK_FUNC_TYPES_END_1()
- CHECK_EXPR(Assignment, Bounds(Type::Any(zone))) {
- CHECK_VAR(baz, Bounds(Type::Any(zone)));
- CHECK_EXPR(Property, Bounds(Type::Any(zone))) {
+ CHECK_EXPR(Assignment, Bounds(FUNC_FOREIGN_TYPE)) {
+ CHECK_VAR(baz, Bounds(FUNC_FOREIGN_TYPE));
+ CHECK_EXPR(Property, Bounds(FUNC_FOREIGN_TYPE)) {
CHECK_VAR(foreign, Bounds::Unbounded());
CHECK_EXPR(Literal, Bounds::Unbounded());
}
@@ -1749,6 +1815,28 @@ TEST(ForeignFunction) {
CHECK_FUNC_TYPES_END_2()
}
+TEST(ByteArray) {
+  // Forbidden by the asm.js spec, but present in embenchen.
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 0; i8[x] = 2; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(x, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_VAR(i8, Bounds(cache.kInt8Array));
+ CHECK_VAR(x, Bounds(cache.kAsmSigned));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
TEST(BadExports) {
HARNESS_PREAMBLE()
@@ -1767,7 +1855,14 @@ TEST(BadExports) {
TEST(NestedHeapAssignment) {
CHECK_FUNC_ERROR(
- "function bar() { var x = 0; i8[x = 1] = 2; }\n"
+ "function bar() { var x = 0; i16[x = 1] = 2; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: expected >> in heap access\n");
+}
+
+TEST(BadOperatorHeapAssignment) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 0; i16[x & 1] = 2; }\n"
"function foo() { bar(); }",
"asm: line 39: expected >> in heap access\n");
}
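
Note: both error tests above rest on the same indexing rule: a multi-byte
heap view must be indexed as expr >> k with 1 << k equal to the element
size, so i16 accesses need '>> 1'. A tiny sketch of the check (invented
helper, not the validator's API):

    #include <cassert>

    static bool HeapIndexShiftMatches(int element_size_bytes, int shift_bits) {
      return (1 << shift_bits) == element_size_bytes;
    }

    int main() {
      assert(HeapIndexShiftMatches(2, 1));   // i16[(x & 1) >> 1] is well-formed
      assert(!HeapIndexShiftMatches(4, 1));  // i32 needs '>> 2'
      return 0;
    }

The ByteArray test above is the deliberate exception: for 1-byte views the
shift would be '>> 0', and the validator tolerates omitting it even though
the asm.js spec formally forbids the bare index.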
@@ -1786,7 +1881,7 @@ TEST(BadStandardFunctionCallOutside) {
"var s0 = sin(0);\n"
"function bar() { }\n"
"function foo() { bar(); }",
- "asm: line 39: calls forbidden outside function bodies\n");
+ "asm: line 39: illegal variable reference in module body\n");
}
@@ -1795,7 +1890,7 @@ TEST(BadFunctionCallOutside) {
"function bar() { return 0.0; }\n"
"var s0 = bar(0);\n"
"function foo() { bar(); }",
- "asm: line 40: calls forbidden outside function bodies\n");
+ "asm: line 40: illegal variable reference in module body\n");
}
@@ -1835,7 +1930,7 @@ TEST(NestedAssignmentInHeap) {
CHECK_EXPR(Property, Bounds::Unbounded()) {
CHECK_VAR(i8, Bounds(cache.kInt8Array));
CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
- CHECK_EXPR(Assignment, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
CHECK_VAR(x, Bounds(cache.kAsmInt));
CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
@@ -2007,7 +2102,7 @@ TEST(SwitchTest) {
CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
}
}
- CHECK_EXPR(Literal, Bounds(Type::Undefined(zone)));
+ CHECK_EXPR(Literal, Bounds(Type::Undefined()));
CHECK_VAR(.switch_tag, Bounds(cache.kAsmSigned));
// case 1: return 23;
CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
@@ -2048,3 +2143,202 @@ TEST(BadSwitchOrder) {
"function foo() { bar(); }",
"asm: line 39: default case out of order\n");
}
+
+TEST(BadForeignCall) {
+ const char test_function[] =
+ "function TestModule(stdlib, foreign, buffer) {\n"
+ " \"use asm\";\n"
+ " var ffunc = foreign.foo;\n"
+ " function test1() { var x = 0; ffunc(x); }\n"
+ " return { testFunc1: test1 };\n"
+ "}\n";
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ Zone* zone = handles.main_zone();
+ ZoneVector<ExpressionTypeEntry> types(zone);
+ CHECK_EQ(
+ "asm: line 4: foreign call argument expected to be int, double, or "
+ "fixnum\n",
+ Validate(zone, test_function, &types));
+}
+
+TEST(BadImports) {
+ const char test_function[] =
+ "function TestModule(stdlib, foreign, buffer) {\n"
+ " \"use asm\";\n"
+ " var fint = (foreign.bar | 0) | 0;\n"
+ " function test1() {}\n"
+ " return { testFunc1: test1 };\n"
+ "}\n";
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ Zone* zone = handles.main_zone();
+ ZoneVector<ExpressionTypeEntry> types(zone);
+ CHECK_EQ("asm: line 3: illegal computation inside module body\n",
+ Validate(zone, test_function, &types));
+}
+
+TEST(BadVariableReference) {
+ const char test_function[] =
+ "function TestModule(stdlib, foreign, buffer) {\n"
+ " \"use asm\";\n"
+ " var x = 0;\n"
+ " var y = x;\n"
+ " function test1() {}\n"
+ " return { testFunc1: test1 };\n"
+ "}\n";
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ Zone* zone = handles.main_zone();
+ ZoneVector<ExpressionTypeEntry> types(zone);
+ CHECK_EQ("asm: line 4: illegal variable reference in module body\n",
+ Validate(zone, test_function, &types));
+}
+
+TEST(BadForeignVariableReferenceValueOr) {
+ const char test_function[] =
+ "function TestModule(stdlib, foreign, buffer) {\n"
+ " \"use asm\";\n"
+ " var fint = foreign.bar | 1;\n"
+ "}\n";
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ Zone* zone = handles.main_zone();
+ ZoneVector<ExpressionTypeEntry> types(zone);
+ CHECK_EQ("asm: line 3: illegal integer annotation value\n",
+ Validate(zone, test_function, &types));
+}
+
+TEST(BadForeignVariableReferenceValueOrDot) {
+ const char test_function[] =
+ "function TestModule(stdlib, foreign, buffer) {\n"
+ " \"use asm\";\n"
+ " var fint = foreign.bar | 1.0;\n"
+ "}\n";
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ Zone* zone = handles.main_zone();
+ ZoneVector<ExpressionTypeEntry> types(zone);
+ CHECK_EQ("asm: line 3: illegal integer annotation value\n",
+ Validate(zone, test_function, &types));
+}
+
+TEST(BadForeignVariableReferenceValueMul) {
+ const char test_function[] =
+ "function TestModule(stdlib, foreign, buffer) {\n"
+ " \"use asm\";\n"
+ " var fint = foreign.bar * 2.0;\n"
+ "}\n";
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ Zone* zone = handles.main_zone();
+ ZoneVector<ExpressionTypeEntry> types(zone);
+ CHECK_EQ("asm: line 3: illegal double annotation value\n",
+ Validate(zone, test_function, &types));
+}
+
+TEST(BadForeignVariableReferenceValueMulNoDot) {
+ const char test_function[] =
+ "function TestModule(stdlib, foreign, buffer) {\n"
+ " \"use asm\";\n"
+ " var fint = foreign.bar * 1;\n"
+ "}\n";
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ Zone* zone = handles.main_zone();
+ ZoneVector<ExpressionTypeEntry> types(zone);
+ CHECK_EQ("asm: line 3: ill-typed arithmetic operation\n",
+ Validate(zone, test_function, &types));
+}
+
+TEST(Imports) {
+ const char test_function[] =
+ "function TestModule(stdlib, foreign, buffer) {\n"
+ " \"use asm\";\n"
+ " var ffunc = foreign.foo;\n"
+ " var fint = foreign.bar | 0;\n"
+ " var fdouble = +foreign.baz;\n"
+ " function test1() { return ffunc(fint|0, fdouble) | 0; }\n"
+ " function test2() { return +ffunc(fdouble, fint|0); }\n"
+ " return { testFunc1: test1, testFunc2: test2 };\n"
+ "}\n";
+
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ Zone* zone = handles.main_zone();
+ ZoneVector<ExpressionTypeEntry> types(zone);
+ CHECK_EQ("", Validate(zone, test_function, &types));
+ TypeCache cache;
+
+ CHECK_TYPES_BEGIN {
+ // Module.
+ CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
+ // function test1
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(Call, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(ffunc, FUNC_FOREIGN_TYPE);
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(fint, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ CHECK_VAR(fdouble, Bounds(cache.kAsmDouble));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ // function test2
+ CHECK_EXPR(FunctionLiteral, FUNC_D_TYPE) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
+ CHECK_EXPR(Call, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(ffunc, FUNC_FOREIGN_TYPE);
+ CHECK_VAR(fdouble, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_VAR(fint, Bounds(cache.kAsmInt));
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ }
+ // "use asm";
+ CHECK_EXPR(Literal, Bounds(Type::String()));
+ // var func = foreign.foo;
+ CHECK_EXPR(Assignment, Bounds(FUNC_FOREIGN_TYPE)) {
+ CHECK_VAR(ffunc, Bounds(FUNC_FOREIGN_TYPE));
+ CHECK_EXPR(Property, Bounds(FUNC_FOREIGN_TYPE)) {
+ CHECK_VAR(foreign, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ // var fint = foreign.bar | 0;
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmInt)) {
+ CHECK_VAR(fint, Bounds(cache.kAsmInt));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmSigned)) {
+ CHECK_EXPR(Property, Bounds(Type::Number())) {
+ CHECK_VAR(foreign, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmFixnum));
+ }
+ }
+ // var fdouble = +foreign.baz;
+ CHECK_EXPR(Assignment, Bounds(cache.kAsmDouble)) {
+ CHECK_VAR(fdouble, Bounds(cache.kAsmDouble));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kAsmDouble)) {
+ CHECK_EXPR(Property, Bounds(Type::Number())) {
+ CHECK_VAR(foreign, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kAsmDouble));
+ }
+ }
+ // return { testFunc1: test1, testFunc2: test2 };
+ CHECK_EXPR(ObjectLiteral, Bounds::Unbounded()) {
+ CHECK_VAR(test1, FUNC_I_TYPE);
+ CHECK_VAR(test2, FUNC_D_TYPE);
+ }
+ }
+ }
+ CHECK_TYPES_END
+}
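
Note: the new module-body tests above encode the only initializer shapes
asm.js allows for foreign imports, plus a few near-misses. A condensed
sketch of the classification (invented helper, not the validator's code):

    #include <string>

    enum class ImportKind { kFunction, kInt, kDouble, kInvalid };

    // 'op' is the outermost operator applied to foreign.<name>; 'literal'
    // is its literal operand, empty when there is none.
    static ImportKind ClassifyImport(const std::string& op,
                                     const std::string& literal) {
      if (op.empty()) return ImportKind::kFunction;  // var f = foreign.foo;
      if (op == "|") {                               // var i = foreign.bar | 0;
        return literal == "0" ? ImportKind::kInt : ImportKind::kInvalid;
      }
      if (op == "+") return ImportKind::kDouble;     // var d = +foreign.baz;
      return ImportKind::kInvalid;                   // e.g. foreign.bar * 2.0
    }

Anything beyond these shapes (extra computation such as (foreign.bar | 0) | 0,
a nonzero '|' literal, or a '*' annotation) is rejected with the module-body
errors asserted above.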
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 563b050c48..68eaab16f6 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -1819,6 +1819,55 @@ TEST(uxtah) {
}
+#define TEST_RBIT(expected_, input_) \
+ t.input = input_; \
+ t.result = 0; \
+ dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0); \
+ CHECK_EQ(expected_, t.result);
+
+
+TEST(rbit) {
+ CcTest::InitializeVM();
+ Isolate* const isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(isolate, nullptr, 0);
+
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatureScope scope(&assm, ARMv7);
+
+ typedef struct {
+ uint32_t input;
+ uint32_t result;
+ } T;
+ T t;
+
+ __ ldr(r1, MemOperand(r0, offsetof(T, input)));
+ __ rbit(r1, r1);
+ __ str(r1, MemOperand(r0, offsetof(T, result)));
+ __ bx(lr);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+
+ F3 f = FUNCTION_CAST<F3>(code->entry());
+ Object* dummy = NULL;
+ TEST_RBIT(0xffffffff, 0xffffffff);
+ TEST_RBIT(0x00000000, 0x00000000);
+ TEST_RBIT(0xffff0000, 0x0000ffff);
+ TEST_RBIT(0xff00ff00, 0x00ff00ff);
+ TEST_RBIT(0xf0f0f0f0, 0x0f0f0f0f);
+ TEST_RBIT(0x1e6a2c48, 0x12345678);
+ USE(dummy);
+ }
+}
+
+
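
Note: RBIT reverses the bit order of a 32-bit word. A standalone reference
(a sketch independent of the generated code) that reproduces the expected
values used above:

    #include <cassert>
    #include <cstdint>

    static uint32_t ReverseBits32(uint32_t x) {
      uint32_t r = 0;
      for (int i = 0; i < 32; ++i) {
        r = (r << 1) | (x & 1);  // shift the next low bit of x into r
        x >>= 1;
      }
      return r;
    }

    int main() {
      assert(ReverseBits32(0x0000ffff) == 0xffff0000);
      assert(ReverseBits32(0x12345678) == 0x1e6a2c48);
      return 0;
    }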
TEST(code_relative_offset) {
// Test extracting the offset of a label from the beginning of the code
// in a register.
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 4f986cea9b..1928a753fd 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -384,6 +384,14 @@ TEST(MIPS3) {
TEST(MIPS4) {
+  // Exchange between GP and FP registers is done through memory
+  // on FPXX compiled binaries and architectures that do not support
+  // MTHC1 and MFHC1. If this is the case, skip this test.
+ if (IsFpxxMode() &&
+ (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson))) {
+ return;
+ }
+
// Test moves between floating point and integer registers.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -403,7 +411,7 @@ TEST(MIPS4) {
__ ldc1(f6, MemOperand(a0, offsetof(T, b)) );
// Swap f4 and f6, by using four integer registers, t0-t3.
- if (!IsFp64Mode()) {
+ if (IsFp32Mode()) {
__ mfc1(t0, f4);
__ mfc1(t1, f5);
__ mfc1(t2, f6);
@@ -415,6 +423,7 @@ TEST(MIPS4) {
__ mtc1(t3, f5);
} else {
CHECK(!IsMipsArchVariant(kMips32r1) && !IsMipsArchVariant(kLoongson));
+ DCHECK(IsFp64Mode() || IsFpxxMode());
__ mfc1(t0, f4);
__ mfhc1(t1, f4);
__ mfc1(t2, f6);
@@ -425,6 +434,7 @@ TEST(MIPS4) {
__ mtc1(t2, f4);
__ mthc1(t3, f4);
}
+
// Store the swapped f4 and f5 back to memory.
__ sdc1(f4, MemOperand(a0, offsetof(T, a)) );
__ sdc1(f6, MemOperand(a0, offsetof(T, c)) );
@@ -811,8 +821,6 @@ TEST(MIPS9) {
TEST(MIPS10) {
// Test conversions between doubles and words.
- // Test maps double to FP reg pairs in fp32 mode
- // and into FP reg in fp64 mode.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -830,24 +838,16 @@ TEST(MIPS10) {
Assembler assm(isolate, NULL, 0);
Label L, C;
- if (!IsMipsArchVariant(kMips32r2)) return;
+ if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) return;
// Load all structure elements to registers.
// (f0, f1) = a (fp32), f0 = a (fp64)
__ ldc1(f0, MemOperand(a0, offsetof(T, a)));
- if (IsFp64Mode()) {
- __ mfc1(t0, f0); // t0 = f0(31..0)
- __ mfhc1(t1, f0); // t1 = sign_extend(f0(63..32))
- __ sw(t0, MemOperand(a0, offsetof(T, dbl_mant))); // dbl_mant = t0
- __ sw(t1, MemOperand(a0, offsetof(T, dbl_exp))); // dbl_exp = t1
- } else {
- // Save the raw bits of the double.
- __ mfc1(t0, f0); // t0 = a1
- __ mfc1(t1, f1); // t1 = a2
- __ sw(t0, MemOperand(a0, offsetof(T, dbl_mant))); // dbl_mant = t0
- __ sw(t1, MemOperand(a0, offsetof(T, dbl_exp))); // dbl_exp = t1
- }
+ __ mfc1(t0, f0); // t0 = f0(31..0)
+ __ mfhc1(t1, f0); // t1 = sign_extend(f0(63..32))
+ __ sw(t0, MemOperand(a0, offsetof(T, dbl_mant))); // dbl_mant = t0
+ __ sw(t1, MemOperand(a0, offsetof(T, dbl_exp))); // dbl_exp = t1
// Convert double in f0 to word, save hi/lo parts.
__ cvt_w_d(f0, f0); // a_word = (word)a
@@ -1456,10 +1456,10 @@ TEST(min_max) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
double a;
double b;
double c;
@@ -1468,21 +1468,35 @@ TEST(min_max) {
float f;
float g;
float h;
- } TestFloat;
+ };
TestFloat test;
- const double double_nan = std::numeric_limits<double>::quiet_NaN();
- const float float_nan = std::numeric_limits<float>::quiet_NaN();
- const int kTableLength = 5;
- double inputsa[kTableLength] = {2.0, 3.0, double_nan, 3.0, double_nan};
- double inputsb[kTableLength] = {3.0, 2.0, 3.0, double_nan, double_nan};
- double outputsdmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, double_nan};
- double outputsdmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, double_nan};
-
- float inputse[kTableLength] = {2.0, 3.0, float_nan, 3.0, float_nan};
- float inputsf[kTableLength] = {3.0, 2.0, 3.0, float_nan, float_nan};
- float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, float_nan};
- float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, float_nan};
+ const double dnan = std::numeric_limits<double>::quiet_NaN();
+ const double dinf = std::numeric_limits<double>::infinity();
+ const double dminf = -std::numeric_limits<double>::infinity();
+ const float fnan = std::numeric_limits<float>::quiet_NaN();
+ const float finf = std::numeric_limits<float>::infinity();
+  const float fminf = -std::numeric_limits<float>::infinity();
+ const int kTableLength = 13;
+ double inputsa[kTableLength] = {2.0, 3.0, dnan, 3.0, -0.0, 0.0, dinf,
+ dnan, 42.0, dinf, dminf, dinf, dnan};
+ double inputsb[kTableLength] = {3.0, 2.0, 3.0, dnan, 0.0, -0.0, dnan,
+ dinf, dinf, 42.0, dinf, dminf, dnan};
+ double outputsdmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0,
+ -0.0, dinf, dinf, 42.0, 42.0,
+ dminf, dminf, dnan};
+ double outputsdmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, dinf,
+ dinf, dinf, dinf, dinf, dinf, dnan};
+
+ float inputse[kTableLength] = {2.0, 3.0, fnan, 3.0, -0.0, 0.0, finf,
+ fnan, 42.0, finf, fminf, finf, fnan};
+ float inputsf[kTableLength] = {3.0, 2.0, 3.0, fnan, -0.0, 0.0, fnan,
+ finf, finf, 42.0, finf, fminf, fnan};
+ float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0,
+ -0.0, finf, finf, 42.0, 42.0,
+ fminf, fminf, fnan};
+ float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, finf,
+ finf, finf, finf, finf, finf, fnan};
__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
__ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
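
Note: the widened tables encode IEEE 754-2008 minNum/maxNum semantics,
which MIPS r6 MIN.fmt/MAX.fmt implement: a quiet NaN is ignored when the
other operand is a number, NaN comes back only when both inputs are NaN,
and -0.0 orders below +0.0. A reference sketch (not the macro-assembler
code):

    #include <cassert>
    #include <cmath>

    static double MinNum(double a, double b) {
      if (std::isnan(a)) return std::isnan(b) ? a : b;  // NaN only if both NaN
      if (std::isnan(b)) return a;
      if (a == 0.0 && b == 0.0) return std::signbit(a) ? a : b;  // -0.0 < +0.0
      return a < b ? a : b;
    }

    static double MaxNum(double a, double b) {
      if (std::isnan(a)) return std::isnan(b) ? a : b;
      if (std::isnan(b)) return a;
      if (a == 0.0 && b == 0.0) return std::signbit(a) ? b : a;  // +0.0 > -0.0
      return a > b ? a : b;
    }

    int main() {
      assert(MinNum(std::nan(""), 3.0) == 3.0);
      assert(std::signbit(MinNum(-0.0, 0.0)));
      assert(!std::signbit(MaxNum(-0.0, 0.0)));
      return 0;
    }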
@@ -1863,16 +1877,20 @@ TEST(Cvt_d_uw) {
TEST(mina_maxa) {
if (IsMipsArchVariant(kMips32r6)) {
- const int kTableLength = 15;
+ const int kTableLength = 23;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
- const double double_nan = std::numeric_limits<double>::quiet_NaN();
- const float float_nan = std::numeric_limits<float>::quiet_NaN();
-
- typedef struct test_float {
+ const double dnan = std::numeric_limits<double>::quiet_NaN();
+ const double dinf = std::numeric_limits<double>::infinity();
+ const double dminf = -std::numeric_limits<double>::infinity();
+ const float fnan = std::numeric_limits<float>::quiet_NaN();
+ const float finf = std::numeric_limits<float>::infinity();
+    const float fminf = -std::numeric_limits<float>::infinity();
+
+ struct TestFloat {
double a;
double b;
double resd;
@@ -1881,41 +1899,34 @@ TEST(mina_maxa) {
float d;
float resf;
float resf1;
- }TestFloat;
+ };
TestFloat test;
double inputsa[kTableLength] = {
- 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9,
- -9.8, -10.0, -8.9, -9.8, double_nan, 3.0, double_nan
- };
+ 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9, -9.8, -10.0, -8.9, -9.8,
+ dnan, 3.0, -0.0, 0.0, dinf, dnan, 42.0, dinf, dminf, dinf, dnan};
double inputsb[kTableLength] = {
- 4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8,
- 9.8, -9.8, -11.2, -9.8, 3.0, double_nan, double_nan
- };
+ 4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8, 9.8, -9.8, -11.2, -9.8,
+ 3.0, dnan, 0.0, -0.0, dnan, dinf, dinf, 42.0, dinf, dminf, dnan};
double resd[kTableLength] = {
- 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9,
- -9.8, -9.8, -8.9, -9.8, 3.0, 3.0, double_nan
- };
+ 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8,
+ 3.0, 3.0, -0.0, -0.0, dinf, dinf, 42.0, 42.0, dminf, dminf, dnan};
double resd1[kTableLength] = {
- 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8,
- 9.8, -10.0, -11.2, -9.8, 3.0, 3.0, double_nan
- };
+ 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
+ 3.0, 3.0, 0.0, 0.0, dinf, dinf, dinf, dinf, dinf, dinf, dnan};
float inputsc[kTableLength] = {
- 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9,
- -9.8, -10.0, -8.9, -9.8, float_nan, 3.0, float_nan
- };
- float inputsd[kTableLength] = {
- 4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8,
- 9.8, -9.8, -11.2, -9.8, 3.0, float_nan, float_nan
- };
+ 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9, -9.8, -10.0, -8.9, -9.8,
+ fnan, 3.0, -0.0, 0.0, finf, fnan, 42.0, finf, fminf, finf, fnan};
+ float inputsd[kTableLength] = {4.8, 5.3, 6.1, -10.0, -8.9, -9.8,
+ 9.8, 9.8, 9.8, -9.8, -11.2, -9.8,
+ 3.0, fnan, -0.0, 0.0, fnan, finf,
+ finf, 42.0, finf, fminf, fnan};
float resf[kTableLength] = {
- 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9,
- -9.8, -9.8, -8.9, -9.8, 3.0, 3.0, float_nan
- };
+ 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8,
+ 3.0, 3.0, -0.0, -0.0, finf, finf, 42.0, 42.0, fminf, fminf, fnan};
float resf1[kTableLength] = {
- 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8,
- 9.8, -10.0, -11.2, -9.8, 3.0, 3.0, float_nan
- };
+ 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
+ 3.0, 3.0, 0.0, 0.0, finf, finf, finf, finf, finf, finf, fnan};
__ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, b)) );
@@ -5024,8 +5035,7 @@ TEST(r6_jialc) {
}
}
-
-uint64_t run_addiupc(int32_t imm19) {
+static uint32_t run_addiupc(int32_t imm19) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
@@ -5058,13 +5068,13 @@ TEST(r6_addiupc) {
int32_t imm19;
};
- struct TestCaseAddiupc tc[] = {
- // imm19
- { -262144 }, // 0x40000
- { -1 }, // 0x7FFFF
- { 0 },
- { 1 }, // 0x00001
- { 262143 } // 0x3FFFF
+ TestCaseAddiupc tc[] = {
+ // imm19
+ {-262144}, // 0x40000
+ {-1}, // 0x7FFFF
+ {0},
+ {1}, // 0x00001
+ {262143} // 0x3FFFF
};
size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAddiupc);
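
Note: ADDIUPC computes a PC-relative value; per the MIPS32r6 manual (an
assumption, not code from this patch) the 19-bit signed immediate is scaled
by the 4-byte instruction size and added to the address of the instruction,
which is why the cases span the full signed range -262144 .. 262143:

    #include <cstdint>

    // Sketch of the architectural result; imm19 is already sign-extended.
    static uint32_t AddiupcResult(uint32_t pc, int32_t imm19) {
      return pc + (static_cast<uint32_t>(imm19) << 2);
    }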
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index 988083cadc..b979db29bb 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -1586,10 +1586,10 @@ TEST(min_max) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
- typedef struct test_float {
+ struct TestFloat {
double a;
double b;
double c;
@@ -1598,21 +1598,35 @@ TEST(min_max) {
float f;
float g;
float h;
- } TestFloat;
+ };
TestFloat test;
- const double double_nan = std::numeric_limits<double>::quiet_NaN();
- const float float_nan = std::numeric_limits<float>::quiet_NaN();
- const int kTableLength = 5;
- double inputsa[kTableLength] = {2.0, 3.0, double_nan, 3.0, double_nan};
- double inputsb[kTableLength] = {3.0, 2.0, 3.0, double_nan, double_nan};
- double outputsdmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, double_nan};
- double outputsdmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, double_nan};
-
- float inputse[kTableLength] = {2.0, 3.0, float_nan, 3.0, float_nan};
- float inputsf[kTableLength] = {3.0, 2.0, 3.0, float_nan, float_nan};
- float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, float_nan};
- float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, float_nan};
+ const double dnan = std::numeric_limits<double>::quiet_NaN();
+ const double dinf = std::numeric_limits<double>::infinity();
+ const double dminf = -std::numeric_limits<double>::infinity();
+ const float fnan = std::numeric_limits<float>::quiet_NaN();
+ const float finf = std::numeric_limits<float>::infinity();
+  const float fminf = -std::numeric_limits<float>::infinity();
+ const int kTableLength = 13;
+ double inputsa[kTableLength] = {2.0, 3.0, dnan, 3.0, -0.0, 0.0, dinf,
+ dnan, 42.0, dinf, dminf, dinf, dnan};
+ double inputsb[kTableLength] = {3.0, 2.0, 3.0, dnan, 0.0, -0.0, dnan,
+ dinf, dinf, 42.0, dinf, dminf, dnan};
+ double outputsdmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0,
+ -0.0, dinf, dinf, 42.0, 42.0,
+ dminf, dminf, dnan};
+ double outputsdmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, dinf,
+ dinf, dinf, dinf, dinf, dinf, dnan};
+
+ float inputse[kTableLength] = {2.0, 3.0, fnan, 3.0, -0.0, 0.0, finf,
+ fnan, 42.0, finf, fminf, finf, fnan};
+ float inputsf[kTableLength] = {3.0, 2.0, 3.0, fnan, -0.0, 0.0, fnan,
+ finf, finf, 42.0, finf, fminf, fnan};
+ float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0,
+ -0.0, finf, finf, 42.0, 42.0,
+ fminf, fminf, fnan};
+ float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, finf,
+ finf, finf, finf, finf, finf, fnan};
__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
__ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
@@ -1946,16 +1960,20 @@ TEST(rint_s) {
TEST(mina_maxa) {
if (kArchVariant == kMips64r6) {
- const int kTableLength = 15;
+ const int kTableLength = 23;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assm(isolate, NULL, 0,
+ MacroAssembler assm(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
- const double double_nan = std::numeric_limits<double>::quiet_NaN();
- const float float_nan = std::numeric_limits<float>::quiet_NaN();
-
- typedef struct test_float {
+ const double dnan = std::numeric_limits<double>::quiet_NaN();
+ const double dinf = std::numeric_limits<double>::infinity();
+ const double dminf = -std::numeric_limits<double>::infinity();
+ const float fnan = std::numeric_limits<float>::quiet_NaN();
+ const float finf = std::numeric_limits<float>::infinity();
+    const float fminf = -std::numeric_limits<float>::infinity();
+
+ struct TestFloat {
double a;
double b;
double resd;
@@ -1964,41 +1982,34 @@ TEST(mina_maxa) {
float d;
float resf;
float resf1;
- }TestFloat;
+ };
TestFloat test;
double inputsa[kTableLength] = {
- 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9,
- -9.8, -10.0, -8.9, -9.8, double_nan, 3.0, double_nan
- };
+ 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9, -9.8, -10.0, -8.9, -9.8,
+ dnan, 3.0, -0.0, 0.0, dinf, dnan, 42.0, dinf, dminf, dinf, dnan};
double inputsb[kTableLength] = {
- 4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8,
- 9.8, -9.8, -11.2, -9.8, 3.0, double_nan, double_nan
- };
+ 4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8, 9.8, -9.8, -11.2, -9.8,
+ 3.0, dnan, 0.0, -0.0, dnan, dinf, dinf, 42.0, dinf, dminf, dnan};
double resd[kTableLength] = {
- 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9,
- -9.8, -9.8, -8.9, -9.8, 3.0, 3.0, double_nan
- };
+ 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8,
+ 3.0, 3.0, -0.0, -0.0, dinf, dinf, 42.0, 42.0, dminf, dminf, dnan};
double resd1[kTableLength] = {
- 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8,
- 9.8, -10.0, -11.2, -9.8, 3.0, 3.0, double_nan
- };
+ 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
+ 3.0, 3.0, 0.0, 0.0, dinf, dinf, dinf, dinf, dinf, dinf, dnan};
float inputsc[kTableLength] = {
- 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9,
- -9.8, -10.0, -8.9, -9.8, float_nan, 3.0, float_nan
- };
- float inputsd[kTableLength] = {
- 4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8,
- 9.8, -9.8, -11.2, -9.8, 3.0, float_nan, float_nan
- };
+ 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9, -9.8, -10.0, -8.9, -9.8,
+ fnan, 3.0, -0.0, 0.0, finf, fnan, 42.0, finf, fminf, finf, fnan};
+ float inputsd[kTableLength] = {4.8, 5.3, 6.1, -10.0, -8.9, -9.8,
+ 9.8, 9.8, 9.8, -9.8, -11.2, -9.8,
+ 3.0, fnan, -0.0, 0.0, fnan, finf,
+ finf, 42.0, finf, fminf, fnan};
float resf[kTableLength] = {
- 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9,
- -9.8, -9.8, -8.9, -9.8, 3.0, 3.0, float_nan
- };
+ 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8,
+ 3.0, 3.0, -0.0, -0.0, finf, finf, 42.0, 42.0, fminf, fminf, fnan};
float resf1[kTableLength] = {
- 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8,
- 9.8, -10.0, -11.2, -9.8, 3.0, 3.0, float_nan
- };
+ 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
+ 3.0, 3.0, 0.0, 0.0, finf, finf, finf, finf, finf, finf, fnan};
__ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
__ ldc1(f4, MemOperand(a0, offsetof(TestFloat, b)) );
@@ -4997,6 +5008,55 @@ TEST(r6_aui_family) {
}
+uint64_t run_li_macro(uint64_t rs, LiFlags mode) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+
+ __ li(a0, rs, mode);
+ __ mov(v0, a0);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+
+ return res;
+}
+
+
+TEST(li_macro) {
+ CcTest::InitializeVM();
+
+ uint64_t inputs[] = {
+ 0x0000000000000000, 0x000000000000ffff, 0x00000000ffffffff,
+ 0x0000ffffffffffff, 0xffffffffffffffff, 0xffff000000000000,
+ 0xffffffff00000000, 0xffffffffffff0000, 0xffff0000ffff0000,
+ 0x0000ffffffff0000, 0x0000ffff0000ffff, 0x00007fffffffffff,
+ 0x7fffffffffffffff, 0x000000007fffffff, 0x00007fff7fffffff,
+ };
+
+ size_t nr_test_cases = sizeof(inputs) / sizeof(inputs[0]);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_li_macro(inputs[i], OPTIMIZE_SIZE);
+ CHECK_EQ(inputs[i], res);
+ res = run_li_macro(inputs[i], CONSTANT_SIZE);
+ CHECK_EQ(inputs[i], res);
+ if (is_int48(inputs[i])) {
+ res = run_li_macro(inputs[i], ADDRESS_LOAD);
+ CHECK_EQ(inputs[i], res);
+ }
+ }
+}
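
Note: the ADDRESS_LOAD branch is guarded because that li mode emits a
fixed-length sequence sized for 48-bit addresses. A standalone mirror of
the gate (a sketch matching the semantics of the is_int48 helper used
above):

    #include <cstdint>

    static bool IsInt48(uint64_t v) {
      int64_t s = static_cast<int64_t>(v);
      return s >= -(INT64_C(1) << 47) && s < (INT64_C(1) << 47);
    }

    // e.g. IsInt48(0x00007fffffffffff) holds, IsInt48(0x0000ffffffffffff)
    // does not, and IsInt48(0xffffffffffffffff) holds since that is -1.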
+
+
uint64_t run_lwpc(int offset) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
diff --git a/deps/v8/test/cctest/test-ast-expression-visitor.cc b/deps/v8/test/cctest/test-ast-expression-visitor.cc
index b6cca6ac38..a40f87ccfe 100644
--- a/deps/v8/test/cctest/test-ast-expression-visitor.cc
+++ b/deps/v8/test/cctest/test-ast-expression-visitor.cc
@@ -343,7 +343,6 @@ TEST(VisitThrow) {
v8::V8::Initialize();
HandleAndZoneScope handles;
ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
- // Check that traversing an empty for statement works.
const char test_function[] =
"function foo() {\n"
" throw 123;\n"
@@ -364,7 +363,6 @@ TEST(VisitYield) {
v8::V8::Initialize();
HandleAndZoneScope handles;
ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
- // Check that traversing an empty for statement works.
const char test_function[] =
"function* foo() {\n"
" yield 123;\n"
@@ -372,7 +370,7 @@ TEST(VisitYield) {
CollectTypes(&handles, test_function, &types);
CHECK_TYPES_BEGIN {
CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
- // Generator function yields generator on entry.
+ // Implicit initial yield
CHECK_EXPR(Yield, Bounds::Unbounded()) {
CHECK_VAR(.generator_object, Bounds::Unbounded());
CHECK_EXPR(Assignment, Bounds::Unbounded()) {
@@ -380,16 +378,20 @@ TEST(VisitYield) {
CHECK_EXPR(CallRuntime, Bounds::Unbounded());
}
}
- // Then yields undefined.
+ // Explicit yield
CHECK_EXPR(Yield, Bounds::Unbounded()) {
CHECK_VAR(.generator_object, Bounds::Unbounded());
CHECK_EXPR(Literal, Bounds::Unbounded());
}
- // Then yields 123.
+ // Implicit final yield
CHECK_EXPR(Yield, Bounds::Unbounded()) {
CHECK_VAR(.generator_object, Bounds::Unbounded());
CHECK_EXPR(Literal, Bounds::Unbounded());
}
+ // Implicit finally clause
+ CHECK_EXPR(CallRuntime, Bounds::Unbounded()) {
+ CHECK_VAR(.generator_object, Bounds::Unbounded());
+ }
}
}
CHECK_TYPES_END
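
Note: the relabeled expectations above follow V8's generator desugaring at
this version: an implicit initial yield that hands back the allocated
generator object, the explicit `yield 123`, an implicit final yield for the
return value, and a runtime call in an implicit finally clause that closes
the generator. Roughly, with invented placeholder names (a hypothetical
sketch, not the parser's actual output):

    // function* foo() { yield 123; } behaves approximately like:
    //   .generator_object = %_CreateGeneratorObject(...);  // invented name
    //   try {
    //     yield .generator_object;   // implicit initial yield
    //     yield 123;                 // explicit yield
    //     yield undefined;           // implicit final yield
    //   } finally {
    //     %_CloseGenerator(.generator_object);             // invented name
    //   }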
@@ -400,7 +402,6 @@ TEST(VisitSkipping) {
v8::V8::Initialize();
HandleAndZoneScope handles;
ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
- // Check that traversing an empty for statement works.
const char test_function[] =
"function foo(x) {\n"
" return (x + x) + 1;\n"
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 29a24e62df..361c879af3 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -75,7 +75,7 @@ TEST(StartStop) {
CpuProfilesCollection profiles(isolate->heap());
ProfileGenerator generator(&profiles);
SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
- &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
+ &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
processor->Start();
processor->StopSynchronously();
}
@@ -417,15 +417,16 @@ TEST(ProfileStartEndTime) {
CHECK(profile->GetStartTime() <= profile->GetEndTime());
}
-
static v8::CpuProfile* RunProfiler(v8::Local<v8::Context> env,
v8::Local<v8::Function> function,
v8::Local<v8::Value> argv[], int argc,
- unsigned min_js_samples,
+ unsigned min_js_samples = 0,
+ unsigned min_external_samples = 0,
bool collect_samples = false) {
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
v8::Local<v8::String> profile_name = v8_str("my_profile");
+ cpu_profiler->SetSamplingInterval(100);
cpu_profiler->StartProfiling(profile_name, collect_samples);
i::Sampler* sampler =
@@ -433,7 +434,8 @@ static v8::CpuProfile* RunProfiler(v8::Local<v8::Context> env,
sampler->StartCountingSamples();
do {
function->Call(env, env->Global(), argc, argv).ToLocalChecked();
- } while (sampler->js_and_external_sample_count() < min_js_samples);
+ } while (sampler->js_sample_count() < min_js_samples ||
+ sampler->external_sample_count() < min_external_samples);
v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
@@ -445,55 +447,14 @@ static v8::CpuProfile* RunProfiler(v8::Local<v8::Context> env,
}
-static bool ContainsString(v8::Local<v8::Context> context,
- v8::Local<v8::String> string,
- const Vector<v8::Local<v8::String> >& vector) {
- for (int i = 0; i < vector.length(); i++) {
- if (string->Equals(context, vector[i]).FromJust()) return true;
- }
- return false;
-}
-
-
-static void CheckChildrenNames(v8::Local<v8::Context> context,
- const v8::CpuProfileNode* node,
- const Vector<v8::Local<v8::String> >& names) {
- int count = node->GetChildrenCount();
- for (int i = 0; i < count; i++) {
- v8::Local<v8::String> name = node->GetChild(i)->GetFunctionName();
- if (!ContainsString(context, name, names)) {
- char buffer[100];
- i::SNPrintF(Vector<char>(buffer, arraysize(buffer)),
- "Unexpected child '%s' found in '%s'",
- *v8::String::Utf8Value(name),
- *v8::String::Utf8Value(node->GetFunctionName()));
- FATAL(buffer);
- }
- // Check that there are no duplicates.
- for (int j = 0; j < count; j++) {
- if (j == i) continue;
- if (name->Equals(context, node->GetChild(j)->GetFunctionName())
- .FromJust()) {
- char buffer[100];
- i::SNPrintF(Vector<char>(buffer, arraysize(buffer)),
- "Second child with the same name '%s' found in '%s'",
- *v8::String::Utf8Value(name),
- *v8::String::Utf8Value(node->GetFunctionName()));
- FATAL(buffer);
- }
- }
- }
-}
-
-
static const v8::CpuProfileNode* FindChild(v8::Local<v8::Context> context,
const v8::CpuProfileNode* node,
const char* name) {
int count = node->GetChildrenCount();
- v8::Local<v8::String> nameHandle = v8_str(name);
+ v8::Local<v8::String> name_handle = v8_str(name);
for (int i = 0; i < count; i++) {
const v8::CpuProfileNode* child = node->GetChild(i);
- if (nameHandle->Equals(context, child->GetFunctionName()).FromJust()) {
+ if (name_handle->Equals(context, child->GetFunctionName()).FromJust()) {
return child;
}
}
@@ -521,8 +482,6 @@ static void CheckSimpleBranch(v8::Local<v8::Context> context,
for (int i = 0; i < length; i++) {
const char* name = names[i];
node = GetChild(context, node, name);
- int expectedChildrenCount = (i == length - 1) ? 0 : 1;
- CHECK_EQ(expectedChildrenCount, node->GetChildrenCount());
}
}
@@ -537,38 +496,43 @@ static const ProfileNode* GetSimpleBranch(v8::Local<v8::Context> context,
return reinterpret_cast<const ProfileNode*>(node);
}
+static void CallCollectSample(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetIsolate()->GetCpuProfiler()->CollectSample();
+}
-static const char* cpu_profiler_test_source = "function loop(timeout) {\n"
-" this.mmm = 0;\n"
-" var start = Date.now();\n"
-" while (Date.now() - start < timeout) {\n"
-" var n = 100*1000;\n"
-" while(n > 1) {\n"
-" n--;\n"
-" this.mmm += n * n * n;\n"
-" }\n"
-" }\n"
-"}\n"
-"function delay() { try { loop(10); } catch(e) { } }\n"
-"function bar() { delay(); }\n"
-"function baz() { delay(); }\n"
-"function foo() {\n"
-" try {\n"
-" delay();\n"
-" bar();\n"
-" delay();\n"
-" baz();\n"
-" } catch (e) { }\n"
-"}\n"
-"function start(timeout) {\n"
-" var start = Date.now();\n"
-" do {\n"
-" foo();\n"
-" var duration = Date.now() - start;\n"
-" } while (duration < timeout);\n"
-" return duration;\n"
-"}\n";
-
+static const char* cpu_profiler_test_source =
+ "%NeverOptimizeFunction(loop);\n"
+ "%NeverOptimizeFunction(delay);\n"
+ "%NeverOptimizeFunction(bar);\n"
+ "%NeverOptimizeFunction(baz);\n"
+ "%NeverOptimizeFunction(foo);\n"
+ "%NeverOptimizeFunction(start);\n"
+ "function loop(timeout) {\n"
+ " this.mmm = 0;\n"
+ " var start = Date.now();\n"
+ " do {\n"
+ " var n = 1000;\n"
+ " while(n > 1) {\n"
+ " n--;\n"
+ " this.mmm += n * n * n;\n"
+ " }\n"
+ " } while (Date.now() - start < timeout);\n"
+ "}\n"
+ "function delay() { loop(10); }\n"
+ "function bar() { delay(); }\n"
+ "function baz() { delay(); }\n"
+ "function foo() {\n"
+ " delay();\n"
+ " bar();\n"
+ " delay();\n"
+ " baz();\n"
+ "}\n"
+ "function start(duration) {\n"
+ " var start = Date.now();\n"
+ " do {\n"
+ " foo();\n"
+ " } while (Date.now() - start < duration);\n"
+ "}\n";
// Check that the profile tree for the script above will look like the
// following:
@@ -588,6 +552,7 @@ static const char* cpu_profiler_test_source = "function loop(timeout) {\n"
// 2 2 (program) [-1]
// 6 6 (garbage collector) [-1]
TEST(CollectCpuProfile) {
+ i::FLAG_allow_natives_syntax = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -598,49 +563,37 @@ TEST(CollectCpuProfile) {
v8::Local<v8::Value> args[] = {
v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 200);
- function->Call(env.local(), env->Global(), arraysize(args), args)
- .ToLocalChecked();
+ RunProfiler(env.local(), function, args, arraysize(args), 1000);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
+ const v8::CpuProfileNode* foo_node = GetChild(env.local(), start_node, "foo");
- ScopedVector<v8::Local<v8::String> > names(3);
- names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8_str(ProfileGenerator::kProgramEntryName);
- names[2] = v8_str("start");
- CheckChildrenNames(env.local(), root, names);
-
- const v8::CpuProfileNode* startNode = GetChild(env.local(), root, "start");
- CHECK_EQ(1, startNode->GetChildrenCount());
-
- const v8::CpuProfileNode* fooNode = GetChild(env.local(), startNode, "foo");
- CHECK_EQ(3, fooNode->GetChildrenCount());
-
- const char* barBranch[] = { "bar", "delay", "loop" };
- CheckSimpleBranch(env.local(), fooNode, barBranch, arraysize(barBranch));
- const char* bazBranch[] = { "baz", "delay", "loop" };
- CheckSimpleBranch(env.local(), fooNode, bazBranch, arraysize(bazBranch));
- const char* delayBranch[] = { "delay", "loop" };
- CheckSimpleBranch(env.local(), fooNode, delayBranch, arraysize(delayBranch));
+ const char* bar_branch[] = {"bar", "delay", "loop"};
+ CheckSimpleBranch(env.local(), foo_node, bar_branch, arraysize(bar_branch));
+ const char* baz_branch[] = {"baz", "delay", "loop"};
+ CheckSimpleBranch(env.local(), foo_node, baz_branch, arraysize(baz_branch));
+ const char* delay_branch[] = {"delay", "loop"};
+ CheckSimpleBranch(env.local(), foo_node, delay_branch,
+ arraysize(delay_branch));
profile->Delete();
}
-
static const char* hot_deopt_no_frame_entry_test_source =
-"function foo(a, b) {\n"
-" try {\n"
-" return a + b;\n"
-" } catch (e) { }\n"
-"}\n"
-"function start(timeout) {\n"
-" var start = Date.now();\n"
-" do {\n"
-" for (var i = 1; i < 1000; ++i) foo(1, i);\n"
-" var duration = Date.now() - start;\n"
-" } while (duration < timeout);\n"
-" return duration;\n"
-"}\n";
+ "%NeverOptimizeFunction(foo);\n"
+ "%NeverOptimizeFunction(start);\n"
+ "function foo(a, b) {\n"
+ " return a + b;\n"
+ "}\n"
+ "function start(timeout) {\n"
+ " var start = Date.now();\n"
+ " do {\n"
+ " for (var i = 1; i < 1000; ++i) foo(1, i);\n"
+ " var duration = Date.now() - start;\n"
+ " } while (duration < timeout);\n"
+ " return duration;\n"
+ "}\n";
// Check that the profile tree for the script above will look like the
// following:
@@ -652,10 +605,11 @@ static const char* hot_deopt_no_frame_entry_test_source =
// 2 2 (program) [-1]
// 6 6 (garbage collector) [-1]
//
-// The test checks no FP ranges are present in a deoptimized funcion.
+// The test checks no FP ranges are present in a deoptimized function.
// If 'foo' has no ranges the samples falling into the prologue will miss the
// 'start' function on the stack, so 'foo' will be attached to the (root).
TEST(HotDeoptNoFrameEntry) {
+ i::FLAG_allow_natives_syntax = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -666,28 +620,19 @@ TEST(HotDeoptNoFrameEntry) {
v8::Local<v8::Value> args[] = {
v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 200);
+ RunProfiler(env.local(), function, args, arraysize(args), 1000);
function->Call(env.local(), env->Global(), arraysize(args), args)
.ToLocalChecked();
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
-
- ScopedVector<v8::Local<v8::String> > names(3);
- names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8_str(ProfileGenerator::kProgramEntryName);
- names[2] = v8_str("start");
- CheckChildrenNames(env.local(), root, names);
-
- const v8::CpuProfileNode* startNode = GetChild(env.local(), root, "start");
- CHECK_EQ(1, startNode->GetChildrenCount());
-
- GetChild(env.local(), startNode, "foo");
+ const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
+ GetChild(env.local(), start_node, "foo");
profile->Delete();
}
-
TEST(CollectCpuProfileSamples) {
+ i::FLAG_allow_natives_syntax = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -698,7 +643,7 @@ TEST(CollectCpuProfileSamples) {
v8::Local<v8::Value> args[] = {
v8::Integer::New(env->GetIsolate(), profiling_interval_ms)};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 200, true);
+ RunProfiler(env.local(), function, args, arraysize(args), 1000, 0, true);
CHECK_LE(200, profile->GetSamplesCount());
uint64_t end_time = profile->GetEndTime();
@@ -715,15 +660,18 @@ TEST(CollectCpuProfileSamples) {
profile->Delete();
}
-
-static const char* cpu_profiler_test_source2 = "function loop() {}\n"
-"function delay() { loop(); }\n"
-"function start(count) {\n"
-" var k = 0;\n"
-" do {\n"
-" delay();\n"
-" } while (++k < count*100*1000);\n"
-"}\n";
+static const char* cpu_profiler_test_source2 =
+ "%NeverOptimizeFunction(loop);\n"
+ "%NeverOptimizeFunction(delay);\n"
+ "%NeverOptimizeFunction(start);\n"
+ "function loop() {}\n"
+ "function delay() { loop(); }\n"
+ "function start(duration) {\n"
+ " var start = Date.now();\n"
+ " do {\n"
+ " for (var i = 0; i < 10000; ++i) delay();\n"
+ " } while (Date.now() - start < duration);\n"
+ "}";
// Check that the profile tree doesn't contain unexpected traces:
// - 'loop' can be called only by 'delay'
@@ -737,47 +685,28 @@ static const char* cpu_profiler_test_source2 = "function loop() {}\n"
// 16 16 loop [-1] #5
// 14 14 (program) [-1] #2
TEST(SampleWhenFrameIsNotSetup) {
+ i::FLAG_allow_natives_syntax = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
CompileRun(cpu_profiler_test_source2);
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
- int32_t repeat_count = 100;
-#if defined(USE_SIMULATOR)
- // Simulators are much slower.
- repeat_count = 1;
-#endif
+ int32_t duration_ms = 100;
v8::Local<v8::Value> args[] = {
- v8::Integer::New(env->GetIsolate(), repeat_count)};
+ v8::Integer::New(env->GetIsolate(), duration_ms)};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 100);
+ RunProfiler(env.local(), function, args, arraysize(args), 1000);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
-
- ScopedVector<v8::Local<v8::String> > names(3);
- names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8_str(ProfileGenerator::kProgramEntryName);
- names[2] = v8_str("start");
- CheckChildrenNames(env.local(), root, names);
-
- const v8::CpuProfileNode* startNode = FindChild(env.local(), root, "start");
- // On slow machines there may be no meaningfull samples at all, skip the
- // check there.
- if (startNode && startNode->GetChildrenCount() > 0) {
- CHECK_EQ(1, startNode->GetChildrenCount());
- const v8::CpuProfileNode* delayNode =
- GetChild(env.local(), startNode, "delay");
- if (delayNode->GetChildrenCount() > 0) {
- CHECK_EQ(1, delayNode->GetChildrenCount());
- GetChild(env.local(), delayNode, "loop");
- }
- }
+ const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
+ const v8::CpuProfileNode* delay_node =
+ GetChild(env.local(), start_node, "delay");
+ GetChild(env.local(), delay_node, "loop");
profile->Delete();
}
-
static const char* native_accessor_test_source = "function start(count) {\n"
" for (var i = 0; i < count; i++) {\n"
" var o = instance.foo;\n"
@@ -785,7 +714,6 @@ static const char* native_accessor_test_source = "function start(count) {\n"
" }\n"
"}\n";
-
class TestApiCallbacks {
public:
explicit TestApiCallbacks(int min_duration_ms)
@@ -794,19 +722,19 @@ class TestApiCallbacks {
static void Getter(v8::Local<v8::String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
- TestApiCallbacks* data = fromInfo(info);
+ TestApiCallbacks* data = FromInfo(info);
data->Wait();
}
static void Setter(v8::Local<v8::String> name,
v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) {
- TestApiCallbacks* data = fromInfo(info);
+ TestApiCallbacks* data = FromInfo(info);
data->Wait();
}
static void Callback(const v8::FunctionCallbackInfo<v8::Value>& info) {
- TestApiCallbacks* data = fromInfo(info);
+ TestApiCallbacks* data = FromInfo(info);
data->Wait();
}
@@ -823,8 +751,8 @@ class TestApiCallbacks {
}
}
- template<typename T>
- static TestApiCallbacks* fromInfo(const T& info) {
+ template <typename T>
+ static TestApiCallbacks* FromInfo(const T& info) {
void* data = v8::External::Cast(*info.Data())->Value();
return reinterpret_cast<TestApiCallbacks*>(data);
}
@@ -865,12 +793,12 @@ TEST(NativeAccessorUninitializedIC) {
int32_t repeat_count = 1;
v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 180);
+ RunProfiler(env.local(), function, args, arraysize(args), 0, 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* startNode = GetChild(env.local(), root, "start");
- GetChild(env.local(), startNode, "get foo");
- GetChild(env.local(), startNode, "set foo");
+ const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
+ GetChild(env.local(), start_node, "get foo");
+ GetChild(env.local(), start_node, "set foo");
profile->Delete();
}
@@ -918,12 +846,12 @@ TEST(NativeAccessorMonomorphicIC) {
int32_t repeat_count = 100;
v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 200);
+ RunProfiler(env.local(), function, args, arraysize(args), 0, 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* startNode = GetChild(env.local(), root, "start");
- GetChild(env.local(), startNode, "get foo");
- GetChild(env.local(), startNode, "set foo");
+ const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
+ GetChild(env.local(), start_node, "get foo");
+ GetChild(env.local(), start_node, "set foo");
profile->Delete();
}
@@ -969,11 +897,11 @@ TEST(NativeMethodUninitializedIC) {
int32_t repeat_count = 1;
v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 100);
+ RunProfiler(env.local(), function, args, arraysize(args), 0, 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* startNode = GetChild(env.local(), root, "start");
- GetChild(env.local(), startNode, "fooMethod");
+ const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
+ GetChild(env.local(), start_node, "fooMethod");
profile->Delete();
}
@@ -1023,12 +951,12 @@ TEST(NativeMethodMonomorphicIC) {
int32_t repeat_count = 100;
v8::Local<v8::Value> args[] = {v8::Integer::New(isolate, repeat_count)};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 100);
+ RunProfiler(env.local(), function, args, arraysize(args), 0, 200);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
GetChild(env.local(), root, "start");
- const v8::CpuProfileNode* startNode = GetChild(env.local(), root, "start");
- GetChild(env.local(), startNode, "fooMethod");
+ const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
+ GetChild(env.local(), start_node, "fooMethod");
profile->Delete();
}
@@ -1052,18 +980,12 @@ TEST(BoundFunctionCall) {
CompileRun(bound_function_test_source);
v8::Local<v8::Function> function = GetFunction(env, "start");
- v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
+ v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- ScopedVector<v8::Local<v8::String> > names(3);
- names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8_str(ProfileGenerator::kProgramEntryName);
- names[2] = v8_str("start");
- // Don't allow |foo| node to be at the top level.
- CheckChildrenNames(env, root, names);
- const v8::CpuProfileNode* startNode = GetChild(env, root, "start");
- GetChild(env, startNode, "foo");
+ const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
+ GetChild(env, start_node, "foo");
profile->Delete();
}
@@ -1166,18 +1088,21 @@ TEST(TickLines) {
CHECK_EQ(hit_count, value);
}
-
-static const char* call_function_test_source = "function bar(iterations) {\n"
-"}\n"
-"function start(duration) {\n"
-" var start = Date.now();\n"
-" while (Date.now() - start < duration) {\n"
-" try {\n"
-" bar.call(this, 10 * 1000);\n"
-" } catch(e) {}\n"
-" }\n"
-"}";
-
+static const char* call_function_test_source =
+ "%NeverOptimizeFunction(bar);\n"
+ "%NeverOptimizeFunction(start);\n"
+ "function bar(n) {\n"
+ " var s = 0;\n"
+ " for (var i = 0; i < n; i++) s += i * i * i;\n"
+ " return s;\n"
+ "}\n"
+ "function start(duration) {\n"
+ " var start = Date.now();\n"
+ " do {\n"
+ " for (var i = 0; i < 100; ++i)\n"
+ " bar.call(this, 1000);\n"
+ " } while (Date.now() - start < duration);\n"
+ "}";
// Test that if we sampled thread when it was inside FunctionCall builtin then
// its caller frame will be '(unresolved function)' as we have no reliable way
@@ -1192,6 +1117,7 @@ static const char* call_function_test_source = "function bar(iterations) {\n"
// 1 1 bar [-1] #7
// 19 19 (program) [-1] #2
TEST(FunctionCallSample) {
+ i::FLAG_allow_natives_syntax = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -1206,60 +1132,38 @@ TEST(FunctionCallSample) {
v8::Local<v8::Value> args[] = {
v8::Integer::New(env->GetIsolate(), duration_ms)};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 100);
+ RunProfiler(env.local(), function, args, arraysize(args), 1000);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- {
- ScopedVector<v8::Local<v8::String> > names(4);
- names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8_str(ProfileGenerator::kProgramEntryName);
- names[2] = v8_str("start");
- names[3] = v8_str(i::ProfileGenerator::kUnresolvedFunctionName);
- // Don't allow |bar| and |call| nodes to be at the top level.
- CheckChildrenNames(env.local(), root, names);
- }
-
- // In case of GC stress tests all samples may be in GC phase and there
- // won't be |start| node in the profiles.
- bool is_gc_stress_testing =
- (i::FLAG_gc_interval != -1) || i::FLAG_stress_compaction;
- const v8::CpuProfileNode* startNode = FindChild(env.local(), root, "start");
- CHECK(is_gc_stress_testing || startNode);
- if (startNode) {
- ScopedVector<v8::Local<v8::String> > names(2);
- names[0] = v8_str("bar");
- names[1] = v8_str("call");
- CheckChildrenNames(env.local(), startNode, names);
- }
+ const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
+ GetChild(env.local(), start_node, "bar");
- const v8::CpuProfileNode* unresolvedNode = FindChild(
+ const v8::CpuProfileNode* unresolved_node = FindChild(
env.local(), root, i::ProfileGenerator::kUnresolvedFunctionName);
- if (unresolvedNode) {
- ScopedVector<v8::Local<v8::String> > names(1);
- names[0] = v8_str("call");
- CheckChildrenNames(env.local(), unresolvedNode, names);
- }
+ CHECK(!unresolved_node || GetChild(env.local(), unresolved_node, "call"));
profile->Delete();
}
-
static const char* function_apply_test_source =
- "function bar(iterations) {\n"
+ "%NeverOptimizeFunction(bar);\n"
+ "%NeverOptimizeFunction(test);\n"
+ "%NeverOptimizeFunction(start);\n"
+ "function bar(n) {\n"
+ " var s = 0;\n"
+ " for (var i = 0; i < n; i++) s += i * i * i;\n"
+ " return s;\n"
"}\n"
"function test() {\n"
- " bar.apply(this, [10 * 1000]);\n"
+ " bar.apply(this, [1000]);\n"
"}\n"
"function start(duration) {\n"
" var start = Date.now();\n"
- " while (Date.now() - start < duration) {\n"
- " try {\n"
- " test();\n"
- " } catch(e) {}\n"
- " }\n"
+ " do {\n"
+ " for (var i = 0; i < 100; ++i) test();\n"
+ " } while (Date.now() - start < duration);\n"
"}";
-
// [Top down]:
// 94 0 (root) [-1] #0 1
// 2 2 (garbage collector) [-1] #0 7
@@ -1268,9 +1172,9 @@ static const char* function_apply_test_source =
// 1 1 apply [-1] #0 9
// 32 21 test [-1] #16 4
// 2 2 bar [-1] #16 6
-// 9 9 apply [-1] #0 5
// 10 10 (program) [-1] #0 2
TEST(FunctionApplySample) {
+ i::FLAG_allow_natives_syntax = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -1282,64 +1186,32 @@ TEST(FunctionApplySample) {
v8::Integer::New(env->GetIsolate(), duration_ms)};
v8::CpuProfile* profile =
- RunProfiler(env.local(), function, args, arraysize(args), 100);
+ RunProfiler(env.local(), function, args, arraysize(args), 1000);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- {
- ScopedVector<v8::Local<v8::String> > names(3);
- names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8_str(ProfileGenerator::kProgramEntryName);
- names[2] = v8_str("start");
- // Don't allow |test|, |bar| and |apply| nodes to be at the top level.
- CheckChildrenNames(env.local(), root, names);
- }
+ const v8::CpuProfileNode* start_node = GetChild(env.local(), root, "start");
+ const v8::CpuProfileNode* test_node =
+ GetChild(env.local(), start_node, "test");
+ GetChild(env.local(), test_node, "bar");
- const v8::CpuProfileNode* startNode = FindChild(env.local(), root, "start");
- if (startNode) {
- {
- ScopedVector<v8::Local<v8::String> > names(2);
- names[0] = v8_str("test");
- names[1] = v8_str(ProfileGenerator::kUnresolvedFunctionName);
- CheckChildrenNames(env.local(), startNode, names);
- }
-
- const v8::CpuProfileNode* testNode =
- FindChild(env.local(), startNode, "test");
- if (testNode) {
- ScopedVector<v8::Local<v8::String> > names(3);
- names[0] = v8_str("bar");
- names[1] = v8_str("apply");
- // apply calls "get length" before invoking the function itself
- // and we may get hit into it.
- names[2] = v8_str("get length");
- CheckChildrenNames(env.local(), testNode, names);
- }
-
- if (const v8::CpuProfileNode* unresolvedNode =
- FindChild(env.local(), startNode,
- ProfileGenerator::kUnresolvedFunctionName)) {
- ScopedVector<v8::Local<v8::String> > names(1);
- names[0] = v8_str("apply");
- CheckChildrenNames(env.local(), unresolvedNode, names);
- GetChild(env.local(), unresolvedNode, "apply");
- }
- }
+ const v8::CpuProfileNode* unresolved_node = FindChild(
+ env.local(), start_node, ProfileGenerator::kUnresolvedFunctionName);
+ CHECK(!unresolved_node || GetChild(env.local(), unresolved_node, "apply"));
profile->Delete();
}
-
static const char* cpu_profiler_deep_stack_test_source =
-"function foo(n) {\n"
-" if (n)\n"
-" foo(n - 1);\n"
-" else\n"
-" startProfiling('my_profile');\n"
-"}\n"
-"function start() {\n"
-" foo(250);\n"
-"}\n";
-
+ "function foo(n) {\n"
+ " if (n)\n"
+ " foo(n - 1);\n"
+ " else\n"
+ " collectSample();\n"
+ "}\n"
+ "function start() {\n"
+ " startProfiling('my_profile');\n"
+ " foo(250);\n"
+ "}\n";
// Check a deep stack
//
@@ -1350,8 +1222,7 @@ static const char* cpu_profiler_deep_stack_test_source =
// 0 foo 21 #4 no reason
// 0 foo 21 #5 no reason
// ....
-// 0 foo 21 #253 no reason
-// 1 startProfiling 0 #254
+// 0 foo 21 #254 no reason
TEST(CpuProfileDeepStack) {
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
@@ -1369,37 +1240,29 @@ TEST(CpuProfileDeepStack) {
reinterpret_cast<i::CpuProfile*>(profile)->Print();
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- {
- ScopedVector<v8::Local<v8::String> > names(3);
- names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8_str(ProfileGenerator::kProgramEntryName);
- names[2] = v8_str("start");
- CheckChildrenNames(env, root, names);
- }
-
const v8::CpuProfileNode* node = GetChild(env, root, "start");
- for (int i = 0; i < 250; ++i) {
+ for (int i = 0; i <= 250; ++i) {
node = GetChild(env, node, "foo");
}
- // TODO(alph):
- // In theory there must be one more 'foo' and a 'startProfiling' nodes,
- // but due to unstable top frame extraction these might be missing.
+ CHECK(!FindChild(env, node, "foo"));
profile->Delete();
}
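
With the loop bound changed from i < 250 to i <= 250, the walk now covers all 251 foo frames: start calls foo(250), which recurses down to n == 0 before collectSample() fires, giving one foo frame for each n from 250 through 0. The trailing CHECK(!FindChild(env, node, "foo")) then asserts there is no 252nd frame; since the sample is now collected deterministically, the old allowance for a missing top frame is gone.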
-
static const char* js_native_js_test_source =
- "function foo() {\n"
- " startProfiling('my_profile');\n"
+ "%NeverOptimizeFunction(foo);\n"
+ "%NeverOptimizeFunction(bar);\n"
+ "%NeverOptimizeFunction(start);\n"
+ "function foo(n) {\n"
+ " var s = 0;\n"
+ " for (var i = 0; i < n; i++) s += i * i * i;\n"
+ " return s;\n"
"}\n"
"function bar() {\n"
- " try { foo(); } catch(e) {}\n"
+ " foo(1000);\n"
"}\n"
"function start() {\n"
- " try {\n"
- " CallJsFunction(bar);\n"
- " } catch(e) {}\n"
+ " CallJsFunction(bar);\n"
"}";
static void CallJsFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {
@@ -1410,7 +1273,6 @@ static void CallJsFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {
.ToLocalChecked();
}
-
// [Top down]:
// 58 0 (root) #0 1
// 2 2 (program) #0 2
@@ -1419,6 +1281,7 @@ static void CallJsFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {
// 55 1 bar #16 5
// 54 54 foo #16 6
TEST(JsNativeJsSample) {
+ i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
@@ -1433,47 +1296,35 @@ TEST(JsNativeJsSample) {
CompileRun(js_native_js_test_source);
v8::Local<v8::Function> function = GetFunction(env, "start");
- v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
+ v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 1000);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- {
- ScopedVector<v8::Local<v8::String> > names(3);
- names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8_str(ProfileGenerator::kProgramEntryName);
- names[2] = v8_str("start");
- CheckChildrenNames(env, root, names);
- }
-
- const v8::CpuProfileNode* startNode = GetChild(env, root, "start");
- CHECK_EQ(1, startNode->GetChildrenCount());
- const v8::CpuProfileNode* nativeFunctionNode =
- GetChild(env, startNode, "CallJsFunction");
-
- CHECK_EQ(1, nativeFunctionNode->GetChildrenCount());
- const v8::CpuProfileNode* barNode = GetChild(env, nativeFunctionNode, "bar");
-
- CHECK_EQ(1, barNode->GetChildrenCount());
- GetChild(env, barNode, "foo");
+ const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
+ const v8::CpuProfileNode* native_node =
+ GetChild(env, start_node, "CallJsFunction");
+ const v8::CpuProfileNode* bar_node = GetChild(env, native_node, "bar");
+ GetChild(env, bar_node, "foo");
profile->Delete();
}
-
static const char* js_native_js_runtime_js_test_source =
- "function foo() {\n"
- " startProfiling('my_profile');\n"
+ "%NeverOptimizeFunction(foo);\n"
+ "%NeverOptimizeFunction(bar);\n"
+ "%NeverOptimizeFunction(start);\n"
+ "function foo(n) {\n"
+ " var s = 0;\n"
+ " for (var i = 0; i < n; i++) s += i * i * i;\n"
+ " return s;\n"
"}\n"
"var bound = foo.bind(this);\n"
"function bar() {\n"
- " try { bound(); } catch(e) {}\n"
+ " bound(1000);\n"
"}\n"
"function start() {\n"
- " try {\n"
- " CallJsFunction(bar);\n"
- " } catch(e) {}\n"
+ " CallJsFunction(bar);\n"
"}";
-
// [Top down]:
// 57 0 (root) #0 1
// 55 1 start #16 3
@@ -1482,6 +1333,7 @@ static const char* js_native_js_runtime_js_test_source =
// 51 51 foo #16 6
// 2 2 (program) #0 2
TEST(JsNativeJsRuntimeJsSample) {
+ i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
@@ -1495,57 +1347,39 @@ TEST(JsNativeJsRuntimeJsSample) {
CompileRun(js_native_js_runtime_js_test_source);
v8::Local<v8::Function> function = GetFunction(env, "start");
-
- v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
+ v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 1000);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- ScopedVector<v8::Local<v8::String> > names(3);
- names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8_str(ProfileGenerator::kProgramEntryName);
- names[2] = v8_str("start");
- CheckChildrenNames(env, root, names);
-
- const v8::CpuProfileNode* startNode = GetChild(env, root, "start");
- CHECK_EQ(1, startNode->GetChildrenCount());
- const v8::CpuProfileNode* nativeFunctionNode =
- GetChild(env, startNode, "CallJsFunction");
-
- CHECK_EQ(1, nativeFunctionNode->GetChildrenCount());
- const v8::CpuProfileNode* barNode = GetChild(env, nativeFunctionNode, "bar");
-
- // The child is in fact a bound foo.
- // A bound function has a wrapper that may make calls to
- // other functions e.g. "get length".
- CHECK_LE(1, barNode->GetChildrenCount());
- CHECK_GE(2, barNode->GetChildrenCount());
- GetChild(env, barNode, "foo");
+ const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
+ const v8::CpuProfileNode* native_node =
+ GetChild(env, start_node, "CallJsFunction");
+ const v8::CpuProfileNode* bar_node = GetChild(env, native_node, "bar");
+ GetChild(env, bar_node, "foo");
profile->Delete();
}
-
static void CallJsFunction2(const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::base::OS::Print("In CallJsFunction2\n");
CallJsFunction(info);
}
-
static const char* js_native1_js_native2_js_test_source =
+ "%NeverOptimizeFunction(foo);\n"
+ "%NeverOptimizeFunction(bar);\n"
+ "%NeverOptimizeFunction(start);\n"
"function foo() {\n"
- " try {\n"
- " startProfiling('my_profile');\n"
- " } catch(e) {}\n"
+ " var s = 0;\n"
+ " for (var i = 0; i < 1000; i++) s += i * i * i;\n"
+ " return s;\n"
"}\n"
"function bar() {\n"
" CallJsFunction2(foo);\n"
"}\n"
"function start() {\n"
- " try {\n"
- " CallJsFunction1(bar);\n"
- " } catch(e) {}\n"
+ " CallJsFunction1(bar);\n"
"}";
-
// [Top down]:
// 57 0 (root) #0 1
// 55 1 start #16 3
@@ -1555,14 +1389,15 @@ static const char* js_native1_js_native2_js_test_source =
// 54 54 foo #16 7
// 2 2 (program) #0 2
TEST(JsNative1JsNative2JsSample) {
+ i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
- v8::Local<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(
- env->GetIsolate(), CallJsFunction);
v8::Local<v8::Function> func1 =
- func_template->GetFunction(env).ToLocalChecked();
+ v8::FunctionTemplate::New(env->GetIsolate(), CallJsFunction)
+ ->GetFunction(env)
+ .ToLocalChecked();
func1->SetName(v8_str("CallJsFunction1"));
env->Global()->Set(env, v8_str("CallJsFunction1"), func1).FromJust();
@@ -1576,38 +1411,109 @@ TEST(JsNative1JsNative2JsSample) {
CompileRun(js_native1_js_native2_js_test_source);
v8::Local<v8::Function> function = GetFunction(env, "start");
- v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
+ v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 1000);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- ScopedVector<v8::Local<v8::String> > names(3);
- names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8_str(ProfileGenerator::kProgramEntryName);
- names[2] = v8_str("start");
- CheckChildrenNames(env, root, names);
+ const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
+ const v8::CpuProfileNode* native_node1 =
+ GetChild(env, start_node, "CallJsFunction1");
+ const v8::CpuProfileNode* bar_node = GetChild(env, native_node1, "bar");
+ const v8::CpuProfileNode* native_node2 =
+ GetChild(env, bar_node, "CallJsFunction2");
+ GetChild(env, native_node2, "foo");
- const v8::CpuProfileNode* startNode = GetChild(env, root, "start");
- CHECK_EQ(1, startNode->GetChildrenCount());
- const v8::CpuProfileNode* nativeNode1 =
- GetChild(env, startNode, "CallJsFunction1");
+ profile->Delete();
+}
- CHECK_EQ(1, nativeNode1->GetChildrenCount());
- const v8::CpuProfileNode* barNode = GetChild(env, nativeNode1, "bar");
+static const char* js_force_collect_sample_source =
+ "function start() {\n"
+ " CallCollectSample();\n"
+ "}";
- CHECK_EQ(1, barNode->GetChildrenCount());
- const v8::CpuProfileNode* nativeNode2 =
- GetChild(env, barNode, "CallJsFunction2");
+TEST(CollectSampleAPI) {
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
- CHECK_EQ(1, nativeNode2->GetChildrenCount());
- GetChild(env, nativeNode2, "foo");
+ v8::Local<v8::FunctionTemplate> func_template =
+ v8::FunctionTemplate::New(env->GetIsolate(), CallCollectSample);
+ v8::Local<v8::Function> func =
+ func_template->GetFunction(env).ToLocalChecked();
+ func->SetName(v8_str("CallCollectSample"));
+ env->Global()->Set(env, v8_str("CallCollectSample"), func).FromJust();
+
+ CompileRun(js_force_collect_sample_source);
+ v8::Local<v8::Function> function = GetFunction(env, "start");
+ v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
+
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
+ CHECK_LE(1, start_node->GetChildrenCount());
+ GetChild(env, start_node, "CallCollectSample");
profile->Delete();
}
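
CallCollectSample is a cctest helper bound into the context a few lines above. Assuming it forwards directly to the CollectSample() method this upgrade adds to v8::CpuProfiler (include/v8-profiler.h appears in the diffstat), a minimal version looks like:

    // Sketch only; the real helper lives elsewhere in this file. Forces
    // one stack sample at a deterministic point so the test can assert on
    // the resulting start -> CallCollectSample nodes.
    static void CallCollectSample(
        const v8::FunctionCallbackInfo<v8::Value>& info) {
      info.GetIsolate()->GetCpuProfiler()->CollectSample();
    }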
+static const char* js_native_js_runtime_multiple_test_source =
+ "%NeverOptimizeFunction(foo);\n"
+ "%NeverOptimizeFunction(bar);\n"
+ "%NeverOptimizeFunction(start);\n"
+ "function foo() {\n"
+ " return Math.sin(Math.random());\n"
+ "}\n"
+ "var bound = foo.bind(this);\n"
+ "function bar() {\n"
+ " return bound();\n"
+ "}\n"
+ "function start() {\n"
+ " startProfiling('my_profile');\n"
+ " var startTime = Date.now();\n"
+ " do {\n"
+ " CallJsFunction(bar);\n"
+ " } while (Date.now() - startTime < 200);\n"
+ "}";
+
+// The test checks multiple entrances/exits between JS and native code.
+//
+// [Top down]:
+// (root) #0 1
+// start #16 3
+// CallJsFunction #0 4
+// bar #16 5
+// foo #16 6
+// (program) #0 2
+TEST(JsNativeJsRuntimeJsSampleMultiple) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
+
+ v8::Local<v8::FunctionTemplate> func_template =
+ v8::FunctionTemplate::New(env->GetIsolate(), CallJsFunction);
+ v8::Local<v8::Function> func =
+ func_template->GetFunction(env).ToLocalChecked();
+ func->SetName(v8_str("CallJsFunction"));
+ env->Global()->Set(env, v8_str("CallJsFunction"), func).FromJust();
+
+ CompileRun(js_native_js_runtime_multiple_test_source);
+ v8::Local<v8::Function> function = GetFunction(env, "start");
+
+ v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 500, 500);
+
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+ const v8::CpuProfileNode* start_node = GetChild(env, root, "start");
+ const v8::CpuProfileNode* native_node =
+ GetChild(env, start_node, "CallJsFunction");
+ const v8::CpuProfileNode* bar_node = GetChild(env, native_node, "bar");
+ GetChild(env, bar_node, "foo");
+
+ profile->Delete();
+}
// [Top down]:
-// 6 0 (root) #0 1
-// 3 3 (program) #0 2
-// 3 3 (idle) #0 3
+// 0 (root) #0 1
+// 2 (program) #0 2
+// 3 (idle) #0 3
TEST(IdleTime) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -1618,17 +1524,14 @@ TEST(IdleTime) {
i::Isolate* isolate = CcTest::i_isolate();
i::ProfilerEventsProcessor* processor = isolate->cpu_profiler()->processor();
- processor->AddCurrentStack(isolate);
+ processor->AddCurrentStack(isolate, true);
cpu_profiler->SetIdle(true);
-
for (int i = 0; i < 3; i++) {
- processor->AddCurrentStack(isolate);
+ processor->AddCurrentStack(isolate, true);
}
-
cpu_profiler->SetIdle(false);
- processor->AddCurrentStack(isolate);
-
+ processor->AddCurrentStack(isolate, true);
v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
CHECK(profile);
@@ -1636,26 +1539,19 @@ TEST(IdleTime) {
reinterpret_cast<i::CpuProfile*>(profile)->Print();
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- ScopedVector<v8::Local<v8::String> > names(3);
- names[0] = v8_str(ProfileGenerator::kGarbageCollectorEntryName);
- names[1] = v8_str(ProfileGenerator::kProgramEntryName);
- names[2] = v8_str(ProfileGenerator::kIdleEntryName);
- CheckChildrenNames(env.local(), root, names);
-
- const v8::CpuProfileNode* programNode =
+ const v8::CpuProfileNode* program_node =
GetChild(env.local(), root, ProfileGenerator::kProgramEntryName);
- CHECK_EQ(0, programNode->GetChildrenCount());
- CHECK_GE(programNode->GetHitCount(), 3u);
+ CHECK_EQ(0, program_node->GetChildrenCount());
+ CHECK_GE(program_node->GetHitCount(), 2u);
- const v8::CpuProfileNode* idleNode =
+ const v8::CpuProfileNode* idle_node =
GetChild(env.local(), root, ProfileGenerator::kIdleEntryName);
- CHECK_EQ(0, idleNode->GetChildrenCount());
- CHECK_GE(idleNode->GetHitCount(), 3u);
+ CHECK_EQ(0, idle_node->GetChildrenCount());
+ CHECK_GE(idle_node->GetHitCount(), 3u);
profile->Delete();
}
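
SetIdle(bool) is the embedder's way of telling the profiler that no script is executing, which is what routes the hits counted above into the (idle) node. A sketch of the intended shape in an embedder message loop; Task, WaitForNextTask, and DispatchTask are hypothetical embedder constructs, not V8 API:

    // Bracket the blocking wait so samples taken there are attributed to
    // (idle) rather than (program).
    void PumpMessageLoopOnce(v8::Isolate* isolate) {
      v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
      profiler->SetIdle(true);          // entering the idle wait
      Task* task = WaitForNextTask();   // hypothetical embedder call
      profiler->SetIdle(false);         // about to run JS again
      DispatchTask(isolate, task);      // hypothetical embedder call
    }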
-
static void CheckFunctionDetails(v8::Isolate* isolate,
const v8::CpuProfileNode* node,
const char* name, const char* script_name,
@@ -1672,17 +1568,21 @@ static void CheckFunctionDetails(v8::Isolate* isolate,
TEST(FunctionDetails) {
+ i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
v8::Context::Scope context_scope(env);
v8::Local<v8::Script> script_a = CompileWithOrigin(
- " function foo\n() { try { bar(); } catch(e) {} }\n"
+ "%NeverOptimizeFunction(foo);\n"
+ "%NeverOptimizeFunction(bar);\n"
+ " function foo\n() { bar(); }\n"
" function bar() { startProfiling(); }\n",
"script_a");
script_a->Run(env).ToLocalChecked();
v8::Local<v8::Script> script_b = CompileWithOrigin(
- "\n\n function baz() { try { foo(); } catch(e) {} }\n"
+ "%NeverOptimizeFunction(baz);"
+ "\n\n function baz() { foo(); }\n"
"\n\nbaz();\n"
"stopProfiling();\n",
"script_b");
@@ -1706,10 +1606,10 @@ TEST(FunctionDetails) {
script_b->GetUnboundScript()->GetId(), 3, 16);
const v8::CpuProfileNode* foo = GetChild(env, baz, "foo");
CheckFunctionDetails(env->GetIsolate(), foo, "foo", "script_a",
- script_a->GetUnboundScript()->GetId(), 2, 1);
+ script_a->GetUnboundScript()->GetId(), 4, 1);
const v8::CpuProfileNode* bar = GetChild(env, foo, "bar");
CheckFunctionDetails(env->GetIsolate(), bar, "bar", "script_a",
- script_a->GetUnboundScript()->GetId(), 3, 14);
+ script_a->GetUnboundScript()->GetId(), 5, 14);
}
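
The new expected positions follow directly from the source edit above: two %NeverOptimizeFunction( lines are prepended to script_a, so foo moves from line 2, column 1 to line 4, column 1 and bar from line 3, column 14 to line 5, column 14.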
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 914bda1d4e..dd483c06d5 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -302,18 +302,6 @@ static void ChangeScriptBreakPointConditionFromJS(v8::Isolate* isolate,
}
-static void ChangeScriptBreakPointIgnoreCountFromJS(v8::Isolate* isolate,
- int break_point_number,
- int ignoreCount) {
- EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> buffer;
- SNPrintF(buffer,
- "debug.Debug.changeScriptBreakPointIgnoreCount(%d, %d)",
- break_point_number, ignoreCount);
- buffer[SMALL_STRING_BUFFER_SIZE - 1] = '\0';
- CompileRunChecked(isolate, buffer.start());
-}
-
-
// Change break on exception.
static void ChangeBreakOnException(bool caught, bool uncaught) {
v8::internal::Debug* debug = CcTest::i_isolate()->debug();
@@ -1717,72 +1705,6 @@ TEST(ConditionalScriptBreakPoint) {
}
-// Test ignore count on script break points.
-TEST(ScriptBreakPointIgnoreCount) {
- break_point_hit_count = 0;
- DebugLocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- env.ExposeDebug();
-
- v8::Debug::SetDebugEventListener(env->GetIsolate(),
- DebugEventBreakPointHitCount);
-
- v8::Local<v8::String> script = v8_str(env->GetIsolate(),
- "function f() {\n"
- " a = 0; // line 1\n"
- "};");
-
- // Compile the script and get function f.
- v8::Local<v8::Context> context = env.context();
- v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str(env->GetIsolate(), "test"));
- v8::Script::Compile(context, script, &origin)
- .ToLocalChecked()
- ->Run(context)
- .ToLocalChecked();
- v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(context, v8_str(env->GetIsolate(), "f"))
- .ToLocalChecked());
-
- // Set script break point on line 1 (in function f).
- int sbp = SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test", 1, 0);
-
- // Call f with different ignores on the script break point.
- break_point_hit_count = 0;
- ChangeScriptBreakPointIgnoreCountFromJS(env->GetIsolate(), sbp, 1);
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
- CHECK_EQ(0, break_point_hit_count);
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
- CHECK_EQ(1, break_point_hit_count);
-
- ChangeScriptBreakPointIgnoreCountFromJS(env->GetIsolate(), sbp, 5);
- break_point_hit_count = 0;
- for (int i = 0; i < 10; i++) {
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
- }
- CHECK_EQ(5, break_point_hit_count);
-
- // Reload the script and get f again checking that the ignore survives.
- v8::Script::Compile(context, script, &origin)
- .ToLocalChecked()
- ->Run(context)
- .ToLocalChecked();
- f = v8::Local<v8::Function>::Cast(
- env->Global()
- ->Get(context, v8_str(env->GetIsolate(), "f"))
- .ToLocalChecked());
-
- break_point_hit_count = 0;
- for (int i = 0; i < 10; i++) {
- f->Call(context, env->Global(), 0, NULL).ToLocalChecked();
- }
- CHECK_EQ(5, break_point_hit_count);
-
- v8::Debug::SetDebugEventListener(env->GetIsolate(), nullptr);
- CheckDebuggerUnloaded(env->GetIsolate());
-}
-
-
// Test that script break points survive when a script is reloaded.
TEST(ScriptBreakPointReload) {
break_point_hit_count = 0;
@@ -4405,7 +4327,6 @@ TEST(DisableBreak) {
CheckDebuggerUnloaded(env->GetIsolate());
}
-
TEST(DisableDebuggerStatement) {
DebugLocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -7396,7 +7317,7 @@ static void DebugEventBreakWithOptimizedStack(
CHECK(argument_name->Equals(context, v8_str(isolate, "count"))
.FromJust());
// Get the value of the first argument in frame i. If the
- // funtion is optimized the value will be undefined, otherwise
+ // function is optimized the value will be undefined, otherwise
// the value will be '1 - i'.
//
// TODO(3141533): We should be able to get the real value for
@@ -8073,3 +7994,81 @@ TEST(NoInterruptsInDebugListener) {
v8::Debug::SetDebugEventListener(env->GetIsolate(), NoInterruptsOnDebugEvent);
CompileRun("void(0);");
}
+
+class TestBreakLocation : public i::BreakLocation {
+ public:
+ using i::BreakLocation::GetIterator;
+ using i::BreakLocation::Iterator;
+};
+
+TEST(BreakLocationIterator) {
+ DebugLocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::Value> result = CompileRun(
+ "function f() {\n"
+ " debugger; \n"
+ " f(); \n"
+ " debugger; \n"
+ "} \n"
+ "f");
+ Handle<i::Object> function_obj = v8::Utils::OpenHandle(*result);
+ Handle<i::JSFunction> function = Handle<i::JSFunction>::cast(function_obj);
+ Handle<i::SharedFunctionInfo> shared(function->shared());
+
+ EnableDebugger(isolate);
+ CHECK(i_isolate->debug()->EnsureDebugInfo(shared, function));
+
+ Handle<i::DebugInfo> debug_info(shared->GetDebugInfo());
+ int code_size = debug_info->abstract_code()->Size();
+
+ bool found_return = false;
+ bool found_call = false;
+ bool found_debugger = false;
+
+ // Test public interface.
+ for (int i = 0; i < code_size; i++) {
+ i::BreakLocation location = i::BreakLocation::FromCodeOffset(debug_info, i);
+ if (location.IsCall()) found_call = true;
+ if (location.IsReturn()) found_return = true;
+ if (location.IsDebuggerStatement()) found_debugger = true;
+ }
+ CHECK(found_call);
+ CHECK(found_return);
+ CHECK(found_debugger);
+
+ // Test underlying implementation.
+ TestBreakLocation::Iterator* iterator =
+ TestBreakLocation::GetIterator(debug_info, i::ALL_BREAK_LOCATIONS);
+ CHECK(iterator->GetBreakLocation().IsDebuggerStatement());
+ CHECK_EQ(7, iterator->GetBreakLocation().position());
+ iterator->Next();
+ CHECK(iterator->GetBreakLocation().IsDebugBreakSlot());
+ CHECK_EQ(22, iterator->GetBreakLocation().position());
+ iterator->Next();
+ CHECK(iterator->GetBreakLocation().IsCall());
+ CHECK_EQ(22, iterator->GetBreakLocation().position());
+ iterator->Next();
+ CHECK(iterator->GetBreakLocation().IsDebuggerStatement());
+ CHECK_EQ(37, iterator->GetBreakLocation().position());
+ iterator->Next();
+ CHECK(iterator->GetBreakLocation().IsReturn());
+ CHECK_EQ(50, iterator->GetBreakLocation().position());
+ iterator->Next();
+ CHECK(iterator->Done());
+ delete iterator;
+
+ iterator = TestBreakLocation::GetIterator(debug_info, i::CALLS_AND_RETURNS);
+ CHECK(iterator->GetBreakLocation().IsCall());
+ CHECK_EQ(22, iterator->GetBreakLocation().position());
+ iterator->Next();
+ CHECK(iterator->GetBreakLocation().IsReturn());
+ CHECK_EQ(50, iterator->GetBreakLocation().position());
+ iterator->Next();
+ CHECK(iterator->Done());
+ delete iterator;
+
+ DisableDebugger(isolate);
+}
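
Note the filtering in the second walk: with i::CALLS_AND_RETURNS the iterator visits only the call at position 22 and the return at position 50, skipping the two debugger statements (positions 7 and 37) and the debug-break slot (position 22) that the i::ALL_BREAK_LOCATIONS walk above reports.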
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index b3b8a0358e..74144f25c6 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -362,6 +362,7 @@ TEST(Type3) {
SET_UP();
if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatureScope scope(&assm, ARMv7);
COMPARE(ubfx(r0, r1, 5, 10),
"e7e902d1 ubfx r0, r1, #5, #10");
COMPARE(ubfx(r1, r0, 5, 10),
@@ -437,6 +438,9 @@ TEST(Type3) {
COMPARE(uxth(r3, r4, 8), "e6ff3474 uxth r3, r4, ror #8");
COMPARE(uxtah(r3, r4, r5, 24), "e6f43c75 uxtah r3, r4, r5, ror #24");
+
+ COMPARE(rbit(r1, r2), "e6ff1f32 rbit r1, r2");
+ COMPARE(rbit(r10, ip), "e6ffaf3c rbit r10, ip");
}
COMPARE(smmla(r0, r1, r2, r3), "e7503211 smmla r0, r1, r2, r3");
@@ -657,18 +661,27 @@ TEST(Vfp) {
COMPARE(vmls(s6, s4, s5, cc),
"3e023a62 vmlscc.f32 s6, s4, s5");
- COMPARE(vcvt_u32_f64(s0, d0),
- "eebc0bc0 vcvt.u32.f64 s0, d0");
- COMPARE(vcvt_s32_f64(s0, d0),
- "eebd0bc0 vcvt.s32.f64 s0, d0");
- COMPARE(vcvt_f64_u32(d0, s1),
- "eeb80b60 vcvt.f64.u32 d0, s1");
- COMPARE(vcvt_f64_s32(d0, s1),
- "eeb80be0 vcvt.f64.s32 d0, s1");
- COMPARE(vcvt_f32_s32(s0, s2),
- "eeb80ac1 vcvt.f32.s32 s0, s2");
- COMPARE(vcvt_f64_s32(d0, 2),
- "eeba0bcf vcvt.f64.s32 d0, d0, #2");
+ COMPARE(vcvt_f32_f64(s31, d15),
+ "eef7fbcf vcvt.f32.f64 s31, d15");
+ COMPARE(vcvt_f32_s32(s30, s29),
+ "eeb8faee vcvt.f32.s32 s30, s29");
+ COMPARE(vcvt_f64_f32(d14, s28),
+ "eeb7eace vcvt.f64.f32 d14, s28");
+ COMPARE(vcvt_f64_s32(d13, s27),
+ "eeb8dbed vcvt.f64.s32 d13, s27");
+ COMPARE(vcvt_f64_u32(d12, s26),
+ "eeb8cb4d vcvt.f64.u32 d12, s26");
+ COMPARE(vcvt_s32_f32(s25, s24),
+ "eefdcacc vcvt.s32.f32 s25, s24");
+ COMPARE(vcvt_s32_f64(s23, d11),
+ "eefdbbcb vcvt.s32.f64 s23, d11");
+ COMPARE(vcvt_u32_f32(s22, s21),
+ "eebcbaea vcvt.u32.f32 s22, s21");
+ COMPARE(vcvt_u32_f64(s20, d10),
+ "eebcabca vcvt.u32.f64 s20, d10");
+
+ COMPARE(vcvt_f64_s32(d9, 2),
+ "eeba9bcf vcvt.f64.s32 d9, d9, #2");
if (CpuFeatures::IsSupported(VFP32DREGS)) {
COMPARE(vmov(d3, d27),
@@ -742,12 +755,27 @@ TEST(Vfp) {
COMPARE(vmla(d16, d17, d18),
"ee410ba2 vmla.f64 d16, d17, d18");
- COMPARE(vcvt_u32_f64(s0, d16),
- "eebc0be0 vcvt.u32.f64 s0, d16");
- COMPARE(vcvt_s32_f64(s0, d16),
- "eebd0be0 vcvt.s32.f64 s0, d16");
- COMPARE(vcvt_f64_u32(d16, s1),
- "eef80b60 vcvt.f64.u32 d16, s1");
+ COMPARE(vcvt_f32_f64(s0, d31),
+ "eeb70bef vcvt.f32.f64 s0, d31");
+ COMPARE(vcvt_f32_s32(s1, s2),
+ "eef80ac1 vcvt.f32.s32 s1, s2");
+ COMPARE(vcvt_f64_f32(d30, s3),
+ "eef7eae1 vcvt.f64.f32 d30, s3");
+ COMPARE(vcvt_f64_s32(d29, s4),
+ "eef8dbc2 vcvt.f64.s32 d29, s4");
+ COMPARE(vcvt_f64_u32(d28, s5),
+ "eef8cb62 vcvt.f64.u32 d28, s5");
+ COMPARE(vcvt_s32_f32(s6, s7),
+ "eebd3ae3 vcvt.s32.f32 s6, s7");
+ COMPARE(vcvt_s32_f64(s8, d27),
+ "eebd4beb vcvt.s32.f64 s8, d27");
+ COMPARE(vcvt_u32_f32(s9, s10),
+ "eefc4ac5 vcvt.u32.f32 s9, s10");
+ COMPARE(vcvt_u32_f64(s11, d26),
+ "eefc5bea vcvt.u32.f64 s11, d26");
+
+ COMPARE(vcvt_f64_s32(d25, 2),
+ "eefa9bcf vcvt.f64.s32 d25, d25, #2");
}
}
@@ -1003,3 +1031,45 @@ TEST(LoadStore) {
VERIFY_RUN();
}
+
+
+TEST(Barrier) {
+ SET_UP();
+
+ if (CpuFeatures::IsSupported(ARMv7)) {
+ CpuFeatureScope scope(&assm, ARMv7);
+
+ COMPARE(dmb(OSHLD),
+ "f57ff051 dmb oshld");
+ COMPARE(dmb(OSHST),
+ "f57ff052 dmb oshst");
+ COMPARE(dmb(OSH),
+ "f57ff053 dmb osh");
+ COMPARE(dmb(NSHLD),
+ "f57ff055 dmb nshld");
+ COMPARE(dmb(NSHST),
+ "f57ff056 dmb nshst");
+ COMPARE(dmb(NSH),
+ "f57ff057 dmb nsh");
+ COMPARE(dmb(ISHLD),
+ "f57ff059 dmb ishld");
+ COMPARE(dmb(ISHST),
+ "f57ff05a dmb ishst");
+ COMPARE(dmb(ISH),
+ "f57ff05b dmb ish");
+ COMPARE(dmb(LD),
+ "f57ff05d dmb ld");
+ COMPARE(dmb(ST),
+ "f57ff05e dmb st");
+ COMPARE(dmb(SY),
+ "f57ff05f dmb sy");
+
+ COMPARE(dsb(ISH),
+ "f57ff04b dsb ish");
+
+ COMPARE(isb(ISH),
+ "f57ff06b isb ish");
+ }
+
+ VERIFY_RUN();
+}
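
The expected machine words above share the fixed prefix 0xf57ff0; only two nibbles vary. A self-checking decomposition of the pattern (file-local sketch, not part of the patch; needs <cstdint>):

    // Encoding asserted above: 0xf57ff000 | op << 4 | option, where op is
    // 4 (dsb), 5 (dmb) or 6 (isb) and the low nibble is the barrier option
    // (SY = 0xf, ISH = 0xb, OSHLD = 0x1, ...).
    constexpr uint32_t BarrierEncoding(uint32_t op, uint32_t option) {
      return 0xf57ff000u | (op << 4) | option;
    }
    static_assert(BarrierEncoding(5, 0xf) == 0xf57ff05f, "dmb sy");
    static_assert(BarrierEncoding(4, 0xb) == 0xf57ff04b, "dsb ish");
    static_assert(BarrierEncoding(6, 0xb) == 0xf57ff06b, "isb ish");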
diff --git a/deps/v8/test/cctest/test-extra.js b/deps/v8/test/cctest/test-extra.js
index dfb6c8012c..b3752d97b2 100644
--- a/deps/v8/test/cctest/test-extra.js
+++ b/deps/v8/test/cctest/test-extra.js
@@ -12,6 +12,15 @@
return binding.runtime(3);
};
+ binding.testFunctionToString = function() {
+ function foo() { return 1; }
+ return foo.toString();
+ };
+
+ binding.testStackTrace = function(f) {
+ return f();
+ }
+
// Exercise all of the extras utils:
// - v8.createPrivateSymbol
// - v8.simpleBind, v8.uncurryThis
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 89456bd6ba..cee3600314 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -12,6 +12,7 @@
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/factory.h"
+#include "src/field-type.h"
#include "src/global-handles.h"
#include "src/ic/stub-cache.h"
#include "src/macro-assembler.h"
@@ -88,7 +89,7 @@ class Expectations {
PropertyType types_[MAX_PROPERTIES];
PropertyAttributes attributes_[MAX_PROPERTIES];
Representation representations_[MAX_PROPERTIES];
- // HeapType for kField, value for DATA_CONSTANT and getter for
+ // FieldType for kField, value for DATA_CONSTANT and getter for
// ACCESSOR_CONSTANT.
Handle<Object> values_[MAX_PROPERTIES];
// Setter for ACCESSOR_CONSTANT.
@@ -142,25 +143,25 @@ class Expectations {
os << "\n";
}
- Handle<HeapType> GetFieldType(int index) {
+ Handle<FieldType> GetFieldType(int index) {
CHECK(index < MAX_PROPERTIES);
CHECK(types_[index] == DATA || types_[index] == ACCESSOR);
- return Handle<HeapType>::cast(values_[index]);
+ return Handle<FieldType>::cast(values_[index]);
}
void SetDataField(int index, PropertyAttributes attrs,
- Representation representation, Handle<HeapType> value) {
+ Representation representation, Handle<FieldType> value) {
Init(index, DATA, attrs, representation, value);
}
void SetDataField(int index, Representation representation,
- Handle<HeapType> value) {
+ Handle<FieldType> value) {
SetDataField(index, attributes_[index], representation, value);
}
void SetAccessorField(int index, PropertyAttributes attrs) {
Init(index, ACCESSOR, attrs, Representation::Tagged(),
- HeapType::Any(isolate_));
+ FieldType::Any(isolate_));
}
void SetAccessorField(int index) {
@@ -216,7 +217,7 @@ class Expectations {
CHECK(index < number_of_properties_);
representations_[index] = Representation::Tagged();
if (types_[index] == DATA || types_[index] == ACCESSOR) {
- values_[index] = HeapType::Any(isolate_);
+ values_[index] = FieldType::Any(isolate_);
}
}
@@ -232,8 +233,8 @@ class Expectations {
switch (type) {
case DATA:
case ACCESSOR: {
- HeapType* type = descriptors->GetFieldType(descriptor);
- return HeapType::cast(expected_value)->Equals(type);
+ FieldType* type = descriptors->GetFieldType(descriptor);
+ return FieldType::cast(expected_value) == type;
}
case DATA_CONSTANT:
@@ -280,7 +281,7 @@ class Expectations {
Handle<Map> AddDataField(Handle<Map> map, PropertyAttributes attributes,
Representation representation,
- Handle<HeapType> heap_type) {
+ Handle<FieldType> heap_type) {
CHECK_EQ(number_of_properties_, map->NumberOfOwnDescriptors());
int property_index = number_of_properties_++;
SetDataField(property_index, attributes, representation, heap_type);
@@ -306,7 +307,7 @@ class Expectations {
Handle<Map> TransitionToDataField(Handle<Map> map,
PropertyAttributes attributes,
Representation representation,
- Handle<HeapType> heap_type,
+ Handle<FieldType> heap_type,
Handle<Object> value) {
CHECK_EQ(number_of_properties_, map->NumberOfOwnDescriptors());
int property_index = number_of_properties_++;
@@ -332,7 +333,7 @@ class Expectations {
Handle<Map> FollowDataTransition(Handle<Map> map,
PropertyAttributes attributes,
Representation representation,
- Handle<HeapType> heap_type) {
+ Handle<FieldType> heap_type) {
CHECK_EQ(number_of_properties_, map->NumberOfOwnDescriptors());
int property_index = number_of_properties_++;
SetDataField(property_index, attributes, representation, heap_type);
@@ -421,8 +422,8 @@ TEST(ReconfigureAccessorToNonExistingDataField) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
- Handle<HeapType> none_type = HeapType::None(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> none_type = FieldType::None(isolate);
Handle<AccessorPair> pair = CreateAccessorPair(true, true);
Expectations expectations(isolate);
@@ -533,12 +534,12 @@ TEST(ReconfigureAccessorToNonExistingDataFieldHeavy) {
//
static void TestGeneralizeRepresentation(
int detach_property_at_index, int property_index,
- Representation from_representation, Handle<HeapType> from_type,
- Representation to_representation, Handle<HeapType> to_type,
- Representation expected_representation, Handle<HeapType> expected_type,
+ Representation from_representation, Handle<FieldType> from_type,
+ Representation to_representation, Handle<FieldType> to_type,
+ Representation expected_representation, Handle<FieldType> expected_type,
bool expected_deprecation, bool expected_field_type_dependency) {
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
CHECK(detach_property_at_index >= -1 &&
detach_property_at_index < kPropCount);
@@ -639,11 +640,10 @@ static void TestGeneralizeRepresentation(
CHECK_EQ(*new_map, *updated_map);
}
-
static void TestGeneralizeRepresentation(
- Representation from_representation, Handle<HeapType> from_type,
- Representation to_representation, Handle<HeapType> to_type,
- Representation expected_representation, Handle<HeapType> expected_type,
+ Representation from_representation, Handle<FieldType> from_type,
+ Representation to_representation, Handle<FieldType> to_type,
+ Representation expected_representation, Handle<FieldType> expected_type,
bool expected_deprecation, bool expected_field_type_dependency) {
// Check the cases when the map being reconfigured is a part of the
// transition tree.
@@ -670,19 +670,18 @@ static void TestGeneralizeRepresentation(
// Check that reconfiguration to the very same field works correctly.
Representation representation = from_representation;
- Handle<HeapType> type = from_type;
+ Handle<FieldType> type = from_type;
TestGeneralizeRepresentation(-1, 2, representation, type, representation,
type, representation, type, false, false);
}
}
-
static void TestGeneralizeRepresentation(Representation from_representation,
- Handle<HeapType> from_type,
+ Handle<FieldType> from_type,
Representation to_representation,
- Handle<HeapType> to_type,
+ Handle<FieldType> to_type,
Representation expected_representation,
- Handle<HeapType> expected_type) {
+ Handle<FieldType> expected_type) {
const bool expected_deprecation = true;
const bool expected_field_type_dependency = false;
@@ -692,11 +691,10 @@ static void TestGeneralizeRepresentation(Representation from_representation,
expected_field_type_dependency);
}
-
static void TestGeneralizeRepresentationTrivial(
- Representation from_representation, Handle<HeapType> from_type,
- Representation to_representation, Handle<HeapType> to_type,
- Representation expected_representation, Handle<HeapType> expected_type,
+ Representation from_representation, Handle<FieldType> from_type,
+ Representation to_representation, Handle<FieldType> to_type,
+ Representation expected_representation, Handle<FieldType> expected_type,
bool expected_field_type_dependency = true) {
const bool expected_deprecation = false;
@@ -711,7 +709,7 @@ TEST(GeneralizeRepresentationSmiToDouble) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
TestGeneralizeRepresentation(Representation::Smi(), any_type,
Representation::Double(), any_type,
@@ -723,9 +721,9 @@ TEST(GeneralizeRepresentationSmiToTagged) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
- Handle<HeapType> value_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
TestGeneralizeRepresentation(Representation::Smi(), any_type,
Representation::HeapObject(), value_type,
@@ -737,9 +735,9 @@ TEST(GeneralizeRepresentationDoubleToTagged) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
- Handle<HeapType> value_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
TestGeneralizeRepresentation(Representation::Double(), any_type,
Representation::HeapObject(), value_type,
@@ -751,9 +749,9 @@ TEST(GeneralizeRepresentationHeapObjectToTagged) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
- Handle<HeapType> value_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
TestGeneralizeRepresentation(Representation::HeapObject(), value_type,
Representation::Smi(), any_type,
@@ -765,29 +763,23 @@ TEST(GeneralizeRepresentationHeapObjectToHeapObject) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
- const int kMaxClassesPerFieldType = 1;
- Handle<HeapType> current_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> current_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
- for (int i = 0; i < kMaxClassesPerFieldType; i++) {
- Handle<HeapType> new_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> new_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
- Handle<HeapType> expected_type =
- (i < kMaxClassesPerFieldType - 1)
- ? HeapType::Union(current_type, new_type, isolate)
- : any_type;
+ Handle<FieldType> expected_type = any_type;
TestGeneralizeRepresentationTrivial(
Representation::HeapObject(), current_type,
Representation::HeapObject(), new_type, Representation::HeapObject(),
expected_type);
current_type = expected_type;
- }
- Handle<HeapType> new_type = HeapType::Class(Map::Create(isolate, 0), isolate);
+ new_type = FieldType::Class(Map::Create(isolate, 0), isolate);
TestGeneralizeRepresentationTrivial(
Representation::HeapObject(), any_type, Representation::HeapObject(),
@@ -799,8 +791,8 @@ TEST(GeneralizeRepresentationNoneToSmi) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> none_type = HeapType::None(isolate);
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> none_type = FieldType::None(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
// None -> Smi representation change is trivial.
TestGeneralizeRepresentationTrivial(Representation::None(), none_type,
@@ -813,8 +805,8 @@ TEST(GeneralizeRepresentationNoneToDouble) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> none_type = HeapType::None(isolate);
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> none_type = FieldType::None(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
// None -> Double representation change is NOT trivial.
TestGeneralizeRepresentation(Representation::None(), none_type,
@@ -827,9 +819,9 @@ TEST(GeneralizeRepresentationNoneToHeapObject) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> none_type = HeapType::None(isolate);
- Handle<HeapType> value_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> none_type = FieldType::None(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
// None -> HeapObject representation change is trivial.
TestGeneralizeRepresentationTrivial(Representation::None(), none_type,
@@ -842,8 +834,8 @@ TEST(GeneralizeRepresentationNoneToTagged) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> none_type = HeapType::None(isolate);
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> none_type = FieldType::None(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
// None -> HeapObject representation change is trivial.
TestGeneralizeRepresentationTrivial(Representation::None(), none_type,
@@ -861,7 +853,7 @@ TEST(GeneralizeRepresentationWithAccessorProperties) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
Handle<AccessorPair> pair = CreateAccessorPair(true, true);
const int kAccessorProp = kPropCount / 2;
@@ -932,9 +924,9 @@ TEST(GeneralizeRepresentationWithAccessorProperties) {
// where "p2A" and "p2B" differ only in the attributes.
//
static void TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
- Representation from_representation, Handle<HeapType> from_type,
- Representation to_representation, Handle<HeapType> to_type,
- Representation expected_representation, Handle<HeapType> expected_type) {
+ Representation from_representation, Handle<FieldType> from_type,
+ Representation to_representation, Handle<FieldType> to_type,
+ Representation expected_representation, Handle<FieldType> expected_type) {
Isolate* isolate = CcTest::i_isolate();
Expectations expectations(isolate);
@@ -1016,9 +1008,9 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
// where "p2A" and "p2B" differ only in the attributes.
//
static void TestReconfigureDataFieldAttribute_GeneralizeRepresentationTrivial(
- Representation from_representation, Handle<HeapType> from_type,
- Representation to_representation, Handle<HeapType> to_type,
- Representation expected_representation, Handle<HeapType> expected_type,
+ Representation from_representation, Handle<FieldType> from_type,
+ Representation to_representation, Handle<FieldType> to_type,
+ Representation expected_representation, Handle<FieldType> expected_type,
bool expected_field_type_dependency = true) {
Isolate* isolate = CcTest::i_isolate();
@@ -1096,7 +1088,7 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeRepresentationSmiToDouble) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
Representation::Smi(), any_type, Representation::Double(), any_type,
@@ -1108,9 +1100,9 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeRepresentationSmiToTagged) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
- Handle<HeapType> value_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
Representation::Smi(), any_type, Representation::HeapObject(), value_type,
@@ -1122,9 +1114,9 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeRepresentationDoubleToTagged) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
- Handle<HeapType> value_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
Representation::Double(), any_type, Representation::HeapObject(),
@@ -1136,29 +1128,22 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeRepresentationHeapObjToHeapObj) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
- const int kMaxClassesPerFieldType = 1;
- Handle<HeapType> current_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> current_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
- for (int i = 0; i < kMaxClassesPerFieldType; i++) {
- Handle<HeapType> new_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> new_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
- Handle<HeapType> expected_type =
- (i < kMaxClassesPerFieldType - 1)
- ? HeapType::Union(current_type, new_type, isolate)
- : any_type;
+ Handle<FieldType> expected_type = any_type;
- TestReconfigureDataFieldAttribute_GeneralizeRepresentationTrivial(
- Representation::HeapObject(), current_type,
- Representation::HeapObject(), new_type, Representation::HeapObject(),
- expected_type);
- current_type = expected_type;
- }
+ TestReconfigureDataFieldAttribute_GeneralizeRepresentationTrivial(
+ Representation::HeapObject(), current_type, Representation::HeapObject(),
+ new_type, Representation::HeapObject(), expected_type);
+ current_type = expected_type;
- Handle<HeapType> new_type = HeapType::Class(Map::Create(isolate, 0), isolate);
+ new_type = FieldType::Class(Map::Create(isolate, 0), isolate);
TestReconfigureDataFieldAttribute_GeneralizeRepresentationTrivial(
Representation::HeapObject(), any_type, Representation::HeapObject(),
@@ -1170,9 +1155,9 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeRepresentationHeapObjectToTagged) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
- Handle<HeapType> value_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
Representation::HeapObject(), value_type, Representation::Smi(), any_type,
@@ -1268,7 +1253,7 @@ template <typename TestConfig, typename Checker>
static void TestReconfigureProperty_CustomPropertyAfterTargetMap(
TestConfig& config, Checker& checker) {
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
const int kCustomPropIndex = kPropCount - 2;
Expectations expectations(isolate);
@@ -1391,8 +1376,8 @@ TEST(ReconfigureDataFieldAttribute_DataConstantToDataFieldAfterTargetMap) {
void UpdateExpectations(int property_index, Expectations& expectations) {
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> function_type =
- HeapType::Class(isolate->sloppy_function_map(), isolate);
+ Handle<FieldType> function_type =
+ FieldType::Class(isolate->sloppy_function_map(), isolate);
expectations.SetDataField(property_index, Representation::HeapObject(),
function_type);
}
@@ -1523,7 +1508,7 @@ TEST(ReconfigureDataFieldAttribute_AccConstantToDataFieldAfterTargetMap) {
return expectations.AddAccessorConstant(map, NONE, pair_);
} else {
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
return expectations.AddDataField(map, NONE, Representation::Smi(),
any_type);
}
@@ -1547,7 +1532,7 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
Expectations expectations(isolate);
@@ -1638,9 +1623,9 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
template <typename TestConfig>
static void TestGeneralizeRepresentationWithSpecialTransition(
TestConfig& config, Representation from_representation,
- Handle<HeapType> from_type, Representation to_representation,
- Handle<HeapType> to_type, Representation expected_representation,
- Handle<HeapType> expected_type) {
+ Handle<FieldType> from_type, Representation to_representation,
+ Handle<FieldType> to_type, Representation expected_representation,
+ Handle<FieldType> expected_type) {
Isolate* isolate = CcTest::i_isolate();
Expectations expectations(isolate);
@@ -1730,9 +1715,9 @@ TEST(ElementsKindTransitionFromMapOwningDescriptor) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
- Handle<HeapType> value_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
struct TestConfig {
Handle<Map> Transition(Handle<Map> map) {
@@ -1754,14 +1739,14 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
- Handle<HeapType> value_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
struct TestConfig {
Handle<Map> Transition(Handle<Map> map) {
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
// Add one more transition to |map| in order to prevent descriptors
// ownership.
@@ -1789,9 +1774,9 @@ TEST(ForObservedTransitionFromMapOwningDescriptor) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
- Handle<HeapType> value_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
struct TestConfig {
Handle<Map> Transition(Handle<Map> map) {
@@ -1812,14 +1797,14 @@ TEST(ForObservedTransitionFromMapNotOwningDescriptor) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
- Handle<HeapType> value_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
struct TestConfig {
Handle<Map> Transition(Handle<Map> map) {
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
// Add one more transition to |map| in order to prevent descriptors
// ownership.
@@ -1847,9 +1832,9 @@ TEST(PrototypeTransitionFromMapOwningDescriptor) {
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
- Handle<HeapType> value_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
struct TestConfig {
Handle<JSObject> prototype_;
@@ -1881,9 +1866,9 @@ TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
- Handle<HeapType> value_type =
- HeapType::Class(Map::Create(isolate, 0), isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
+ Handle<FieldType> value_type =
+ FieldType::Class(Map::Create(isolate, 0), isolate);
struct TestConfig {
Handle<JSObject> prototype_;
@@ -1896,7 +1881,7 @@ TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
Handle<Map> Transition(Handle<Map> map) {
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
// Add one more transition to |map| in order to prevent descriptors
// ownership.
@@ -1928,11 +1913,11 @@ TEST(PrototypeTransitionFromMapNotOwningDescriptor) {
struct TransitionToDataFieldOperator {
Representation representation_;
PropertyAttributes attributes_;
- Handle<HeapType> heap_type_;
+ Handle<FieldType> heap_type_;
Handle<Object> value_;
TransitionToDataFieldOperator(Representation representation,
- Handle<HeapType> heap_type,
+ Handle<FieldType> heap_type,
Handle<Object> value,
PropertyAttributes attributes = NONE)
: representation_(representation),
@@ -1979,11 +1964,11 @@ struct ReconfigureAsDataPropertyOperator {
int descriptor_;
Representation representation_;
PropertyAttributes attributes_;
- Handle<HeapType> heap_type_;
+ Handle<FieldType> heap_type_;
ReconfigureAsDataPropertyOperator(int descriptor,
Representation representation,
- Handle<HeapType> heap_type,
+ Handle<FieldType> heap_type,
PropertyAttributes attributes = NONE)
: descriptor_(descriptor),
representation_(representation),
@@ -2019,10 +2004,10 @@ struct FieldGeneralizationChecker {
int descriptor_;
Representation representation_;
PropertyAttributes attributes_;
- Handle<HeapType> heap_type_;
+ Handle<FieldType> heap_type_;
FieldGeneralizationChecker(int descriptor, Representation representation,
- Handle<HeapType> heap_type,
+ Handle<FieldType> heap_type,
PropertyAttributes attributes = NONE)
: descriptor_(descriptor),
representation_(representation),
@@ -2085,7 +2070,7 @@ template <typename TransitionOp1, typename TransitionOp2, typename Checker>
static void TestTransitionTo(TransitionOp1& transition_op1,
TransitionOp2& transition_op2, Checker& checker) {
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
Expectations expectations(isolate);
@@ -2113,7 +2098,7 @@ TEST(TransitionDataFieldToDataField) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
Handle<Object> value1 = handle(Smi::FromInt(0), isolate);
TransitionToDataFieldOperator transition_op1(Representation::Smi(), any_type,
@@ -2148,8 +2133,8 @@ TEST(TransitionDataConstantToAnotherDataConstant) {
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Handle<HeapType> function_type =
- HeapType::Class(isolate->sloppy_function_map(), isolate);
+ Handle<FieldType> function_type =
+ FieldType::Class(isolate->sloppy_function_map(), isolate);
Handle<JSFunction> js_func1 = factory->NewFunction(factory->empty_string());
TransitionToDataConstantOperator transition_op1(js_func1);
@@ -2168,7 +2153,7 @@ TEST(TransitionDataConstantToDataField) {
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
Handle<JSFunction> js_func1 = factory->NewFunction(factory->empty_string());
TransitionToDataConstantOperator transition_op1(js_func1);
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index a2fd09e9f5..87119b8571 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -2071,7 +2071,7 @@ TEST(AccessorInfo) {
const v8::HeapGraphNode* length_accessor =
GetProperty(descriptors, v8::HeapGraphEdge::kInternal, "4");
CHECK(length_accessor);
- CHECK_EQ(0, strcmp("system / ExecutableAccessorInfo",
+ CHECK_EQ(0, strcmp("system / AccessorInfo",
*v8::String::Utf8Value(length_accessor->GetName())));
const v8::HeapGraphNode* name =
GetProperty(length_accessor, v8::HeapGraphEdge::kInternal, "name");
@@ -2852,3 +2852,161 @@ TEST(AddressToTraceMap) {
CHECK_EQ(0u, map.size());
CHECK_EQ(0u, map.GetTraceNodeId(ToAddress(0x400)));
}
+
+
+static const v8::AllocationProfile::Node* FindAllocationProfileNode(
+ v8::AllocationProfile& profile, const Vector<const char*>& names) {
+ v8::AllocationProfile::Node* node = profile.GetRootNode();
+ for (int i = 0; node != nullptr && i < names.length(); ++i) {
+ const char* name = names[i];
+ auto children = node->children;
+ node = nullptr;
+ for (v8::AllocationProfile::Node* child : children) {
+ v8::String::Utf8Value child_name(child->name);
+ if (strcmp(*child_name, name) == 0) {
+ node = child;
+ break;
+ }
+ }
+ }
+ return node;
+}
+
+
+TEST(SamplingHeapProfiler) {
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+ LocalContext env;
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+ // Turn off always_opt. Inlining can cause stack traces to be shorter than
+ // what we expect in this test.
+ v8::internal::FLAG_always_opt = false;
+
+ // Suppress randomness to avoid flakiness in tests.
+ v8::internal::FLAG_sampling_heap_profiler_suppress_randomness = true;
+
+ const char* script_source =
+ "var A = [];\n"
+ "function bar(size) { return new Array(size); }\n"
+ "var foo = function() {\n"
+ " for (var i = 0; i < 1024; ++i) {\n"
+ " A[i] = bar(1024);\n"
+ " }\n"
+ "}\n"
+ "foo();";
+
+ // The profile should be null if requested before sampling has started.
+ {
+ v8::AllocationProfile* profile = heap_profiler->GetAllocationProfile();
+ CHECK(profile == nullptr);
+ }
+
+ int count_1024 = 0;
+ {
+ heap_profiler->StartSamplingHeapProfiler(1024);
+ CompileRun(script_source);
+
+ v8::base::SmartPointer<v8::AllocationProfile> profile(
+ heap_profiler->GetAllocationProfile());
+ CHECK(!profile.is_empty());
+
+ const char* names[] = {"", "foo", "bar"};
+ auto node_bar = FindAllocationProfileNode(
+ *profile, Vector<const char*>(names, arraysize(names)));
+ CHECK(node_bar);
+
+ // Count the number of allocations we sampled from bar.
+ for (auto allocation : node_bar->allocations) {
+ count_1024 += allocation.count;
+ }
+
+ heap_profiler->StopSamplingHeapProfiler();
+ }
+
+ // Samples should get cleared once sampling is stopped.
+ {
+ v8::AllocationProfile* profile = heap_profiler->GetAllocationProfile();
+ CHECK(profile == nullptr);
+ }
+
+ // Sampling at a higher rate should give us similar numbers of objects.
+ {
+ heap_profiler->StartSamplingHeapProfiler(128);
+ CompileRun(script_source);
+
+ v8::base::SmartPointer<v8::AllocationProfile> profile(
+ heap_profiler->GetAllocationProfile());
+ CHECK(!profile.is_empty());
+
+ const char* names[] = {"", "foo", "bar"};
+ auto node_bar = FindAllocationProfileNode(
+ *profile, Vector<const char*>(names, arraysize(names)));
+ CHECK(node_bar);
+
+ // Count the number of allocations we sampled from bar.
+ int count_128 = 0;
+ for (auto allocation : node_bar->allocations) {
+ count_128 += allocation.count;
+ }
+
+ // We should have similar unsampled counts of allocations. Though
+ // we will sample different numbers of objects at different rates,
+ // the unsampling process should produce similar final estimates
+ // of the true number of allocations. However, the process used to
+ // determine these unsampled counts is probabilistic, so we need to
+ // account for error.
+ double max_count = std::max(count_128, count_1024);
+ double min_count = std::min(count_128, count_1024);
+ double percent_difference = (max_count - min_count) / min_count;
+ CHECK_LT(percent_difference, 0.15);
+
+ heap_profiler->StopSamplingHeapProfiler();
+ }
+
+ // A more complicated test case with a deeper call graph and dynamically
+ // generated function names.
+ {
+ heap_profiler->StartSamplingHeapProfiler(64);
+ CompileRun(record_trace_tree_source);
+
+ v8::base::SmartPointer<v8::AllocationProfile> profile(
+ heap_profiler->GetAllocationProfile());
+ CHECK(!profile.is_empty());
+
+ const char* names1[] = {"", "start", "f_0_0", "f_0_1", "f_0_2"};
+ auto node1 = FindAllocationProfileNode(
+ *profile, Vector<const char*>(names1, arraysize(names1)));
+ CHECK(node1);
+
+ const char* names2[] = {"", "generateFunctions"};
+ auto node2 = FindAllocationProfileNode(
+ *profile, Vector<const char*>(names2, arraysize(names2)));
+ CHECK(node2);
+
+ heap_profiler->StopSamplingHeapProfiler();
+ }
+}
+
+
+TEST(SamplingHeapProfilerApiAllocation) {
+ v8::HandleScope scope(v8::Isolate::GetCurrent());
+ LocalContext env;
+ v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+ // Suppress randomness to avoid flakiness in tests.
+ v8::internal::FLAG_sampling_heap_profiler_suppress_randomness = true;
+
+ heap_profiler->StartSamplingHeapProfiler(256);
+
+ for (int i = 0; i < 8 * 1024; ++i) v8::Object::New(env->GetIsolate());
+
+ v8::base::SmartPointer<v8::AllocationProfile> profile(
+ heap_profiler->GetAllocationProfile());
+ CHECK(!profile.is_empty());
+ const char* names[] = {"(V8 API)"};
+ auto node = FindAllocationProfileNode(
+ *profile, Vector<const char*>(names, arraysize(names)));
+ CHECK(node);
+
+ heap_profiler->StopSamplingHeapProfiler();
+}
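[Editor's note] The two tests above exercise the new sampling heap profiler
through a three-call API: StartSamplingHeapProfiler, GetAllocationProfile,
and StopSamplingHeapProfiler. A minimal sketch of that flow, assuming an
initialized v8::Isolate* named isolate with an entered context (workload
and error handling elided):

    // Start sampling with an average interval of 1024 bytes per sample.
    v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
    profiler->StartSamplingHeapProfiler(1024);
    // ... run the workload to be profiled ...
    v8::AllocationProfile* profile = profiler->GetAllocationProfile();
    if (profile != nullptr) {
      v8::AllocationProfile::Node* root = profile->GetRootNode();
      // Walk root->children and each node's allocations here.
      delete profile;  // the caller owns the returned profile
    }
    profiler->StopSamplingHeapProfiler();  // discards collected samples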
diff --git a/deps/v8/test/cctest/test-inobject-slack-tracking.cc b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
index 6ce77c9416..5cc4e94a02 100644
--- a/deps/v8/test/cctest/test-inobject-slack-tracking.cc
+++ b/deps/v8/test/cctest/test-inobject-slack-tracking.cc
@@ -61,9 +61,9 @@ Handle<T> GetLexical(const char* name) {
ScriptContextTable::LookupResult lookup_result;
if (ScriptContextTable::Lookup(script_contexts, str_name, &lookup_result)) {
Handle<Object> result =
- FixedArray::get(ScriptContextTable::GetContext(
+ FixedArray::get(*ScriptContextTable::GetContext(
script_contexts, lookup_result.context_index),
- lookup_result.slot_index);
+ lookup_result.slot_index, isolate);
return Handle<T>::cast(result);
}
return Handle<T>();
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index 7254ee084f..21dafda4a9 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -83,6 +83,11 @@ static void construct_call(const v8::FunctionCallbackInfo<v8::Value>& args) {
frame_iterator.Advance();
CHECK(frame_iterator.frame()->is_construct());
frame_iterator.Advance();
+ if (i::FLAG_ignition) {
+ // Skip over bytecode handler frame.
+ CHECK(frame_iterator.frame()->type() == i::StackFrame::STUB);
+ frame_iterator.Advance();
+ }
i::StackFrame* calling_frame = frame_iterator.frame();
CHECK(calling_frame->is_java_script());
@@ -175,7 +180,8 @@ TEST(CFromJSStackTrace) {
// TickSample::Trace
CHECK(sample.has_external_callback);
- CHECK_EQ(FUNCTION_ADDR(i::TraceExtension::Trace), sample.external_callback);
+ CHECK_EQ(FUNCTION_ADDR(i::TraceExtension::Trace),
+ sample.external_callback_entry);
// Stack tracing will start from the first JS function, i.e. "JSFuncDoTrace"
unsigned base = 0;
@@ -229,7 +235,8 @@ TEST(PureJSStackTrace) {
//
CHECK(sample.has_external_callback);
- CHECK_EQ(FUNCTION_ADDR(i::TraceExtension::JSTrace), sample.external_callback);
+ CHECK_EQ(FUNCTION_ADDR(i::TraceExtension::JSTrace),
+ sample.external_callback_entry);
// Stack sampling will start from the caller of JSFuncDoTrace, i.e. "JSTrace"
unsigned base = 0;
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index adbd1a5a37..8077f65ae3 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -370,10 +370,14 @@ TEST(LogCallbacks) {
i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
CHECK(exists);
+ Address ObjMethod1_entry = reinterpret_cast<Address>(ObjMethod1);
+#if USES_FUNCTION_DESCRIPTORS
+ ObjMethod1_entry = *FUNCTION_ENTRYPOINT_ADDRESS(ObjMethod1_entry);
+#endif
i::EmbeddedVector<char, 100> ref_data;
i::SNPrintF(ref_data,
"code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"method1\"",
- reinterpret_cast<intptr_t>(ObjMethod1));
+ reinterpret_cast<intptr_t>(ObjMethod1_entry));
CHECK(StrNStr(log.start(), ref_data.start(), log.length()));
log.Dispose();
@@ -419,22 +423,34 @@ TEST(LogAccessorCallbacks) {
i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
CHECK(exists);
+ Address Prop1Getter_entry = reinterpret_cast<Address>(Prop1Getter);
+#if USES_FUNCTION_DESCRIPTORS
+ Prop1Getter_entry = *FUNCTION_ENTRYPOINT_ADDRESS(Prop1Getter_entry);
+#endif
EmbeddedVector<char, 100> prop1_getter_record;
i::SNPrintF(prop1_getter_record,
"code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"get prop1\"",
- reinterpret_cast<intptr_t>(Prop1Getter));
+ reinterpret_cast<intptr_t>(Prop1Getter_entry));
CHECK(StrNStr(log.start(), prop1_getter_record.start(), log.length()));
+ Address Prop1Setter_entry = reinterpret_cast<Address>(Prop1Setter);
+#if USES_FUNCTION_DESCRIPTORS
+ Prop1Setter_entry = *FUNCTION_ENTRYPOINT_ADDRESS(Prop1Setter_entry);
+#endif
EmbeddedVector<char, 100> prop1_setter_record;
i::SNPrintF(prop1_setter_record,
"code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"set prop1\"",
- reinterpret_cast<intptr_t>(Prop1Setter));
+ reinterpret_cast<intptr_t>(Prop1Setter_entry));
CHECK(StrNStr(log.start(), prop1_setter_record.start(), log.length()));
+ Address Prop2Getter_entry = reinterpret_cast<Address>(Prop2Getter);
+#if USES_FUNCTION_DESCRIPTORS
+ Prop2Getter_entry = *FUNCTION_ENTRYPOINT_ADDRESS(Prop2Getter_entry);
+#endif
EmbeddedVector<char, 100> prop2_getter_record;
i::SNPrintF(prop2_getter_record,
"code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"get prop2\"",
- reinterpret_cast<intptr_t>(Prop2Getter));
+ reinterpret_cast<intptr_t>(Prop2Getter_entry));
CHECK(StrNStr(log.start(), prop2_getter_record.start(), log.length()));
log.Dispose();
}
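[Editor's note] The USES_FUNCTION_DESCRIPTORS guards added above all follow
one pattern; a sketch, with Callback standing in for the real C callback.
On ABIs with function descriptors (e.g. AIX or big-endian PPC64), a function
pointer designates a descriptor rather than the first instruction, so the
log must record the actual entry point:

    Address entry = reinterpret_cast<Address>(Callback);
    #if USES_FUNCTION_DESCRIPTORS
    // Dereference the descriptor to obtain the real code address.
    entry = *FUNCTION_ENTRYPOINT_ADDRESS(entry);
    #endif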
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips.cc b/deps/v8/test/cctest/test-macro-assembler-mips.cc
index 696ca010ca..77dc859022 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips.cc
@@ -185,7 +185,7 @@ TEST(jump_tables4) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, NULL, 0,
+ MacroAssembler assembler(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -193,11 +193,9 @@ TEST(jump_tables4) {
int values[kNumCases];
isolate->random_number_generator()->NextBytes(values, sizeof(values));
Label labels[kNumCases];
- Label near_start, end;
-
- __ addiu(sp, sp, -4);
- __ sw(ra, MemOperand(sp));
+ Label near_start, end, done;
+ __ Push(ra);
__ mov(v0, zero_reg);
__ Branch(&end);
@@ -209,35 +207,17 @@ TEST(jump_tables4) {
__ addiu(v0, v0, 1);
}
- Label done;
- {
- __ BlockTrampolinePoolFor(kNumCases + 6);
- PredictableCodeSizeScope predictable(
- masm, (kNumCases + 6) * Assembler::kInstrSize);
- Label here;
-
- __ bal(&here);
- __ sll(at, a0, 2); // In delay slot.
- __ bind(&here);
- __ addu(at, at, ra);
- __ lw(at, MemOperand(at, 4 * Assembler::kInstrSize));
- __ jr(at);
- __ nop(); // Branch delay slot nop.
- for (int i = 0; i < kNumCases; ++i) {
- __ dd(&labels[i]);
- }
- }
+ __ GenerateSwitchTable(a0, kNumCases,
+ [&labels](size_t i) { return labels + i; });
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
- __ lui(v0, (values[i] >> 16) & 0xffff);
- __ ori(v0, v0, values[i] & 0xffff);
+ __ li(v0, values[i]);
__ Branch(&done);
}
__ bind(&done);
- __ lw(ra, MemOperand(sp));
- __ addiu(sp, sp, 4);
+ __ Pop(ra);
__ jr(ra);
__ nop();
@@ -279,23 +259,21 @@ TEST(jump_tables5) {
Label labels[kNumCases];
Label done;
- __ addiu(sp, sp, -4);
- __ sw(ra, MemOperand(sp));
+ __ Push(ra);
{
- __ BlockTrampolinePoolFor(kNumCases * 2 + 7 + 1);
+ __ BlockTrampolinePoolFor(kNumCases + 6 + 1);
PredictableCodeSizeScope predictable(
- masm, kNumCases * kPointerSize + ((7 + 1) * Assembler::kInstrSize));
- Label here;
-
- __ bal(&here);
- __ sll(at, a0, 2); // In delay slot.
- __ bind(&here);
- __ addu(at, at, ra);
- __ lw(at, MemOperand(at, 6 * Assembler::kInstrSize));
+ masm, kNumCases * kPointerSize + ((6 + 1) * Assembler::kInstrSize));
+
+ __ addiupc(at, 6 + 1);
+ __ lsa(at, at, a0, 2);
+ __ lw(at, MemOperand(at));
__ jalr(at);
__ nop(); // Branch delay slot nop.
__ bc(&done);
+ // A nop instruction must be generated by the forbidden slot guard
+ // (Assembler::dd(Label*)).
for (int i = 0; i < kNumCases; ++i) {
__ dd(&labels[i]);
}
@@ -303,15 +281,13 @@ TEST(jump_tables5) {
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
- __ lui(v0, (values[i] >> 16) & 0xffff);
- __ ori(v0, v0, values[i] & 0xffff);
+ __ li(v0, values[i]);
__ jr(ra);
__ nop();
}
__ bind(&done);
- __ lw(ra, MemOperand(sp));
- __ addiu(sp, sp, 4);
+ __ Pop(ra);
__ jr(ra);
__ nop();
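[Editor's note] Both MIPS ports replace the hand-written bal/sll/addu/lw
jump-table sequence with the MacroAssembler::GenerateSwitchTable helper,
which emits the PC-relative table access and blocks the trampoline pool for
the right number of instructions. The caller's shape, as used in
jump_tables4 above (register names follow the test):

    Label labels[kNumCases];
    // a0 holds the case index; the lambda maps an index to its Label*.
    __ GenerateSwitchTable(a0, kNumCases,
                           [&labels](size_t i) { return &labels[i]; });
    for (int i = 0; i < kNumCases; ++i) {
      __ bind(&labels[i]);
      __ li(v0, values[i]);  // per-case payload
      __ Branch(&done);
    }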
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index 684b554236..e74703b8f8 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -228,7 +228,7 @@ TEST(jump_tables4) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- MacroAssembler assembler(isolate, NULL, 0,
+ MacroAssembler assembler(isolate, nullptr, 0,
v8::internal::CodeObjectRequired::kYes);
MacroAssembler* masm = &assembler;
@@ -236,11 +236,9 @@ TEST(jump_tables4) {
int values[kNumCases];
isolate->random_number_generator()->NextBytes(values, sizeof(values));
Label labels[kNumCases];
- Label near_start, end;
-
- __ daddiu(sp, sp, -8);
- __ sd(ra, MemOperand(sp));
+ Label near_start, end, done;
+ __ Push(ra);
__ mov(v0, zero_reg);
__ Branch(&end);
@@ -252,36 +250,17 @@ TEST(jump_tables4) {
__ addiu(v0, v0, 1);
}
- __ Align(8);
- Label done;
- {
- __ BlockTrampolinePoolFor(kNumCases * 2 + 6);
- PredictableCodeSizeScope predictable(
- masm, (kNumCases * 2 + 6) * Assembler::kInstrSize);
- Label here;
-
- __ bal(&here);
- __ dsll(at, a0, 3); // In delay slot.
- __ bind(&here);
- __ daddu(at, at, ra);
- __ ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
- __ jr(at);
- __ nop(); // Branch delay slot nop.
- for (int i = 0; i < kNumCases; ++i) {
- __ dd(&labels[i]);
- }
- }
+ __ GenerateSwitchTable(a0, kNumCases,
+ [&labels](size_t i) { return labels + i; });
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
- __ lui(v0, (values[i] >> 16) & 0xffff);
- __ ori(v0, v0, values[i] & 0xffff);
+ __ li(v0, values[i]);
__ Branch(&done);
}
__ bind(&done);
- __ ld(ra, MemOperand(sp));
- __ daddiu(sp, sp, 8);
+ __ Pop(ra);
__ jr(ra);
__ nop();
@@ -323,21 +302,22 @@ TEST(jump_tables5) {
Label labels[kNumCases];
Label done;
- __ daddiu(sp, sp, -8);
- __ sd(ra, MemOperand(sp));
+ __ Push(ra);
+
+ // Opposite of Align(8) as we have an unaligned number of instructions in
+ // following block before the first dd().
+ if ((masm->pc_offset() & 7) == 0) {
+ __ nop();
+ }
- __ Align(8);
{
- __ BlockTrampolinePoolFor(kNumCases * 2 + 7 + 1);
+ __ BlockTrampolinePoolFor(kNumCases * 2 + 6 + 1);
PredictableCodeSizeScope predictable(
- masm, kNumCases * kPointerSize + ((7 + 1) * Assembler::kInstrSize));
- Label here;
-
- __ bal(&here);
- __ dsll(at, a0, 3); // In delay slot.
- __ bind(&here);
- __ daddu(at, at, ra);
- __ ld(at, MemOperand(at, 6 * Assembler::kInstrSize));
+ masm, kNumCases * kPointerSize + ((6 + 1) * Assembler::kInstrSize));
+
+ __ addiupc(at, 6 + 1);
+ __ dlsa(at, at, a0, 3);
+ __ ld(at, MemOperand(at));
__ jalr(at);
__ nop(); // Branch delay slot nop.
__ bc(&done);
@@ -351,15 +331,13 @@ TEST(jump_tables5) {
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
- __ lui(v0, (values[i] >> 16) & 0xffff);
- __ ori(v0, v0, values[i] & 0xffff);
+ __ li(v0, values[i]);
__ jr(ra);
__ nop();
}
__ bind(&done);
- __ ld(ra, MemOperand(sp));
- __ daddiu(sp, sp, 8);
+ __ Pop(ra);
__ jr(ra);
__ nop();
diff --git a/deps/v8/test/cctest/test-object-observe.cc b/deps/v8/test/cctest/test-object-observe.cc
index f0af22e27a..5164b87df7 100644
--- a/deps/v8/test/cctest/test-object-observe.cc
+++ b/deps/v8/test/cctest/test-object-observe.cc
@@ -1026,9 +1026,9 @@ TEST(UseCountObjectGetNotifier) {
CHECK_EQ(1, use_counts[v8::Isolate::kObjectObserve]);
}
-
static bool NamedAccessCheckAlwaysAllow(Local<v8::Context> accessing_context,
- Local<v8::Object> accessed_object) {
+ Local<v8::Object> accessed_object,
+ Local<v8::Value> data) {
return true;
}
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 52493145dc..b04fb94d3a 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -1500,7 +1500,6 @@ i::Handle<i::String> FormatMessage(i::Vector<unsigned> data) {
return i::MessageTemplate::FormatMessage(isolate, message, arg_object);
}
-
enum ParserFlag {
kAllowLazy,
kAllowNatives,
@@ -1511,10 +1510,10 @@ enum ParserFlag {
kAllowHarmonyDestructuringAssignment,
kAllowHarmonyNewTarget,
kAllowStrongMode,
- kNoLegacyConst
+ kNoLegacyConst,
+ kAllowHarmonyFunctionSent
};
-
enum ParserSyncTestResult {
kSuccessOrError,
kSuccess,
@@ -1536,6 +1535,8 @@ void SetParserFlags(i::ParserBase<Traits>* parser,
flags.Contains(kAllowHarmonyDestructuringAssignment));
parser->set_allow_strong_mode(flags.Contains(kAllowStrongMode));
parser->set_allow_legacy_const(!flags.Contains(kNoLegacyConst));
+ parser->set_allow_harmony_function_sent(
+ flags.Contains(kAllowHarmonyFunctionSent));
}
@@ -3712,6 +3713,8 @@ TEST(ErrorsArrowFormalParameters) {
TEST(ErrorsArrowFunctions) {
// Tests that parser and preparser generate the same kind of errors
// on invalid arrow function syntax.
+
+ // clang-format off
const char* context_data[][2] = {
{"", ";"},
{"v = ", ";"},
@@ -3812,8 +3815,14 @@ TEST(ErrorsArrowFunctions) {
"(c, a.b) => {}",
"(a['b'], c) => {}",
"(c, a['b']) => {}",
+
+ // crbug.com/582626
+ "(...rest - a) => b",
+ "(a, ...b - 10) => b",
+
NULL
};
+ // clang-format on
// The test is quite slow, so run it with a reduced set of flags.
static const ParserFlag flags[] = {kAllowLazy};
@@ -4445,6 +4454,7 @@ TEST(ClassDeclarationNoErrors) {
TEST(ClassBodyNoErrors) {
+ // clang-format off
// Tests that parser and preparser accept valid class syntax.
const char* context_data[][2] = {{"(class {", "});"},
{"(class extends Base {", "});"},
@@ -4467,6 +4477,8 @@ TEST(ClassBodyNoErrors) {
"; *g() {}",
"*g() {}; *h(x) {}",
"static() {}",
+ "get static() {}",
+ "set static(v) {}",
"static m() {}",
"static get x() {}",
"static set x(v) {}",
@@ -4476,10 +4488,23 @@ TEST(ClassBodyNoErrors) {
"static get static() {}",
"static set static(v) {}",
"*static() {}",
+ "static *static() {}",
"*get() {}",
"*set() {}",
"static *g() {}",
+
+ // Escaped 'static' should be allowed anywhere
+ // static-as-PropertyName is.
+ "st\\u0061tic() {}",
+ "get st\\u0061tic() {}",
+ "set st\\u0061tic(v) {}",
+ "static st\\u0061tic() {}",
+ "static get st\\u0061tic() {}",
+ "static set st\\u0061tic(v) {}",
+ "*st\\u0061tic() {}",
+ "static *st\\u0061tic() {}",
NULL};
+ // clang-format on
static const ParserFlag always_flags[] = {
kAllowHarmonySloppy
@@ -4928,6 +4953,23 @@ TEST(ConstParsingInForIn) {
}
+TEST(StatementParsingInForIn) {
+ const char* context_data[][2] = {{"", ""},
+ {"'use strict';", ""},
+ {"function foo(){ 'use strict';", "}"},
+ {NULL, NULL}};
+
+ const char* data[] = {"for(x in {}, {}) {}", "for(var x in {}, {}) {}",
+ "for(let x in {}, {}) {}", "for(const x in {}, {}) {}",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonySloppy, kAllowHarmonySloppyLet, kNoLegacyConst};
+ RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
TEST(ConstParsingInForInError) {
const char* context_data[][2] = {{"'use strict';", ""},
{"function foo(){ 'use strict';", "}"},
@@ -5101,6 +5143,76 @@ TEST(ForOfNoDeclarationsError) {
}
+TEST(ForOfInOperator) {
+ const char* context_data[][2] = {{"", ""},
+ {"'use strict';", ""},
+ {"function foo(){ 'use strict';", "}"},
+ {NULL, NULL}};
+
+ const char* data[] = {
+ "for(x of 'foo' in {}) {}", "for(var x of 'foo' in {}) {}",
+ "for(let x of 'foo' in {}) {}", "for(const x of 'foo' in {}) {}", NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonySloppy, kAllowHarmonySloppyLet, kNoLegacyConst};
+ RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
+TEST(ForOfYieldIdentifier) {
+ const char* context_data[][2] = {{"", ""}, {NULL, NULL}};
+
+ const char* data[] = {"for(x of yield) {}", "for(var x of yield) {}",
+ "for(let x of yield) {}", "for(const x of yield) {}",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonySloppy, kAllowHarmonySloppyLet, kNoLegacyConst};
+ RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
+TEST(ForOfYieldExpression) {
+ const char* context_data[][2] = {{"", ""},
+ {"'use strict';", ""},
+ {"function foo(){ 'use strict';", "}"},
+ {NULL, NULL}};
+
+ const char* data[] = {"function* g() { for(x of yield) {} }",
+ "function* g() { for(var x of yield) {} }",
+ "function* g() { for(let x of yield) {} }",
+ "function* g() { for(const x of yield) {} }", NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonySloppy, kAllowHarmonySloppyLet, kNoLegacyConst};
+ RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
+TEST(ForOfExpressionError) {
+ const char* context_data[][2] = {{"", ""},
+ {"'use strict';", ""},
+ {"function foo(){ 'use strict';", "}"},
+ {NULL, NULL}};
+
+ const char* data[] = {
+ "for(x of [], []) {}", "for(var x of [], []) {}",
+ "for(let x of [], []) {}", "for(const x of [], []) {}",
+
+ // AssignmentExpression should be validated statically:
+ "for(x of { y = 23 }) {}", "for(var x of { y = 23 }) {}",
+ "for(let x of { y = 23 }) {}", "for(const x of { y = 23 }) {}", NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonySloppy, kAllowHarmonySloppyLet, kNoLegacyConst};
+ RunParserSyncTest(context_data, data, kError, nullptr, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
TEST(InvalidUnicodeEscapes) {
const char* context_data[][2] = {{"", ""},
{"'use strict';", ""},
@@ -5558,6 +5670,7 @@ TEST(ComputedPropertyNameShorthandError) {
TEST(BasicImportExportParsing) {
i::FLAG_harmony_modules = true;
+ // clang-format off
const char* kSources[] = {
"export let x = 0;",
"export var y = 0;",
@@ -5569,7 +5682,11 @@ TEST(BasicImportExportParsing) {
"var a, b, c; export { a, b as baz, c };",
"var d, e; export { d as dreary, e, };",
"export default function f() {}",
+ "export default function() {}",
+ "export default function*() {}",
"export default class C {}",
+ "export default class {}"
+ "export default class extends C {}"
"export default 42",
"var x; export default x = 7",
"export { Q } from 'somemodule.js';",
@@ -5596,6 +5713,7 @@ TEST(BasicImportExportParsing) {
"import { static as s } from 'm.js';",
"import { let as l } from 'm.js';",
};
+ // clang-format on
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
@@ -5652,6 +5770,7 @@ TEST(BasicImportExportParsing) {
TEST(ImportExportParsingErrors) {
i::FLAG_harmony_modules = true;
+ // clang-format off
const char* kErrorSources[] = {
"export {",
"var a; export { a",
@@ -5680,6 +5799,10 @@ TEST(ImportExportParsingErrors) {
"var a, b; export { a as c, b as c };",
"export default function f(){}; export default class C {};",
"export default function f(){}; var a; export { a as default };",
+ "export function() {}",
+ "export function*() {}",
+ "export class {}",
+ "export class extends C {}",
"import from;",
"import from 'm.js';",
@@ -5708,11 +5831,8 @@ TEST(ImportExportParsingErrors) {
"import * as x, * as y from 'm.js';",
"import {x}, {y} from 'm.js';",
"import * as x, {y} from 'm.js';",
-
- // TODO(ES6): These two forms should be supported
- "export default function() {};",
- "export default class {};"
};
+ // clang-format on
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
@@ -6538,61 +6658,9 @@ TEST(StrongModeFreeVariablesDeclaredInGlobalPrototype) {
}
-TEST(StrongModeFreeVariablesNotDeclared) {
- i::FLAG_strong_mode = true;
- v8::V8::Initialize();
- v8::HandleScope scope(CcTest::isolate());
- v8::Context::Scope context_scope(v8::Context::New(CcTest::isolate()));
- v8::TryCatch try_catch(CcTest::isolate());
-
- // Test that referencing unintroduced variables in sloppy mode is ok.
- const char* script1 =
- "if (false) { \n"
- " not_there1; \n"
- "} \n";
- CompileRun(v8_str(script1));
- CHECK(!try_catch.HasCaught());
-
- // But not in strong mode.
- {
- const char* script2 =
- "\"use strong\"; \n"
- "if (false) { \n"
- " not_there2; \n"
- "} \n";
- v8::TryCatch try_catch2(CcTest::isolate());
- v8_compile(v8_str(script2));
- CHECK(try_catch2.HasCaught());
- v8::String::Utf8Value exception(try_catch2.Exception());
- CHECK_EQ(0,
- strcmp(
- "ReferenceError: In strong mode, using an undeclared global "
- "variable 'not_there2' is not allowed",
- *exception));
- }
-
- // Check that the variable reference is detected inside a strong function too,
- // even if the script scope is not strong.
- {
- const char* script3 =
- "(function not_lazy() { \n"
- " \"use strong\"; \n"
- " if (false) { \n"
- " not_there3; \n"
- " } \n"
- "})(); \n";
- v8::TryCatch try_catch2(CcTest::isolate());
- v8_compile(v8_str(script3));
- CHECK(try_catch2.HasCaught());
- v8::String::Utf8Value exception(try_catch2.Exception());
- CHECK_EQ(0,
- strcmp(
- "ReferenceError: In strong mode, using an undeclared global "
- "variable 'not_there3' is not allowed",
- *exception));
- }
-}
-
+static const ParserFlag kAllDestructuringFlags[] = {
+ kAllowHarmonyDestructuring, kAllowHarmonyDestructuringAssignment,
+ kAllowHarmonyDefaultParameters};
TEST(DestructuringPositiveTests) {
i::FLAG_harmony_destructuring_bind = true;
@@ -6650,6 +6718,8 @@ TEST(DestructuringPositiveTests) {
static const ParserFlag always_flags[] = {kAllowHarmonyDestructuring};
RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess, NULL, 0,
+ kAllDestructuringFlags, arraysize(kAllDestructuringFlags));
}
@@ -6727,6 +6797,10 @@ TEST(DestructuringNegativeTests) {
"[...rest,...rest1]",
"[a,b,...rest,...rest1]",
"[a,,..rest,...rest1]",
+ "[x, y, ...z = 1]",
+ "[...z = 1]",
+ "[x, y, ...[z] = [1]]",
+ "[...[z] = [1]]",
"{ x : 3 }",
"{ x : 'foo' }",
"{ x : /foo/ }",
@@ -6739,6 +6813,9 @@ TEST(DestructuringNegativeTests) {
// clang-format on
RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError, NULL, 0,
+ kAllDestructuringFlags,
+ arraysize(kAllDestructuringFlags));
}
{ // All modes.
@@ -6759,6 +6836,9 @@ TEST(DestructuringNegativeTests) {
// clang-format on
RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError, NULL, 0,
+ kAllDestructuringFlags,
+ arraysize(kAllDestructuringFlags));
}
{ // Strict mode.
@@ -6779,6 +6859,9 @@ TEST(DestructuringNegativeTests) {
// clang-format on
RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError, NULL, 0,
+ kAllDestructuringFlags,
+ arraysize(kAllDestructuringFlags));
}
{ // 'yield' in generators.
@@ -6797,6 +6880,9 @@ TEST(DestructuringNegativeTests) {
// clang-format on
RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError, NULL, 0,
+ kAllDestructuringFlags,
+ arraysize(kAllDestructuringFlags));
}
{ // Declaration-specific errors
@@ -7083,6 +7169,8 @@ TEST(DestructuringAssignmentNegativeTests) {
"[...x,]",
"[x, y, ...z = 1]",
"[...z = 1]",
+ "[x, y, ...[z] = [1]]",
+ "[...[z] = [1]]",
// v8:4657
"({ x: x4, x: (x+=1e4) })",
@@ -7333,14 +7421,7 @@ TEST(DefaultParametersYieldInInitializers) {
"x, y=f(yield)",
"{x=f(yield)}",
"[x=f(yield)]",
- NULL
- };
- // TODO(wingo): These aren't really destructuring assignment patterns; we're
- // just splitting them for now until the parser gets support for arrow
- // function arguments that look like destructuring assignments. When that
- // happens we should unify destructuring_assignment_data and parameter_data.
- const char* destructuring_assignment_data[] = {
"{x}=yield",
"[x]=yield",
@@ -7359,26 +7440,16 @@ TEST(DefaultParametersYieldInInitializers) {
RunParserSyncTest(sloppy_function_context_data, parameter_data, kSuccess,
NULL, 0, always_flags, arraysize(always_flags));
- RunParserSyncTest(sloppy_function_context_data, destructuring_assignment_data,
- kSuccess, NULL, 0, always_flags, arraysize(always_flags));
RunParserSyncTest(sloppy_arrow_context_data, parameter_data, kSuccess, NULL,
0, always_flags, arraysize(always_flags));
- RunParserSyncTest(sloppy_arrow_context_data, destructuring_assignment_data,
- kSuccess, NULL, 0, always_flags, arraysize(always_flags));
RunParserSyncTest(strict_function_context_data, parameter_data, kError, NULL,
0, always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_function_context_data, destructuring_assignment_data,
- kError, NULL, 0, always_flags, arraysize(always_flags));
RunParserSyncTest(strict_arrow_context_data, parameter_data, kError, NULL, 0,
always_flags, arraysize(always_flags));
- RunParserSyncTest(strict_arrow_context_data, destructuring_assignment_data,
- kError, NULL, 0, always_flags, arraysize(always_flags));
RunParserSyncTest(generator_context_data, parameter_data, kError, NULL, 0,
always_flags, arraysize(always_flags));
- RunParserSyncTest(generator_context_data, destructuring_assignment_data,
- kError, NULL, 0, always_flags, arraysize(always_flags));
}
@@ -7689,6 +7760,13 @@ TEST(LetSloppyOnly) {
"for (const [let] = 1; let < 1; let++) {}",
"for (const [let] in {}) {}",
"for (const [let] of []) {}",
+
+ // Sprinkle in the escaped version too.
+ "let l\\u0065t = 1",
+ "const l\\u0065t = 1",
+ "let [l\\u0065t] = 1",
+ "const [l\\u0065t] = 1",
+ "for (let l\\u0065t in {}) {}",
NULL
};
// clang-format on
@@ -7747,6 +7825,7 @@ TEST(EscapedKeywords) {
"wh\\u0069le (true) { }",
"w\\u0069th (this.scope) { }",
"(function*() { y\\u0069eld 1; })()",
+ "(function*() { var y\\u0069eld = 1; })()",
"var \\u0065num = 1;",
"var { \\u0065num } = {}",
@@ -7784,7 +7863,11 @@ TEST(EscapedKeywords) {
"do { ; } wh\\u0069le (true) { }",
"(function*() { return (n++, y\\u0069eld 1); })()",
"class C { st\\u0061tic bar() {} }",
+ "class C { st\\u0061tic *bar() {} }",
+ "class C { st\\u0061tic get bar() {} }",
+ "class C { st\\u0061tic set bar() {} }",
+ // TODO(adamk): These should not be errors in sloppy mode.
"(y\\u0069eld);",
"var y\\u0069eld = 1;",
"var { y\\u0069eld } = {};",
@@ -7810,14 +7893,14 @@ TEST(EscapedKeywords) {
};
// clang-format on
- RunParserSyncTest(sloppy_context_data, let_data, kError, NULL, 0,
+ RunParserSyncTest(sloppy_context_data, let_data, kSuccess, NULL, 0,
always_flags, arraysize(always_flags));
RunParserSyncTest(strict_context_data, let_data, kError, NULL, 0,
always_flags, arraysize(always_flags));
static const ParserFlag sloppy_let_flags[] = {
kAllowHarmonySloppy, kAllowHarmonySloppyLet, kAllowHarmonyDestructuring};
- RunParserSyncTest(sloppy_context_data, let_data, kError, NULL, 0,
+ RunParserSyncTest(sloppy_context_data, let_data, kSuccess, NULL, 0,
sloppy_let_flags, arraysize(sloppy_let_flags));
// Non-errors in sloppy mode
@@ -7839,6 +7922,9 @@ TEST(EscapedKeywords) {
"(publ\\u0069c);",
"var publ\\u0069c = 1;",
"var { publ\\u0069c } = {};",
+ "(st\\u0061tic);",
+ "var st\\u0061tic = 1;",
+ "var { st\\u0061tic } = {};",
NULL};
RunParserSyncTest(sloppy_context_data, valid_data, kSuccess, NULL, 0,
always_flags, arraysize(always_flags));
@@ -7850,9 +7936,55 @@ TEST(EscapedKeywords) {
TEST(MiscSyntaxErrors) {
+ // clang-format off
const char* context_data[][2] = {
- {"'use strict'", ""}, {"", ""}, {NULL, NULL}};
- const char* error_data[] = {"for (();;) {}", NULL};
+ { "'use strict'", "" },
+ { "", "" },
+ { NULL, NULL }
+ };
+ const char* error_data[] = {
+ "for (();;) {}",
+
+ // crbug.com/582626
+ "{ NaN ,chA((evarA=new t ( l = !.0[((... co -a0([1]))=> greturnkf",
+ NULL
+ };
+ // clang-format on
RunParserSyncTest(context_data, error_data, kError, NULL, 0, NULL, 0);
}
+
+TEST(FunctionSentErrors) {
+ // clang-format off
+ const char* context_data[][2] = {
+ { "'use strict'", "" },
+ { "", "" },
+ { NULL, NULL }
+ };
+ const char* error_data[] = {
+ "var x = function.sent",
+ "function* g() { yield function.s\\u0065nt; }",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyFunctionSent};
+ RunParserSyncTest(context_data, error_data, kError, always_flags,
+ arraysize(always_flags));
+}
+
+TEST(NewTargetErrors) {
+ // clang-format off
+ const char* context_data[][2] = {
+ { "'use strict'", "" },
+ { "", "" },
+ { NULL, NULL }
+ };
+ const char* error_data[] = {
+ "var x = new.target",
+ "function f() { return new.t\\u0061rget; }",
+ NULL
+ };
+ // clang-format on
+ RunParserSyncTest(context_data, error_data, kError);
+}
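[Editor's note] The parser tests added above all follow the same data-driven
idiom: every entry in context_data wraps every entry in the data array, and
RunParserSyncTest checks that parser and preparser agree on the expected
outcome under the given flags. A sketch of the shape, with illustrative
test data:

    const char* context_data[][2] = {
        {"", ""}, {"'use strict';", ""}, {NULL, NULL}};
    const char* data[] = {"let x = 1;", NULL};
    static const ParserFlag always_flags[] = {kAllowHarmonySloppy};
    RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
                      arraysize(always_flags));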
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 82c0f30bd6..fa7dc155ee 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -428,11 +428,13 @@ TEST(SampleIds) {
// (root)#1 -> aaa #2 -> bbb #4 -> ccc #5 - sample2
// -> ccc #6 -> aaa #7 - sample3
TickSample sample1;
+ sample1.timestamp = v8::base::TimeTicks::HighResolutionNow();
sample1.pc = ToAddress(0x1600);
sample1.stack[0] = ToAddress(0x1510);
sample1.frames_count = 1;
generator.RecordTickSample(sample1);
TickSample sample2;
+ sample2.timestamp = v8::base::TimeTicks::HighResolutionNow();
sample2.pc = ToAddress(0x1925);
sample2.stack[0] = ToAddress(0x1780);
sample2.stack[1] = ToAddress(0x10000); // non-existent.
@@ -440,6 +442,7 @@ TEST(SampleIds) {
sample2.frames_count = 3;
generator.RecordTickSample(sample2);
TickSample sample3;
+ sample3.timestamp = v8::base::TimeTicks::HighResolutionNow();
sample3.pc = ToAddress(0x1510);
sample3.stack[0] = ToAddress(0x1910);
sample3.stack[1] = ToAddress(0x1610);
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index a91058cc24..14ec12f21b 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -96,7 +96,7 @@ static bool CheckParse(const char* input) {
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
return v8::internal::RegExpParser::ParseRegExp(
- CcTest::i_isolate(), &zone, &reader, false, false, &result);
+ CcTest::i_isolate(), &zone, &reader, JSRegExp::kNone, &result);
}
@@ -106,8 +106,10 @@ static void CheckParseEq(const char* input, const char* expected,
Zone zone;
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
- CHECK(v8::internal::RegExpParser::ParseRegExp(
- CcTest::i_isolate(), &zone, &reader, false, unicode, &result));
+ JSRegExp::Flags flags = JSRegExp::kNone;
+ if (unicode) flags |= JSRegExp::kUnicode;
+ CHECK(v8::internal::RegExpParser::ParseRegExp(CcTest::i_isolate(), &zone,
+ &reader, flags, &result));
CHECK(result.tree != NULL);
CHECK(result.error.is_null());
std::ostringstream os;
@@ -125,7 +127,7 @@ static bool CheckSimple(const char* input) {
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
- CcTest::i_isolate(), &zone, &reader, false, false, &result));
+ CcTest::i_isolate(), &zone, &reader, JSRegExp::kNone, &result));
CHECK(result.tree != NULL);
CHECK(result.error.is_null());
return result.simple;
@@ -143,7 +145,7 @@ static MinMaxPair CheckMinMaxMatch(const char* input) {
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(v8::internal::RegExpParser::ParseRegExp(
- CcTest::i_isolate(), &zone, &reader, false, false, &result));
+ CcTest::i_isolate(), &zone, &reader, JSRegExp::kNone, &result));
CHECK(result.tree != NULL);
CHECK(result.error.is_null());
int min_match = result.tree->min_match();
@@ -206,8 +208,8 @@ void TestRegExpParser(bool lookbehind) {
}
CheckParseEq("()", "(^ %)");
CheckParseEq("(?=)", "(-> + %)");
- CheckParseEq("[]", "^[\\x00-\\uffff]"); // Doesn't compile on windows
- CheckParseEq("[^]", "[\\x00-\\uffff]"); // \uffff isn't in codepage 1252
+ CheckParseEq("[]", "^[\\x00-\\u{10ffff}]"); // Doesn't compile on windows
+ CheckParseEq("[^]", "[\\x00-\\u{10ffff}]"); // \uffff isn't in codepage 1252
CheckParseEq("[x]", "[x]");
CheckParseEq("[xyz]", "[x y z]");
CheckParseEq("[a-zA-Z0-9]", "[a-z A-Z 0-9]");
@@ -316,6 +318,10 @@ void TestRegExpParser(bool lookbehind) {
CheckParseEq("\\u{12345}{3}", "(# 3 3 g '\\ud808\\udf45')", true);
CheckParseEq("\\u{12345}*", "(# 0 - g '\\ud808\\udf45')", true);
+ CheckParseEq("\\ud808\\udf45*", "(# 0 - g '\\ud808\\udf45')", true);
+ CheckParseEq("[\\ud808\\udf45-\\ud809\\udccc]", "[\\u{012345}-\\u{0124cc}]",
+ true);
+
CHECK_SIMPLE("", false);
CHECK_SIMPLE("a", true);
CHECK_SIMPLE("a|b", false);
@@ -454,7 +460,7 @@ static void ExpectError(const char* input,
FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
RegExpCompileData result;
CHECK(!v8::internal::RegExpParser::ParseRegExp(
- CcTest::i_isolate(), &zone, &reader, false, false, &result));
+ CcTest::i_isolate(), &zone, &reader, JSRegExp::kNone, &result));
CHECK(result.tree == NULL);
CHECK(!result.error.is_null());
v8::base::SmartArrayPointer<char> str = result.error->ToCString(ALLOW_NULLS);
@@ -523,7 +529,7 @@ static void TestCharacterClassEscapes(uc16 c, bool (pred)(uc16 c)) {
ZoneList<CharacterRange>* ranges =
new(&zone) ZoneList<CharacterRange>(2, &zone);
CharacterRange::AddClassEscape(c, ranges, &zone);
- for (unsigned i = 0; i < (1 << 16); i++) {
+ for (uc32 i = 0; i < (1 << 16); i++) {
bool in_class = false;
for (int j = 0; !in_class && j < ranges->length(); j++) {
CharacterRange& range = ranges->at(j);
@@ -550,17 +556,19 @@ static RegExpNode* Compile(const char* input, bool multiline, bool unicode,
Isolate* isolate = CcTest::i_isolate();
FlatStringReader reader(isolate, CStrVector(input));
RegExpCompileData compile_data;
+ JSRegExp::Flags flags = JSRegExp::kNone;
+ if (multiline) flags = JSRegExp::kMultiline;
+ if (unicode) flags = JSRegExp::kUnicode;
if (!v8::internal::RegExpParser::ParseRegExp(CcTest::i_isolate(), zone,
- &reader, multiline, unicode,
- &compile_data))
+ &reader, flags, &compile_data))
return NULL;
Handle<String> pattern = isolate->factory()
->NewStringFromUtf8(CStrVector(input))
.ToHandleChecked();
Handle<String> sample_subject =
isolate->factory()->NewStringFromUtf8(CStrVector("")).ToHandleChecked();
- RegExpEngine::Compile(isolate, zone, &compile_data, false, false, multiline,
- false, pattern, sample_subject, is_one_byte);
+ RegExpEngine::Compile(isolate, zone, &compile_data, flags, pattern,
+ sample_subject, is_one_byte);
return compile_data.node;
}
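[Editor's note] RegExpParser::ParseRegExp and RegExpEngine::Compile now take
a single JSRegExp::Flags bitfield in place of the separate bool
multiline/unicode parameters. The translation pattern the updated helpers
use, sketched with illustrative booleans:

    JSRegExp::Flags flags = JSRegExp::kNone;
    if (multiline) flags |= JSRegExp::kMultiline;
    if (unicode) flags |= JSRegExp::kUnicode;
    CHECK(v8::internal::RegExpParser::ParseRegExp(isolate, zone, &reader,
                                                  flags, &compile_data));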
@@ -678,7 +686,7 @@ TEST(DispatchTableConstruction) {
for (int i = 0; i < kRangeCount; i++) {
uc16* range = ranges[i];
for (int j = 0; j < 2 * kRangeSize; j += 2)
- table.AddRange(CharacterRange(range[j], range[j + 1]), i, &zone);
+ table.AddRange(CharacterRange::Range(range[j], range[j + 1]), i, &zone);
}
// Check that the table looks as we would expect
for (int p = 0; p < kLimit; p++) {
@@ -1178,16 +1186,16 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
m.WriteCurrentPositionToRegister(2, 0);
m.AdvanceCurrentPosition(3);
m.WriteCurrentPositionToRegister(3, 0);
- m.CheckNotBackReferenceIgnoreCase(2, false, &fail); // Match "AbC".
- m.CheckNotBackReferenceIgnoreCase(2, false, &fail); // Match "ABC".
+ m.CheckNotBackReferenceIgnoreCase(2, false, false, &fail); // Match "AbC".
+ m.CheckNotBackReferenceIgnoreCase(2, false, false, &fail); // Match "ABC".
Label expected_fail;
- m.CheckNotBackReferenceIgnoreCase(2, false, &expected_fail);
+ m.CheckNotBackReferenceIgnoreCase(2, false, false, &expected_fail);
m.Bind(&fail);
m.Fail();
m.Bind(&expected_fail);
m.AdvanceCurrentPosition(3); // Skip "xYz"
- m.CheckNotBackReferenceIgnoreCase(2, false, &succ);
+ m.CheckNotBackReferenceIgnoreCase(2, false, false, &succ);
m.Fail();
m.Bind(&succ);
@@ -1490,7 +1498,7 @@ TEST(AddInverseToTable) {
int from = PseudoRandom(t + 87, i + 25) % kLimit;
int to = from + (PseudoRandom(i + 87, t + 25) % (kLimit / 20));
if (to > kLimit) to = kLimit;
- ranges->Add(CharacterRange(from, to), &zone);
+ ranges->Add(CharacterRange::Range(from, to), &zone);
}
DispatchTable table(&zone);
DispatchTableConstructor cons(&table, false, &zone);
@@ -1507,7 +1515,7 @@ TEST(AddInverseToTable) {
Zone zone;
ZoneList<CharacterRange>* ranges =
new(&zone) ZoneList<CharacterRange>(1, &zone);
- ranges->Add(CharacterRange(0xFFF0, 0xFFFE), &zone);
+ ranges->Add(CharacterRange::Range(0xFFF0, 0xFFFE), &zone);
DispatchTable table(&zone);
DispatchTableConstructor cons(&table, false, &zone);
cons.set_choice_index(0);
@@ -1621,7 +1629,9 @@ static void TestRangeCaseIndependence(Isolate* isolate, CharacterRange input,
int count = expected.length();
ZoneList<CharacterRange>* list =
new(&zone) ZoneList<CharacterRange>(count, &zone);
- input.AddCaseEquivalents(isolate, &zone, list, false);
+ list->Add(input, &zone);
+ CharacterRange::AddCaseEquivalents(isolate, &zone, list, false);
+ list->Remove(0); // Remove the input before checking results.
CHECK_EQ(count, list->length());
for (int i = 0; i < list->length(); i++) {
CHECK_EQ(expected[i].from(), list->at(i).from());
@@ -1645,31 +1655,33 @@ TEST(CharacterRangeCaseIndependence) {
CharacterRange::Singleton('A'));
TestSimpleRangeCaseIndependence(isolate, CharacterRange::Singleton('z'),
CharacterRange::Singleton('Z'));
- TestSimpleRangeCaseIndependence(isolate, CharacterRange('a', 'z'),
- CharacterRange('A', 'Z'));
- TestSimpleRangeCaseIndependence(isolate, CharacterRange('c', 'f'),
- CharacterRange('C', 'F'));
- TestSimpleRangeCaseIndependence(isolate, CharacterRange('a', 'b'),
- CharacterRange('A', 'B'));
- TestSimpleRangeCaseIndependence(isolate, CharacterRange('y', 'z'),
- CharacterRange('Y', 'Z'));
- TestSimpleRangeCaseIndependence(isolate, CharacterRange('a' - 1, 'z' + 1),
- CharacterRange('A', 'Z'));
- TestSimpleRangeCaseIndependence(isolate, CharacterRange('A', 'Z'),
- CharacterRange('a', 'z'));
- TestSimpleRangeCaseIndependence(isolate, CharacterRange('C', 'F'),
- CharacterRange('c', 'f'));
- TestSimpleRangeCaseIndependence(isolate, CharacterRange('A' - 1, 'Z' + 1),
- CharacterRange('a', 'z'));
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange::Range('a', 'z'),
+ CharacterRange::Range('A', 'Z'));
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange::Range('c', 'f'),
+ CharacterRange::Range('C', 'F'));
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange::Range('a', 'b'),
+ CharacterRange::Range('A', 'B'));
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange::Range('y', 'z'),
+ CharacterRange::Range('Y', 'Z'));
+ TestSimpleRangeCaseIndependence(isolate,
+ CharacterRange::Range('a' - 1, 'z' + 1),
+ CharacterRange::Range('A', 'Z'));
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange::Range('A', 'Z'),
+ CharacterRange::Range('a', 'z'));
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange::Range('C', 'F'),
+ CharacterRange::Range('c', 'f'));
+ TestSimpleRangeCaseIndependence(isolate,
+ CharacterRange::Range('A' - 1, 'Z' + 1),
+ CharacterRange::Range('a', 'z'));
// Here we need to add [l-z] to complete the case independence of
// [A-Za-z] but we expect [a-z] to be added since we always add a
// whole block at a time.
- TestSimpleRangeCaseIndependence(isolate, CharacterRange('A', 'k'),
- CharacterRange('a', 'z'));
+ TestSimpleRangeCaseIndependence(isolate, CharacterRange::Range('A', 'k'),
+ CharacterRange::Range('a', 'z'));
}
-static bool InClass(uc16 c, ZoneList<CharacterRange>* ranges) {
+static bool InClass(uc32 c, ZoneList<CharacterRange>* ranges) {
if (ranges == NULL)
return false;
for (int i = 0; i < ranges->length(); i++) {
@@ -1681,29 +1693,46 @@ static bool InClass(uc16 c, ZoneList<CharacterRange>* ranges) {
}
-TEST(CharClassDifference) {
+TEST(UnicodeRangeSplitter) {
Zone zone;
ZoneList<CharacterRange>* base =
new(&zone) ZoneList<CharacterRange>(1, &zone);
base->Add(CharacterRange::Everything(), &zone);
- Vector<const int> overlay = CharacterRange::GetWordBounds();
- ZoneList<CharacterRange>* included = NULL;
- ZoneList<CharacterRange>* excluded = NULL;
- CharacterRange::Split(base, overlay, &included, &excluded, &zone);
- for (int i = 0; i < (1 << 16); i++) {
- bool in_base = InClass(i, base);
- if (in_base) {
- bool in_overlay = false;
- for (int j = 0; !in_overlay && j < overlay.length(); j += 2) {
- if (overlay[j] <= i && i < overlay[j+1])
- in_overlay = true;
- }
- CHECK_EQ(in_overlay, InClass(i, included));
- CHECK_EQ(!in_overlay, InClass(i, excluded));
- } else {
- CHECK(!InClass(i, included));
- CHECK(!InClass(i, excluded));
- }
+ UnicodeRangeSplitter splitter(&zone, base);
+ // BMP
+ for (uc32 c = 0; c < 0xd800; c++) {
+ CHECK(InClass(c, splitter.bmp()));
+ CHECK(!InClass(c, splitter.lead_surrogates()));
+ CHECK(!InClass(c, splitter.trail_surrogates()));
+ CHECK(!InClass(c, splitter.non_bmp()));
+ }
+ // Lead surrogates
+ for (uc32 c = 0xd800; c < 0xdbff; c++) {
+ CHECK(!InClass(c, splitter.bmp()));
+ CHECK(InClass(c, splitter.lead_surrogates()));
+ CHECK(!InClass(c, splitter.trail_surrogates()));
+ CHECK(!InClass(c, splitter.non_bmp()));
+ }
+ // Trail surrogates
+ for (uc32 c = 0xdc00; c < 0xdfff; c++) {
+ CHECK(!InClass(c, splitter.bmp()));
+ CHECK(!InClass(c, splitter.lead_surrogates()));
+ CHECK(InClass(c, splitter.trail_surrogates()));
+ CHECK(!InClass(c, splitter.non_bmp()));
+ }
+ // BMP
+ for (uc32 c = 0xe000; c < 0xffff; c++) {
+ CHECK(InClass(c, splitter.bmp()));
+ CHECK(!InClass(c, splitter.lead_surrogates()));
+ CHECK(!InClass(c, splitter.trail_surrogates()));
+ CHECK(!InClass(c, splitter.non_bmp()));
+ }
+ // Non-BMP
+ for (uc32 c = 0x10000; c < 0x10ffff; c++) {
+ CHECK(!InClass(c, splitter.bmp()));
+ CHECK(!InClass(c, splitter.lead_surrogates()));
+ CHECK(!InClass(c, splitter.trail_surrogates()));
+ CHECK(InClass(c, splitter.non_bmp()));
}
}
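[Editor's note] The rewritten test checks that UnicodeRangeSplitter
partitions the full code-point space into four disjoint classes.
Classification predicates matching the ranges the loops above walk
(a reference sketch, not V8 API):

    bool IsLeadSurrogate(uc32 c)  { return 0xd800 <= c && c <= 0xdbff; }
    bool IsTrailSurrogate(uc32 c) { return 0xdc00 <= c && c <= 0xdfff; }
    bool IsNonBmp(uc32 c)         { return 0x10000 <= c && c <= 0x10ffff; }
    // Everything else in [0, 0xffff] is plain BMP.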
@@ -1714,9 +1743,9 @@ TEST(CanonicalizeCharacterSets) {
new(&zone) ZoneList<CharacterRange>(4, &zone);
CharacterSet set(list);
- list->Add(CharacterRange(10, 20), &zone);
- list->Add(CharacterRange(30, 40), &zone);
- list->Add(CharacterRange(50, 60), &zone);
+ list->Add(CharacterRange::Range(10, 20), &zone);
+ list->Add(CharacterRange::Range(30, 40), &zone);
+ list->Add(CharacterRange::Range(50, 60), &zone);
set.Canonicalize();
CHECK_EQ(3, list->length());
CHECK_EQ(10, list->at(0).from());
@@ -1727,9 +1756,9 @@ TEST(CanonicalizeCharacterSets) {
CHECK_EQ(60, list->at(2).to());
list->Rewind(0);
- list->Add(CharacterRange(10, 20), &zone);
- list->Add(CharacterRange(50, 60), &zone);
- list->Add(CharacterRange(30, 40), &zone);
+ list->Add(CharacterRange::Range(10, 20), &zone);
+ list->Add(CharacterRange::Range(50, 60), &zone);
+ list->Add(CharacterRange::Range(30, 40), &zone);
set.Canonicalize();
CHECK_EQ(3, list->length());
CHECK_EQ(10, list->at(0).from());
@@ -1740,11 +1769,11 @@ TEST(CanonicalizeCharacterSets) {
CHECK_EQ(60, list->at(2).to());
list->Rewind(0);
- list->Add(CharacterRange(30, 40), &zone);
- list->Add(CharacterRange(10, 20), &zone);
- list->Add(CharacterRange(25, 25), &zone);
- list->Add(CharacterRange(100, 100), &zone);
- list->Add(CharacterRange(1, 1), &zone);
+ list->Add(CharacterRange::Range(30, 40), &zone);
+ list->Add(CharacterRange::Range(10, 20), &zone);
+ list->Add(CharacterRange::Range(25, 25), &zone);
+ list->Add(CharacterRange::Range(100, 100), &zone);
+ list->Add(CharacterRange::Range(1, 1), &zone);
set.Canonicalize();
CHECK_EQ(5, list->length());
CHECK_EQ(1, list->at(0).from());
@@ -1759,9 +1788,9 @@ TEST(CanonicalizeCharacterSets) {
CHECK_EQ(100, list->at(4).to());
list->Rewind(0);
- list->Add(CharacterRange(10, 19), &zone);
- list->Add(CharacterRange(21, 30), &zone);
- list->Add(CharacterRange(20, 20), &zone);
+ list->Add(CharacterRange::Range(10, 19), &zone);
+ list->Add(CharacterRange::Range(21, 30), &zone);
+ list->Add(CharacterRange::Range(20, 20), &zone);
set.Canonicalize();
CHECK_EQ(1, list->length());
CHECK_EQ(10, list->at(0).from());
diff --git a/deps/v8/test/cctest/test-transitions.cc b/deps/v8/test/cctest/test-transitions.cc
index 8834f9ade1..b7eb50f1c9 100644
--- a/deps/v8/test/cctest/test-transitions.cc
+++ b/deps/v8/test/cctest/test-transitions.cc
@@ -10,6 +10,7 @@
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/factory.h"
+#include "src/field-type.h"
#include "src/global-handles.h"
#include "test/cctest/cctest.h"
@@ -51,13 +52,13 @@ TEST(TransitionArray_SimpleFieldTransitions) {
Handle<Map> map0 = Map::Create(isolate, 0);
Handle<Map> map1 =
- Map::CopyWithField(map0, name1, handle(HeapType::Any(), isolate),
- attributes, Representation::Tagged(),
- OMIT_TRANSITION).ToHandleChecked();
+ Map::CopyWithField(map0, name1, handle(FieldType::Any(), isolate),
+ attributes, Representation::Tagged(), OMIT_TRANSITION)
+ .ToHandleChecked();
Handle<Map> map2 =
- Map::CopyWithField(map0, name2, handle(HeapType::Any(), isolate),
- attributes, Representation::Tagged(),
- OMIT_TRANSITION).ToHandleChecked();
+ Map::CopyWithField(map0, name2, handle(FieldType::Any(), isolate),
+ attributes, Representation::Tagged(), OMIT_TRANSITION)
+ .ToHandleChecked();
CHECK(map0->raw_transitions()->IsSmi());
@@ -102,13 +103,13 @@ TEST(TransitionArray_FullFieldTransitions) {
Handle<Map> map0 = Map::Create(isolate, 0);
Handle<Map> map1 =
- Map::CopyWithField(map0, name1, handle(HeapType::Any(), isolate),
- attributes, Representation::Tagged(),
- OMIT_TRANSITION).ToHandleChecked();
+ Map::CopyWithField(map0, name1, handle(FieldType::Any(), isolate),
+ attributes, Representation::Tagged(), OMIT_TRANSITION)
+ .ToHandleChecked();
Handle<Map> map2 =
- Map::CopyWithField(map0, name2, handle(HeapType::Any(), isolate),
- attributes, Representation::Tagged(),
- OMIT_TRANSITION).ToHandleChecked();
+ Map::CopyWithField(map0, name2, handle(FieldType::Any(), isolate),
+ attributes, Representation::Tagged(), OMIT_TRANSITION)
+ .ToHandleChecked();
CHECK(map0->raw_transitions()->IsSmi());
@@ -159,10 +160,10 @@ TEST(TransitionArray_DifferentFieldNames) {
EmbeddedVector<char, 64> buffer;
SNPrintF(buffer, "prop%d", i);
Handle<String> name = factory->InternalizeUtf8String(buffer.start());
- Handle<Map> map =
- Map::CopyWithField(map0, name, handle(HeapType::Any(), isolate),
- attributes, Representation::Tagged(),
- OMIT_TRANSITION).ToHandleChecked();
+ Handle<Map> map = Map::CopyWithField(
+ map0, name, handle(FieldType::Any(), isolate),
+ attributes, Representation::Tagged(), OMIT_TRANSITION)
+ .ToHandleChecked();
names[i] = name;
maps[i] = map;
@@ -208,10 +209,10 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributesSimple) {
for (int i = 0; i < ATTRS_COUNT; i++) {
PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
- Handle<Map> map =
- Map::CopyWithField(map0, name, handle(HeapType::Any(), isolate),
- attributes, Representation::Tagged(),
- OMIT_TRANSITION).ToHandleChecked();
+ Handle<Map> map = Map::CopyWithField(
+ map0, name, handle(FieldType::Any(), isolate),
+ attributes, Representation::Tagged(), OMIT_TRANSITION)
+ .ToHandleChecked();
attr_maps[i] = map;
TransitionArray::Insert(map0, name, map, PROPERTY_TRANSITION);
@@ -252,9 +253,9 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
SNPrintF(buffer, "prop%d", i);
Handle<String> name = factory->InternalizeUtf8String(buffer.start());
Handle<Map> map =
- Map::CopyWithField(map0, name, handle(HeapType::Any(), isolate), NONE,
- Representation::Tagged(),
- OMIT_TRANSITION).ToHandleChecked();
+ Map::CopyWithField(map0, name, handle(FieldType::Any(), isolate), NONE,
+ Representation::Tagged(), OMIT_TRANSITION)
+ .ToHandleChecked();
names[i] = name;
maps[i] = map;
@@ -270,10 +271,10 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
for (int i = 0; i < ATTRS_COUNT; i++) {
PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
- Handle<Map> map =
- Map::CopyWithField(map0, name, handle(HeapType::Any(), isolate),
- attributes, Representation::Tagged(),
- OMIT_TRANSITION).ToHandleChecked();
+ Handle<Map> map = Map::CopyWithField(
+ map0, name, handle(FieldType::Any(), isolate),
+ attributes, Representation::Tagged(), OMIT_TRANSITION)
+ .ToHandleChecked();
attr_maps[i] = map;
TransitionArray::Insert(map0, name, map, PROPERTY_TRANSITION);
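[Editor's note] The HeapType -> FieldType migration above is mechanical:
FieldType::Any and FieldType::Class replace the HeapType factories, and
Map::CopyWithField now takes a Handle<FieldType>. The pattern in one place,
condensed from the hunks above:

    Handle<FieldType> any_type = FieldType::Any(isolate);
    Handle<FieldType> class_type =
        FieldType::Class(Map::Create(isolate, 0), isolate);
    Handle<Map> map1 =
        Map::CopyWithField(map0, name, any_type, NONE,
                           Representation::Tagged(), OMIT_TRANSITION)
            .ToHandleChecked();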
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index 4549654501..2e658b0255 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -27,109 +27,43 @@ static bool IsInteger(i::Object* x) {
typedef uint32_t bitset;
-
-struct ZoneRep {
- typedef void* Struct;
-
- static bool IsStruct(Type* t, int tag) {
- return !IsBitset(t) && reinterpret_cast<intptr_t>(AsStruct(t)[0]) == tag;
- }
- static bool IsBitset(Type* t) { return reinterpret_cast<uintptr_t>(t) & 1; }
- // HACK: the number 5 below is the value of StructuralType::kUnionTag.
- static bool IsUnion(Type* t) { return t->IsUnionForTesting(); }
-
- static Struct* AsStruct(Type* t) {
- return reinterpret_cast<Struct*>(t);
- }
- static bitset AsBitset(Type* t) {
- return static_cast<bitset>(reinterpret_cast<uintptr_t>(t) ^ 1u);
- }
- static Struct* AsUnion(Type* t) {
- return AsStruct(t);
- }
- static int Length(Struct* structured) {
- return static_cast<int>(reinterpret_cast<intptr_t>(structured[1]));
- }
-
- static Zone* ToRegion(Zone* zone, Isolate* isolate) { return zone; }
-
- struct BitsetType : Type::BitsetType {
- using Type::BitsetType::New;
- using Type::BitsetType::Glb;
- using Type::BitsetType::Lub;
- using Type::BitsetType::IsInhabited;
- };
-};
-
-
-struct HeapRep {
- typedef FixedArray Struct;
-
- static bool IsStruct(Handle<HeapType> t, int tag) {
- return t->IsFixedArray() && Smi::cast(AsStruct(t)->get(0))->value() == tag;
- }
- static bool IsBitset(Handle<HeapType> t) { return t->IsSmi(); }
- // HACK: the number 5 below is the value of StructuralType::kUnionTag.
- static bool IsUnion(Handle<HeapType> t) { return t->IsUnionForTesting(); }
-
- static Struct* AsStruct(Handle<HeapType> t) { return FixedArray::cast(*t); }
- static bitset AsBitset(Handle<HeapType> t) {
- return static_cast<bitset>(reinterpret_cast<uintptr_t>(*t));
- }
- static Struct* AsUnion(Handle<HeapType> t) { return AsStruct(t); }
- static int Length(Struct* structured) { return structured->length() - 1; }
-
- static Isolate* ToRegion(Zone* zone, Isolate* isolate) { return isolate; }
-
- struct BitsetType : HeapType::BitsetType {
- using HeapType::BitsetType::New;
- using HeapType::BitsetType::Glb;
- using HeapType::BitsetType::Lub;
- using HeapType::BitsetType::IsInhabited;
- static bitset Glb(Handle<HeapType> type) { return Glb(*type); }
- static bitset Lub(Handle<HeapType> type) { return Lub(*type); }
- };
-};
-
-
-template<class Type, class TypeHandle, class Region, class Rep>
-struct Tests : Rep {
- typedef Types<Type, TypeHandle, Region> TypesInstance;
- typedef typename TypesInstance::TypeVector::iterator TypeIterator;
- typedef typename TypesInstance::MapVector::iterator MapIterator;
- typedef typename TypesInstance::ValueVector::iterator ValueIterator;
+struct Tests {
+ typedef Types::TypeVector::iterator TypeIterator;
+ typedef Types::MapVector::iterator MapIterator;
+ typedef Types::ValueVector::iterator ValueIterator;
Isolate* isolate;
HandleScope scope;
Zone zone;
- TypesInstance T;
+ Types T;
Tests()
: isolate(CcTest::InitIsolateOnce()),
scope(isolate),
zone(),
- T(Rep::ToRegion(&zone, isolate), isolate,
- isolate->random_number_generator()) {}
-
- bool Equal(TypeHandle type1, TypeHandle type2) {
- return
- type1->Equals(type2) &&
- this->IsBitset(type1) == this->IsBitset(type2) &&
- this->IsUnion(type1) == this->IsUnion(type2) &&
- type1->NumClasses() == type2->NumClasses() &&
- type1->NumConstants() == type2->NumConstants() &&
- (!this->IsBitset(type1) ||
- this->AsBitset(type1) == this->AsBitset(type2)) &&
- (!this->IsUnion(type1) ||
- this->Length(this->AsUnion(type1)) ==
- this->Length(this->AsUnion(type2)));
+ T(&zone, isolate, isolate->random_number_generator()) {}
+
+ bool IsBitset(Type* type) { return type->IsBitsetForTesting(); }
+ bool IsUnion(Type* type) { return type->IsUnionForTesting(); }
+ BitsetType::bitset AsBitset(Type* type) { return type->AsBitsetForTesting(); }
+ UnionType* AsUnion(Type* type) { return type->AsUnionForTesting(); }
+
+ bool Equal(Type* type1, Type* type2) {
+ return type1->Equals(type2) &&
+ this->IsBitset(type1) == this->IsBitset(type2) &&
+ this->IsUnion(type1) == this->IsUnion(type2) &&
+ type1->NumClasses() == type2->NumClasses() &&
+ type1->NumConstants() == type2->NumConstants() &&
+ (!this->IsBitset(type1) ||
+ this->AsBitset(type1) == this->AsBitset(type2)) &&
+ (!this->IsUnion(type1) ||
+ this->AsUnion(type1)->LengthForTesting() ==
+ this->AsUnion(type2)->LengthForTesting());
}
- void CheckEqual(TypeHandle type1, TypeHandle type2) {
- CHECK(Equal(type1, type2));
- }
+ void CheckEqual(Type* type1, Type* type2) { CHECK(Equal(type1, type2)); }
- void CheckSub(TypeHandle type1, TypeHandle type2) {
+ void CheckSub(Type* type1, Type* type2) {
CHECK(type1->Is(type2));
CHECK(!type2->Is(type1));
if (this->IsBitset(type1) && this->IsBitset(type2)) {
@@ -137,7 +71,7 @@ struct Tests : Rep {
}
}
- void CheckSubOrEqual(TypeHandle type1, TypeHandle type2) {
+ void CheckSubOrEqual(Type* type1, Type* type2) {
CHECK(type1->Is(type2));
if (this->IsBitset(type1) && this->IsBitset(type2)) {
CHECK((this->AsBitset(type1) | this->AsBitset(type2))
@@ -145,7 +79,7 @@ struct Tests : Rep {
}
}
- void CheckUnordered(TypeHandle type1, TypeHandle type2) {
+ void CheckUnordered(Type* type1, Type* type2) {
CHECK(!type1->Is(type2));
CHECK(!type2->Is(type1));
if (this->IsBitset(type1) && this->IsBitset(type2)) {
@@ -153,12 +87,12 @@ struct Tests : Rep {
}
}
- void CheckOverlap(TypeHandle type1, TypeHandle type2) {
+ void CheckOverlap(Type* type1, Type* type2) {
CHECK(type1->Maybe(type2));
CHECK(type2->Maybe(type1));
}
- void CheckDisjoint(TypeHandle type1, TypeHandle type2) {
+ void CheckDisjoint(Type* type1, Type* type2) {
CHECK(!type1->Is(type2));
CHECK(!type2->Is(type1));
CHECK(!type1->Maybe(type2));
@@ -167,7 +101,7 @@ struct Tests : Rep {
void IsSomeType() {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle t = *it;
+ Type* t = *it;
CHECK(1 ==
this->IsBitset(t) + t->IsClass() + t->IsConstant() + t->IsRange() +
this->IsUnion(t) + t->IsArray() + t->IsFunction() + t->IsContext());
@@ -185,9 +119,9 @@ struct Tests : Rep {
// Union(T1, T2) is bitset for bitsets T1,T2
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle union12 = T.Union(type1, type2);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* union12 = T.Union(type1, type2);
CHECK(!(this->IsBitset(type1) && this->IsBitset(type2)) ||
this->IsBitset(union12));
}
@@ -196,9 +130,9 @@ struct Tests : Rep {
// Intersect(T1, T2) is bitset for bitsets T1,T2
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle intersect12 = T.Intersect(type1, type2);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* intersect12 = T.Intersect(type1, type2);
CHECK(!(this->IsBitset(type1) && this->IsBitset(type2)) ||
this->IsBitset(intersect12));
}
@@ -207,9 +141,9 @@ struct Tests : Rep {
// Union(T1, T2) is bitset if T2 is bitset and T1->Is(T2)
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle union12 = T.Union(type1, type2);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* union12 = T.Union(type1, type2);
CHECK(!(this->IsBitset(type2) && type1->Is(type2)) ||
this->IsBitset(union12));
}
@@ -218,9 +152,9 @@ struct Tests : Rep {
// Union(T1, T2) is bitwise disjunction for bitsets T1,T2
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle union12 = T.Union(type1, type2);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* union12 = T.Union(type1, type2);
if (this->IsBitset(type1) && this->IsBitset(type2)) {
CHECK(
(this->AsBitset(type1) | this->AsBitset(type2)) ==
@@ -232,10 +166,10 @@ struct Tests : Rep {
// Intersect(T1, T2) is bitwise conjunction for bitsets T1,T2 (modulo None)
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
+ Type* type1 = *it1;
+ Type* type2 = *it2;
if (this->IsBitset(type1) && this->IsBitset(type2)) {
- TypeHandle intersect12 = T.Intersect(type1, type2);
+ Type* intersect12 = T.Intersect(type1, type2);
bitset bits = this->AsBitset(type1) & this->AsBitset(type2);
CHECK(bits == this->AsBitset(intersect12));
}
@@ -251,28 +185,26 @@ struct Tests : Rep {
counter++;
printf("Counter: %i\n", counter);
fflush(stdout);
- TypeHandle type1 = *it1;
- TypeHandle representation = T.Representation(type1);
- TypeHandle semantic = T.Semantic(type1);
- TypeHandle composed = T.Union(representation, semantic);
+ Type* type1 = *it1;
+ Type* representation = T.Representation(type1);
+ Type* semantic = T.Semantic(type1);
+ Type* composed = T.Union(representation, semantic);
CHECK(type1->Equals(composed));
}
// Pointwiseness of Union.
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle representation1 = T.Representation(type1);
- TypeHandle semantic1 = T.Semantic(type1);
- TypeHandle representation2 = T.Representation(type2);
- TypeHandle semantic2 = T.Semantic(type2);
- TypeHandle direct_union = T.Union(type1, type2);
- TypeHandle representation_union =
- T.Union(representation1, representation2);
- TypeHandle semantic_union = T.Union(semantic1, semantic2);
- TypeHandle composed_union =
- T.Union(representation_union, semantic_union);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* representation1 = T.Representation(type1);
+ Type* semantic1 = T.Semantic(type1);
+ Type* representation2 = T.Representation(type2);
+ Type* semantic2 = T.Semantic(type2);
+ Type* direct_union = T.Union(type1, type2);
+ Type* representation_union = T.Union(representation1, representation2);
+ Type* semantic_union = T.Union(semantic1, semantic2);
+ Type* composed_union = T.Union(representation_union, semantic_union);
CHECK(direct_union->Equals(composed_union));
}
}
@@ -280,17 +212,17 @@ struct Tests : Rep {
// Pointwiseness of Intersect.
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle representation1 = T.Representation(type1);
- TypeHandle semantic1 = T.Semantic(type1);
- TypeHandle representation2 = T.Representation(type2);
- TypeHandle semantic2 = T.Semantic(type2);
- TypeHandle direct_intersection = T.Intersect(type1, type2);
- TypeHandle representation_intersection =
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* representation1 = T.Representation(type1);
+ Type* semantic1 = T.Semantic(type1);
+ Type* representation2 = T.Representation(type2);
+ Type* semantic2 = T.Semantic(type2);
+ Type* direct_intersection = T.Intersect(type1, type2);
+ Type* representation_intersection =
T.Intersect(representation1, representation2);
- TypeHandle semantic_intersection = T.Intersect(semantic1, semantic2);
- TypeHandle composed_intersection =
+ Type* semantic_intersection = T.Intersect(semantic1, semantic2);
+ Type* composed_intersection =
T.Union(representation_intersection, semantic_intersection);
CHECK(direct_intersection->Equals(composed_intersection));
}
@@ -299,12 +231,12 @@ struct Tests : Rep {
// Pointwiseness of Is.
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle representation1 = T.Representation(type1);
- TypeHandle semantic1 = T.Semantic(type1);
- TypeHandle representation2 = T.Representation(type2);
- TypeHandle semantic2 = T.Semantic(type2);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* representation1 = T.Representation(type1);
+ Type* semantic1 = T.Semantic(type1);
+ Type* representation2 = T.Representation(type2);
+ Type* semantic2 = T.Semantic(type2);
bool representation_is = representation1->Is(representation2);
bool semantic_is = semantic1->Is(semantic2);
bool direct_is = type1->Is(type2);
@@ -317,14 +249,14 @@ struct Tests : Rep {
// Constructor
for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
Handle<i::Map> map = *mt;
- TypeHandle type = T.Class(map);
+ Type* type = T.Class(map);
CHECK(type->IsClass());
}
// Map attribute
for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
Handle<i::Map> map = *mt;
- TypeHandle type = T.Class(map);
+ Type* type = T.Class(map);
CHECK(*map == *type->AsClass()->Map());
}
@@ -333,8 +265,8 @@ struct Tests : Rep {
for (MapIterator mt2 = T.maps.begin(); mt2 != T.maps.end(); ++mt2) {
Handle<i::Map> map1 = *mt1;
Handle<i::Map> map2 = *mt2;
- TypeHandle type1 = T.Class(map1);
- TypeHandle type2 = T.Class(map2);
+ Type* type1 = T.Class(map1);
+ Type* type2 = T.Class(map2);
CHECK(Equal(type1, type2) == (*map1 == *map2));
}
}
@@ -344,14 +276,14 @@ struct Tests : Rep {
// Constructor
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Object> value = *vt;
- TypeHandle type = T.Constant(value);
+ Type* type = T.Constant(value);
CHECK(type->IsConstant());
}
// Value attribute
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Object> value = *vt;
- TypeHandle type = T.Constant(value);
+ Type* type = T.Constant(value);
CHECK(*value == *type->AsConstant()->Value());
}
@@ -360,8 +292,8 @@ struct Tests : Rep {
for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
Handle<i::Object> value1 = *vt1;
Handle<i::Object> value2 = *vt2;
- TypeHandle type1 = T.Constant(value1);
- TypeHandle type2 = T.Constant(value2);
+ Type* type1 = T.Constant(value1);
+ Type* type2 = T.Constant(value2);
CHECK(Equal(type1, type2) == (*value1 == *value2));
}
}
@@ -424,7 +356,7 @@ struct Tests : Rep {
double min = (*i)->Number();
double max = (*j)->Number();
if (min > max) std::swap(min, max);
- TypeHandle type = T.Range(min, max);
+ Type* type = T.Range(min, max);
CHECK(type->IsRange());
}
}
@@ -435,7 +367,7 @@ struct Tests : Rep {
double min = (*i)->Number();
double max = (*j)->Number();
if (min > max) std::swap(min, max);
- TypeHandle type = T.Range(min, max);
+ Type* type = T.Range(min, max);
CHECK(min == type->AsRange()->Min());
CHECK(max == type->AsRange()->Max());
}
@@ -457,8 +389,8 @@ struct Tests : Rep {
double max2 = (*j2)->Number();
if (min1 > max1) std::swap(min1, max1);
if (min2 > max2) std::swap(min2, max2);
- TypeHandle type1 = T.Range(min1, max1);
- TypeHandle type2 = T.Range(min2, max2);
+ Type* type1 = T.Range(min1, max1);
+ Type* type2 = T.Range(min2, max2);
CHECK(Equal(type1, type2) == (min1 == min2 && max1 == max2));
}
}
@@ -469,25 +401,25 @@ struct Tests : Rep {
void Context() {
// Constructor
for (int i = 0; i < 20; ++i) {
- TypeHandle type = T.Random();
- TypeHandle context = T.Context(type);
-      CHECK(context->IsContext());
+ Type* type = T.Random();
+ Type* context = T.Context(type);
+ CHECK(context->IsContext());
}
// Attributes
for (int i = 0; i < 20; ++i) {
- TypeHandle type = T.Random();
- TypeHandle context = T.Context(type);
+ Type* type = T.Random();
+ Type* context = T.Context(type);
CheckEqual(type, context->AsContext()->Outer());
}
// Functionality & Injectivity: Context(T1) = Context(T2) iff T1 = T2
for (int i = 0; i < 20; ++i) {
for (int j = 0; j < 20; ++j) {
- TypeHandle type1 = T.Random();
- TypeHandle type2 = T.Random();
- TypeHandle context1 = T.Context(type1);
- TypeHandle context2 = T.Context(type2);
+ Type* type1 = T.Random();
+ Type* type2 = T.Random();
+ Type* context1 = T.Context(type1);
+ Type* context2 = T.Context(type2);
CHECK(Equal(context1, context2) == Equal(type1, type2));
}
}
@@ -496,25 +428,25 @@ struct Tests : Rep {
void Array() {
// Constructor
for (int i = 0; i < 20; ++i) {
- TypeHandle type = T.Random();
- TypeHandle array = T.Array1(type);
+ Type* type = T.Random();
+ Type* array = T.Array1(type);
CHECK(array->IsArray());
}
// Attributes
for (int i = 0; i < 20; ++i) {
- TypeHandle type = T.Random();
- TypeHandle array = T.Array1(type);
+ Type* type = T.Random();
+ Type* array = T.Array1(type);
CheckEqual(type, array->AsArray()->Element());
}
// Functionality & Injectivity: Array(T1) = Array(T2) iff T1 = T2
for (int i = 0; i < 20; ++i) {
for (int j = 0; j < 20; ++j) {
- TypeHandle type1 = T.Random();
- TypeHandle type2 = T.Random();
- TypeHandle array1 = T.Array1(type1);
- TypeHandle array2 = T.Array1(type2);
+ Type* type1 = T.Random();
+ Type* type2 = T.Random();
+ Type* array1 = T.Array1(type1);
+ Type* array2 = T.Array1(type2);
CHECK(Equal(array1, array2) == Equal(type1, type2));
}
}
@@ -525,12 +457,12 @@ struct Tests : Rep {
for (int i = 0; i < 20; ++i) {
for (int j = 0; j < 20; ++j) {
for (int k = 0; k < 20; ++k) {
- TypeHandle type1 = T.Random();
- TypeHandle type2 = T.Random();
- TypeHandle type3 = T.Random();
- TypeHandle function0 = T.Function0(type1, type2);
- TypeHandle function1 = T.Function1(type1, type2, type3);
- TypeHandle function2 = T.Function2(type1, type2, type3);
+ Type* type1 = T.Random();
+ Type* type2 = T.Random();
+ Type* type3 = T.Random();
+ Type* function0 = T.Function0(type1, type2);
+ Type* function1 = T.Function1(type1, type2, type3);
+ Type* function2 = T.Function2(type1, type2, type3);
CHECK(function0->IsFunction());
CHECK(function1->IsFunction());
CHECK(function2->IsFunction());
@@ -542,12 +474,12 @@ struct Tests : Rep {
for (int i = 0; i < 20; ++i) {
for (int j = 0; j < 20; ++j) {
for (int k = 0; k < 20; ++k) {
- TypeHandle type1 = T.Random();
- TypeHandle type2 = T.Random();
- TypeHandle type3 = T.Random();
- TypeHandle function0 = T.Function0(type1, type2);
- TypeHandle function1 = T.Function1(type1, type2, type3);
- TypeHandle function2 = T.Function2(type1, type2, type3);
+ Type* type1 = T.Random();
+ Type* type2 = T.Random();
+ Type* type3 = T.Random();
+ Type* function0 = T.Function0(type1, type2);
+ Type* function1 = T.Function1(type1, type2, type3);
+ Type* function2 = T.Function2(type1, type2, type3);
CHECK_EQ(0, function0->AsFunction()->Arity());
CHECK_EQ(1, function1->AsFunction()->Arity());
CHECK_EQ(2, function2->AsFunction()->Arity());
@@ -568,17 +500,17 @@ struct Tests : Rep {
for (int i = 0; i < 20; ++i) {
for (int j = 0; j < 20; ++j) {
for (int k = 0; k < 20; ++k) {
- TypeHandle type1 = T.Random();
- TypeHandle type2 = T.Random();
- TypeHandle type3 = T.Random();
- TypeHandle function01 = T.Function0(type1, type2);
- TypeHandle function02 = T.Function0(type1, type3);
- TypeHandle function03 = T.Function0(type3, type2);
- TypeHandle function11 = T.Function1(type1, type2, type2);
- TypeHandle function12 = T.Function1(type1, type2, type3);
- TypeHandle function21 = T.Function2(type1, type2, type2);
- TypeHandle function22 = T.Function2(type1, type2, type3);
- TypeHandle function23 = T.Function2(type1, type3, type2);
+ Type* type1 = T.Random();
+ Type* type2 = T.Random();
+ Type* type3 = T.Random();
+ Type* function01 = T.Function0(type1, type2);
+ Type* function02 = T.Function0(type1, type3);
+ Type* function03 = T.Function0(type3, type2);
+ Type* function11 = T.Function1(type1, type2, type2);
+ Type* function12 = T.Function1(type1, type2, type3);
+ Type* function21 = T.Function2(type1, type2, type2);
+ Type* function22 = T.Function2(type1, type2, type3);
+ Type* function23 = T.Function2(type1, type3, type2);
CHECK(Equal(function01, function02) == Equal(type2, type3));
CHECK(Equal(function01, function03) == Equal(type1, type3));
CHECK(Equal(function11, function12) == Equal(type2, type3));
@@ -593,8 +525,8 @@ struct Tests : Rep {
// Constant(V)->Is(Of(V))
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Object> value = *vt;
- TypeHandle const_type = T.Constant(value);
- TypeHandle of_type = T.Of(value);
+ Type* const_type = T.Constant(value);
+ Type* of_type = T.Of(value);
CHECK(const_type->Is(of_type));
}
@@ -602,9 +534,9 @@ struct Tests : Rep {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Handle<i::Object> value = *vt;
- TypeHandle type = *it;
- TypeHandle const_type = T.Constant(value);
- TypeHandle of_type = T.Of(value);
+ Type* type = *it;
+ Type* const_type = T.Constant(value);
+ Type* of_type = T.Of(value);
CHECK(!of_type->Is(type) || const_type->Is(type));
}
}
@@ -613,9 +545,9 @@ struct Tests : Rep {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Handle<i::Object> value = *vt;
- TypeHandle type = *it;
- TypeHandle const_type = T.Constant(value);
- TypeHandle of_type = T.Of(value);
+ Type* type = *it;
+ Type* const_type = T.Constant(value);
+ Type* of_type = T.Of(value);
CHECK(!const_type->Is(type) ||
of_type->Is(type) || type->Maybe(const_type));
}
@@ -626,16 +558,16 @@ struct Tests : Rep {
// Constant(V)->NowIs(NowOf(V))
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Object> value = *vt;
- TypeHandle const_type = T.Constant(value);
- TypeHandle nowof_type = T.NowOf(value);
+ Type* const_type = T.Constant(value);
+ Type* nowof_type = T.NowOf(value);
CHECK(const_type->NowIs(nowof_type));
}
// NowOf(V)->Is(Of(V))
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Object> value = *vt;
- TypeHandle nowof_type = T.NowOf(value);
- TypeHandle of_type = T.Of(value);
+ Type* nowof_type = T.NowOf(value);
+ Type* of_type = T.Of(value);
CHECK(nowof_type->Is(of_type));
}
@@ -643,9 +575,9 @@ struct Tests : Rep {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Handle<i::Object> value = *vt;
- TypeHandle type = *it;
- TypeHandle const_type = T.Constant(value);
- TypeHandle nowof_type = T.NowOf(value);
+ Type* type = *it;
+ Type* const_type = T.Constant(value);
+ Type* nowof_type = T.NowOf(value);
CHECK(!nowof_type->NowIs(type) || const_type->NowIs(type));
}
}
@@ -655,9 +587,9 @@ struct Tests : Rep {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Handle<i::Object> value = *vt;
- TypeHandle type = *it;
- TypeHandle const_type = T.Constant(value);
- TypeHandle nowof_type = T.NowOf(value);
+ Type* type = *it;
+ Type* const_type = T.Constant(value);
+ Type* nowof_type = T.NowOf(value);
CHECK(!const_type->NowIs(type) ||
nowof_type->NowIs(type) || type->Maybe(const_type));
}
@@ -668,9 +600,9 @@ struct Tests : Rep {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
Handle<i::Object> value = *vt;
- TypeHandle type = *it;
- TypeHandle const_type = T.Constant(value);
- TypeHandle nowof_type = T.NowOf(value);
+ Type* type = *it;
+ Type* const_type = T.Constant(value);
+ Type* nowof_type = T.NowOf(value);
CHECK(!const_type->Is(type) ||
nowof_type->Is(type) || type->Maybe(const_type));
}
@@ -682,10 +614,10 @@ struct Tests : Rep {
// TODO(neis): Need to ignore representation for this to be true.
/*
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
if (this->IsBitset(type) && type->Is(T.Number) &&
!type->Is(T.None) && !type->Is(T.NaN)) {
- TypeHandle range = T.Range(
+ Type* range = T.Range(
isolate->factory()->NewNumber(type->Min()),
isolate->factory()->NewNumber(type->Max()));
CHECK(range->Is(type));
@@ -695,7 +627,7 @@ struct Tests : Rep {
// If b is regular numeric bitset, then b->Min() and b->Max() are integers.
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
if (this->IsBitset(type) && type->Is(T.Number) && !type->Is(T.NaN)) {
CHECK(IsInteger(type->Min()) && IsInteger(type->Max()));
}
@@ -705,8 +637,8 @@ struct Tests : Rep {
// b1->Min() >= b2->Min() and b1->Max() <= b2->Max().
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
+ Type* type1 = *it1;
+ Type* type2 = *it2;
if (this->IsBitset(type1) && type1->Is(type2) && type2->Is(T.Number) &&
!type1->Is(T.NaN) && !type2->Is(T.NaN)) {
CHECK(type1->Min() >= type2->Min());
@@ -717,10 +649,9 @@ struct Tests : Rep {
// Lub(Range(x,y))->Min() <= x and y <= Lub(Range(x,y))->Max()
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
if (type->IsRange()) {
- TypeHandle lub = Rep::BitsetType::New(
- Rep::BitsetType::Lub(type), T.region());
+ Type* lub = BitsetType::NewForTesting(BitsetType::Lub(type));
CHECK(lub->Min() <= type->Min() && type->Max() <= lub->Max());
}
}
@@ -728,7 +659,7 @@ struct Tests : Rep {
// Rangification: If T->Is(Range(-inf,+inf)) and T is inhabited, then
// T->Is(Range(T->Min(), T->Max())).
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
CHECK(!type->Is(T.Integer) || !type->IsInhabited() ||
type->Is(T.Range(type->Min(), type->Max())));
}
@@ -737,19 +668,17 @@ struct Tests : Rep {
void BitsetGlb() {
// Lower: (T->BitsetGlb())->Is(T)
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
- TypeHandle glb =
- Rep::BitsetType::New(Rep::BitsetType::Glb(type), T.region());
+ Type* type = *it;
+ Type* glb = BitsetType::NewForTesting(BitsetType::Glb(type));
CHECK(glb->Is(type));
}
// Greatest: If T1->IsBitset() and T1->Is(T2), then T1->Is(T2->BitsetGlb())
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle glb2 =
- Rep::BitsetType::New(Rep::BitsetType::Glb(type2), T.region());
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* glb2 = BitsetType::NewForTesting(BitsetType::Glb(type2));
CHECK(!this->IsBitset(type1) || !type1->Is(type2) || type1->Is(glb2));
}
}
@@ -757,12 +686,10 @@ struct Tests : Rep {
// Monotonicity: T1->Is(T2) implies (T1->BitsetGlb())->Is(T2->BitsetGlb())
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle glb1 =
- Rep::BitsetType::New(Rep::BitsetType::Glb(type1), T.region());
- TypeHandle glb2 =
- Rep::BitsetType::New(Rep::BitsetType::Glb(type2), T.region());
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* glb1 = BitsetType::NewForTesting(BitsetType::Glb(type1));
+ Type* glb2 = BitsetType::NewForTesting(BitsetType::Glb(type2));
CHECK(!type1->Is(type2) || glb1->Is(glb2));
}
}
@@ -771,19 +698,17 @@ struct Tests : Rep {
void BitsetLub() {
// Upper: T->Is(T->BitsetLub())
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
- TypeHandle lub =
- Rep::BitsetType::New(Rep::BitsetType::Lub(type), T.region());
+ Type* type = *it;
+ Type* lub = BitsetType::NewForTesting(BitsetType::Lub(type));
CHECK(type->Is(lub));
}
// Least: If T2->IsBitset() and T1->Is(T2), then (T1->BitsetLub())->Is(T2)
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle lub1 =
- Rep::BitsetType::New(Rep::BitsetType::Lub(type1), T.region());
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* lub1 = BitsetType::NewForTesting(BitsetType::Lub(type1));
CHECK(!this->IsBitset(type2) || !type1->Is(type2) || lub1->Is(type2));
}
}
@@ -791,12 +716,10 @@ struct Tests : Rep {
// Monotonicity: T1->Is(T2) implies (T1->BitsetLub())->Is(T2->BitsetLub())
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle lub1 =
- Rep::BitsetType::New(Rep::BitsetType::Lub(type1), T.region());
- TypeHandle lub2 =
- Rep::BitsetType::New(Rep::BitsetType::Lub(type2), T.region());
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* lub1 = BitsetType::NewForTesting(BitsetType::Lub(type1));
+ Type* lub2 = BitsetType::NewForTesting(BitsetType::Lub(type2));
CHECK(!type1->Is(type2) || lub1->Is(lub2));
}
}
@@ -805,31 +728,31 @@ struct Tests : Rep {
void Is1() {
// Least Element (Bottom): None->Is(T)
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
CHECK(T.None->Is(type));
}
// Greatest Element (Top): T->Is(Any)
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
CHECK(type->Is(T.Any));
}
// Bottom Uniqueness: T->Is(None) implies T = None
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
if (type->Is(T.None)) CheckEqual(type, T.None);
}
// Top Uniqueness: Any->Is(T) implies T = Any
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
if (T.Any->Is(type)) CheckEqual(type, T.Any);
}
// Reflexivity: T->Is(T)
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
CHECK(type->Is(type));
}
@@ -837,9 +760,9 @@ struct Tests : Rep {
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle type3 = *it3;
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* type3 = *it3;
CHECK(!(type1->Is(type2) && type2->Is(type3)) || type1->Is(type3));
}
}
@@ -848,8 +771,8 @@ struct Tests : Rep {
// Antisymmetry: T1->Is(T2) and T2->Is(T1) iff T1 = T2
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
+ Type* type1 = *it1;
+ Type* type2 = *it2;
CHECK((type1->Is(type2) && type2->Is(type1)) == Equal(type1, type2));
}
}
@@ -857,8 +780,8 @@ struct Tests : Rep {
// (In-)Compatibilities.
for (TypeIterator i = T.types.begin(); i != T.types.end(); ++i) {
for (TypeIterator j = T.types.begin(); j != T.types.end(); ++j) {
- TypeHandle type1 = *i;
- TypeHandle type2 = *j;
+ Type* type1 = *i;
+ Type* type2 = *j;
CHECK(!type1->Is(type2) || this->IsBitset(type2) ||
this->IsUnion(type2) || this->IsUnion(type1) ||
(type1->IsClass() && type2->IsClass()) ||
@@ -880,8 +803,8 @@ struct Tests : Rep {
for (MapIterator mt2 = T.maps.begin(); mt2 != T.maps.end(); ++mt2) {
Handle<i::Map> map1 = *mt1;
Handle<i::Map> map2 = *mt2;
- TypeHandle class_type1 = T.Class(map1);
- TypeHandle class_type2 = T.Class(map2);
+ Type* class_type1 = T.Class(map1);
+ Type* class_type2 = T.Class(map2);
CHECK(class_type1->Is(class_type2) == (*map1 == *map2));
}
}
@@ -901,8 +824,8 @@ struct Tests : Rep {
double max2 = (*j2)->Number();
if (min1 > max1) std::swap(min1, max1);
if (min2 > max2) std::swap(min2, max2);
- TypeHandle type1 = T.Range(min1, max1);
- TypeHandle type2 = T.Range(min2, max2);
+ Type* type1 = T.Range(min1, max1);
+ Type* type2 = T.Range(min2, max2);
CHECK(type1->Is(type2) == (min1 >= min2 && max1 <= max2));
}
}
@@ -914,8 +837,8 @@ struct Tests : Rep {
for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
Handle<i::Object> value1 = *vt1;
Handle<i::Object> value2 = *vt2;
- TypeHandle const_type1 = T.Constant(value1);
- TypeHandle const_type2 = T.Constant(value2);
+ Type* const_type1 = T.Constant(value1);
+ Type* const_type2 = T.Constant(value2);
CHECK(const_type1->Is(const_type2) == (*value1 == *value2));
}
}
@@ -923,10 +846,10 @@ struct Tests : Rep {
// Context(T1)->Is(Context(T2)) iff T1 = T2
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle outer1 = *it1;
- TypeHandle outer2 = *it2;
- TypeHandle type1 = T.Context(outer1);
- TypeHandle type2 = T.Context(outer2);
+ Type* outer1 = *it1;
+ Type* outer2 = *it2;
+ Type* type1 = T.Context(outer1);
+ Type* type2 = T.Context(outer2);
CHECK(type1->Is(type2) == outer1->Equals(outer2));
}
}
@@ -934,10 +857,10 @@ struct Tests : Rep {
// Array(T1)->Is(Array(T2)) iff T1 = T2
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle element1 = *it1;
- TypeHandle element2 = *it2;
- TypeHandle type1 = T.Array1(element1);
- TypeHandle type2 = T.Array1(element2);
+ Type* element1 = *it1;
+ Type* element2 = *it2;
+ Type* type1 = T.Array1(element1);
+ Type* type2 = T.Array1(element2);
CHECK(type1->Is(type2) == element1->Equals(element2));
}
}
@@ -945,12 +868,12 @@ struct Tests : Rep {
// Function0(S1, T1)->Is(Function0(S2, T2)) iff S1 = S2 and T1 = T2
for (TypeIterator i = T.types.begin(); i != T.types.end(); ++i) {
for (TypeIterator j = T.types.begin(); j != T.types.end(); ++j) {
- TypeHandle result1 = *i;
- TypeHandle receiver1 = *j;
- TypeHandle type1 = T.Function0(result1, receiver1);
- TypeHandle result2 = T.Random();
- TypeHandle receiver2 = T.Random();
- TypeHandle type2 = T.Function0(result2, receiver2);
+ Type* result1 = *i;
+ Type* receiver1 = *j;
+ Type* type1 = T.Function0(result1, receiver1);
+ Type* result2 = T.Random();
+ Type* receiver2 = T.Random();
+ Type* type2 = T.Function0(result2, receiver2);
CHECK(type1->Is(type2) ==
(result1->Equals(result2) && receiver1->Equals(receiver2)));
}
@@ -961,7 +884,7 @@ struct Tests : Rep {
// If IsInteger(v) then Constant(v)->Is(Range(v, v)).
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
if (type->IsConstant() && IsInteger(*type->AsConstant()->Value())) {
CHECK(type->Is(T.Range(type->AsConstant()->Value()->Number(),
type->AsConstant()->Value()->Number())));
@@ -971,8 +894,8 @@ struct Tests : Rep {
    // If Constant(x)->Is(Range(min,max)) then IsInteger(x) and min <= x <= max.
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
+ Type* type1 = *it1;
+ Type* type2 = *it2;
if (type1->IsConstant() && type2->IsRange() && type1->Is(type2)) {
double x = type1->AsConstant()->Value()->Number();
double min = type2->AsRange()->Min();
@@ -984,10 +907,9 @@ struct Tests : Rep {
// Lub(Range(x,y))->Is(T.Union(T.Integral32, T.OtherNumber))
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
if (type->IsRange()) {
- TypeHandle lub = Rep::BitsetType::New(
- Rep::BitsetType::Lub(type), T.region());
+ Type* lub = BitsetType::NewForTesting(BitsetType::Lub(type));
CHECK(lub->Is(T.PlainNumber));
}
}
@@ -1068,31 +990,31 @@ struct Tests : Rep {
void NowIs() {
// Least Element (Bottom): None->NowIs(T)
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
CHECK(T.None->NowIs(type));
}
// Greatest Element (Top): T->NowIs(Any)
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
CHECK(type->NowIs(T.Any));
}
// Bottom Uniqueness: T->NowIs(None) implies T = None
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
if (type->NowIs(T.None)) CheckEqual(type, T.None);
}
// Top Uniqueness: Any->NowIs(T) implies T = Any
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
if (T.Any->NowIs(type)) CheckEqual(type, T.Any);
}
// Reflexivity: T->NowIs(T)
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
CHECK(type->NowIs(type));
}
@@ -1100,9 +1022,9 @@ struct Tests : Rep {
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle type3 = *it3;
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* type3 = *it3;
CHECK(!(type1->NowIs(type2) && type2->NowIs(type3)) ||
type1->NowIs(type3));
}
@@ -1112,8 +1034,8 @@ struct Tests : Rep {
// Antisymmetry: T1->NowIs(T2) and T2->NowIs(T1) iff T1 = T2
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
+ Type* type1 = *it1;
+ Type* type2 = *it2;
CHECK((type1->NowIs(type2) && type2->NowIs(type1)) ==
Equal(type1, type2));
}
@@ -1122,8 +1044,8 @@ struct Tests : Rep {
// T1->Is(T2) implies T1->NowIs(T2)
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
+ Type* type1 = *it1;
+ Type* type2 = *it2;
CHECK(!type1->Is(type2) || type1->NowIs(type2));
}
}
@@ -1133,8 +1055,8 @@ struct Tests : Rep {
for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
Handle<i::Object> value1 = *vt1;
Handle<i::Object> value2 = *vt2;
- TypeHandle const_type1 = T.Constant(value1);
- TypeHandle const_type2 = T.Constant(value2);
+ Type* const_type1 = T.Constant(value1);
+ Type* const_type2 = T.Constant(value2);
CHECK(const_type1->NowIs(const_type2) == (*value1 == *value2));
}
}
@@ -1144,8 +1066,8 @@ struct Tests : Rep {
for (MapIterator mt2 = T.maps.begin(); mt2 != T.maps.end(); ++mt2) {
Handle<i::Map> map1 = *mt1;
Handle<i::Map> map2 = *mt2;
- TypeHandle class_type1 = T.Class(map1);
- TypeHandle class_type2 = T.Class(map2);
+ Type* class_type1 = T.Class(map1);
+ Type* class_type2 = T.Class(map2);
CHECK(class_type1->NowIs(class_type2) == (*map1 == *map2));
}
}
@@ -1155,8 +1077,8 @@ struct Tests : Rep {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Map> map = *mt;
Handle<i::Object> value = *vt;
- TypeHandle const_type = T.Constant(value);
- TypeHandle class_type = T.Class(map);
+ Type* const_type = T.Constant(value);
+ Type* class_type = T.Class(map);
CHECK((value->IsHeapObject() &&
i::HeapObject::cast(*value)->map() == *map)
== const_type->NowIs(class_type));
@@ -1168,8 +1090,8 @@ struct Tests : Rep {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Map> map = *mt;
Handle<i::Object> value = *vt;
- TypeHandle const_type = T.Constant(value);
- TypeHandle class_type = T.Class(map);
+ Type* const_type = T.Constant(value);
+ Type* class_type = T.Class(map);
CHECK(!class_type->NowIs(const_type));
}
}
@@ -1179,9 +1101,9 @@ struct Tests : Rep {
// T->Contains(V) iff Constant(V)->Is(T)
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- TypeHandle type = *it;
+ Type* type = *it;
Handle<i::Object> value = *vt;
- TypeHandle const_type = T.Constant(value);
+ Type* const_type = T.Constant(value);
CHECK(type->Contains(value) == const_type->Is(type));
}
}
@@ -1191,9 +1113,9 @@ struct Tests : Rep {
// T->NowContains(V) iff Constant(V)->NowIs(T)
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- TypeHandle type = *it;
+ Type* type = *it;
Handle<i::Object> value = *vt;
- TypeHandle const_type = T.Constant(value);
+ Type* const_type = T.Constant(value);
CHECK(type->NowContains(value) == const_type->NowIs(type));
}
}
@@ -1201,7 +1123,7 @@ struct Tests : Rep {
// T->Contains(V) implies T->NowContains(V)
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- TypeHandle type = *it;
+ Type* type = *it;
Handle<i::Object> value = *vt;
CHECK(!type->Contains(value) || type->NowContains(value));
}
@@ -1210,9 +1132,9 @@ struct Tests : Rep {
// NowOf(V)->Is(T) implies T->NowContains(V)
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
- TypeHandle type = *it;
+ Type* type = *it;
Handle<i::Object> value = *vt;
- TypeHandle nowof_type = T.Of(value);
+ Type* nowof_type = T.Of(value);
CHECK(!nowof_type->NowIs(type) || type->NowContains(value));
}
}
@@ -1221,27 +1143,27 @@ struct Tests : Rep {
void Maybe() {
// T->Maybe(Any) iff T inhabited
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
CHECK(type->Maybe(T.Any) == type->IsInhabited());
}
// T->Maybe(None) never
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
CHECK(!type->Maybe(T.None));
}
    // Reflexivity up to Inhabitation: T->Maybe(T) iff T inhabited
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
+ Type* type = *it;
CHECK(type->Maybe(type) == type->IsInhabited());
}
// Symmetry: T1->Maybe(T2) iff T2->Maybe(T1)
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
+ Type* type1 = *it1;
+ Type* type2 = *it2;
CHECK(type1->Maybe(type2) == type2->Maybe(type1));
}
}
@@ -1249,8 +1171,8 @@ struct Tests : Rep {
// T1->Maybe(T2) implies T1, T2 inhabited
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
+ Type* type1 = *it1;
+ Type* type2 = *it2;
CHECK(!type1->Maybe(type2) ||
(type1->IsInhabited() && type2->IsInhabited()));
}
@@ -1259,9 +1181,9 @@ struct Tests : Rep {
// T1->Maybe(T2) implies Intersect(T1, T2) inhabited
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle intersect12 = T.Intersect(type1, type2);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* intersect12 = T.Intersect(type1, type2);
CHECK(!type1->Maybe(type2) || intersect12->IsInhabited());
}
}
@@ -1269,8 +1191,8 @@ struct Tests : Rep {
// T1->Is(T2) and T1 inhabited implies T1->Maybe(T2)
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
+ Type* type1 = *it1;
+ Type* type2 = *it2;
CHECK(!(type1->Is(type2) && type1->IsInhabited()) ||
type1->Maybe(type2));
}
@@ -1281,8 +1203,8 @@ struct Tests : Rep {
for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
Handle<i::Object> value1 = *vt1;
Handle<i::Object> value2 = *vt2;
- TypeHandle const_type1 = T.Constant(value1);
- TypeHandle const_type2 = T.Constant(value2);
+ Type* const_type1 = T.Constant(value1);
+ Type* const_type2 = T.Constant(value2);
CHECK(const_type1->Maybe(const_type2) == (*value1 == *value2));
}
}
@@ -1292,8 +1214,8 @@ struct Tests : Rep {
for (MapIterator mt2 = T.maps.begin(); mt2 != T.maps.end(); ++mt2) {
Handle<i::Map> map1 = *mt1;
Handle<i::Map> map2 = *mt2;
- TypeHandle class_type1 = T.Class(map1);
- TypeHandle class_type2 = T.Class(map2);
+ Type* class_type1 = T.Class(map1);
+ Type* class_type2 = T.Class(map2);
CHECK(class_type1->Maybe(class_type2) == (*map1 == *map2));
}
}
@@ -1305,8 +1227,8 @@ struct Tests : Rep {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Map> map = *mt;
Handle<i::Object> value = *vt;
- TypeHandle const_type = T.Constant(value);
- TypeHandle class_type = T.Class(map);
+ Type* const_type = T.Constant(value);
+ Type* class_type = T.Class(map);
CHECK(!const_type->Maybe(class_type));
}
}
@@ -1319,8 +1241,8 @@ struct Tests : Rep {
for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
Handle<i::Map> map = *mt;
Handle<i::Object> value = *vt;
- TypeHandle const_type = T.Constant(value);
- TypeHandle class_type = T.Class(map);
+ Type* const_type = T.Constant(value);
+ Type* class_type = T.Class(map);
CHECK(!class_type->Maybe(const_type));
}
}
@@ -1383,32 +1305,32 @@ struct Tests : Rep {
void Union1() {
// Identity: Union(T, None) = T
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
- TypeHandle union_type = T.Union(type, T.None);
+ Type* type = *it;
+ Type* union_type = T.Union(type, T.None);
CheckEqual(union_type, type);
}
// Domination: Union(T, Any) = Any
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
- TypeHandle union_type = T.Union(type, T.Any);
+ Type* type = *it;
+ Type* union_type = T.Union(type, T.Any);
CheckEqual(union_type, T.Any);
}
// Idempotence: Union(T, T) = T
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
- TypeHandle union_type = T.Union(type, type);
+ Type* type = *it;
+ Type* union_type = T.Union(type, type);
CheckEqual(union_type, type);
}
// Commutativity: Union(T1, T2) = Union(T2, T1)
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle union12 = T.Union(type1, type2);
- TypeHandle union21 = T.Union(type2, type1);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* union12 = T.Union(type1, type2);
+ Type* union21 = T.Union(type2, type1);
CheckEqual(union12, union21);
}
}
@@ -1421,13 +1343,13 @@ struct Tests : Rep {
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle type3 = *it3;
- TypeHandle union12 = T.Union(type1, type2);
- TypeHandle union23 = T.Union(type2, type3);
- TypeHandle union1_23 = T.Union(type1, union23);
- TypeHandle union12_3 = T.Union(union12, type3);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* type3 = *it3;
+ Type* union12 = T.Union(type1, type2);
+ Type* union23 = T.Union(type2, type3);
+ Type* union1_23 = T.Union(type1, union23);
+ Type* union12_3 = T.Union(union12, type3);
CheckEqual(union1_23, union12_3);
}
}
@@ -1437,9 +1359,9 @@ struct Tests : Rep {
// Meet: T1->Is(Union(T1, T2)) and T2->Is(Union(T1, T2))
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle union12 = T.Union(type1, type2);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* union12 = T.Union(type1, type2);
CHECK(type1->Is(union12));
CHECK(type2->Is(union12));
}
@@ -1448,9 +1370,9 @@ struct Tests : Rep {
// Upper Boundedness: T1->Is(T2) implies Union(T1, T2) = T2
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle union12 = T.Union(type1, type2);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* union12 = T.Union(type1, type2);
if (type1->Is(type2)) CheckEqual(union12, type2);
}
}
@@ -1463,11 +1385,11 @@ struct Tests : Rep {
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle type3 = *it3;
- TypeHandle union13 = T.Union(type1, type3);
- TypeHandle union23 = T.Union(type2, type3);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* type3 = *it3;
+ Type* union13 = T.Union(type1, type3);
+ Type* union23 = T.Union(type2, type3);
CHECK(!type1->Is(type2) || union13->Is(union23));
}
}
@@ -1485,10 +1407,10 @@ struct Tests : Rep {
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle type3 = *it3;
- TypeHandle union12 = T.Union(type1, type2);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* type3 = *it3;
+ Type* union12 = T.Union(type1, type2);
CHECK(!(type1->Is(type3) && type2->Is(type3)) || union12->Is(type3));
}
}
@@ -1502,10 +1424,10 @@ struct Tests : Rep {
HandleScope scope(isolate);
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = it2; it3 != T.types.end(); ++it3) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle type3 = *it3;
- TypeHandle union23 = T.Union(type2, type3);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* type3 = *it3;
+ Type* union23 = T.Union(type2, type3);
CHECK(!(type1->Is(type2) || type1->Is(type3)) || type1->Is(union23));
}
}
@@ -1628,32 +1550,32 @@ struct Tests : Rep {
void Intersect() {
// Identity: Intersect(T, Any) = T
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
- TypeHandle intersect_type = T.Intersect(type, T.Any);
+ Type* type = *it;
+ Type* intersect_type = T.Intersect(type, T.Any);
CheckEqual(intersect_type, type);
}
// Domination: Intersect(T, None) = None
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
- TypeHandle intersect_type = T.Intersect(type, T.None);
+ Type* type = *it;
+ Type* intersect_type = T.Intersect(type, T.None);
CheckEqual(intersect_type, T.None);
}
// Idempotence: Intersect(T, T) = T
for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type = *it;
- TypeHandle intersect_type = T.Intersect(type, type);
+ Type* type = *it;
+ Type* intersect_type = T.Intersect(type, type);
CheckEqual(intersect_type, type);
}
// Commutativity: Intersect(T1, T2) = Intersect(T2, T1)
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle intersect12 = T.Intersect(type1, type2);
- TypeHandle intersect21 = T.Intersect(type2, type1);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* intersect12 = T.Intersect(type1, type2);
+ Type* intersect21 = T.Intersect(type2, type1);
CheckEqual(intersect12, intersect21);
}
}
@@ -1669,13 +1591,13 @@ struct Tests : Rep {
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle type3 = *it3;
- TypeHandle intersect12 = T.Intersect(type1, type2);
- TypeHandle intersect23 = T.Intersect(type2, type3);
- TypeHandle intersect1_23 = T.Intersect(type1, intersect23);
- TypeHandle intersect12_3 = T.Intersect(intersect12, type3);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* type3 = *it3;
+ Type* intersect12 = T.Intersect(type1, type2);
+ Type* intersect23 = T.Intersect(type2, type3);
+ Type* intersect1_23 = T.Intersect(type1, intersect23);
+ Type* intersect12_3 = T.Intersect(intersect12, type3);
CheckEqual(intersect1_23, intersect12_3);
}
}
@@ -1691,9 +1613,9 @@ struct Tests : Rep {
/*
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle intersect12 = T.Intersect(type1, type2);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* intersect12 = T.Intersect(type1, type2);
CHECK(intersect12->Is(type1));
CHECK(intersect12->Is(type2));
}
@@ -1703,9 +1625,9 @@ struct Tests : Rep {
// Lower Boundedness: T1->Is(T2) implies Intersect(T1, T2) = T1
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle intersect12 = T.Intersect(type1, type2);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* intersect12 = T.Intersect(type1, type2);
if (type1->Is(type2)) CheckEqual(intersect12, type1);
}
}
@@ -1719,11 +1641,11 @@ struct Tests : Rep {
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle type3 = *it3;
- TypeHandle intersect13 = T.Intersect(type1, type3);
- TypeHandle intersect23 = T.Intersect(type2, type3);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* type3 = *it3;
+ Type* intersect13 = T.Intersect(type1, type3);
+ Type* intersect23 = T.Intersect(type2, type3);
CHECK(!type1->Is(type2) || intersect13->Is(intersect23));
}
}
@@ -1739,10 +1661,10 @@ struct Tests : Rep {
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle type3 = *it3;
- TypeHandle intersect12 = T.Intersect(type1, type2);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* type3 = *it3;
+ Type* intersect12 = T.Intersect(type1, type2);
CHECK(!(type1->Is(type3) || type2->Is(type3)) ||
intersect12->Is(type3));
}
@@ -1755,10 +1677,10 @@ struct Tests : Rep {
HandleScope scope(isolate);
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle type3 = *it3;
- TypeHandle intersect23 = T.Intersect(type2, type3);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* type3 = *it3;
+ Type* intersect23 = T.Intersect(type2, type3);
CHECK(!(type1->Is(type2) && type1->Is(type3)) ||
type1->Is(intersect23));
}
@@ -1871,14 +1793,14 @@ struct Tests : Rep {
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle type3 = *it3;
- TypeHandle union12 = T.Union(type1, type2);
- TypeHandle union13 = T.Union(type1, type3);
- TypeHandle intersect23 = T.Intersect(type2, type3);
- TypeHandle union1_23 = T.Union(type1, intersect23);
- TypeHandle intersect12_13 = T.Intersect(union12, union13);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* type3 = *it3;
+ Type* union12 = T.Union(type1, type2);
+ Type* union13 = T.Union(type1, type3);
+ Type* intersect23 = T.Intersect(type2, type3);
+ Type* union1_23 = T.Union(type1, intersect23);
+ Type* intersect12_13 = T.Intersect(union12, union13);
CHECK(Equal(union1_23, intersect12_13));
}
}
@@ -1894,14 +1816,14 @@ struct Tests : Rep {
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- TypeHandle type3 = *it3;
- TypeHandle intersect12 = T.Intersect(type1, type2);
- TypeHandle intersect13 = T.Intersect(type1, type3);
- TypeHandle union23 = T.Union(type2, type3);
- TypeHandle intersect1_23 = T.Intersect(type1, union23);
- TypeHandle union12_13 = T.Union(intersect12, intersect13);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ Type* type3 = *it3;
+ Type* intersect12 = T.Intersect(type1, type2);
+ Type* intersect13 = T.Intersect(type1, type3);
+ Type* union23 = T.Union(type2, type3);
+ Type* intersect1_23 = T.Intersect(type1, union23);
+ Type* union12_13 = T.Union(intersect12, intersect13);
CHECK(Equal(intersect1_23, union12_13));
}
}
@@ -1912,9 +1834,9 @@ struct Tests : Rep {
void GetRange() {
// GetRange(Range(a, b)) = Range(a, b).
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
- TypeHandle type1 = *it1;
+ Type* type1 = *it1;
if (type1->IsRange()) {
- typename Type::RangeType* range = type1->GetRange();
+ RangeType* range = type1->GetRange()->AsRange();
CHECK(type1->Min() == range->Min());
CHECK(type1->Max() == range->Max());
}
@@ -1923,10 +1845,10 @@ struct Tests : Rep {
// GetRange(Union(Constant(x), Range(min,max))) == Range(min, max).
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
+ Type* type1 = *it1;
+ Type* type2 = *it2;
if (type1->IsConstant() && type2->IsRange()) {
- TypeHandle u = T.Union(type1, type2);
+ Type* u = T.Union(type1, type2);
CHECK(type2->Min() == u->GetRange()->Min());
CHECK(type2->Max() == u->GetRange()->Max());
@@ -1935,201 +1857,69 @@ struct Tests : Rep {
}
}
- template<class Type2, class TypeHandle2, class Region2, class Rep2>
- void Convert() {
- Types<Type2, TypeHandle2, Region2> T2(Rep2::ToRegion(&zone, isolate),
- isolate,
- isolate->random_number_generator());
- for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
- TypeHandle type1 = *it;
- TypeHandle2 type2 = T2.template Convert<Type>(type1);
- TypeHandle type3 = T.template Convert<Type2>(type2);
- CheckEqual(type1, type3);
- }
- }
-
void HTypeFromType() {
for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
- TypeHandle type1 = *it1;
- TypeHandle type2 = *it2;
- HType htype1 = HType::FromType<Type>(type1);
- HType htype2 = HType::FromType<Type>(type2);
+ Type* type1 = *it1;
+ Type* type2 = *it2;
+ HType htype1 = HType::FromType(type1);
+ HType htype2 = HType::FromType(type2);
CHECK(!type1->Is(type2) || htype1.IsSubtypeOf(htype2));
}
}
}
};
-typedef Tests<Type, Type*, Zone, ZoneRep> ZoneTests;
-typedef Tests<HeapType, Handle<HeapType>, Isolate, HeapRep> HeapTests;
-
-
-TEST(IsSomeType_zone) { ZoneTests().IsSomeType(); }
-
-
-TEST(IsSomeType_heap) { HeapTests().IsSomeType(); }
-
-
-TEST(PointwiseRepresentation_zone) { ZoneTests().PointwiseRepresentation(); }
-
-
-TEST(PointwiseRepresentation_heap) { HeapTests().PointwiseRepresentation(); }
-
-
-TEST(BitsetType_zone) { ZoneTests().Bitset(); }
-
-
-TEST(BitsetType_heap) { HeapTests().Bitset(); }
-
-
-TEST(ClassType_zone) { ZoneTests().Class(); }
-
-
-TEST(ClassType_heap) { HeapTests().Class(); }
-
-
-TEST(ConstantType_zone) { ZoneTests().Constant(); }
-
-
-TEST(ConstantType_heap) { HeapTests().Constant(); }
-
-
-TEST(RangeType_zone) { ZoneTests().Range(); }
-
-
-TEST(RangeType_heap) { HeapTests().Range(); }
-
-
-TEST(ArrayType_zone) { ZoneTests().Array(); }
-
-
-TEST(ArrayType_heap) { HeapTests().Array(); }
-
-
-TEST(FunctionType_zone) { ZoneTests().Function(); }
-
+TEST(IsSomeType_zone) { Tests().IsSomeType(); }
-TEST(FunctionType_heap) { HeapTests().Function(); }
+TEST(PointwiseRepresentation_zone) { Tests().PointwiseRepresentation(); }
+TEST(BitsetType_zone) { Tests().Bitset(); }
-TEST(Of_zone) { ZoneTests().Of(); }
+TEST(ClassType_zone) { Tests().Class(); }
+TEST(ConstantType_zone) { Tests().Constant(); }
-TEST(Of_heap) { HeapTests().Of(); }
+TEST(RangeType_zone) { Tests().Range(); }
+TEST(ArrayType_zone) { Tests().Array(); }
-TEST(NowOf_zone) { ZoneTests().NowOf(); }
+TEST(FunctionType_zone) { Tests().Function(); }
+TEST(Of_zone) { Tests().Of(); }
-TEST(NowOf_heap) { HeapTests().NowOf(); }
+TEST(NowOf_zone) { Tests().NowOf(); }
+TEST(MinMax_zone) { Tests().MinMax(); }
-TEST(MinMax_zone) { ZoneTests().MinMax(); }
+TEST(BitsetGlb_zone) { Tests().BitsetGlb(); }
+TEST(BitsetLub_zone) { Tests().BitsetLub(); }
-TEST(MinMax_heap) { HeapTests().MinMax(); }
+TEST(Is1_zone) { Tests().Is1(); }
+TEST(Is2_zone) { Tests().Is2(); }
-TEST(BitsetGlb_zone) { ZoneTests().BitsetGlb(); }
+TEST(NowIs_zone) { Tests().NowIs(); }
+TEST(Contains_zone) { Tests().Contains(); }
-TEST(BitsetGlb_heap) { HeapTests().BitsetGlb(); }
+TEST(NowContains_zone) { Tests().NowContains(); }
+TEST(Maybe_zone) { Tests().Maybe(); }
-TEST(BitsetLub_zone) { ZoneTests().BitsetLub(); }
+TEST(Union1_zone) { Tests().Union1(); }
+TEST(Union2_zone) { Tests().Union2(); }
-TEST(BitsetLub_heap) { HeapTests().BitsetLub(); }
-
-
-TEST(Is1_zone) { ZoneTests().Is1(); }
-
-
-TEST(Is1_heap) { HeapTests().Is1(); }
-
-
-TEST(Is2_zone) { ZoneTests().Is2(); }
-
-
-TEST(Is2_heap) { HeapTests().Is2(); }
-
-
-TEST(NowIs_zone) { ZoneTests().NowIs(); }
-
-
-TEST(NowIs_heap) { HeapTests().NowIs(); }
-
-
-TEST(Contains_zone) { ZoneTests().Contains(); }
-
-
-TEST(Contains_heap) { HeapTests().Contains(); }
-
-
-TEST(NowContains_zone) { ZoneTests().NowContains(); }
-
-
-TEST(NowContains_heap) { HeapTests().NowContains(); }
-
-
-TEST(Maybe_zone) { ZoneTests().Maybe(); }
-
-
-TEST(Maybe_heap) { HeapTests().Maybe(); }
-
-
-TEST(Union1_zone) { ZoneTests().Union1(); }
-
-
-TEST(Union1_heap) { HeapTests().Union1(); }
-
-
-TEST(Union2_zone) { ZoneTests().Union2(); }
-
-
-TEST(Union2_heap) { HeapTests().Union2(); }
-
-
-TEST(Union3_zone) { ZoneTests().Union3(); }
-
-
-TEST(Union3_heap) { HeapTests().Union3(); }
-
-
-TEST(Union4_zone) { ZoneTests().Union4(); }
-
-
-TEST(Union4_heap) { HeapTests().Union4(); }
-
-
-TEST(Intersect_zone) { ZoneTests().Intersect(); }
-
-
-TEST(Intersect_heap) { HeapTests().Intersect(); }
-
-
-TEST(Distributivity_zone) { ZoneTests().Distributivity(); }
-
-
-TEST(Distributivity_heap) { HeapTests().Distributivity(); }
-
-
-TEST(GetRange_zone) { ZoneTests().GetRange(); }
-
-
-TEST(GetRange_heap) { HeapTests().GetRange(); }
-
-
-TEST(Convert_zone) {
- ZoneTests().Convert<HeapType, Handle<HeapType>, Isolate, HeapRep>();
-}
-
+TEST(Union3_zone) { Tests().Union3(); }
-TEST(Convert_heap) { HeapTests().Convert<Type, Type*, Zone, ZoneRep>(); }
+TEST(Union4_zone) { Tests().Union4(); }
+TEST(Intersect_zone) { Tests().Intersect(); }
-TEST(HTypeFromType_zone) { ZoneTests().HTypeFromType(); }
+TEST(Distributivity_zone) { Tests().Distributivity(); }
+TEST(GetRange_zone) { Tests().GetRange(); }
-TEST(HTypeFromType_heap) { HeapTests().HTypeFromType(); }
+TEST(HTypeFromType_zone) { Tests().HTypeFromType(); }
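
Net effect of the hunks above: with HeapType retired, the templated Tests<Type, TypeHandle, Region, Rep> fixture and its Convert round-trip collapse into a single zone-backed suite, and every _heap test disappears. The resulting shape, reduced to a compilable stub (the TEST macro and Type here are local stand-ins, not cctest's real ones):

    #include <cstdio>

    struct Type {};  // stand-in for the zone-allocated v8::internal::Type

    struct Tests {  // no longer templated over <Type, TypeHandle, Region, Rep>
      void Union1() { std::printf("Union1 ran\n"); }
    };

    #define TEST(name) void name()  // stand-in for cctest's TEST macro

    TEST(Union1_zone) { Tests().Union1(); }

    int main() {
      Union1_zone();
      return 0;
    }
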
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 3906d848de..f195a31c79 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -10,6 +10,7 @@
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/factory.h"
+#include "src/field-type.h"
#include "src/global-handles.h"
#include "src/heap/slots-buffer.h"
#include "src/ic/ic.h"
@@ -947,7 +948,7 @@ TEST(DescriptorArrayTrimming) {
const int kSplitFieldIndex = 32;
const int kTrimmedLayoutDescriptorLength = 64;
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
Handle<Map> map = Map::Create(isolate, kFieldCount);
for (int i = 0; i < kSplitFieldIndex; i++) {
map = Map::CopyWithField(map, MakeName("prop", i), any_type, NONE,
@@ -1035,7 +1036,7 @@ TEST(DoScavenge) {
// a pointer to "from space" pointer. Do scavenge one more time and ensure
// that it didn't crash or corrupt the double value stored in the object.
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
Handle<Map> map = Map::Create(isolate, 10);
map = Map::CopyWithField(map, MakeName("prop", 0), any_type, NONE,
Representation::Double(),
@@ -1060,7 +1061,7 @@ TEST(DoScavenge) {
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
// Create temp object in the new space.
- Handle<JSArray> temp = factory->NewJSArray(FAST_ELEMENTS);
+ Handle<JSArray> temp = factory->NewJSArray(0, FAST_ELEMENTS);
CHECK(isolate->heap()->new_space()->Contains(*temp));
// Construct a double value that looks like a pointer to the new space object
@@ -1097,7 +1098,7 @@ TEST(DoScavengeWithIncrementalWriteBarrier) {
// scavenges to promote |obj| to old space, a GC in old space and ensure that
// the tagged value was properly updated after candidates evacuation.
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
Handle<Map> map = Map::Create(isolate, 10);
map = Map::CopyWithField(map, MakeName("prop", 0), any_type, NONE,
Representation::Double(),
@@ -1321,7 +1322,7 @@ TEST(LayoutDescriptorSharing) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
Handle<Map> split_map;
{
@@ -1363,65 +1364,6 @@ TEST(LayoutDescriptorSharing) {
}
-TEST(StoreBufferScanOnScavenge) {
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- v8::HandleScope scope(CcTest::isolate());
-
- Handle<HeapType> any_type = HeapType::Any(isolate);
- Handle<Map> map = Map::Create(isolate, 10);
- map = Map::CopyWithField(map, MakeName("prop", 0), any_type, NONE,
- Representation::Double(),
- INSERT_TRANSITION).ToHandleChecked();
-
- // Create object in new space.
- Handle<JSObject> obj = factory->NewJSObjectFromMap(map, NOT_TENURED);
-
- Handle<HeapNumber> heap_number = factory->NewHeapNumber(42.5);
- obj->WriteToField(0, *heap_number);
-
- {
- // Ensure the object is properly set up.
- DescriptorArray* descriptors = map->instance_descriptors();
- CHECK(descriptors->GetDetails(0).representation().IsDouble());
- FieldIndex field_index = FieldIndex::ForDescriptor(*map, 0);
- CHECK(field_index.is_inobject() && field_index.is_double());
- CHECK_EQ(FLAG_unbox_double_fields, map->IsUnboxedDoubleField(field_index));
- CHECK_EQ(42.5, GetDoubleFieldValue(*obj, field_index));
- }
- CHECK(isolate->heap()->new_space()->Contains(*obj));
-
- // Trigger GCs so that the newly allocated object moves to old gen.
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
-
- CHECK(isolate->heap()->old_space()->Contains(*obj));
-
- // Create temp object in the new space.
- Handle<JSArray> temp = factory->NewJSArray(FAST_ELEMENTS);
- CHECK(isolate->heap()->new_space()->Contains(*temp));
-
- // Construct a double value that looks like a pointer to the new space object
- // and store it into the obj.
- Address fake_object = reinterpret_cast<Address>(*temp) + kPointerSize;
- double boom_value = bit_cast<double>(fake_object);
-
- FieldIndex field_index = FieldIndex::ForDescriptor(obj->map(), 0);
- Handle<HeapNumber> boom_number = factory->NewHeapNumber(boom_value, MUTABLE);
- obj->FastPropertyAtPut(field_index, *boom_number);
-
- // Enforce scan on scavenge for the obj's page.
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- chunk->set_scan_on_scavenge(true);
-
- // Trigger GCs and force evacuation. Should not crash there.
- CcTest::heap()->CollectAllGarbage();
-
- CHECK_EQ(boom_value, GetDoubleFieldValue(*obj, field_index));
-}
-
-
static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
int tagged_descriptor, int double_descriptor,
bool check_tagged_value = true) {
@@ -1580,7 +1522,7 @@ static void TestWriteBarrierObjectShiftFieldsRight(
Isolate* isolate = CcTest::i_isolate();
v8::HandleScope scope(CcTest::isolate());
- Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<FieldType> any_type = FieldType::Any(isolate);
CompileRun("function func() { return 1; }");
@@ -1608,9 +1550,7 @@ static void TestWriteBarrierObjectShiftFieldsRight(
}
}
-
-// TODO(ishell): enable when this issue is fixed.
-DISABLED_TEST(WriteBarrierObjectShiftFieldsRight) {
+TEST(WriteBarrierObjectShiftFieldsRight) {
TestWriteBarrierObjectShiftFieldsRight(OLD_TO_NEW_WRITE_BARRIER);
}
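
The deleted StoreBufferScanOnScavenge test (its scenario is still covered by the DoScavenge variants above) hinged on disguising a pointer as an unboxed double. A self-contained sketch of that bit_cast trick, assuming a 64-bit target and a local helper rather than V8's own bit_cast:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Assumes a 64-bit target, so a pointer and a double are both 8 bytes.
    template <class Dest, class Source>
    Dest bit_cast(const Source& source) {
      static_assert(sizeof(Dest) == sizeof(Source), "size mismatch");
      Dest dest;
      std::memcpy(&dest, &source, sizeof(dest));
      return dest;
    }

    int main() {
      int dummy = 0;
      uintptr_t fake_object =
          reinterpret_cast<uintptr_t>(&dummy) + sizeof(void*);
      double boom_value = bit_cast<double>(fake_object);  // pointer in disguise
      // The bits round-trip losslessly; a GC that misread the double field as
      // a reference would chase a bogus pointer, which is what the test probed.
      uintptr_t back = bit_cast<uintptr_t>(boom_value);
      std::printf("round-trip ok: %d\n", back == fake_object);
      return back == fake_object ? 0 : 1;
    }
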
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 2630110c59..781ad1f69f 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -193,7 +193,7 @@ TEST(Regress2060a) {
HandleScope scope(isolate);
for (int i = 0; i < 32; i++) {
Handle<JSObject> object = factory->NewJSObject(function, TENURED);
- CHECK(!heap->InNewSpace(object->address()));
+ CHECK(!heap->InNewSpace(*object));
CHECK(!first_page->Contains(object->address()));
int32_t hash = Object::GetOrCreateHash(isolate, key)->value();
JSWeakCollection::Set(weakmap, key, object, hash);
@@ -231,7 +231,7 @@ TEST(Regress2060b) {
Handle<JSObject> keys[32];
for (int i = 0; i < 32; i++) {
keys[i] = factory->NewJSObject(function, TENURED);
- CHECK(!heap->InNewSpace(keys[i]->address()));
+ CHECK(!heap->InNewSpace(*keys[i]));
CHECK(!first_page->Contains(keys[i]->address()));
}
Handle<JSWeakMap> weakmap = AllocateJSWeakMap(isolate);
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index 6998e0f749..643bb48ab1 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -192,7 +192,7 @@ TEST(WeakSet_Regress2060a) {
HandleScope scope(isolate);
for (int i = 0; i < 32; i++) {
Handle<JSObject> object = factory->NewJSObject(function, TENURED);
- CHECK(!heap->InNewSpace(object->address()));
+ CHECK(!heap->InNewSpace(*object));
CHECK(!first_page->Contains(object->address()));
int32_t hash = Object::GetOrCreateHash(isolate, key)->value();
JSWeakCollection::Set(weakset, key, object, hash);
@@ -230,7 +230,7 @@ TEST(WeakSet_Regress2060b) {
Handle<JSObject> keys[32];
for (int i = 0; i < 32; i++) {
keys[i] = factory->NewJSObject(function, TENURED);
- CHECK(!heap->InNewSpace(keys[i]->address()));
+ CHECK(!heap->InNewSpace(*keys[i]));
CHECK(!first_page->Contains(keys[i]->address()));
}
Handle<JSWeakSet> weakset = AllocateJSWeakSet(isolate);
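
The weak-map and weak-set hunks switch the new-space query from a raw Address to the object itself. A compilable sketch of the shape of that change (simplified stand-ins, not V8's actual Heap and Handle types):

    #include <cstdio>

    struct Object {};  // stand-in for v8::internal::Object

    struct Heap {
      // The query now takes the object itself ...
      bool InNewSpace(Object* object) { (void)object; return false; }
      // ... where these call sites previously passed object->address()
      // to an Address-based overload.
    };

    int main() {
      Heap heap;
      Object obj;
      Object* handle = &obj;  // stands in for a dereferenced Handle<JSObject>
      std::printf("%d\n", heap.InNewSpace(handle));  // was: InNewSpace(addr)
      return 0;
    }
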
diff --git a/deps/v8/test/cctest/trace-extension.cc b/deps/v8/test/cctest/trace-extension.cc
index ea2b2cee3d..77a29e990b 100644
--- a/deps/v8/test/cctest/trace-extension.cc
+++ b/deps/v8/test/cctest/trace-extension.cc
@@ -108,7 +108,7 @@ void TraceExtension::DoTrace(Address fp) {
regs.sp =
reinterpret_cast<Address>(trace_env.sample) - 10240;
trace_env.sample->Init(CcTest::i_isolate(), regs,
- TickSample::kSkipCEntryFrame);
+ TickSample::kSkipCEntryFrame, true);
}
diff --git a/deps/v8/test/cctest/types-fuzz.h b/deps/v8/test/cctest/types-fuzz.h
index 5c43e8e694..79e460856c 100644
--- a/deps/v8/test/cctest/types-fuzz.h
+++ b/deps/v8/test/cctest/types-fuzz.h
@@ -35,19 +35,18 @@ namespace v8 {
namespace internal {
-template<class Type, class TypeHandle, class Region>
class Types {
public:
- Types(Region* region, Isolate* isolate, v8::base::RandomNumberGenerator* rng)
- : region_(region), isolate_(isolate), rng_(rng) {
- #define DECLARE_TYPE(name, value) \
- name = Type::name(region); \
- types.push_back(name);
+ Types(Zone* zone, Isolate* isolate, v8::base::RandomNumberGenerator* rng)
+ : zone_(zone), isolate_(isolate), rng_(rng) {
+#define DECLARE_TYPE(name, value) \
+ name = Type::name(); \
+ types.push_back(name);
PROPER_BITSET_TYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
- SignedSmall = Type::SignedSmall(region);
- UnsignedSmall = Type::UnsignedSmall(region);
+ SignedSmall = Type::SignedSmall();
+ UnsignedSmall = Type::UnsignedSmall();
object_map = isolate->factory()->NewMap(
JS_OBJECT_TYPE, JSObject::kHeaderSize);
@@ -56,16 +55,16 @@ class Types {
number_map = isolate->factory()->NewMap(
HEAP_NUMBER_TYPE, HeapNumber::kSize);
uninitialized_map = isolate->factory()->uninitialized_map();
- ObjectClass = Type::Class(object_map, region);
- ArrayClass = Type::Class(array_map, region);
- NumberClass = Type::Class(number_map, region);
- UninitializedClass = Type::Class(uninitialized_map, region);
+ ObjectClass = Type::Class(object_map, zone);
+ ArrayClass = Type::Class(array_map, zone);
+ NumberClass = Type::Class(number_map, zone);
+ UninitializedClass = Type::Class(uninitialized_map, zone);
maps.push_back(object_map);
maps.push_back(array_map);
maps.push_back(uninitialized_map);
for (MapVector::iterator it = maps.begin(); it != maps.end(); ++it) {
- types.push_back(Type::Class(*it, region));
+ types.push_back(Type::Class(*it, zone));
}
smi = handle(Smi::FromInt(666), isolate);
@@ -74,13 +73,13 @@ class Types {
object2 = isolate->factory()->NewJSObjectFromMap(object_map);
array = isolate->factory()->NewJSArray(20);
uninitialized = isolate->factory()->uninitialized_value();
- SmiConstant = Type::Constant(smi, region);
- Signed32Constant = Type::Constant(signed32, region);
+ SmiConstant = Type::Constant(smi, zone);
+ Signed32Constant = Type::Constant(signed32, zone);
- ObjectConstant1 = Type::Constant(object1, region);
- ObjectConstant2 = Type::Constant(object2, region);
- ArrayConstant = Type::Constant(array, region);
- UninitializedConstant = Type::Constant(uninitialized, region);
+ ObjectConstant1 = Type::Constant(object1, zone);
+ ObjectConstant2 = Type::Constant(object2, zone);
+ ArrayConstant = Type::Constant(array, zone);
+ UninitializedConstant = Type::Constant(uninitialized, zone);
values.push_back(smi);
values.push_back(signed32);
@@ -89,7 +88,7 @@ class Types {
values.push_back(array);
values.push_back(uninitialized);
for (ValueVector::iterator it = values.begin(); it != values.end(); ++it) {
- types.push_back(Type::Constant(*it, region));
+ types.push_back(Type::Constant(*it, zone));
}
integers.push_back(isolate->factory()->NewNumber(-V8_INFINITY));
@@ -103,16 +102,16 @@ class Types {
if (!IsMinusZero(x)) integers.push_back(isolate->factory()->NewNumber(x));
}
- Integer = Type::Range(-V8_INFINITY, +V8_INFINITY, region);
+ Integer = Type::Range(-V8_INFINITY, +V8_INFINITY, zone);
- NumberArray = Type::Array(Number, region);
- StringArray = Type::Array(String, region);
- AnyArray = Type::Array(Any, region);
+ NumberArray = Type::Array(Number, zone);
+ StringArray = Type::Array(String, zone);
+ AnyArray = Type::Array(Any, zone);
- SignedFunction1 = Type::Function(SignedSmall, SignedSmall, region);
- NumberFunction1 = Type::Function(Number, Number, region);
- NumberFunction2 = Type::Function(Number, Number, Number, region);
- MethodFunction = Type::Function(String, Object, 0, region);
+ SignedFunction1 = Type::Function(SignedSmall, SignedSmall, zone);
+ NumberFunction1 = Type::Function(Number, Number, zone);
+ NumberFunction2 = Type::Function(Number, Number, Number, zone);
+ MethodFunction = Type::Function(String, Object, 0, zone);
for (int i = 0; i < 30; ++i) {
types.push_back(Fuzz());
@@ -131,40 +130,40 @@ class Types {
Handle<i::JSArray> array;
Handle<i::Oddball> uninitialized;
- #define DECLARE_TYPE(name, value) TypeHandle name;
+#define DECLARE_TYPE(name, value) Type* name;
PROPER_BITSET_TYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
-#define DECLARE_TYPE(name, value) TypeHandle Mask##name##ForTesting;
+#define DECLARE_TYPE(name, value) Type* Mask##name##ForTesting;
MASK_BITSET_TYPE_LIST(DECLARE_TYPE)
#undef DECLARE_TYPE
- TypeHandle SignedSmall;
- TypeHandle UnsignedSmall;
+ Type* SignedSmall;
+ Type* UnsignedSmall;
- TypeHandle ObjectClass;
- TypeHandle ArrayClass;
- TypeHandle NumberClass;
- TypeHandle UninitializedClass;
+ Type* ObjectClass;
+ Type* ArrayClass;
+ Type* NumberClass;
+ Type* UninitializedClass;
- TypeHandle SmiConstant;
- TypeHandle Signed32Constant;
- TypeHandle ObjectConstant1;
- TypeHandle ObjectConstant2;
- TypeHandle ArrayConstant;
- TypeHandle UninitializedConstant;
+ Type* SmiConstant;
+ Type* Signed32Constant;
+ Type* ObjectConstant1;
+ Type* ObjectConstant2;
+ Type* ArrayConstant;
+ Type* UninitializedConstant;
- TypeHandle Integer;
+ Type* Integer;
- TypeHandle NumberArray;
- TypeHandle StringArray;
- TypeHandle AnyArray;
+ Type* NumberArray;
+ Type* StringArray;
+ Type* AnyArray;
- TypeHandle SignedFunction1;
- TypeHandle NumberFunction1;
- TypeHandle NumberFunction2;
- TypeHandle MethodFunction;
+ Type* SignedFunction1;
+ Type* NumberFunction1;
+ Type* NumberFunction2;
+ Type* MethodFunction;
- typedef std::vector<TypeHandle> TypeVector;
+ typedef std::vector<Type*> TypeVector;
typedef std::vector<Handle<i::Map> > MapVector;
typedef std::vector<Handle<i::Object> > ValueVector;
@@ -173,94 +172,70 @@ class Types {
ValueVector values;
ValueVector integers; // "Integer" values used for range limits.
- TypeHandle Of(Handle<i::Object> value) {
- return Type::Of(value, region_);
- }
+ Type* Of(Handle<i::Object> value) { return Type::Of(value, zone_); }
- TypeHandle NowOf(Handle<i::Object> value) {
- return Type::NowOf(value, region_);
- }
+ Type* NowOf(Handle<i::Object> value) { return Type::NowOf(value, zone_); }
- TypeHandle Class(Handle<i::Map> map) {
- return Type::Class(map, region_);
- }
+ Type* Class(Handle<i::Map> map) { return Type::Class(map, zone_); }
- TypeHandle Constant(Handle<i::Object> value) {
- return Type::Constant(value, region_);
+ Type* Constant(Handle<i::Object> value) {
+ return Type::Constant(value, zone_);
}
- TypeHandle Range(double min, double max) {
- return Type::Range(min, max, region_);
- }
+ Type* Range(double min, double max) { return Type::Range(min, max, zone_); }
- TypeHandle Context(TypeHandle outer) {
- return Type::Context(outer, region_);
- }
+ Type* Context(Type* outer) { return Type::Context(outer, zone_); }
- TypeHandle Array1(TypeHandle element) {
- return Type::Array(element, region_);
- }
+ Type* Array1(Type* element) { return Type::Array(element, zone_); }
- TypeHandle Function0(TypeHandle result, TypeHandle receiver) {
- return Type::Function(result, receiver, 0, region_);
+ Type* Function0(Type* result, Type* receiver) {
+ return Type::Function(result, receiver, 0, zone_);
}
- TypeHandle Function1(TypeHandle result, TypeHandle receiver, TypeHandle arg) {
- TypeHandle type = Type::Function(result, receiver, 1, region_);
+ Type* Function1(Type* result, Type* receiver, Type* arg) {
+ Type* type = Type::Function(result, receiver, 1, zone_);
type->AsFunction()->InitParameter(0, arg);
return type;
}
- TypeHandle Function2(TypeHandle result, TypeHandle arg1, TypeHandle arg2) {
- return Type::Function(result, arg1, arg2, region_);
+ Type* Function2(Type* result, Type* arg1, Type* arg2) {
+ return Type::Function(result, arg1, arg2, zone_);
}
- TypeHandle Union(TypeHandle t1, TypeHandle t2) {
- return Type::Union(t1, t2, region_);
- }
+ Type* Union(Type* t1, Type* t2) { return Type::Union(t1, t2, zone_); }
- TypeHandle Intersect(TypeHandle t1, TypeHandle t2) {
- return Type::Intersect(t1, t2, region_);
- }
+ Type* Intersect(Type* t1, Type* t2) { return Type::Intersect(t1, t2, zone_); }
- TypeHandle Representation(TypeHandle t) {
- return Type::Representation(t, region_);
- }
+ Type* Representation(Type* t) { return Type::Representation(t, zone_); }
- // TypeHandle Semantic(TypeHandle t) { return Intersect(t,
+ // Type* Semantic(Type* t) { return Intersect(t,
// MaskSemanticForTesting); }
- TypeHandle Semantic(TypeHandle t) { return Type::Semantic(t, region_); }
+ Type* Semantic(Type* t) { return Type::Semantic(t, zone_); }
- template<class Type2, class TypeHandle2>
- TypeHandle Convert(TypeHandle2 t) {
- return Type::template Convert<Type2>(t, region_);
- }
-
- TypeHandle Random() {
+ Type* Random() {
return types[rng_->NextInt(static_cast<int>(types.size()))];
}
- TypeHandle Fuzz(int depth = 4) {
+ Type* Fuzz(int depth = 4) {
switch (rng_->NextInt(depth == 0 ? 3 : 20)) {
case 0: { // bitset
#define COUNT_BITSET_TYPES(type, value) + 1
int n = 0 PROPER_BITSET_TYPE_LIST(COUNT_BITSET_TYPES);
#undef COUNT_BITSET_TYPES
// Pick a bunch of named bitsets and return their intersection.
- TypeHandle result = Type::Any(region_);
+ Type* result = Type::Any();
for (int i = 0, m = 1 + rng_->NextInt(3); i < m; ++i) {
int j = rng_->NextInt(n);
- #define PICK_BITSET_TYPE(type, value) \
- if (j-- == 0) { \
- TypeHandle tmp = Type::Intersect( \
- result, Type::type(region_), region_); \
- if (tmp->Is(Type::None()) && i != 0) { \
- break; \
- } else { \
- result = tmp; \
- continue; \
- } \
- }
+#define PICK_BITSET_TYPE(type, value) \
+ if (j-- == 0) { \
+ Type* tmp = Type::Intersect(result, Type::type(), zone_); \
+ if (tmp->Is(Type::None()) && i != 0) { \
+ break; \
+ } else { \
+ result = tmp; \
+ continue; \
+ } \
+ }
PROPER_BITSET_TYPE_LIST(PICK_BITSET_TYPE)
#undef PICK_BITSET_TYPE
}
@@ -268,11 +243,11 @@ class Types {
}
case 1: { // class
int i = rng_->NextInt(static_cast<int>(maps.size()));
- return Type::Class(maps[i], region_);
+ return Type::Class(maps[i], zone_);
}
case 2: { // constant
int i = rng_->NextInt(static_cast<int>(values.size()));
- return Type::Constant(values[i], region_);
+ return Type::Constant(values[i], zone_);
}
case 3: { // range
int i = rng_->NextInt(static_cast<int>(integers.size()));
@@ -280,26 +255,26 @@ class Types {
double min = integers[i]->Number();
double max = integers[j]->Number();
if (min > max) std::swap(min, max);
- return Type::Range(min, max, region_);
+ return Type::Range(min, max, zone_);
}
case 4: { // context
int depth = rng_->NextInt(3);
- TypeHandle type = Type::Internal(region_);
- for (int i = 0; i < depth; ++i) type = Type::Context(type, region_);
+ Type* type = Type::Internal();
+ for (int i = 0; i < depth; ++i) type = Type::Context(type, zone_);
return type;
}
case 5: { // array
- TypeHandle element = Fuzz(depth / 2);
- return Type::Array(element, region_);
+ Type* element = Fuzz(depth / 2);
+ return Type::Array(element, zone_);
}
case 6:
case 7: { // function
- TypeHandle result = Fuzz(depth / 2);
- TypeHandle receiver = Fuzz(depth / 2);
+ Type* result = Fuzz(depth / 2);
+ Type* receiver = Fuzz(depth / 2);
int arity = rng_->NextInt(3);
- TypeHandle type = Type::Function(result, receiver, arity, region_);
+ Type* type = Type::Function(result, receiver, arity, zone_);
for (int i = 0; i < type->AsFunction()->Arity(); ++i) {
- TypeHandle parameter = Fuzz(depth / 2);
+ Type* parameter = Fuzz(depth / 2);
type->AsFunction()->InitParameter(i, parameter);
}
return type;
@@ -309,21 +284,21 @@ class Types {
#define COUNT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) +1
SIMD128_TYPES(COUNT_SIMD_TYPE);
#undef COUNT_SIMD_TYPE
- TypeHandle (*simd_constructors[num_simd_types])(Isolate*, Region*) = {
+ Type* (*simd_constructors[num_simd_types])(Isolate*, Zone*) = {
#define COUNT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
&Type::Name,
- SIMD128_TYPES(COUNT_SIMD_TYPE)
+ SIMD128_TYPES(COUNT_SIMD_TYPE)
#undef COUNT_SIMD_TYPE
};
- return simd_constructors[rng_->NextInt(num_simd_types)](
- isolate_, region_);
+ return simd_constructors[rng_->NextInt(num_simd_types)](isolate_,
+ zone_);
}
default: { // union
int n = rng_->NextInt(10);
- TypeHandle type = None;
+ Type* type = None;
for (int i = 0; i < n; ++i) {
- TypeHandle operand = Fuzz(depth - 1);
- type = Type::Union(type, operand, region_);
+ Type* operand = Fuzz(depth - 1);
+ type = Type::Union(type, operand, zone_);
}
return type;
}
@@ -331,10 +306,10 @@ class Types {
UNREACHABLE();
}
- Region* region() { return region_; }
+ Zone* zone() { return zone_; }
private:
- Region* region_;
+ Zone* zone_;
Isolate* isolate_;
v8::base::RandomNumberGenerator* rng_;
};
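
The de-templated fuzzer above keeps its termination guarantee: at depth 0 only leaf cases are drawn, and every recursive case shrinks the depth. A standalone sketch of the same scheme over a toy string "type" (an assumption; the real Fuzz() builds Type nodes in a Zone):

    #include <iostream>
    #include <random>
    #include <string>

    std::mt19937 rng(42);

    int NextInt(int n) {
      return std::uniform_int_distribution<int>(0, n - 1)(rng);
    }

    // At depth 0 only the leaf case is eligible; recursive cases halve or
    // decrement the depth, so generation always terminates.
    std::string Fuzz(int depth = 4) {
      switch (NextInt(depth == 0 ? 1 : 3)) {
        case 0:  // leaf, like the bitset/class/constant cases
          return "leaf" + std::to_string(NextInt(10));
        case 1:  // unary node, like Type::Array(Fuzz(depth / 2))
          return "array<" + Fuzz(depth / 2) + ">";
        default: {  // n-ary node, like the union case
          std::string t = Fuzz(depth - 1);
          for (int i = 0, n = NextInt(3); i < n; ++i)
            t += "|" + Fuzz(depth - 1);
          return t;
        }
      }
    }

    int main() {
      for (int i = 0; i < 5; ++i) std::cout << Fuzz() << "\n";
      return 0;
    }
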
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index 6fcde645cb..0b33808781 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -3,12 +3,14 @@
// found in the LICENSE file.
#include <stdint.h>
+#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "src/wasm/wasm-macro-gen.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/test-signatures.h"
#include "test/cctest/wasm/wasm-run-utils.h"
@@ -24,39 +26,79 @@ using namespace v8::internal::wasm;
} while (false)
-static uint32_t AddJsFunction(TestingModule* module, FunctionSig* sig,
- const char* source) {
+#define ADD_CODE(vec, ...) \
+ do { \
+ byte __buf[] = {__VA_ARGS__}; \
+ for (size_t i = 0; i < sizeof(__buf); i++) vec.push_back(__buf[i]); \
+ } while (false)
+
+
+namespace {
+// A helper for generating predictable but unique argument values that
+// are easy to debug (e.g. with misaligned stacks).
+class PredictableInputValues {
+ public:
+ int base_;
+ explicit PredictableInputValues(int base) : base_(base) {}
+ double arg_d(int which) { return base_ * which + ((which & 1) * 0.5); }
+ float arg_f(int which) { return base_ * which + ((which & 1) * 0.25); }
+ int32_t arg_i(int which) { return base_ * which + ((which & 1) * kMinInt); }
+ int64_t arg_l(int which) {
+ return base_ * which + ((which & 1) * (0x04030201LL << 32));
+ }
+};
+
+
+uint32_t AddJsFunction(TestingModule* module, FunctionSig* sig,
+ const char* source) {
Handle<JSFunction> jsfunc = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CompileRun(source))));
module->AddFunction(sig, Handle<Code>::null());
uint32_t index = static_cast<uint32_t>(module->module->functions->size() - 1);
Isolate* isolate = CcTest::InitIsolateOnce();
- Handle<Code> code = CompileWasmToJSWrapper(isolate, module, jsfunc, index);
- module->function_code->at(index) = code;
+ Handle<Code> code =
+ CompileWasmToJSWrapper(isolate, module, jsfunc, sig, "test");
+ module->instance->function_code->at(index) = code;
return index;
}
-static Handle<JSFunction> WrapCode(ModuleEnv* module, uint32_t index) {
+uint32_t AddJSSelector(TestingModule* module, FunctionSig* sig, int which) {
+ const int kMaxParams = 8;
+ static const char* formals[kMaxParams] = {
+ "", "a", "a,b", "a,b,c",
+ "a,b,c,d", "a,b,c,d,e", "a,b,c,d,e,f", "a,b,c,d,e,f,g",
+ };
+ CHECK_LT(which, static_cast<int>(sig->parameter_count()));
+ CHECK_LT(static_cast<int>(sig->parameter_count()), kMaxParams);
+
+ i::EmbeddedVector<char, 256> source;
+ char param = 'a' + which;
+ SNPrintF(source, "(function(%s) { return %c; })",
+ formals[sig->parameter_count()], param);
+
+ return AddJsFunction(module, sig, source.start());
+}
+
+
+Handle<JSFunction> WrapCode(ModuleEnv* module, uint32_t index) {
Isolate* isolate = module->module->shared_isolate;
// Wrap the code so it can be called as a JS function.
Handle<String> name = isolate->factory()->NewStringFromStaticChars("main");
Handle<JSObject> module_object = Handle<JSObject>(0, isolate);
- Handle<Code> code = module->function_code->at(index);
+ Handle<Code> code = module->instance->function_code->at(index);
WasmJs::InstallWasmFunctionMap(isolate, isolate->native_context());
return compiler::CompileJSToWasmWrapper(isolate, module, name, code,
module_object, index);
}
-static void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc, double a,
- double b) {
+void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc,
+ Handle<Object>* buffer, int count) {
Isolate* isolate = jsfunc->GetIsolate();
- Handle<Object> buffer[] = {isolate->factory()->NewNumber(a),
- isolate->factory()->NewNumber(b)};
Handle<Object> global(isolate->context()->global_object(), isolate);
MaybeHandle<Object> retval =
- Execution::Call(isolate, jsfunc, global, 2, buffer);
+ Execution::Call(isolate, jsfunc, global, count, buffer);
CHECK(!retval.is_null());
Handle<Object> result = retval.ToHandleChecked();
@@ -64,17 +106,26 @@ static void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc, double a,
CHECK_EQ(expected, Smi::cast(*result)->value());
} else {
CHECK(result->IsHeapNumber());
- CHECK_EQ(expected, HeapNumber::cast(*result)->value());
+ CheckFloatEq(expected, HeapNumber::cast(*result)->value());
}
}
+void EXPECT_CALL(double expected, Handle<JSFunction> jsfunc, double a,
+ double b) {
+ Isolate* isolate = jsfunc->GetIsolate();
+ Handle<Object> buffer[] = {isolate->factory()->NewNumber(a),
+ isolate->factory()->NewNumber(b)};
+ EXPECT_CALL(expected, jsfunc, buffer, 2);
+}
+} // namespace
+
TEST(Run_Int32Sub_jswrapped) {
TestSignatures sigs;
TestingModule module;
- WasmFunctionCompiler t(sigs.i_ii());
+ WasmFunctionCompiler t(sigs.i_ii(), &module);
BUILD(t, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd(&module));
+ Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
EXPECT_CALL(33, jsfunc, 44, 11);
EXPECT_CALL(-8723487, jsfunc, -8000000, 723487);
@@ -84,9 +135,9 @@ TEST(Run_Int32Sub_jswrapped) {
TEST(Run_Float32Div_jswrapped) {
TestSignatures sigs;
TestingModule module;
- WasmFunctionCompiler t(sigs.f_ff());
+ WasmFunctionCompiler t(sigs.f_ff(), &module);
BUILD(t, WASM_F32_DIV(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd(&module));
+ Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
EXPECT_CALL(92, jsfunc, 46, 0.5);
EXPECT_CALL(64, jsfunc, -16, -0.25);
@@ -96,9 +147,9 @@ TEST(Run_Float32Div_jswrapped) {
TEST(Run_Float64Add_jswrapped) {
TestSignatures sigs;
TestingModule module;
- WasmFunctionCompiler t(sigs.d_dd());
+ WasmFunctionCompiler t(sigs.d_dd(), &module);
BUILD(t, WASM_F64_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd(&module));
+ Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
EXPECT_CALL(3, jsfunc, 2, 1);
EXPECT_CALL(-5.5, jsfunc, -5.25, -0.25);
@@ -108,9 +159,9 @@ TEST(Run_Float64Add_jswrapped) {
TEST(Run_I32Popcount_jswrapped) {
TestSignatures sigs;
TestingModule module;
- WasmFunctionCompiler t(sigs.i_i());
+ WasmFunctionCompiler t(sigs.i_i(), &module);
BUILD(t, WASM_I32_POPCNT(WASM_GET_LOCAL(0)));
- Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd(&module));
+ Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
EXPECT_CALL(2, jsfunc, 9, 0);
EXPECT_CALL(3, jsfunc, 11, 0);
@@ -121,8 +172,7 @@ TEST(Run_I32Popcount_jswrapped) {
#if !V8_TARGET_ARCH_ARM64
-// TODO(titzer): fix wasm->JS calls on arm64 (wrapper issues)
-
+// TODO(titzer): dynamic frame alignment on arm64
TEST(Run_CallJS_Add_jswrapped) {
TestSignatures sigs;
TestingModule module;
@@ -131,11 +181,252 @@ TEST(Run_CallJS_Add_jswrapped) {
AddJsFunction(&module, sigs.i_i(), "(function(a) { return a + 99; })");
BUILD(t, WASM_CALL_FUNCTION(js_index, WASM_GET_LOCAL(0)));
- Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd(&module));
+ Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
EXPECT_CALL(101, jsfunc, 2, -8);
EXPECT_CALL(199, jsfunc, 100, -1);
EXPECT_CALL(-666666801, jsfunc, -666666900, -1);
}
+#endif
+
+
+void RunJSSelectTest(int which) {
+#if !V8_TARGET_ARCH_ARM
+ // TODO(titzer): fix tests on arm and reenable
+ const int kMaxParams = 8;
+ PredictableInputValues inputs(0x100);
+ LocalType type = kAstF64;
+ LocalType types[kMaxParams + 1] = {type, type, type, type, type,
+ type, type, type, type};
+ for (int num_params = which + 1; num_params < kMaxParams; num_params++) {
+ HandleScope scope(CcTest::InitIsolateOnce());
+ FunctionSig sig(1, num_params, types);
+
+ TestingModule module;
+ uint32_t js_index = AddJSSelector(&module, &sig, which);
+ WasmFunctionCompiler t(&sig, &module);
+
+ {
+ std::vector<byte> code;
+ ADD_CODE(code, kExprCallFunction, static_cast<byte>(js_index));
+
+ for (int i = 0; i < num_params; i++) {
+ ADD_CODE(code, WASM_F64(inputs.arg_d(i)));
+ }
+
+ size_t end = code.size();
+ code.push_back(0);
+ t.Build(&code[0], &code[end]);
+ }
+
+ Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
+ double expected = inputs.arg_d(which);
+ EXPECT_CALL(expected, jsfunc, 0.0, 0.0);
+ }
+#endif
+}
+
+
+TEST(Run_JSSelect_0) { RunJSSelectTest(0); }
+
+TEST(Run_JSSelect_1) { RunJSSelectTest(1); }
+
+TEST(Run_JSSelect_2) { RunJSSelectTest(2); }
+
+TEST(Run_JSSelect_3) { RunJSSelectTest(3); }
+
+TEST(Run_JSSelect_4) { RunJSSelectTest(4); }
+
+TEST(Run_JSSelect_5) { RunJSSelectTest(5); }
+
+TEST(Run_JSSelect_6) { RunJSSelectTest(6); }
+
+TEST(Run_JSSelect_7) { RunJSSelectTest(7); }
+
+
+void RunWASMSelectTest(int which) {
+ PredictableInputValues inputs(0x200);
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ const int kMaxParams = 8;
+ for (int num_params = which + 1; num_params < kMaxParams; num_params++) {
+ LocalType type = kAstF64;
+ LocalType types[kMaxParams + 1] = {type, type, type, type, type,
+ type, type, type, type};
+ FunctionSig sig(1, num_params, types);
+
+ TestingModule module;
+ WasmFunctionCompiler t(&sig, &module);
+ BUILD(t, WASM_GET_LOCAL(which));
+ Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
+
+ Handle<Object> args[] = {
+ isolate->factory()->NewNumber(inputs.arg_d(0)),
+ isolate->factory()->NewNumber(inputs.arg_d(1)),
+ isolate->factory()->NewNumber(inputs.arg_d(2)),
+ isolate->factory()->NewNumber(inputs.arg_d(3)),
+ isolate->factory()->NewNumber(inputs.arg_d(4)),
+ isolate->factory()->NewNumber(inputs.arg_d(5)),
+ isolate->factory()->NewNumber(inputs.arg_d(6)),
+ isolate->factory()->NewNumber(inputs.arg_d(7)),
+ };
+
+ double expected = inputs.arg_d(which);
+ EXPECT_CALL(expected, jsfunc, args, kMaxParams);
+ }
+}
+
+
+TEST(Run_WASMSelect_0) { RunWASMSelectTest(0); }
+
+TEST(Run_WASMSelect_1) { RunWASMSelectTest(1); }
+
+TEST(Run_WASMSelect_2) { RunWASMSelectTest(2); }
+
+TEST(Run_WASMSelect_3) { RunWASMSelectTest(3); }
+
+TEST(Run_WASMSelect_4) { RunWASMSelectTest(4); }
+
+TEST(Run_WASMSelect_5) { RunWASMSelectTest(5); }
+
+TEST(Run_WASMSelect_6) { RunWASMSelectTest(6); }
+
+TEST(Run_WASMSelect_7) { RunWASMSelectTest(7); }
+
+
+void RunWASMSelectAlignTest(int num_args, int num_params) {
+ PredictableInputValues inputs(0x300);
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ const int kMaxParams = 4;
+ DCHECK_LE(num_args, kMaxParams);
+ LocalType type = kAstF64;
+ LocalType types[kMaxParams + 1] = {type, type, type, type, type};
+ FunctionSig sig(1, num_params, types);
+
+ for (int which = 0; which < num_params; which++) {
+ TestingModule module;
+ WasmFunctionCompiler t(&sig, &module);
+ BUILD(t, WASM_GET_LOCAL(which));
+ Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
+
+ Handle<Object> args[] = {
+ isolate->factory()->NewNumber(inputs.arg_d(0)),
+ isolate->factory()->NewNumber(inputs.arg_d(1)),
+ isolate->factory()->NewNumber(inputs.arg_d(2)),
+ isolate->factory()->NewNumber(inputs.arg_d(3)),
+ };
+
+ double nan = std::numeric_limits<double>::quiet_NaN();
+ double expected = which < num_args ? inputs.arg_d(which) : nan;
+ EXPECT_CALL(expected, jsfunc, args, num_args);
+ }
+}
+
+
+TEST(Run_WASMSelectAlign_0) {
+ RunWASMSelectAlignTest(0, 1);
+ RunWASMSelectAlignTest(0, 2);
+}
+
+
+TEST(Run_WASMSelectAlign_1) {
+ RunWASMSelectAlignTest(1, 2);
+ RunWASMSelectAlignTest(1, 3);
+}
+
+
+TEST(Run_WASMSelectAlign_2) {
+ RunWASMSelectAlignTest(2, 3);
+ RunWASMSelectAlignTest(2, 4);
+}
+
+
+TEST(Run_WASMSelectAlign_3) {
+ RunWASMSelectAlignTest(3, 3);
+ RunWASMSelectAlignTest(3, 4);
+}
+
+
+TEST(Run_WASMSelectAlign_4) {
+ RunWASMSelectAlignTest(4, 3);
+ RunWASMSelectAlignTest(4, 4);
+}
+
+
+void RunJSSelectAlignTest(int num_args, int num_params) {
+ PredictableInputValues inputs(0x400);
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ Factory* factory = isolate->factory();
+ const int kMaxParams = 4;
+ CHECK_LE(num_args, kMaxParams);
+ CHECK_LE(num_params, kMaxParams);
+ LocalType type = kAstF64;
+ LocalType types[kMaxParams + 1] = {type, type, type, type, type};
+ FunctionSig sig(1, num_params, types);
+
+ // Build the calling code.
+ std::vector<byte> code;
+ ADD_CODE(code, kExprCallFunction, 0);
+
+ for (int i = 0; i < num_params; i++) {
+ ADD_CODE(code, WASM_GET_LOCAL(i));
+ }
+
+ size_t end = code.size();
+ code.push_back(0);
+
+ // Call different select JS functions.
+ for (int which = 0; which < num_params; which++) {
+ HandleScope scope(isolate);
+ TestingModule module;
+ uint32_t js_index = AddJSSelector(&module, &sig, which);
+ CHECK_EQ(0, js_index);
+ WasmFunctionCompiler t(&sig, &module);
+ t.Build(&code[0], &code[end]);
+
+ Handle<JSFunction> jsfunc = WrapCode(&module, t.CompileAndAdd());
+
+ Handle<Object> args[] = {
+ factory->NewNumber(inputs.arg_d(0)),
+ factory->NewNumber(inputs.arg_d(1)),
+ factory->NewNumber(inputs.arg_d(2)),
+ factory->NewNumber(inputs.arg_d(3)),
+ };
+
+ double nan = std::numeric_limits<double>::quiet_NaN();
+ double expected = which < num_args ? inputs.arg_d(which) : nan;
+ EXPECT_CALL(expected, jsfunc, args, num_args);
+ }
+}
+
+TEST(Run_JSSelectAlign_0) {
+ RunJSSelectAlignTest(0, 1);
+ RunJSSelectAlignTest(0, 2);
+}
+
+
+TEST(Run_JSSelectAlign_2) {
+ RunJSSelectAlignTest(2, 3);
+ RunJSSelectAlignTest(2, 4);
+}
+
+
+TEST(Run_JSSelectAlign_4) {
+ RunJSSelectAlignTest(4, 3);
+ RunJSSelectAlignTest(4, 4);
+}
+
+
+#if !V8_TARGET_ARCH_ARM64
+// TODO(titzer): dynamic frame alignment on arm64
+TEST(Run_JSSelectAlign_1) {
+ RunJSSelectAlignTest(1, 2);
+ RunJSSelectAlignTest(1, 3);
+}
+
+
+TEST(Run_JSSelectAlign_3) {
+ RunJSSelectAlignTest(3, 3);
+ RunJSSelectAlignTest(3, 4);
+}
#endif
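
The new selector tests lean on PredictableInputValues: AddJSSelector builds a JS function that returns its which-th parameter, and every argument slot carries a value encoding its own index, so a misaligned frame shows up as a wrong but instantly recognizable number. The same arithmetic, compilable without V8:

    #include <cstdio>

    // Same formulas as PredictableInputValues above: index times a base,
    // plus a per-type fractional tag on odd indices.
    class PredictableInputValues {
     public:
      int base_;
      explicit PredictableInputValues(int base) : base_(base) {}
      double arg_d(int which) { return base_ * which + ((which & 1) * 0.5); }
      float arg_f(int which) { return base_ * which + ((which & 1) * 0.25f); }
    };

    int main() {
      PredictableInputValues inputs(0x100);  // the base RunJSSelectTest uses
      for (int which = 0; which < 4; which++) {
        std::printf("arg %d: d=%.2f f=%.2f\n", which, inputs.arg_d(which),
                    static_cast<double>(inputs.arg_f(which)));
      }
      return 0;
    }
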
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index 3b7bae1dda..905e8e4932 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -6,6 +6,7 @@
#include <string.h>
#include "src/wasm/encoder.h"
+#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
@@ -18,9 +19,13 @@ using namespace v8::internal::compiler;
using namespace v8::internal::wasm;
+#if !V8_TARGET_ARCH_ARM64
+// TODO(titzer): fix arm64 frame alignment.
namespace {
void TestModule(WasmModuleIndex* module, int32_t expected_result) {
Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ WasmJs::InstallWasmFunctionMap(isolate, isolate->native_context());
int32_t result =
CompileAndRunWasmModule(isolate, module->Begin(), module->End());
CHECK_EQ(expected_result, result);
@@ -50,6 +55,8 @@ TEST(Run_WasmModule_CallAdd_rev) {
};
Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ WasmJs::InstallWasmFunctionMap(isolate, isolate->native_context());
int32_t result =
CompileAndRunWasmModule(isolate, data, data + arraysize(data));
CHECK_EQ(99, result);
@@ -197,3 +204,5 @@ TEST(Run_WasmModule_Global) {
TestModule(writer->WriteTo(&zone), 97);
}
#endif
+
+#endif // !V8_TARGET_ARCH_ARM64
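
The module tests now open a HandleScope before compiling, presumably so that handles created while installing the wasm function map and running the module are reclaimed when each test returns. A toy RAII analogue of that discipline (not V8's HandleScope; purely illustrative):

    #include <cstdio>
    #include <vector>

    std::vector<int*> g_handles;  // stand-in for the isolate's handle area

    struct HandleScope {
      size_t mark_;
      HandleScope() : mark_(g_handles.size()) {}
      ~HandleScope() {  // frees everything allocated inside the scope
        for (size_t i = mark_; i < g_handles.size(); ++i) delete g_handles[i];
        g_handles.resize(mark_);
      }
    };

    int* NewHandle(int value) {
      g_handles.push_back(new int(value));
      return g_handles.back();
    }

    int main() {
      {
        HandleScope scope;       // like the scope added in this diff
        int* h = NewHandle(99);  // lives until scope exit
        std::printf("%d\n", *h);
      }                          // all handles reclaimed here
      return g_handles.empty() ? 0 : 1;
    }
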
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 445c3f0aed..a6f07f7af0 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -26,7 +26,7 @@ using namespace v8::internal::wasm;
TEST(Run_WasmInt8Const) {
- WasmRunner<int8_t> r;
+ WasmRunner<int32_t> r;
const byte kExpectedValue = 121;
// return(kExpectedValue)
BUILD(r, WASM_I8(kExpectedValue));
@@ -35,7 +35,7 @@ TEST(Run_WasmInt8Const) {
TEST(Run_WasmInt8Const_fallthru1) {
- WasmRunner<int8_t> r;
+ WasmRunner<int32_t> r;
const byte kExpectedValue = 122;
// kExpectedValue
BUILD(r, WASM_I8(kExpectedValue));
@@ -44,7 +44,7 @@ TEST(Run_WasmInt8Const_fallthru1) {
TEST(Run_WasmInt8Const_fallthru2) {
- WasmRunner<int8_t> r;
+ WasmRunner<int32_t> r;
const byte kExpectedValue = 123;
// -99 kExpectedValue
BUILD(r, WASM_I8(-99), WASM_I8(kExpectedValue));
@@ -54,10 +54,10 @@ TEST(Run_WasmInt8Const_fallthru2) {
TEST(Run_WasmInt8Const_all) {
for (int value = -128; value <= 127; value++) {
- WasmRunner<int8_t> r;
+ WasmRunner<int32_t> r;
// return(value)
BUILD(r, WASM_I8(value));
- int8_t result = r.Call();
+ int32_t result = r.Call();
CHECK_EQ(value, result);
}
}
@@ -84,10 +84,9 @@ TEST(Run_WasmInt32Const_many) {
TEST(Run_WasmMemorySize) {
- WasmRunner<int32_t> r;
TestingModule module;
+ WasmRunner<int32_t> r(&module);
module.AddMemory(1024);
- r.env()->module = &module;
BUILD(r, kExprMemorySize);
CHECK_EQ(1024, r.Call());
}
@@ -116,6 +115,23 @@ TEST(Run_WasmInt64Const_many) {
}
#endif
+TEST(Run_WasmI32ConvertI64) {
+ FOR_INT64_INPUTS(i) {
+ WasmRunner<int32_t> r;
+ BUILD(r, WASM_I32_CONVERT_I64(WASM_I64(*i)));
+ CHECK_EQ(static_cast<int32_t>(*i), r.Call());
+ }
+}
+
+TEST(Run_WasmI64AndConstants) {
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) {
+ WasmRunner<int32_t> r;
+ BUILD(r, WASM_I32_CONVERT_I64(WASM_I64_AND(WASM_I64(*i), WASM_I64(*j))));
+ CHECK_EQ(static_cast<int32_t>(*i & *j), r.Call());
+ }
+ }
+}
TEST(Run_WasmInt32Param0) {
WasmRunner<int32_t> r(MachineType::Int32());
@@ -179,9 +195,6 @@ TEST(Run_WasmInt32Add_P2) {
}
-// TODO(titzer): Fix for nosee4 and re-enable.
-#if 0
-
TEST(Run_WasmFloat32Add) {
WasmRunner<int32_t> r;
// int(11.5f + 44.5f)
@@ -198,8 +211,6 @@ TEST(Run_WasmFloat64Add) {
CHECK_EQ(57, r.Call());
}
-#endif
-
void TestInt32Binop(WasmOpcode opcode, int32_t expected, int32_t a, int32_t b) {
{
@@ -216,7 +227,6 @@ void TestInt32Binop(WasmOpcode opcode, int32_t expected, int32_t a, int32_t b) {
}
}
-
TEST(Run_WasmInt32Binops) {
TestInt32Binop(kExprI32Add, 88888888, 33333333, 55555555);
TestInt32Binop(kExprI32Sub, -1111111, 7777777, 8888888);
@@ -594,10 +604,9 @@ TEST(Run_WASM_Int32DivU_byzero_const) {
TEST(Run_WASM_Int32DivS_trap_effect) {
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
TestingModule module;
module.AddMemoryElems<int32_t>(8);
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
BUILD(r,
WASM_IF_ELSE(WASM_GET_LOCAL(0),
@@ -793,10 +802,6 @@ void TestFloat64UnopWithConvert(WasmOpcode opcode, int32_t expected, double a) {
}
}
-
-// TODO(titzer): Fix for nosee4 and re-enable.
-#if 0
-
TEST(Run_WasmFloat32Binops) {
TestFloat32Binop(kExprF32Eq, 1, 8.125f, 8.125f);
TestFloat32Binop(kExprF32Ne, 1, 8.125f, 8.127f);
@@ -811,7 +816,6 @@ TEST(Run_WasmFloat32Binops) {
TestFloat32BinopWithConvert(kExprF32Div, 11, 22.1f, 2.0f);
}
-
TEST(Run_WasmFloat32Unops) {
TestFloat32UnopWithConvert(kExprF32Abs, 8, 8.125f);
TestFloat32UnopWithConvert(kExprF32Abs, 9, -9.125f);
@@ -819,7 +823,6 @@ TEST(Run_WasmFloat32Unops) {
TestFloat32UnopWithConvert(kExprF32Sqrt, 12, 144.4f);
}
-
TEST(Run_WasmFloat64Binops) {
TestFloat64Binop(kExprF64Eq, 1, 16.25, 16.25);
TestFloat64Binop(kExprF64Ne, 1, 16.25, 16.15);
@@ -834,7 +837,6 @@ TEST(Run_WasmFloat64Binops) {
TestFloat64BinopWithConvert(kExprF64Div, -1111, -2222.3, 2);
}
-
TEST(Run_WasmFloat64Unops) {
TestFloat64UnopWithConvert(kExprF64Abs, 108, 108.125);
TestFloat64UnopWithConvert(kExprF64Abs, 209, -209.125);
@@ -842,9 +844,6 @@ TEST(Run_WasmFloat64Unops) {
TestFloat64UnopWithConvert(kExprF64Sqrt, 13, 169.4);
}
-#endif
-
-
TEST(Run_WasmFloat32Neg) {
WasmRunner<float> r(MachineType::Float32());
BUILD(r, WASM_F32_NEG(WASM_GET_LOCAL(0)));
@@ -962,8 +961,8 @@ TEST(Run_Wasm_Return_F64) {
TEST(Run_Wasm_Select) {
WasmRunner<int32_t> r(MachineType::Int32());
- // return select(a, 11, 22);
- BUILD(r, WASM_SELECT(WASM_GET_LOCAL(0), WASM_I8(11), WASM_I8(22)));
+ // return select(11, 22, a);
+ BUILD(r, WASM_SELECT(WASM_I8(11), WASM_I8(22), WASM_GET_LOCAL(0)));
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 11 : 22;
CHECK_EQ(expected, r.Call(*i));
@@ -973,22 +972,38 @@ TEST(Run_Wasm_Select) {
TEST(Run_Wasm_Select_strict1) {
WasmRunner<int32_t> r(MachineType::Int32());
- // select(a, a = 11, 22); return a
- BUILD(r,
- WASM_BLOCK(2, WASM_SELECT(WASM_GET_LOCAL(0),
- WASM_SET_LOCAL(0, WASM_I8(11)), WASM_I8(22)),
- WASM_GET_LOCAL(0)));
- FOR_INT32_INPUTS(i) { CHECK_EQ(11, r.Call(*i)); }
+ // select(a=0, a=1, a=2); return a
+ BUILD(r, WASM_BLOCK(2, WASM_SELECT(WASM_SET_LOCAL(0, WASM_I8(0)),
+ WASM_SET_LOCAL(0, WASM_I8(1)),
+ WASM_SET_LOCAL(0, WASM_I8(2))),
+ WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(2, r.Call(*i)); }
}
TEST(Run_Wasm_Select_strict2) {
WasmRunner<int32_t> r(MachineType::Int32());
- // select(a, 11, a = 22); return a;
- BUILD(r, WASM_BLOCK(2, WASM_SELECT(WASM_GET_LOCAL(0), WASM_I8(11),
- WASM_SET_LOCAL(0, WASM_I8(22))),
- WASM_GET_LOCAL(0)));
- FOR_INT32_INPUTS(i) { CHECK_EQ(22, r.Call(*i)); }
+ r.env()->AddLocals(kAstI32, 2);
+ // select(b=5, c=6, a)
+ BUILD(r, WASM_SELECT(WASM_SET_LOCAL(1, WASM_I8(5)),
+ WASM_SET_LOCAL(2, WASM_I8(6)), WASM_GET_LOCAL(0)));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i ? 5 : 6;
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
+
+TEST(Run_Wasm_Select_strict3) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ r.env()->AddLocals(kAstI32, 2);
+ // select(b=5, c=6, a=b)
+ BUILD(r, WASM_SELECT(WASM_SET_LOCAL(1, WASM_I8(5)),
+ WASM_SET_LOCAL(2, WASM_I8(6)),
+ WASM_SET_LOCAL(0, WASM_GET_LOCAL(1))));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = 5;
+ CHECK_EQ(expected, r.Call(*i));
+ }
}
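
These Select tests encode the new operand order, (true_value, false_value, condition), and strict evaluation: Run_Wasm_Select_strict1 expects a == 2 because all three operands run left to right, so the condition operand's assignment lands last. A plain C++ analogue of the strictness (C++ leaves argument order unspecified, so only the all-operands-evaluate property carries over):

    #include <cassert>

    int eval_count = 0;

    int Eval(int v) {  // a stand-in for a WASM operand with a side effect
      ++eval_count;
      return v;
    }

    // New operand order from this diff: (true_value, false_value, condition).
    int Select(int tval, int fval, int cond) { return cond ? tval : fval; }

    int main() {
      int r = Select(Eval(11), Eval(22), Eval(1));
      assert(r == 11);
      assert(eval_count == 3);  // strict: every operand was evaluated
      return 0;
    }
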
@@ -1002,6 +1017,34 @@ TEST(Run_Wasm_BrIf_strict) {
FOR_INT32_INPUTS(i) { CHECK_EQ(99, r.Call(*i)); }
}
+TEST(Run_Wasm_TableSwitch0a) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(2, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(0)),
+ WASM_TABLESWITCH_BODY0(WASM_GET_LOCAL(0)), WASM_I8(91)));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(91, r.Call(*i)); }
+}
+
+TEST(Run_Wasm_TableSwitch0b) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r, WASM_BLOCK(
+ 2, WASM_TABLESWITCH_OP(0, 2, WASM_CASE_BR(0), WASM_CASE_BR(0)),
+ WASM_TABLESWITCH_BODY0(WASM_GET_LOCAL(0)), WASM_I8(92)));
+ FOR_INT32_INPUTS(i) { CHECK_EQ(92, r.Call(*i)); }
+}
+
+TEST(Run_Wasm_TableSwitch0c) {
+ WasmRunner<int32_t> r(MachineType::Int32());
+ BUILD(r,
+ WASM_BLOCK(2, WASM_BLOCK(2, WASM_TABLESWITCH_OP(0, 2, WASM_CASE_BR(0),
+ WASM_CASE_BR(1)),
+ WASM_TABLESWITCH_BODY0(WASM_GET_LOCAL(0)),
+ WASM_RETURN(WASM_I8(76))),
+ WASM_I8(77)));
+ FOR_INT32_INPUTS(i) {
+ int32_t expected = *i == 0 ? 76 : 77;
+ CHECK_EQ(expected, r.Call(*i));
+ }
+}
TEST(Run_Wasm_TableSwitch1) {
WasmRunner<int32_t> r(MachineType::Int32());
@@ -1178,10 +1221,9 @@ TEST(Run_Wasm_TableSwitch4_fallthru_br) {
TEST(Run_Wasm_F32ReinterpretI32) {
- WasmRunner<int32_t> r;
TestingModule module;
int32_t* memory = module.AddMemoryElems<int32_t>(8);
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module);
BUILD(r, WASM_I32_REINTERPRET_F32(
WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO)));
@@ -1195,10 +1237,9 @@ TEST(Run_Wasm_F32ReinterpretI32) {
TEST(Run_Wasm_I32ReinterpretF32) {
- WasmRunner<int32_t> r(MachineType::Int32());
TestingModule module;
int32_t* memory = module.AddMemoryElems<int32_t>(8);
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(r, WASM_BLOCK(
2, WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO,
@@ -1214,10 +1255,9 @@ TEST(Run_Wasm_I32ReinterpretF32) {
TEST(Run_Wasm_ReturnStore) {
- WasmRunner<int32_t> r;
TestingModule module;
int32_t* memory = module.AddMemoryElems<int32_t>(8);
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module);
BUILD(r, WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO,
WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)));
@@ -1231,16 +1271,43 @@ TEST(Run_Wasm_ReturnStore) {
TEST(Run_Wasm_VoidReturn1) {
- WasmRunner<void> r;
- BUILD(r, kExprNop);
- r.Call();
+ // We use a wrapper function because WasmRunner<void> does not exist.
+
+ // Build the test function.
+ TestSignatures sigs;
+ TestingModule module;
+ WasmFunctionCompiler t(sigs.v_v(), &module);
+ BUILD(t, kExprNop);
+ uint32_t index = t.CompileAndAdd();
+
+ const int32_t kExpected = -414444;
+ // Build the calling function.
+ WasmRunner<int32_t> r;
+ r.env()->module = &module;
+ BUILD(r, WASM_BLOCK(2, WASM_CALL_FUNCTION0(index), WASM_I32(kExpected)));
+
+ int32_t result = r.Call();
+ CHECK_EQ(kExpected, result);
}
TEST(Run_Wasm_VoidReturn2) {
- WasmRunner<void> r;
- BUILD(r, WASM_RETURN0);
- r.Call();
+ // We use a wrapper function because WasmRunner<void> does not exist.
+ // Build the test function.
+ TestSignatures sigs;
+ TestingModule module;
+ WasmFunctionCompiler t(sigs.v_v(), &module);
+ BUILD(t, WASM_RETURN0);
+ uint32_t index = t.CompileAndAdd();
+
+ const int32_t kExpected = -414444;
+ // Build the calling function.
+ WasmRunner<int32_t> r;
+ r.env()->module = &module;
+ BUILD(r, WASM_BLOCK(2, WASM_CALL_FUNCTION0(index), WASM_I32(kExpected)));
+
+ int32_t result = r.Call();
+ CHECK_EQ(kExpected, result);
}
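
The rewritten VoidReturn tests wrap the void function in a caller that yields a sentinel, since WasmRunner<void> does not exist. The same pattern in plain C++:

    #include <cassert>

    void VoidBody() { /* kExprNop */ }

    int Caller() {
      const int kExpected = -414444;  // sentinel value from the diff
      VoidBody();                     // WASM_CALL_FUNCTION0(index)
      return kExpected;               // WASM_I32(kExpected)
    }

    int main() {
      assert(Caller() == -414444);
      return 0;
    }
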
@@ -1260,7 +1327,7 @@ TEST(Run_Wasm_Block_If_P) {
TEST(Run_Wasm_Block_BrIf_P) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_I8(51)),
+ BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_I8(51), WASM_GET_LOCAL(0)),
WASM_I8(52)));
FOR_INT32_INPUTS(i) {
int32_t expected = *i ? 51 : 52;
@@ -1427,11 +1494,10 @@ TEST(Run_Wasm_Loop_if_break_fallthru) {
TEST(Run_Wasm_LoadMemI32) {
- WasmRunner<int32_t> r(MachineType::Int32());
TestingModule module;
int32_t* memory = module.AddMemoryElems<int32_t>(8);
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
module.RandomizeMemory(1111);
- r.env()->module = &module;
BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_I8(0)));
@@ -1447,11 +1513,10 @@ TEST(Run_Wasm_LoadMemI32) {
TEST(Run_Wasm_LoadMemI32_oob) {
- WasmRunner<int32_t> r(MachineType::Uint32());
TestingModule module;
int32_t* memory = module.AddMemoryElems<int32_t>(8);
+ WasmRunner<int32_t> r(&module, MachineType::Uint32());
module.RandomizeMemory(1111);
- r.env()->module = &module;
BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0)));
@@ -1468,12 +1533,11 @@ TEST(Run_Wasm_LoadMemI32_oob) {
TEST(Run_Wasm_LoadMemI32_oob_asm) {
- WasmRunner<int32_t> r(MachineType::Uint32());
TestingModule module;
module.asm_js = true;
int32_t* memory = module.AddMemoryElems<int32_t>(8);
+ WasmRunner<int32_t> r(&module, MachineType::Uint32());
module.RandomizeMemory(1112);
- r.env()->module = &module;
BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0)));
@@ -1502,8 +1566,7 @@ TEST(Run_Wasm_LoadMem_offset_oob) {
for (size_t m = 0; m < arraysize(machineTypes); m++) {
module.RandomizeMemory(1116 + static_cast<int>(m));
- WasmRunner<int32_t> r(MachineType::Uint32());
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Uint32());
uint32_t boundary = 24 - WasmOpcodes::MemSize(machineTypes[m]);
BUILD(r, WASM_LOAD_MEM_OFFSET(machineTypes[m], 8, WASM_GET_LOCAL(0)),
@@ -1519,11 +1582,10 @@ TEST(Run_Wasm_LoadMem_offset_oob) {
TEST(Run_Wasm_LoadMemI32_offset) {
- WasmRunner<int32_t> r(MachineType::Int32());
TestingModule module;
int32_t* memory = module.AddMemoryElems<int32_t>(4);
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
module.RandomizeMemory(1111);
- r.env()->module = &module;
BUILD(r, WASM_LOAD_MEM_OFFSET(MachineType::Int32(), 4, WASM_GET_LOCAL(0)));
@@ -1545,18 +1607,17 @@ TEST(Run_Wasm_LoadMemI32_offset) {
}
-// TODO(titzer): Fix for mips and re-enable.
#if !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-TEST(Run_Wasm_LoadMemI32_const_oob) {
- TestingModule module;
+TEST(Run_Wasm_LoadMemI32_const_oob_misaligned) {
const int kMemSize = 12;
- module.AddMemoryElems<byte>(kMemSize);
-
+ // TODO(titzer): Fix misaligned accesses on MIPS and re-enable.
for (int offset = 0; offset < kMemSize + 5; offset++) {
for (int index = 0; index < kMemSize + 5; index++) {
- WasmRunner<int32_t> r;
- r.env()->module = &module;
+ TestingModule module;
+ module.AddMemoryElems<byte>(kMemSize);
+
+ WasmRunner<int32_t> r(&module);
module.RandomizeMemory();
BUILD(r,
@@ -1574,12 +1635,34 @@ TEST(Run_Wasm_LoadMemI32_const_oob) {
#endif
+TEST(Run_Wasm_LoadMemI32_const_oob) {
+ const int kMemSize = 24;
+ for (int offset = 0; offset < kMemSize + 5; offset += 4) {
+ for (int index = 0; index < kMemSize + 5; index += 4) {
+ TestingModule module;
+ module.AddMemoryElems<byte>(kMemSize);
+
+ WasmRunner<int32_t> r(&module);
+ module.RandomizeMemory();
+
+ BUILD(r,
+ WASM_LOAD_MEM_OFFSET(MachineType::Int32(), offset, WASM_I8(index)));
+
+ if ((offset + index) <= (kMemSize - sizeof(int32_t))) {
+ CHECK_EQ(module.raw_val_at<int32_t>(offset + index), r.Call());
+ } else {
+ CHECK_TRAP(r.Call());
+ }
+ }
+ }
+}
+
+
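
The new const_oob test walks offset and index in 4-byte steps and expects a trap whenever the full 4-byte access leaves memory. Its bounds predicate, lifted into a compilable stand-in (memory_buf and LoadI32 are local inventions for illustration):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    const int kMemSize = 24;
    uint8_t memory_buf[kMemSize];

    // Same predicate as the test: a 4-byte load at offset + index is in
    // bounds iff offset + index <= kMemSize - sizeof(int32_t).
    bool LoadI32(int offset, int index, int32_t* out) {
      if (offset + index > kMemSize - static_cast<int>(sizeof(int32_t)))
        return false;  // the WASM code would trap here
      std::memcpy(out, memory_buf + offset + index, sizeof(int32_t));
      return true;
    }

    int main() {
      int32_t v;
      std::printf("in bounds: %d\n", LoadI32(8, 12, &v));   // 20 <= 20
      std::printf("traps:     %d\n", !LoadI32(8, 16, &v));  // 24 > 20
      return 0;
    }
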
TEST(Run_Wasm_StoreMemI32_offset) {
- WasmRunner<int32_t> r(MachineType::Int32());
- const int32_t kWritten = 0xaabbccdd;
TestingModule module;
int32_t* memory = module.AddMemoryElems<int32_t>(4);
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
+ const int32_t kWritten = 0xaabbccdd;
BUILD(r, WASM_STORE_MEM_OFFSET(MachineType::Int32(), 4, WASM_GET_LOCAL(0),
WASM_I32(kWritten)));
@@ -1618,8 +1701,7 @@ TEST(Run_Wasm_StoreMem_offset_oob) {
for (size_t m = 0; m < arraysize(machineTypes); m++) {
module.RandomizeMemory(1119 + static_cast<int>(m));
- WasmRunner<int32_t> r(MachineType::Uint32());
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Uint32());
BUILD(r, WASM_STORE_MEM_OFFSET(machineTypes[m], 8, WASM_GET_LOCAL(0),
WASM_LOAD_MEM(machineTypes[m], WASM_ZERO)),
@@ -1639,10 +1721,9 @@ TEST(Run_Wasm_StoreMem_offset_oob) {
#if WASM_64
TEST(Run_Wasm_F64ReinterpretI64) {
- WasmRunner<int64_t> r;
TestingModule module;
int64_t* memory = module.AddMemoryElems<int64_t>(8);
- r.env()->module = &module;
+ WasmRunner<int64_t> r(&module);
BUILD(r, WASM_I64_REINTERPRET_F64(
WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO)));
@@ -1656,10 +1737,9 @@ TEST(Run_Wasm_F64ReinterpretI64) {
TEST(Run_Wasm_I64ReinterpretF64) {
- WasmRunner<int64_t> r(MachineType::Int64());
TestingModule module;
int64_t* memory = module.AddMemoryElems<int64_t>(8);
- r.env()->module = &module;
+ WasmRunner<int64_t> r(&module, MachineType::Int64());
BUILD(r, WASM_BLOCK(
2, WASM_STORE_MEM(MachineType::Float64(), WASM_ZERO,
@@ -1675,11 +1755,10 @@ TEST(Run_Wasm_I64ReinterpretF64) {
TEST(Run_Wasm_LoadMemI64) {
- WasmRunner<int64_t> r;
TestingModule module;
int64_t* memory = module.AddMemoryElems<int64_t>(8);
module.RandomizeMemory(1111);
- r.env()->module = &module;
+ WasmRunner<int64_t> r(&module);
BUILD(r, WASM_LOAD_MEM(MachineType::Int64(), WASM_I8(0)));
@@ -1697,11 +1776,10 @@ TEST(Run_Wasm_LoadMemI64) {
TEST(Run_Wasm_LoadMemI32_P) {
const int kNumElems = 8;
- WasmRunner<int32_t> r(MachineType::Int32());
TestingModule module;
int32_t* memory = module.AddMemoryElems<int32_t>(kNumElems);
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
module.RandomizeMemory(2222);
- r.env()->module = &module;
BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0)));
@@ -1712,12 +1790,11 @@ TEST(Run_Wasm_LoadMemI32_P) {
TEST(Run_Wasm_MemI32_Sum) {
- WasmRunner<uint32_t> r(MachineType::Int32());
const int kNumElems = 20;
- const byte kSum = r.AllocateLocal(kAstI32);
TestingModule module;
uint32_t* memory = module.AddMemoryElems<uint32_t>(kNumElems);
- r.env()->module = &module;
+ WasmRunner<uint32_t> r(&module, MachineType::Int32());
+ const byte kSum = r.AllocateLocal(kAstI32);
BUILD(r, WASM_BLOCK(
2, WASM_WHILE(
@@ -1746,11 +1823,10 @@ TEST(Run_Wasm_MemI32_Sum) {
TEST(Run_Wasm_CheckMachIntsZero) {
- WasmRunner<uint32_t> r(MachineType::Int32());
const int kNumElems = 55;
TestingModule module;
module.AddMemoryElems<uint32_t>(kNumElems);
- r.env()->module = &module;
+ WasmRunner<uint32_t> r(&module, MachineType::Int32());
BUILD(r, kExprBlock, 2, kExprLoop, 1, kExprIf, kExprGetLocal, 0, kExprBr, 0,
kExprIfElse, kExprI32LoadMem, 0, kExprGetLocal, 0, kExprBr, 2,
@@ -1763,8 +1839,6 @@ TEST(Run_Wasm_CheckMachIntsZero) {
TEST(Run_Wasm_MemF32_Sum) {
- WasmRunner<int32_t> r(MachineType::Int32());
- const byte kSum = r.AllocateLocal(kAstF32);
const int kSize = 5;
TestingModule module;
module.AddMemoryElems<float>(kSize);
@@ -1774,7 +1848,8 @@ TEST(Run_Wasm_MemF32_Sum) {
buffer[2] = -77.25;
buffer[3] = 66666.25;
buffer[4] = 5555.25;
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
+ const byte kSum = r.AllocateLocal(kAstF32);
BUILD(r, WASM_BLOCK(
3, WASM_WHILE(
@@ -1799,12 +1874,11 @@ TEST(Run_Wasm_MemF32_Sum) {
#if WASM_64
TEST(Run_Wasm_MemI64_Sum) {
- WasmRunner<uint64_t> r(MachineType::Int32());
const int kNumElems = 20;
- const byte kSum = r.AllocateLocal(kAstI64);
TestingModule module;
uint64_t* memory = module.AddMemoryElems<uint64_t>(kNumElems);
- r.env()->module = &module;
+ WasmRunner<uint64_t> r(&module, MachineType::Int32());
+ const byte kSum = r.AllocateLocal(kAstI64);
BUILD(r, WASM_BLOCK(
2, WASM_WHILE(
@@ -1836,14 +1910,13 @@ TEST(Run_Wasm_MemI64_Sum) {
template <typename T>
T GenerateAndRunFold(WasmOpcode binop, T* buffer, size_t size,
LocalType astType, MachineType memType) {
- WasmRunner<int32_t> r(MachineType::Int32());
- const byte kAccum = r.AllocateLocal(astType);
TestingModule module;
module.AddMemoryElems<T>(size);
for (size_t i = 0; i < size; i++) {
module.raw_mem_start<T>()[i] = buffer[i];
}
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
+ const byte kAccum = r.AllocateLocal(astType);
BUILD(
r,
@@ -1882,10 +1955,9 @@ TEST(Build_Wasm_Infinite_Loop) {
TEST(Build_Wasm_Infinite_Loop_effect) {
- WasmRunner<int32_t> r(MachineType::Int32());
TestingModule module;
module.AddMemoryElems<int8_t>(16);
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
// Only build the graph and compile, don't run.
BUILD(r, WASM_LOOP(1, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO)));
@@ -1970,7 +2042,7 @@ TEST(Run_Wasm_Infinite_Loop_not_taken2) {
TEST(Run_Wasm_Infinite_Loop_not_taken2_brif) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_I8(45)),
+ BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_I8(45), WASM_GET_LOCAL(0)),
WASM_INFINITE_LOOP));
// Run the code, but don't go into the infinite loop.
CHECK_EQ(45, r.Call(1));
@@ -2022,8 +2094,7 @@ TEST(Run_Wasm_Int32LoadInt8_signext) {
int8_t* memory = module.AddMemoryElems<int8_t>(kNumElems);
module.RandomizeMemory();
memory[0] = -1;
- WasmRunner<int32_t> r(MachineType::Int32());
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(r, WASM_LOAD_MEM(MachineType::Int8(), WASM_GET_LOCAL(0)));
for (size_t i = 0; i < kNumElems; i++) {
@@ -2038,8 +2109,7 @@ TEST(Run_Wasm_Int32LoadInt8_zeroext) {
byte* memory = module.AddMemory(kNumElems);
module.RandomizeMemory(77);
memory[0] = 255;
- WasmRunner<int32_t> r(MachineType::Int32());
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(r, WASM_LOAD_MEM(MachineType::Uint8(), WASM_GET_LOCAL(0)));
for (size_t i = 0; i < kNumElems; i++) {
@@ -2054,8 +2124,7 @@ TEST(Run_Wasm_Int32LoadInt16_signext) {
byte* memory = module.AddMemory(kNumBytes);
module.RandomizeMemory(888);
memory[1] = 200;
- WasmRunner<int32_t> r(MachineType::Int32());
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(r, WASM_LOAD_MEM(MachineType::Int16(), WASM_GET_LOCAL(0)));
for (size_t i = 0; i < kNumBytes; i += 2) {
@@ -2071,8 +2140,7 @@ TEST(Run_Wasm_Int32LoadInt16_zeroext) {
byte* memory = module.AddMemory(kNumBytes);
module.RandomizeMemory(9999);
memory[1] = 204;
- WasmRunner<int32_t> r(MachineType::Int32());
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(r, WASM_LOAD_MEM(MachineType::Uint16(), WASM_GET_LOCAL(0)));
for (size_t i = 0; i < kNumBytes; i += 2) {
@@ -2085,8 +2153,7 @@ TEST(Run_Wasm_Int32LoadInt16_zeroext) {
TEST(Run_WasmInt32Global) {
TestingModule module;
int32_t* global = module.AddGlobal<int32_t>(MachineType::Int32());
- WasmRunner<int32_t> r(MachineType::Int32());
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
// global = global + p0
BUILD(r, WASM_STORE_GLOBAL(
0, WASM_I32_ADD(WASM_LOAD_GLOBAL(0), WASM_GET_LOCAL(0))));
@@ -2109,8 +2176,7 @@ TEST(Run_WasmInt32Globals_DontAlias) {
for (int g = 0; g < kNumGlobals; g++) {
// global = global + p0
- WasmRunner<int32_t> r(MachineType::Int32());
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(r, WASM_STORE_GLOBAL(
g, WASM_I32_ADD(WASM_LOAD_GLOBAL(g), WASM_GET_LOCAL(0))));
@@ -2134,8 +2200,7 @@ TEST(Run_WasmInt32Globals_DontAlias) {
TEST(Run_WasmInt64Global) {
TestingModule module;
int64_t* global = module.AddGlobal<int64_t>(MachineType::Int64());
- WasmRunner<int32_t> r(MachineType::Int32());
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
// global = global + p0
BUILD(r, WASM_BLOCK(2, WASM_STORE_GLOBAL(
0, WASM_I64_ADD(
@@ -2156,8 +2221,7 @@ TEST(Run_WasmInt64Global) {
TEST(Run_WasmFloat32Global) {
TestingModule module;
float* global = module.AddGlobal<float>(MachineType::Float32());
- WasmRunner<int32_t> r(MachineType::Int32());
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
// global = global + p0
BUILD(r, WASM_BLOCK(2, WASM_STORE_GLOBAL(
0, WASM_F32_ADD(
@@ -2177,8 +2241,7 @@ TEST(Run_WasmFloat32Global) {
TEST(Run_WasmFloat64Global) {
TestingModule module;
double* global = module.AddGlobal<double>(MachineType::Float64());
- WasmRunner<int32_t> r(MachineType::Int32());
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
// global = global + p0
BUILD(r, WASM_BLOCK(2, WASM_STORE_GLOBAL(
0, WASM_F64_ADD(
@@ -2209,8 +2272,7 @@ TEST(Run_WasmMixedGlobals) {
float* var_float = module.AddGlobal<float>(MachineType::Float32());
double* var_double = module.AddGlobal<double>(MachineType::Float64());
- WasmRunner<int32_t> r(MachineType::Int32());
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(
r,
@@ -2312,13 +2374,12 @@ TEST(Run_WasmCallEmpty) {
// Build the target function.
TestSignatures sigs;
TestingModule module;
- WasmFunctionCompiler t(sigs.i_v());
+ WasmFunctionCompiler t(sigs.i_v(), &module);
BUILD(t, WASM_I32(kExpected));
- uint32_t index = t.CompileAndAdd(&module);
+ uint32_t index = t.CompileAndAdd();
// Build the calling function.
- WasmRunner<int32_t> r;
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module);
BUILD(r, WASM_CALL_FUNCTION0(index));
int32_t result = r.Call();
@@ -2326,22 +2387,18 @@ TEST(Run_WasmCallEmpty) {
}
-// TODO(tizer): Fix on arm and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
-
TEST(Run_WasmCallF32StackParameter) {
// Build the target function.
LocalType param_types[20];
for (int i = 0; i < 20; i++) param_types[i] = kAstF32;
FunctionSig sig(1, 19, param_types);
TestingModule module;
- WasmFunctionCompiler t(&sig);
+ WasmFunctionCompiler t(&sig, &module);
BUILD(t, WASM_GET_LOCAL(17));
- uint32_t index = t.CompileAndAdd(&module);
+ uint32_t index = t.CompileAndAdd();
// Build the calling function.
- WasmRunner<float> r;
- r.env()->module = &module;
+ WasmRunner<float> r(&module);
BUILD(r, WASM_CALL_FUNCTION(
index, WASM_F32(1.0f), WASM_F32(2.0f), WASM_F32(4.0f),
WASM_F32(8.0f), WASM_F32(16.0f), WASM_F32(32.0f),
@@ -2361,13 +2418,12 @@ TEST(Run_WasmCallF64StackParameter) {
for (int i = 0; i < 20; i++) param_types[i] = kAstF64;
FunctionSig sig(1, 19, param_types);
TestingModule module;
- WasmFunctionCompiler t(&sig);
+ WasmFunctionCompiler t(&sig, &module);
BUILD(t, WASM_GET_LOCAL(17));
- uint32_t index = t.CompileAndAdd(&module);
+ uint32_t index = t.CompileAndAdd();
// Build the calling function.
- WasmRunner<double> r;
- r.env()->module = &module;
+ WasmRunner<double> r(&module);
BUILD(r, WASM_CALL_FUNCTION(index, WASM_F64(1.0), WASM_F64(2.0),
WASM_F64(4.0), WASM_F64(8.0), WASM_F64(16.0),
WASM_F64(32.0), WASM_F64(64.0), WASM_F64(128.0),
@@ -2380,8 +2436,50 @@ TEST(Run_WasmCallF64StackParameter) {
CHECK_EQ(256.5, result);
}
-#endif
+TEST(Run_WasmCallI64Parameter) {
+ // Build the target function.
+ LocalType param_types[20];
+ for (int i = 0; i < 20; i++) param_types[i] = kAstI64;
+ param_types[3] = kAstI32;
+ param_types[4] = kAstI32;
+ FunctionSig sig(1, 19, param_types);
+ for (int i = 0; i < 19; i++) {
+ TestingModule module;
+ WasmFunctionCompiler t(&sig, &module);
+ if (i == 2 || i == 3) {
+ // Locals 2 and 3 are the two i32 parameters (see param_types above), so
+ // they cannot be returned from this i64-returning callee; skip them.
+ continue;
+ }
+ BUILD(t, WASM_GET_LOCAL(i));
+ uint32_t index = t.CompileAndAdd();
+
+ // Build the calling function.
+ WasmRunner<int32_t> r(&module);
+ BUILD(r,
+ WASM_I32_CONVERT_I64(WASM_CALL_FUNCTION(
+ index, WASM_I64(0xbcd12340000000b), WASM_I64(0xbcd12340000000c),
+ WASM_I32(0xd), WASM_I32_CONVERT_I64(WASM_I64(0xbcd12340000000e)),
+ WASM_I64(0xbcd12340000000f), WASM_I64(0xbcd1234000000010),
+ WASM_I64(0xbcd1234000000011), WASM_I64(0xbcd1234000000012),
+ WASM_I64(0xbcd1234000000013), WASM_I64(0xbcd1234000000014),
+ WASM_I64(0xbcd1234000000015), WASM_I64(0xbcd1234000000016),
+ WASM_I64(0xbcd1234000000017), WASM_I64(0xbcd1234000000018),
+ WASM_I64(0xbcd1234000000019), WASM_I64(0xbcd123400000001a),
+ WASM_I64(0xbcd123400000001b), WASM_I64(0xbcd123400000001c),
+ WASM_I64(0xbcd123400000001d))));
+ CHECK_EQ(i + 0xb, r.Call());
+ }
+}
+
+TEST(Run_WasmI64And) {
+ WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
+ BUILD(r, WASM_I64_AND(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ FOR_INT64_INPUTS(i) {
+ FOR_INT64_INPUTS(j) { CHECK_EQ((*i) & (*j), r.Call(*i, *j)); }
+ }
+}
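// A note on the iteration macros used above (from cctest's value helpers,
// assumed unchanged by this patch): FOR_INT64_INPUTS(i) walks a pointer over
// a fixed table of interesting 64-bit values (zero, small constants, sign
// boundaries), which is why the test body dereferences *i and *j before
// calling into the compiled code.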
TEST(Run_WasmCallVoid) {
const byte kMemOffset = 8;
@@ -2392,15 +2490,13 @@ TEST(Run_WasmCallVoid) {
TestingModule module;
module.AddMemory(16);
module.RandomizeMemory();
- WasmFunctionCompiler t(sigs.v_v());
- t.env.module = &module;
+ WasmFunctionCompiler t(sigs.v_v(), &module);
BUILD(t, WASM_STORE_MEM(MachineType::Int32(), WASM_I8(kMemOffset),
WASM_I32(kExpected)));
- uint32_t index = t.CompileAndAdd(&module);
+ uint32_t index = t.CompileAndAdd();
// Build the calling function.
- WasmRunner<int32_t> r;
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module);
BUILD(r, WASM_CALL_FUNCTION0(index),
WASM_LOAD_MEM(MachineType::Int32(), WASM_I8(kMemOffset)));
@@ -2414,13 +2510,12 @@ TEST(Run_WasmCall_Int32Add) {
// Build the target function.
TestSignatures sigs;
TestingModule module;
- WasmFunctionCompiler t(sigs.i_ii());
+ WasmFunctionCompiler t(sigs.i_ii(), &module);
BUILD(t, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- uint32_t index = t.CompileAndAdd(&module);
+ uint32_t index = t.CompileAndAdd();
// Build the caller function.
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32());
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32());
BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT32_INPUTS(i) {
@@ -2438,13 +2533,12 @@ TEST(Run_WasmCall_Int64Sub) {
// Build the target function.
TestSignatures sigs;
TestingModule module;
- WasmFunctionCompiler t(sigs.l_ll());
+ WasmFunctionCompiler t(sigs.l_ll(), &module);
BUILD(t, WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- uint32_t index = t.CompileAndAdd(&module);
+ uint32_t index = t.CompileAndAdd();
// Build the caller function.
- WasmRunner<int64_t> r(MachineType::Int64(), MachineType::Int64());
- r.env()->module = &module;
+ WasmRunner<int64_t> r(&module, MachineType::Int64(), MachineType::Int64());
BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_INT32_INPUTS(i) {
@@ -2465,16 +2559,15 @@ TEST(Run_WasmCall_Int64Sub) {
TEST(Run_WasmCall_Float32Sub) {
TestSignatures sigs;
- WasmFunctionCompiler t(sigs.f_ff());
+ TestingModule module;
+ WasmFunctionCompiler t(sigs.f_ff(), &module);
// Build the target function.
- TestingModule module;
BUILD(t, WASM_F32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- uint32_t index = t.CompileAndAdd(&module);
+ uint32_t index = t.CompileAndAdd();
// Build the caller function.
- WasmRunner<float> r(MachineType::Float32(), MachineType::Float32());
- r.env()->module = &module;
+ WasmRunner<float> r(&module, MachineType::Float32(), MachineType::Float32());
BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
FOR_FLOAT32_INPUTS(i) {
@@ -2487,10 +2580,9 @@ TEST(Run_WasmCall_Float32Sub) {
TEST(Run_WasmCall_Float64Sub) {
- WasmRunner<int32_t> r;
TestingModule module;
double* memory = module.AddMemoryElems<double>(16);
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module);
// TODO(titzer): convert to a binop test.
BUILD(r, WASM_BLOCK(
@@ -2560,36 +2652,32 @@ static void Run_WasmMixedCall_N(int start) {
for (int i = 0; i < num_params; i++) {
b.AddParam(WasmOpcodes::LocalTypeFor(memtypes[i]));
}
- WasmFunctionCompiler t(b.Build());
- t.env.module = &module;
+ WasmFunctionCompiler t(b.Build(), &module);
BUILD(t, WASM_GET_LOCAL(which));
- index = t.CompileAndAdd(&module);
+ index = t.CompileAndAdd();
// =========================================================================
// Build the calling function.
// =========================================================================
- WasmRunner<int32_t> r;
- r.env()->module = &module;
+ WasmRunner<int32_t> r(&module);
- {
- std::vector<byte> code;
- ADD_CODE(code,
- static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(result, true)),
- WasmOpcodes::LoadStoreAccessOf(false));
- ADD_CODE(code, WASM_ZERO);
- ADD_CODE(code, kExprCallFunction, static_cast<byte>(index));
-
- for (int i = 0; i < num_params; i++) {
- int offset = (i + 1) * kElemSize;
- ADD_CODE(code, WASM_LOAD_MEM(memtypes[i], WASM_I8(offset)));
- }
+ std::vector<byte> code;
+ ADD_CODE(code,
+ static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(result, true)),
+ WasmOpcodes::LoadStoreAccessOf(false));
+ ADD_CODE(code, WASM_ZERO);
+ ADD_CODE(code, kExprCallFunction, static_cast<byte>(index));
- ADD_CODE(code, WASM_I32(kExpected));
- size_t end = code.size();
- code.push_back(0);
- r.Build(&code[0], &code[end]);
+ for (int i = 0; i < num_params; i++) {
+ int offset = (i + 1) * kElemSize;
+ ADD_CODE(code, WASM_LOAD_MEM(memtypes[i], WASM_I8(offset)));
}
+ ADD_CODE(code, WASM_I32(kExpected));
+ size_t end = code.size();
+ code.push_back(0);
+ r.Build(&code[0], &code[end]);
+
// Run the code.
for (int t = 0; t < 10; t++) {
module.RandomizeMemory();
@@ -2612,6 +2700,27 @@ TEST(Run_WasmMixedCall_1) { Run_WasmMixedCall_N(1); }
TEST(Run_WasmMixedCall_2) { Run_WasmMixedCall_N(2); }
TEST(Run_WasmMixedCall_3) { Run_WasmMixedCall_N(3); }
+TEST(Run_Wasm_AddCall) {
+ TestSignatures sigs;
+ TestingModule module;
+ WasmFunctionCompiler t1(sigs.i_ii(), &module);
+ BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ t1.CompileAndAdd();
+
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
+ byte local = r.AllocateLocal(kAstI32);
+ BUILD(r,
+ WASM_BLOCK(2, WASM_SET_LOCAL(local, WASM_I8(99)),
+ WASM_I32_ADD(
+ WASM_CALL_FUNCTION(t1.function_index_, WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0)),
+ WASM_CALL_FUNCTION(t1.function_index_, WASM_GET_LOCAL(1),
+ WASM_GET_LOCAL(local)))));
+
+ CHECK_EQ(198, r.Call(0));
+ CHECK_EQ(200, r.Call(1));
+ CHECK_EQ(100, r.Call(-49));
+}
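// The test above shows the shape this patch migrates every test to: build the
// TestingModule first, register callees through WasmFunctionCompiler, then
// hand the same module to WasmRunner. A minimal sketch (names as used in this
// file):
//
//   TestingModule module;                          // owns module + instance
//   WasmFunctionCompiler t(sigs.i_ii(), &module);  // callee slot in module
//   BUILD(t, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
//   uint32_t index = t.CompileAndAdd();            // compile, store code
//   WasmRunner<int32_t> r(&module, MachineType::Int32());
//   BUILD(r, WASM_CALL_FUNCTION(index, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
//   CHECK_EQ(14, r.Call(7));                       // 7 + 7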
TEST(Run_Wasm_CountDown_expr) {
WasmRunner<int32_t> r(MachineType::Int32());
@@ -2646,7 +2755,7 @@ TEST(Run_Wasm_ExprBlock2b) {
TEST(Run_Wasm_ExprBlock2c) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_I8(1)),
+ BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_I8(1), WASM_GET_LOCAL(0)),
WASM_I8(1)));
CHECK_EQ(1, r.Call(0));
CHECK_EQ(1, r.Call(1));
@@ -2655,7 +2764,7 @@ TEST(Run_Wasm_ExprBlock2c) {
TEST(Run_Wasm_ExprBlock2d) {
WasmRunner<int32_t> r(MachineType::Int32());
- BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_I8(1)),
+ BUILD(r, WASM_BLOCK(2, WASM_BRV_IF(0, WASM_I8(1), WASM_GET_LOCAL(0)),
WASM_I8(2)));
CHECK_EQ(2, r.Call(0));
CHECK_EQ(1, r.Call(1));
@@ -2688,16 +2797,16 @@ TEST(Run_Wasm_ExprBlock_ManualSwitch) {
TEST(Run_Wasm_ExprBlock_ManualSwitch_brif) {
WasmRunner<int32_t> r(MachineType::Int32());
BUILD(r,
- WASM_BLOCK(6, WASM_BRV_IF(0, WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(1)),
- WASM_I8(11)),
- WASM_BRV_IF(0, WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(2)),
- WASM_I8(12)),
- WASM_BRV_IF(0, WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(3)),
- WASM_I8(13)),
- WASM_BRV_IF(0, WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(4)),
- WASM_I8(14)),
- WASM_BRV_IF(0, WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(5)),
- WASM_I8(15)),
+ WASM_BLOCK(6, WASM_BRV_IF(0, WASM_I8(11),
+ WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(1))),
+ WASM_BRV_IF(0, WASM_I8(12),
+ WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(2))),
+ WASM_BRV_IF(0, WASM_I8(13),
+ WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(3))),
+ WASM_BRV_IF(0, WASM_I8(14),
+ WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(4))),
+ WASM_BRV_IF(0, WASM_I8(15),
+ WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(5))),
WASM_I8(99)));
CHECK_EQ(99, r.Call(0));
CHECK_EQ(11, r.Call(1));
@@ -2781,10 +2890,9 @@ TEST(Run_Wasm_LoadStoreI64_sx) {
kExprI64LoadMem};
for (size_t m = 0; m < arraysize(loads); m++) {
- WasmRunner<int64_t> r;
TestingModule module;
byte* memory = module.AddMemoryElems<byte>(16);
- r.env()->module = &module;
+ WasmRunner<int64_t> r(&module);
byte code[] = {kExprI64StoreMem, 0, kExprI8Const, 8,
loads[m], 0, kExprI8Const, 0};
@@ -2813,19 +2921,16 @@ TEST(Run_Wasm_LoadStoreI64_sx) {
TEST(Run_Wasm_SimpleCallIndirect) {
- Isolate* isolate = CcTest::InitIsolateOnce();
-
- WasmRunner<int32_t> r(MachineType::Int32());
TestSignatures sigs;
TestingModule module;
- r.env()->module = &module;
- WasmFunctionCompiler t1(sigs.i_ii());
+
+ WasmFunctionCompiler t1(sigs.i_ii(), &module);
BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- t1.CompileAndAdd(&module);
+ t1.CompileAndAdd(/*sig_index*/ 1);
- WasmFunctionCompiler t2(sigs.i_ii());
+ WasmFunctionCompiler t2(sigs.i_ii(), &module);
BUILD(t2, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- t2.CompileAndAdd(&module);
+ t2.CompileAndAdd(/*sig_index*/ 1);
// Signature table.
module.AddSignature(sigs.f_ff());
@@ -2833,20 +2938,12 @@ TEST(Run_Wasm_SimpleCallIndirect) {
module.AddSignature(sigs.d_dd());
// Function table.
- int table_size = 2;
- module.module->function_table = new std::vector<uint16_t>;
- module.module->function_table->push_back(0);
- module.module->function_table->push_back(1);
-
- // Function table.
- Handle<FixedArray> fixed = isolate->factory()->NewFixedArray(2 * table_size);
- fixed->set(0, Smi::FromInt(1));
- fixed->set(1, Smi::FromInt(1));
- fixed->set(2, *module.function_code->at(0));
- fixed->set(3, *module.function_code->at(1));
- module.function_table = fixed;
+ int table[] = {0, 1};
+ module.AddIndirectFunctionTable(table, 2);
+ module.PopulateIndirectFunctionTable();
// Build the caller function.
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
BUILD(r, WASM_CALL_INDIRECT(1, WASM_GET_LOCAL(0), WASM_I8(66), WASM_I8(22)));
CHECK_EQ(88, r.Call(0));
@@ -2856,20 +2953,16 @@ TEST(Run_Wasm_SimpleCallIndirect) {
TEST(Run_Wasm_MultipleCallIndirect) {
- Isolate* isolate = CcTest::InitIsolateOnce();
-
- WasmRunner<int32_t> r(MachineType::Int32(), MachineType::Int32(),
- MachineType::Int32());
TestSignatures sigs;
TestingModule module;
- r.env()->module = &module;
- WasmFunctionCompiler t1(sigs.i_ii());
+
+ WasmFunctionCompiler t1(sigs.i_ii(), &module);
BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- t1.CompileAndAdd(&module);
+ t1.CompileAndAdd(/*sig_index*/ 1);
- WasmFunctionCompiler t2(sigs.i_ii());
+ WasmFunctionCompiler t2(sigs.i_ii(), &module);
BUILD(t2, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
- t2.CompileAndAdd(&module);
+ t2.CompileAndAdd(/*sig_index*/ 1);
// Signature table.
module.AddSignature(sigs.f_ff());
@@ -2877,20 +2970,13 @@ TEST(Run_Wasm_MultipleCallIndirect) {
module.AddSignature(sigs.d_dd());
// Function table.
- int table_size = 2;
- module.module->function_table = new std::vector<uint16_t>;
- module.module->function_table->push_back(0);
- module.module->function_table->push_back(1);
-
- // Function table.
- Handle<FixedArray> fixed = isolate->factory()->NewFixedArray(2 * table_size);
- fixed->set(0, Smi::FromInt(1));
- fixed->set(1, Smi::FromInt(1));
- fixed->set(2, *module.function_code->at(0));
- fixed->set(3, *module.function_code->at(1));
- module.function_table = fixed;
+ int table[] = {0, 1};
+ module.AddIndirectFunctionTable(table, 2);
+ module.PopulateIndirectFunctionTable();
// Build the caller function.
+ WasmRunner<int32_t> r(&module, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
BUILD(r,
WASM_I32_ADD(WASM_CALL_INDIRECT(1, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1),
WASM_GET_LOCAL(2)),
@@ -2908,42 +2994,56 @@ TEST(Run_Wasm_MultipleCallIndirect) {
CHECK_TRAP(r.Call(2, 1, 0));
}
+TEST(Run_Wasm_CallIndirect_NoTable) {
+ TestSignatures sigs;
+ TestingModule module;
+
+ // One function.
+ WasmFunctionCompiler t1(sigs.i_ii(), &module);
+ BUILD(t1, WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ t1.CompileAndAdd(/*sig_index*/ 1);
-// TODO(titzer): Fix for nosee4 and re-enable.
-#if 0
+ // Signature table.
+ module.AddSignature(sigs.f_ff());
+ module.AddSignature(sigs.i_ii());
+
+ // Build the caller function.
+ WasmRunner<int32_t> r(&module, MachineType::Int32());
+ BUILD(r, WASM_CALL_INDIRECT(1, WASM_GET_LOCAL(0), WASM_I8(66), WASM_I8(22)));
+
+ CHECK_TRAP(r.Call(0));
+ CHECK_TRAP(r.Call(1));
+ CHECK_TRAP(r.Call(2));
+}
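// Arithmetic behind the checks above: both callees were added with
// sig_index 1 (the i_ii signature), so WASM_CALL_INDIRECT(1, index, 66, 22)
// dispatches on the first operand: index 0 hits the add (66 + 22 == 88),
// index 1 hits the sub (66 - 22 == 44), and anything past the two-entry table
// traps. With no table installed at all, as in Run_Wasm_CallIndirect_NoTable
// above, every index traps.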
TEST(Run_Wasm_F32Floor) {
WasmRunner<float> r(MachineType::Float32());
BUILD(r, WASM_F32_FLOOR(WASM_GET_LOCAL(0)));
- FOR_FLOAT32_INPUTS(i) { CheckFloatEq(floor(*i), r.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CheckFloatEq(floorf(*i), r.Call(*i)); }
}
-
TEST(Run_Wasm_F32Ceil) {
WasmRunner<float> r(MachineType::Float32());
BUILD(r, WASM_F32_CEIL(WASM_GET_LOCAL(0)));
- FOR_FLOAT32_INPUTS(i) { CheckFloatEq(ceil(*i), r.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CheckFloatEq(ceilf(*i), r.Call(*i)); }
}
-
TEST(Run_Wasm_F32Trunc) {
WasmRunner<float> r(MachineType::Float32());
BUILD(r, WASM_F32_TRUNC(WASM_GET_LOCAL(0)));
- FOR_FLOAT32_INPUTS(i) { CheckFloatEq(trunc(*i), r.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CheckFloatEq(truncf(*i), r.Call(*i)); }
}
-
TEST(Run_Wasm_F32NearestInt) {
WasmRunner<float> r(MachineType::Float32());
BUILD(r, WASM_F32_NEARESTINT(WASM_GET_LOCAL(0)));
- FOR_FLOAT32_INPUTS(i) { CheckFloatEq(nearbyint(*i), r.Call(*i)); }
+ FOR_FLOAT32_INPUTS(i) { CheckFloatEq(nearbyintf(*i), r.Call(*i)); }
}
-
TEST(Run_Wasm_F64Floor) {
WasmRunner<double> r(MachineType::Float64());
BUILD(r, WASM_F64_FLOOR(WASM_GET_LOCAL(0)));
@@ -2951,7 +3051,6 @@ TEST(Run_Wasm_F64Floor) {
FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(floor(*i), r.Call(*i)); }
}
-
TEST(Run_Wasm_F64Ceil) {
WasmRunner<double> r(MachineType::Float64());
BUILD(r, WASM_F64_CEIL(WASM_GET_LOCAL(0)));
@@ -2959,7 +3058,6 @@ TEST(Run_Wasm_F64Ceil) {
FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(ceil(*i), r.Call(*i)); }
}
-
TEST(Run_Wasm_F64Trunc) {
WasmRunner<double> r(MachineType::Float64());
BUILD(r, WASM_F64_TRUNC(WASM_GET_LOCAL(0)));
@@ -2967,7 +3065,6 @@ TEST(Run_Wasm_F64Trunc) {
FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(trunc(*i), r.Call(*i)); }
}
-
TEST(Run_Wasm_F64NearestInt) {
WasmRunner<double> r(MachineType::Float64());
BUILD(r, WASM_F64_NEARESTINT(WASM_GET_LOCAL(0)));
@@ -2975,9 +3072,6 @@ TEST(Run_Wasm_F64NearestInt) {
FOR_FLOAT64_INPUTS(i) { CheckDoubleEq(nearbyint(*i), r.Call(*i)); }
}
-#endif
-
-
TEST(Run_Wasm_F32Min) {
WasmRunner<float> r(MachineType::Float32(), MachineType::Float32());
BUILD(r, WASM_F32_MIN(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
@@ -3073,6 +3167,74 @@ TEST(Run_Wasm_F64Max) {
}
}
+// TODO(ahaas): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST(Run_Wasm_F32Min_Snan) {
+ // Test that the instruction does not return a signalling NaN.
+ {
+ WasmRunner<float> r;
+ BUILD(r,
+ WASM_F32_MIN(WASM_F32(bit_cast<float>(0xff80f1e2)), WASM_F32(57.67)));
+ CHECK_EQ(0xffc0f1e2, bit_cast<uint32_t>(r.Call()));
+ }
+ {
+ WasmRunner<float> r;
+ BUILD(r,
+ WASM_F32_MIN(WASM_F32(45.73), WASM_F32(bit_cast<float>(0x7f80f1e2))));
+ CHECK_EQ(0x7fc0f1e2, bit_cast<uint32_t>(r.Call()));
+ }
+}
+
+TEST(Run_Wasm_F32Max_Snan) {
+ // Test that the instruction does not return a signalling NaN.
+ {
+ WasmRunner<float> r;
+ BUILD(r,
+ WASM_F32_MAX(WASM_F32(bit_cast<float>(0xff80f1e2)), WASM_F32(57.67)));
+ CHECK_EQ(0xffc0f1e2, bit_cast<uint32_t>(r.Call()));
+ }
+ {
+ WasmRunner<float> r;
+ BUILD(r,
+ WASM_F32_MAX(WASM_F32(45.73), WASM_F32(bit_cast<float>(0x7f80f1e2))));
+ CHECK_EQ(0x7fc0f1e2, bit_cast<uint32_t>(r.Call()));
+ }
+}
+
+TEST(Run_Wasm_F64Min_Snan) {
+ // Test that the instruction does not return a signalling NaN.
+ {
+ WasmRunner<double> r;
+ BUILD(r, WASM_F64_MIN(WASM_F64(bit_cast<double>(0xfff000000000f1e2)),
+ WASM_F64(57.67)));
+ CHECK_EQ(0xfff800000000f1e2, bit_cast<uint64_t>(r.Call()));
+ }
+ {
+ WasmRunner<double> r;
+ BUILD(r, WASM_F64_MIN(WASM_F64(45.73),
+ WASM_F64(bit_cast<double>(0x7ff000000000f1e2))));
+ CHECK_EQ(0x7ff800000000f1e2, bit_cast<uint64_t>(r.Call()));
+ }
+}
+
+TEST(Run_Wasm_F64Max_Snan) {
+ // Test that the instruction does not return a signalling NaN.
+ {
+ WasmRunner<double> r;
+ BUILD(r, WASM_F64_MAX(WASM_F64(bit_cast<double>(0xfff000000000f1e2)),
+ WASM_F64(57.67)));
+ CHECK_EQ(0xfff800000000f1e2, bit_cast<uint64_t>(r.Call()));
+ }
+ {
+ WasmRunner<double> r;
+ BUILD(r, WASM_F64_MAX(WASM_F64(45.73),
+ WASM_F64(bit_cast<double>(0x7ff000000000f1e2))));
+ CHECK_EQ(0x7ff800000000f1e2, bit_cast<uint64_t>(r.Call()));
+ }
+}
+
+#endif
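// The _Snan expectations above follow from the IEEE 754 quiet-bit convention:
// the top mantissa bit (bit 22 for binary32, bit 51 for binary64) is set when
// a signalling NaN is quieted, and min/max must not propagate an sNaN as-is.
// A one-line check of the bit arithmetic (a sketch, not part of the patch):
//
//   static_assert((0xff80f1e2u | (1u << 22)) == 0xffc0f1e2u, "f32 quiet bit");
//   static_assert((0xfff000000000f1e2ull | (1ull << 51)) ==
//                     0xfff800000000f1e2ull, "f64 quiet bit");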
#if WASM_64
TEST(Run_Wasm_F32SConvertI64) {
@@ -3251,4 +3413,48 @@ TEST(Run_Wasm_F32CopySign) {
}
}
+
+#endif
+
+
+void CompileCallIndirectMany(LocalType param) {
+ // Make sure we don't run out of registers when compiling indirect calls
+ // with many many parameters.
+ TestSignatures sigs;
+ for (byte num_params = 0; num_params < 40; num_params++) {
+ Zone zone;
+ HandleScope scope(CcTest::InitIsolateOnce());
+ TestingModule module;
+ FunctionSig* sig = sigs.many(&zone, kAstStmt, param, num_params);
+
+ module.AddSignature(sig);
+ module.AddSignature(sig);
+ module.AddIndirectFunctionTable(nullptr, 0);
+
+ WasmFunctionCompiler t(sig, &module);
+
+ std::vector<byte> code;
+ ADD_CODE(code, kExprCallIndirect, 1);
+ ADD_CODE(code, kExprI8Const, 0);
+ for (byte p = 0; p < num_params; p++) {
+ ADD_CODE(code, kExprGetLocal, p);
+ }
+
+ t.Build(&code[0], &code[0] + code.size());
+ t.Compile();
+ }
+}
+
+
+TEST(Compile_Wasm_CallIndirect_Many_i32) { CompileCallIndirectMany(kAstI32); }
+
+
+#if WASM_64
+TEST(Compile_Wasm_CallIndirect_Many_i64) { CompileCallIndirectMany(kAstI64); }
#endif
+
+
+TEST(Compile_Wasm_CallIndirect_Many_f32) { CompileCallIndirectMany(kAstF32); }
+
+
+TEST(Compile_Wasm_CallIndirect_Many_f64) { CompileCallIndirectMany(kAstF64); }
diff --git a/deps/v8/test/cctest/wasm/test-signatures.h b/deps/v8/test/cctest/wasm/test-signatures.h
index 30ea605386..a5bc7b4f14 100644
--- a/deps/v8/test/cctest/wasm/test-signatures.h
+++ b/deps/v8/test/cctest/wasm/test-signatures.h
@@ -72,6 +72,15 @@ class TestSignatures {
FunctionSig* v_ii() { return &sig_v_ii; }
FunctionSig* v_iii() { return &sig_v_iii; }
+ FunctionSig* many(Zone* zone, LocalType ret, LocalType param, int count) {
+ FunctionSig::Builder builder(zone, ret == kAstStmt ? 0 : 1, count);
+ if (ret != kAstStmt) builder.AddReturn(ret);
+ for (int i = 0; i < count; i++) {
+ builder.AddParam(param);
+ }
+ return builder.Build();
+ }
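// Example use (a sketch): many(&zone, kAstStmt, kAstI32, 5) yields a
// signature with no return value and five i32 parameters; this is what
// CompileCallIndirectMany in test-run-wasm.cc feeds it.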
+
private:
LocalType kIntTypes4[4];
LocalType kLongTypes4[4];
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index cc23b46b73..7ee3981885 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -12,7 +12,10 @@
#include "src/base/utils/random-number-generator.h"
#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/int64-lowering.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/pipeline.h"
#include "src/compiler/wasm-compiler.h"
#include "src/wasm/ast-decoder.h"
@@ -20,8 +23,10 @@
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
+#include "src/zone.h"
+
#include "test/cctest/cctest.h"
-#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/call-tester.h"
#include "test/cctest/compiler/graph-builder-tester.h"
// TODO(titzer): pull WASM_64 up to a common header.
@@ -31,6 +36,8 @@
#define WASM_64 0
#endif
+static const uint32_t kMaxFunctions = 10;
+
// TODO(titzer): check traps more robustly in tests.
// Currently, in tests, we just return 0xdeadbeef from the function in which
// the trap occurs if the runtime context is not available to throw a JavaScript
@@ -41,6 +48,9 @@
CHECK_EQ(0xdeadbeefdeadbeef, (bit_cast<uint64_t>(x)) & 0xFFFFFFFFFFFFFFFF)
#define CHECK_TRAP(x) CHECK_TRAP32(x)
+#define WASM_RUNNER_MAX_NUM_PARAMETERS 4
+#define WASM_WRAPPER_RETURN_VALUE 8754
+
namespace {
using namespace v8::base;
using namespace v8::internal;
@@ -50,47 +60,51 @@ using namespace v8::internal::wasm;
inline void init_env(FunctionEnv* env, FunctionSig* sig) {
env->module = nullptr;
env->sig = sig;
- env->local_int32_count = 0;
- env->local_int64_count = 0;
- env->local_float32_count = 0;
- env->local_float64_count = 0;
+ env->local_i32_count = 0;
+ env->local_i64_count = 0;
+ env->local_f32_count = 0;
+ env->local_f64_count = 0;
env->SumLocals();
}
const uint32_t kMaxGlobalsSize = 128;
// A helper for module environments that adds the ability to allocate memory
-// and global variables.
+// and global variables. Contains a built-in {WasmModule} and
+// {WasmModuleInstance}.
class TestingModule : public ModuleEnv {
public:
- TestingModule() : mem_size(0), global_offset(0) {
- globals_area = 0;
- mem_start = 0;
- mem_end = 0;
- module = nullptr;
+ TestingModule() : instance_(&module_), global_offset(0) {
+ module_.shared_isolate = CcTest::InitIsolateOnce();
+ module = &module_;
+ instance = &instance_;
+ instance->module = &module_;
+ instance->globals_start = global_data;
+ instance->globals_size = kMaxGlobalsSize;
+ instance->mem_start = nullptr;
+ instance->mem_size = 0;
+ instance->function_code = nullptr;
linker = nullptr;
- function_code = nullptr;
asm_js = false;
memset(global_data, 0, sizeof(global_data));
}
~TestingModule() {
- if (mem_start) {
- free(raw_mem_start<byte>());
+ if (instance->mem_start) {
+ free(instance->mem_start);
+ }
+ if (instance->function_code) {
+ delete instance->function_code;
}
- if (function_code) delete function_code;
- if (module) delete module;
}
byte* AddMemory(size_t size) {
- CHECK_EQ(0, mem_start);
- CHECK_EQ(0, mem_size);
- mem_start = reinterpret_cast<uintptr_t>(malloc(size));
- CHECK(mem_start);
- byte* raw = raw_mem_start<byte>();
- memset(raw, 0, size);
- mem_end = mem_start + size;
- mem_size = size;
+ CHECK_NULL(instance->mem_start);
+ CHECK_EQ(0, instance->mem_size);
+ instance->mem_start = reinterpret_cast<byte*>(malloc(size));
+ CHECK(instance->mem_start);
+ memset(instance->mem_start, 0, size);
+ instance->mem_size = size;
return raw_mem_start<byte>();
}
@@ -103,11 +117,10 @@ class TestingModule : public ModuleEnv {
template <typename T>
T* AddGlobal(MachineType mem_type) {
WasmGlobal* global = AddGlobal(mem_type);
- return reinterpret_cast<T*>(globals_area + global->offset);
+ return reinterpret_cast<T*>(instance->globals_start + global->offset);
}
byte AddSignature(FunctionSig* sig) {
- AllocModule();
if (!module->signatures) {
module->signatures = new std::vector<FunctionSig*>();
}
@@ -119,33 +132,33 @@ class TestingModule : public ModuleEnv {
template <typename T>
T* raw_mem_start() {
- DCHECK(mem_start);
- return reinterpret_cast<T*>(mem_start);
+ DCHECK(instance->mem_start);
+ return reinterpret_cast<T*>(instance->mem_start);
}
template <typename T>
T* raw_mem_end() {
- DCHECK(mem_end);
- return reinterpret_cast<T*>(mem_end);
+ DCHECK(instance->mem_start);
+ return reinterpret_cast<T*>(instance->mem_start + instance->mem_size);
}
template <typename T>
T raw_mem_at(int i) {
- DCHECK(mem_start);
- return reinterpret_cast<T*>(mem_start)[i];
+ DCHECK(instance->mem_start);
+ return reinterpret_cast<T*>(instance->mem_start)[i];
}
template <typename T>
T raw_val_at(int i) {
T val;
- memcpy(&val, reinterpret_cast<void*>(mem_start + i), sizeof(T));
+ memcpy(&val, reinterpret_cast<void*>(instance->mem_start + i), sizeof(T));
return val;
}
// Zero-initialize the memory.
void BlankMemory() {
byte* raw = raw_mem_start<byte>();
- memset(raw, 0, mem_size);
+ memset(raw, 0, instance->mem_size);
}
// Pseudo-randomly initialize the memory.
@@ -157,26 +170,57 @@ class TestingModule : public ModuleEnv {
rng.NextBytes(raw, end - raw);
}
- WasmFunction* AddFunction(FunctionSig* sig, Handle<Code> code) {
- AllocModule();
+ int AddFunction(FunctionSig* sig, Handle<Code> code) {
if (module->functions == nullptr) {
module->functions = new std::vector<WasmFunction>();
- function_code = new std::vector<Handle<Code>>();
+ // TODO(titzer): Reserve space here to keep the underlying WasmFunction
+ // structs from moving.
+ module->functions->reserve(kMaxFunctions);
+ instance->function_code = new std::vector<Handle<Code>>();
+ }
+ uint32_t index = static_cast<uint32_t>(module->functions->size());
+ module->functions->push_back(
+ {sig, index, 0, 0, 0, 0, 0, 0, 0, false, false});
+ instance->function_code->push_back(code);
+ DCHECK_LT(index, kMaxFunctions); // limited for testing.
+ return index;
+ }
+
+ void SetFunctionCode(uint32_t index, Handle<Code> code) {
+ instance->function_code->at(index) = code;
+ }
+
+ void AddIndirectFunctionTable(int* functions, int table_size) {
+ Isolate* isolate = module->shared_isolate;
+ Handle<FixedArray> fixed =
+ isolate->factory()->NewFixedArray(2 * table_size);
+ instance->function_table = fixed;
+ module->function_table = new std::vector<uint16_t>();
+ for (int i = 0; i < table_size; i++) {
+ module->function_table->push_back(functions[i]);
+ }
+ }
+
+ void PopulateIndirectFunctionTable() {
+ if (instance->function_table.is_null()) return;
+ int table_size = static_cast<int>(module->function_table->size());
+ for (int i = 0; i < table_size; i++) {
+ int function_index = module->function_table->at(i);
+ WasmFunction* function = &module->functions->at(function_index);
+ instance->function_table->set(i, Smi::FromInt(function->sig_index));
+ instance->function_table->set(
+ i + table_size, *instance->function_code->at(function_index));
}
- module->functions->push_back({sig, 0, 0, 0, 0, 0, 0, 0, false, false});
- function_code->push_back(code);
- return &module->functions->back();
}
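// Layout produced above for the {0, 1} table used by the call-indirect tests,
// shown schematically (this replaces the hand-built FixedArray the patch
// deletes from the tests):
//
//   index:  0                 1                 2           3
//   value:  sig_index(f[0])   sig_index(f[1])   code(f[0])  code(f[1])
//
// i.e. signature ids occupy the first half and code objects the second half
// of a FixedArray of length 2 * table_size.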
private:
- size_t mem_size;
+ WasmModule module_;
+ WasmModuleInstance instance_;
uint32_t global_offset;
- byte global_data[kMaxGlobalsSize];
+ V8_ALIGNED(8) byte global_data[kMaxGlobalsSize]; // preallocated global data.
WasmGlobal* AddGlobal(MachineType mem_type) {
- AllocModule();
- if (globals_area == 0) {
- globals_area = reinterpret_cast<uintptr_t>(global_data);
+ if (!module->globals) {
module->globals = new std::vector<WasmGlobal>();
}
byte size = WasmOpcodes::MemSize(mem_type);
@@ -187,15 +231,6 @@ class TestingModule : public ModuleEnv {
CHECK_LT(global_offset, kMaxGlobalsSize);
return &module->globals->back();
}
- void AllocModule() {
- if (module == nullptr) {
- module = new WasmModule();
- module->shared_isolate = CcTest::InitIsolateOnce();
- module->globals = nullptr;
- module->functions = nullptr;
- module->data_segments = nullptr;
- }
- }
};
@@ -212,39 +247,212 @@ inline void TestBuildingGraph(Zone* zone, JSGraph* jsgraph, FunctionEnv* env,
str << ", msg = " << result.error_msg.get();
FATAL(str.str().c_str());
}
+ builder.Int64LoweringForTesting();
if (FLAG_trace_turbo_graph) {
OFStream os(stdout);
os << AsRPO(*jsgraph->graph());
}
}
+template <typename ReturnType>
+class WasmFunctionWrapper : public HandleAndZoneScope,
+ private GraphAndBuilders {
+ public:
+ WasmFunctionWrapper()
+ : GraphAndBuilders(main_zone()),
+ inner_code_node_(nullptr),
+ signature_(nullptr) {
+ // One additional parameter for the pointer to the return value memory.
+ Signature<MachineType>::Builder sig_builder(
+ zone(), 1, WASM_RUNNER_MAX_NUM_PARAMETERS + 1);
+
+ sig_builder.AddReturn(MachineType::Int32());
+ for (int i = 0; i < WASM_RUNNER_MAX_NUM_PARAMETERS + 1; i++) {
+ sig_builder.AddParam(MachineType::Pointer());
+ }
+ signature_ = sig_builder.Build();
+ }
+
+ void Init(CallDescriptor* descriptor, MachineType p0 = MachineType::None(),
+ MachineType p1 = MachineType::None(),
+ MachineType p2 = MachineType::None(),
+ MachineType p3 = MachineType::None()) {
+ // Create the TF graph for the wrapper. The wrapper always takes four
+ // parameter pointers plus one return-value pointer, but may not pass the
+ // values of all parameter pointers on to the actual test function.
+
+ // Parameter nodes plus three extras: callee code, effect, and control.
+ Node** parameters =
+ zone()->template NewArray<Node*>(WASM_RUNNER_MAX_NUM_PARAMETERS + 3);
+ graph()->SetStart(graph()->NewNode(common()->Start(6)));
+ Node* effect = graph()->start();
+ int parameter_count = 0;
+
+ // Dummy node which gets replaced in SetInnerCode.
+ inner_code_node_ = graph()->NewNode(common()->Int32Constant(0));
+ parameters[parameter_count++] = inner_code_node_;
+
+ if (p0 != MachineType::None()) {
+ parameters[parameter_count] = graph()->NewNode(
+ machine()->Load(p0),
+ graph()->NewNode(common()->Parameter(0), graph()->start()),
+ graph()->NewNode(common()->Int32Constant(0)), effect,
+ graph()->start());
+ effect = parameters[parameter_count++];
+ }
+ if (p1 != MachineType::None()) {
+ parameters[parameter_count] = graph()->NewNode(
+ machine()->Load(p1),
+ graph()->NewNode(common()->Parameter(1), graph()->start()),
+ graph()->NewNode(common()->Int32Constant(0)), effect,
+ graph()->start());
+ effect = parameters[parameter_count++];
+ }
+ if (p2 != MachineType::None()) {
+ parameters[parameter_count] = graph()->NewNode(
+ machine()->Load(p2),
+ graph()->NewNode(common()->Parameter(2), graph()->start()),
+ graph()->NewNode(common()->Int32Constant(0)), effect,
+ graph()->start());
+ effect = parameters[parameter_count++];
+ }
+ if (p3 != MachineType::None()) {
+ parameters[parameter_count] = graph()->NewNode(
+ machine()->Load(p3),
+ graph()->NewNode(common()->Parameter(3), graph()->start()),
+ graph()->NewNode(common()->Int32Constant(0)), effect,
+ graph()->start());
+ effect = parameters[parameter_count++];
+ }
+
+ parameters[parameter_count++] = effect;
+ parameters[parameter_count++] = graph()->start();
+ Node* call = graph()->NewNode(common()->Call(descriptor), parameter_count,
+ parameters);
+
+ effect = graph()->NewNode(
+ machine()->Store(
+ StoreRepresentation(MachineTypeForC<ReturnType>().representation(),
+ WriteBarrierKind::kNoWriteBarrier)),
+ graph()->NewNode(common()->Parameter(WASM_RUNNER_MAX_NUM_PARAMETERS),
+ graph()->start()),
+ graph()->NewNode(common()->Int32Constant(0)), call, effect,
+ graph()->start());
+ Node* r = graph()->NewNode(
+ common()->Return(),
+ graph()->NewNode(common()->Int32Constant(WASM_WRAPPER_RETURN_VALUE)),
+ effect, graph()->start());
+ graph()->SetEnd(graph()->NewNode(common()->End(2), r, graph()->start()));
+ }
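// In effect the generated wrapper behaves like the following C function
// (a sketch; T0..T3 and ReturnType vary per test):
//
//   int32_t wrapper(T0* p0, T1* p1, T2* p2, T3* p3, ReturnType* ret) {
//     *ret = inner_code(*p0, *p1, *p2, *p3);  // unused params never loaded
//     return WASM_WRAPPER_RETURN_VALUE;       // 8754, checked by the caller
//   }
//
// Taking every argument through a pointer keeps the wrapper's own signature
// fixed at five machine words, which also lets the kPointerSize == 4 path in
// GetWrapperCode() lower it uniformly to word32 parameters.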
+
+ void SetInnerCode(Handle<Code> code_handle) {
+ NodeProperties::ChangeOp(inner_code_node_,
+ common()->HeapConstant(code_handle));
+ }
+
+ Handle<Code> GetWrapperCode() {
+ if (code_.is_null()) {
+ Isolate* isolate = CcTest::InitIsolateOnce();
+
+ CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), signature_, true);
+
+ if (kPointerSize == 4) {
+ // One additional parameter for the pointer of the return value.
+ Signature<MachineRepresentation>::Builder rep_builder(
+ zone(), 1, WASM_RUNNER_MAX_NUM_PARAMETERS + 1);
+
+ rep_builder.AddReturn(MachineRepresentation::kWord32);
+ for (int i = 0; i < WASM_RUNNER_MAX_NUM_PARAMETERS + 1; i++) {
+ rep_builder.AddParam(MachineRepresentation::kWord32);
+ }
+ Int64Lowering r(graph(), machine(), common(), zone(),
+ rep_builder.Build());
+ r.LowerGraph();
+ }
+
+ CompilationInfo info("testing", isolate, graph()->zone());
+ code_ =
+ Pipeline::GenerateCodeForTesting(&info, descriptor, graph(), nullptr);
+ CHECK(!code_.is_null());
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_opt_code) {
+ OFStream os(stdout);
+ code_->Disassemble("wasm wrapper", os);
+ }
+#endif
+ }
-// A helper for compiling functions that are only internally callable WASM code.
+ return code_;
+ }
+
+ Signature<MachineType>* signature() const { return signature_; }
+
+ private:
+ Node* inner_code_node_;
+ Handle<Code> code_;
+ Signature<MachineType>* signature_;
+};
+
+// A helper for compiling WASM functions for testing. This class can create a
+// standalone function if {module} is NULL or a function within a
+// {TestingModule}. It contains the internal state for compilation (i.e. the
+// TurboFan graph) and, later, interpretation.
class WasmFunctionCompiler : public HandleAndZoneScope,
private GraphAndBuilders {
public:
- explicit WasmFunctionCompiler(FunctionSig* sig, ModuleEnv* module = nullptr)
+ explicit WasmFunctionCompiler(FunctionSig* sig, TestingModule* module)
: GraphAndBuilders(main_zone()),
jsgraph(this->isolate(), this->graph(), this->common(), nullptr,
nullptr, this->machine()),
- descriptor_(nullptr) {
+ descriptor_(nullptr),
+ testing_module_(module) {
init_env(&env, sig);
env.module = module;
+ if (module) {
+ // Get a new function from the testing module.
+ function_ = nullptr;
+ function_index_ = module->AddFunction(sig, Handle<Code>::null());
+ } else {
+ // Create our own function.
+ function_ = new WasmFunction();
+ function_->sig = sig;
+ function_index_ = 0;
+ }
+ }
+
+ ~WasmFunctionCompiler() {
+ if (function_) delete function_;
}
JSGraph jsgraph;
FunctionEnv env;
// The call descriptor is initialized when the function is compiled.
CallDescriptor* descriptor_;
+ TestingModule* testing_module_;
+ WasmFunction* function_;
+ int function_index_;
Isolate* isolate() { return main_isolate(); }
Graph* graph() const { return main_graph_; }
Zone* zone() const { return graph()->zone(); }
CommonOperatorBuilder* common() { return &main_common_; }
MachineOperatorBuilder* machine() { return &main_machine_; }
+ void InitializeDescriptor() {
+ if (descriptor_ == nullptr) {
+ descriptor_ = env.module->GetWasmCallDescriptor(main_zone(), env.sig);
+ }
+ }
CallDescriptor* descriptor() { return descriptor_; }
void Build(const byte* start, const byte* end) {
+ // Transfer local counts before compiling.
+ function()->local_i32_count = env.local_i32_count;
+ function()->local_i64_count = env.local_i64_count;
+ function()->local_f32_count = env.local_f32_count;
+ function()->local_f64_count = env.local_f64_count;
+
+ // Build the TurboFan graph.
TestBuildingGraph(main_zone(), &jsgraph, &env, start, end);
}
@@ -256,11 +464,16 @@ class WasmFunctionCompiler : public HandleAndZoneScope,
return b;
}
- Handle<Code> Compile(ModuleEnv* module) {
- descriptor_ = module->GetWasmCallDescriptor(this->zone(), env.sig);
+ // TODO(titzer): remove me.
+ Handle<Code> Compile() {
+ InitializeDescriptor();
+ CallDescriptor* desc = descriptor_;
+ if (kPointerSize == 4) {
+ desc = testing_module_->GetI32WasmCallDescriptor(this->zone(), desc);
+ }
CompilationInfo info("wasm compile", this->isolate(), this->zone());
Handle<Code> result =
- Pipeline::GenerateCodeForTesting(&info, descriptor_, this->graph());
+ Pipeline::GenerateCodeForTesting(&info, desc, this->graph());
#ifdef ENABLE_DISASSEMBLER
if (!result.is_null() && FLAG_print_opt_code) {
OFStream os(stdout);
@@ -271,16 +484,20 @@ class WasmFunctionCompiler : public HandleAndZoneScope,
return result;
}
- uint32_t CompileAndAdd(TestingModule* module) {
- uint32_t index = 0;
- if (module->module && module->module->functions) {
- index = static_cast<uint32_t>(module->module->functions->size());
- }
- module->AddFunction(env.sig, Compile(module));
- return index;
+ // TODO(titzer): remove me.
+ uint32_t CompileAndAdd(uint16_t sig_index = 0) {
+ CHECK(testing_module_);
+ function()->sig_index = sig_index;
+ Handle<Code> code = Compile();
+ testing_module_->SetFunctionCode(function_index_, code);
+ return static_cast<uint32_t>(function_index_);
}
-};
+ WasmFunction* function() {
+ if (function_) return function_;
+ return &testing_module_->module->functions->at(function_index_);
+ }
+};
// A helper class to build graphs from Wasm bytecode, generate machine
// code, and run that code.
@@ -291,11 +508,28 @@ class WasmRunner {
MachineType p1 = MachineType::None(),
MachineType p2 = MachineType::None(),
MachineType p3 = MachineType::None())
- : signature_(MachineTypeForC<ReturnType>() == MachineType::None() ? 0 : 1,
+ : compiled_(false),
+ signature_(MachineTypeForC<ReturnType>() == MachineType::None() ? 0 : 1,
+ GetParameterCount(p0, p1, p2, p3), storage_),
+ compiler_(&signature_, nullptr) {
+ InitSigStorage(p0, p1, p2, p3);
+ }
+
+ WasmRunner(TestingModule* module, MachineType p0 = MachineType::None(),
+ MachineType p1 = MachineType::None(),
+ MachineType p2 = MachineType::None(),
+ MachineType p3 = MachineType::None())
+ : compiled_(false),
+ signature_(MachineTypeForC<ReturnType>() == MachineType::None() ? 0 : 1,
GetParameterCount(p0, p1, p2, p3), storage_),
- compiler_(&signature_),
- call_wrapper_(p0, p1, p2, p3),
- compilation_done_(false) {
+ compiler_(&signature_, module) {
+ DCHECK(module);
+ InitSigStorage(p0, p1, p2, p3);
+ }
+
+ void InitSigStorage(MachineType p0, MachineType p1, MachineType p2,
+ MachineType p3) {
int index = 0;
MachineType ret = MachineTypeForC<ReturnType>();
if (ret != MachineType::None()) {
@@ -309,56 +543,60 @@ class WasmRunner {
storage_[index++] = WasmOpcodes::LocalTypeFor(p2);
if (p3 != MachineType::None())
storage_[index++] = WasmOpcodes::LocalTypeFor(p3);
- }
+ compiler_.InitializeDescriptor();
+ wrapper_.Init(compiler_.descriptor(), p0, p1, p2, p3);
+ }
FunctionEnv* env() { return &compiler_.env; }
-
- // Builds a graph from the given Wasm code, and generates the machine
+ // Builds a graph from the given Wasm code and generates the machine
// code and call wrapper for that graph. This method must not be called
// more than once.
void Build(const byte* start, const byte* end) {
- DCHECK(!compilation_done_);
- compilation_done_ = true;
- // Build the TF graph.
+ CHECK(!compiled_);
+ compiled_ = true;
+
+ // Build the TF graph within the compiler.
compiler_.Build(start, end);
// Generate code.
- Handle<Code> code = compiler_.Compile(env()->module);
-
- // Construct the call wrapper.
- Node* inputs[5];
- int input_count = 0;
- inputs[input_count++] = call_wrapper_.HeapConstant(code);
- for (size_t i = 0; i < signature_.parameter_count(); i++) {
- inputs[input_count++] = call_wrapper_.Parameter(i);
+ Handle<Code> code = compiler_.Compile();
+
+ if (compiler_.testing_module_) {
+ // Update the table of function code in the module.
+ compiler_.testing_module_->SetFunctionCode(compiler_.function_index_,
+ code);
}
- call_wrapper_.Return(call_wrapper_.AddNode(
- call_wrapper_.common()->Call(compiler_.descriptor()), input_count,
- inputs));
+ wrapper_.SetInnerCode(code);
}
- ReturnType Call() { return call_wrapper_.Call(); }
+ ReturnType Call() { return Call(0, 0, 0, 0); }
template <typename P0>
ReturnType Call(P0 p0) {
- return call_wrapper_.Call(p0);
+ return Call(p0, 0, 0, 0);
}
template <typename P0, typename P1>
ReturnType Call(P0 p0, P1 p1) {
- return call_wrapper_.Call(p0, p1);
+ return Call(p0, p1, 0, 0);
}
template <typename P0, typename P1, typename P2>
ReturnType Call(P0 p0, P1 p1, P2 p2) {
- return call_wrapper_.Call(p0, p1, p2);
+ return Call(p0, p1, p2, 0);
}
template <typename P0, typename P1, typename P2, typename P3>
ReturnType Call(P0 p0, P1 p1, P2 p2, P3 p3) {
- return call_wrapper_.Call(p0, p1, p2, p3);
+ CodeRunner<int32_t> runner(CcTest::InitIsolateOnce(),
+ wrapper_.GetWrapperCode(), wrapper_.signature());
+ ReturnType return_value;
+ int32_t result = runner.Call<void*, void*, void*, void*, void*>(
+ &p0, &p1, &p2, &p3, &return_value);
+ CHECK_EQ(WASM_WRAPPER_RETURN_VALUE, result);
+ return return_value;
}
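// All Call() overloads funnel into the four-argument form above: missing
// arguments default to 0, their addresses are still passed to the wrapper,
// and the wrapper simply never loads through pointers whose MachineType was
// None() at Init() time. The CHECK_EQ on WASM_WRAPPER_RETURN_VALUE verifies
// that the wrapper actually ran to completion.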
byte AllocateLocal(LocalType type) {
@@ -369,12 +607,13 @@ class WasmRunner {
return b;
}
- private:
- LocalType storage_[5];
+ protected:
+ Zone zone;
+ bool compiled_;
+ LocalType storage_[WASM_RUNNER_MAX_NUM_PARAMETERS];
FunctionSig signature_;
WasmFunctionCompiler compiler_;
- BufferedRawMachineAssemblerTester<ReturnType> call_wrapper_;
- bool compilation_done_;
+ WasmFunctionWrapper<ReturnType> wrapper_;
static size_t GetParameterCount(MachineType p0, MachineType p1,
MachineType p2, MachineType p3) {
diff --git a/deps/v8/test/default.gyp b/deps/v8/test/default.gyp
index 53a8d7d4a2..efc0406895 100644
--- a/deps/v8/test/default.gyp
+++ b/deps/v8/test/default.gyp
@@ -11,6 +11,7 @@
'type': 'none',
'dependencies': [
'cctest/cctest.gyp:cctest_run',
+ 'fuzzer/fuzzer.gyp:fuzzer_run',
'intl/intl.gyp:intl_run',
'message/message.gyp:message_run',
'mjsunit/mjsunit.gyp:mjsunit_run',
diff --git a/deps/v8/test/default.isolate b/deps/v8/test/default.isolate
index 68044cf15b..416137c5b5 100644
--- a/deps/v8/test/default.isolate
+++ b/deps/v8/test/default.isolate
@@ -2,8 +2,14 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
+ 'variables': {
+ 'command': [
+ '../tools/run-tests.py',
+ ],
+ },
'includes': [
'cctest/cctest.isolate',
+ 'fuzzer/fuzzer.isolate',
'intl/intl.isolate',
'message/message.isolate',
'mjsunit/mjsunit.isolate',
diff --git a/deps/v8/test/fuzzer/DEPS b/deps/v8/test/fuzzer/DEPS
new file mode 100644
index 0000000000..3e73aa244f
--- /dev/null
+++ b/deps/v8/test/fuzzer/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+src",
+]
diff --git a/deps/v8/test/fuzzer/fuzzer-support.cc b/deps/v8/test/fuzzer/fuzzer-support.cc
new file mode 100644
index 0000000000..cf3ee8c6fd
--- /dev/null
+++ b/deps/v8/test/fuzzer/fuzzer-support.cc
@@ -0,0 +1,100 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/fuzzer/fuzzer-support.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "include/libplatform/libplatform.h"
+
+namespace v8_fuzzer {
+
+namespace {
+
+FuzzerSupport* g_fuzzer_support = nullptr;
+
+void DeleteFuzzerSupport() {
+ if (g_fuzzer_support) {
+ delete g_fuzzer_support;
+ g_fuzzer_support = nullptr;
+ }
+}
+
+} // namespace
+
+class FuzzerSupport::ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+ virtual void* Allocate(size_t length) {
+ void* data = AllocateUninitialized(length);
+ return data == NULL ? data : memset(data, 0, length);
+ }
+ virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
+ virtual void Free(void* data, size_t) { free(data); }
+};
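// Allocate() implements the zero-initializing contract that
// v8::ArrayBuffer::Allocator documents; a behaviorally equivalent sketch:
//
//   void* Allocate(size_t length) { return calloc(1, length); }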
+
+FuzzerSupport::FuzzerSupport(int* argc, char*** argv) {
+ v8::V8::SetFlagsFromCommandLine(argc, *argv, true);
+ v8::V8::InitializeICU();
+ v8::V8::InitializeExternalStartupData((*argv)[0]);
+ platform_ = v8::platform::CreateDefaultPlatform();
+ v8::V8::InitializePlatform(platform_);
+ v8::V8::Initialize();
+
+ allocator_ = new ArrayBufferAllocator;
+ v8::Isolate::CreateParams create_params;
+ create_params.array_buffer_allocator = allocator_;
+ isolate_ = v8::Isolate::New(create_params);
+
+ {
+ v8::Isolate::Scope isolate_scope(isolate_);
+ v8::HandleScope handle_scope(isolate_);
+ context_.Reset(isolate_, v8::Context::New(isolate_));
+ }
+}
+
+FuzzerSupport::~FuzzerSupport() {
+ {
+ v8::Isolate::Scope isolate_scope(isolate_);
+ while (v8::platform::PumpMessageLoop(platform_, isolate_)) /* empty */
+ ;
+
+ v8::HandleScope handle_scope(isolate_);
+ context_.Reset();
+ }
+
+ isolate_->Dispose();
+ isolate_ = nullptr;
+
+ delete allocator_;
+ allocator_ = nullptr;
+
+ v8::V8::Dispose();
+ v8::V8::ShutdownPlatform();
+
+ delete platform_;
+ platform_ = nullptr;
+}
+
+// static
+FuzzerSupport* FuzzerSupport::Get() { return g_fuzzer_support; }
+
+v8::Isolate* FuzzerSupport::GetIsolate() { return isolate_; }
+
+v8::Local<v8::Context> FuzzerSupport::GetContext() {
+ v8::Isolate::Scope isolate_scope(isolate_);
+ v8::EscapableHandleScope handle_scope(isolate_);
+ v8::Local<v8::Context> context =
+ v8::Local<v8::Context>::New(isolate_, context_);
+ return handle_scope.Escape(context);
+}
+
+} // namespace v8_fuzzer
+
+extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv) {
+ v8_fuzzer::g_fuzzer_support = new v8_fuzzer::FuzzerSupport(argc, argv);
+ atexit(&v8_fuzzer::DeleteFuzzerSupport);
+ return 0;
+}
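// These two symbols form the libFuzzer entry-point contract:
// LLVMFuzzerInitialize runs once per process before any input, and
// LLVMFuzzerTestOneInput (defined per target, e.g. in json.cc below) runs
// once per input. A minimal target against this support code looks like
// (a sketch):
//
//   extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
//     v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
//     v8::Isolate::Scope isolate_scope(support->GetIsolate());
//     v8::HandleScope handle_scope(support->GetIsolate());
//     v8::Context::Scope context_scope(support->GetContext());
//     // ... feed (data, size) to exactly one V8 API ...
//     return 0;
//   }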
diff --git a/deps/v8/test/fuzzer/fuzzer-support.h b/deps/v8/test/fuzzer/fuzzer-support.h
new file mode 100644
index 0000000000..0241c53665
--- /dev/null
+++ b/deps/v8/test/fuzzer/fuzzer-support.h
@@ -0,0 +1,37 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TEST_FUZZER_FUZZER_SUPPORT_H_
+#define TEST_FUZZER_FUZZER_SUPPORT_H_
+
+#include "include/v8.h"
+
+namespace v8_fuzzer {
+
+class FuzzerSupport {
+ public:
+ FuzzerSupport(int* argc, char*** argv);
+ ~FuzzerSupport();
+
+ static FuzzerSupport* Get();
+
+ v8::Isolate* GetIsolate();
+ v8::Local<v8::Context> GetContext();
+
+ private:
+ // Prevent copying. Not implemented.
+ FuzzerSupport(const FuzzerSupport&);
+ FuzzerSupport& operator=(const FuzzerSupport&);
+
+ class ArrayBufferAllocator;
+
+ v8::Platform* platform_;
+ ArrayBufferAllocator* allocator_;
+ v8::Isolate* isolate_;
+ v8::Global<v8::Context> context_;
+};
+
+} // namespace v8_fuzzer
+
+#endif // TEST_FUZZER_FUZZER_SUPPORT_H_
diff --git a/deps/v8/test/fuzzer/fuzzer.cc b/deps/v8/test/fuzzer/fuzzer.cc
new file mode 100644
index 0000000000..71a26b86b3
--- /dev/null
+++ b/deps/v8/test/fuzzer/fuzzer.cc
@@ -0,0 +1,56 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv);
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);
+
+int main(int argc, char* argv[]) {
+ if (LLVMFuzzerInitialize(&argc, &argv)) {
+ fprintf(stderr, "Failed to initialize fuzzer target\n");
+ return 1;
+ }
+
+ if (argc < 2) {
+ fprintf(stderr, "USAGE: %s <input>\n", argv[0]);
+ return 1;
+ }
+
+ FILE* input = fopen(argv[1], "rb");
+
+ if (!input) {
+ fprintf(stderr, "Failed to open '%s'\n", argv[1]);
+ return 1;
+ }
+
+ fseek(input, 0, SEEK_END);
+ long size = ftell(input);
+ fseek(input, 0, SEEK_SET);
+
+ uint8_t* data = reinterpret_cast<uint8_t*>(malloc(size));
+ if (!data) {
+ fclose(input);
+ fprintf(stderr, "Failed to allocate %ld bytes\n", size);
+ return 1;
+ }
+
+ size_t bytes_read = fread(data, 1, size, input);
+ fclose(input);
+
+ if (bytes_read != static_cast<size_t>(size)) {
+ free(data);
+ fprintf(stderr, "Failed to read %s\n", argv[1]);
+ return 1;
+ }
+
+ int result = LLVMFuzzerTestOneInput(data, size);
+
+ free(data);
+
+ return result;
+}
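// Unlike the real libFuzzer runtime, this standalone main() replays a single
// file through the target, which is what lets the test runner drive the
// fuzzers like ordinary executables, e.g. (paths assumed from fuzzer.isolate
// below):
//
//   $ out/Release/json_fuzzer test/fuzzer/json/json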
diff --git a/deps/v8/test/fuzzer/fuzzer.gyp b/deps/v8/test/fuzzer/fuzzer.gyp
new file mode 100644
index 0000000000..5fc338cb58
--- /dev/null
+++ b/deps/v8/test/fuzzer/fuzzer.gyp
@@ -0,0 +1,134 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ },
+ 'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'json_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'json_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'json_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'json.cc',
+ ],
+ },
+ {
+ 'target_name': 'parser_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'parser_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'parser_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'parser.cc',
+ ],
+ },
+ {
+ 'target_name': 'regexp_fuzzer',
+ 'type': 'executable',
+ 'dependencies': [
+ 'regexp_fuzzer_lib',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'fuzzer.cc',
+ ],
+ },
+ {
+ 'target_name': 'regexp_fuzzer_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'fuzzer_support',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'regexp.cc',
+ ],
+ },
+ {
+ 'target_name': 'fuzzer_support',
+ 'type': 'static_library',
+ 'dependencies': [
+ '../../tools/gyp/v8.gyp:v8_libplatform',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'fuzzer-support.cc',
+ 'fuzzer-support.h',
+ ],
+ 'conditions': [
+ ['component=="shared_library"', {
+ # fuzzers can't be built against a shared library, so we need to
+ # depend on the underlying static target in that case.
+ 'dependencies': ['../../tools/gyp/v8.gyp:v8_maybe_snapshot'],
+ }, {
+ 'dependencies': ['../../tools/gyp/v8.gyp:v8'],
+ }],
+ ],
+ },
+ ],
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'fuzzer_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'json_fuzzer',
+ 'parser_fuzzer',
+ 'regexp_fuzzer',
+ ],
+ 'includes': [
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'fuzzer.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/deps/v8/test/fuzzer/fuzzer.isolate b/deps/v8/test/fuzzer/fuzzer.isolate
new file mode 100644
index 0000000000..286be2f24a
--- /dev/null
+++ b/deps/v8/test/fuzzer/fuzzer.isolate
@@ -0,0 +1,22 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/json_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/parser_fuzzer<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/regexp_fuzzer<(EXECUTABLE_SUFFIX)',
+ './fuzzer.status',
+ './testcfg.py',
+ './json/',
+ './parser/',
+ './regexp/',
+ ],
+ },
+ 'includes': [
+ '../../src/base.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
diff --git a/deps/v8/test/fuzzer/fuzzer.status b/deps/v8/test/fuzzer/fuzzer.status
new file mode 100644
index 0000000000..df922bbf4e
--- /dev/null
+++ b/deps/v8/test/fuzzer/fuzzer.status
@@ -0,0 +1,7 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+
+]
diff --git a/deps/v8/test/fuzzer/json.cc b/deps/v8/test/fuzzer/json.cc
new file mode 100644
index 0000000000..f20e9b9a3b
--- /dev/null
+++ b/deps/v8/test/fuzzer/json.cc
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "include/v8.h"
+#include "test/fuzzer/fuzzer-support.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
+
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(support->GetContext());
+ v8::TryCatch try_catch(isolate);
+
+ if (size > INT_MAX) return 0;
+ v8::Local<v8::String> source;
+ if (!v8::String::NewFromOneByte(isolate, data, v8::NewStringType::kNormal,
+ static_cast<int>(size))
+ .ToLocal(&source)) {
+ return 0;
+ }
+
+ v8::JSON::Parse(isolate, source).IsEmpty();
+ return 0;
+}
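
The JSON target above follows the pattern shared by all the fuzzers in this change: enter the isolate, handle, and context scopes, guard evaluation with a `v8::TryCatch`, reject inputs longer than `INT_MAX`, and deliberately discard the parse result. A smoke-test sketch that drives the target with a fixed buffer, built in place of the fuzzer.cc driver since both define `main` (hypothetical, assumes the target above is linked in):

```cpp
// Hypothetical standalone smoke test: call the JSON fuzz target
// directly with a known-good buffer instead of reading a file.
#include <stdint.h>
#include <string.h>

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);

int main() {
  static const char kInput[] = "{\"json\": 1}";
  return LLVMFuzzerTestOneInput(reinterpret_cast<const uint8_t*>(kInput),
                                strlen(kInput));
}
```
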
diff --git a/deps/v8/test/fuzzer/json/json b/deps/v8/test/fuzzer/json/json
new file mode 100644
index 0000000000..f3400b3721
--- /dev/null
+++ b/deps/v8/test/fuzzer/json/json
@@ -0,0 +1 @@
+{"json": 1}
diff --git a/deps/v8/test/fuzzer/json/not-json b/deps/v8/test/fuzzer/json/not-json
new file mode 100644
index 0000000000..6b7a9f4e0c
--- /dev/null
+++ b/deps/v8/test/fuzzer/json/not-json
@@ -0,0 +1 @@
+not json
diff --git a/deps/v8/test/fuzzer/parser.cc b/deps/v8/test/fuzzer/parser.cc
new file mode 100644
index 0000000000..aee4c0dab7
--- /dev/null
+++ b/deps/v8/test/fuzzer/parser.cc
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "include/v8.h"
+#include "src/objects.h"
+#include "src/parsing/parser.h"
+#include "src/parsing/preparser.h"
+#include "test/fuzzer/fuzzer-support.h"
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
+
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(support->GetContext());
+ v8::TryCatch try_catch(isolate);
+
+ v8::internal::Isolate* i_isolate =
+ reinterpret_cast<v8::internal::Isolate*>(isolate);
+ v8::internal::Factory* factory = i_isolate->factory();
+
+ if (size > INT_MAX) return 0;
+ v8::internal::MaybeHandle<v8::internal::String> source =
+ factory->NewStringFromOneByte(
+ v8::internal::Vector<const uint8_t>(data, static_cast<int>(size)));
+ if (source.is_null()) return 0;
+
+ v8::internal::Handle<v8::internal::Script> script =
+ factory->NewScript(source.ToHandleChecked());
+ v8::internal::Zone zone;
+ v8::internal::ParseInfo info(&zone, script);
+ info.set_global();
+ v8::internal::Parser parser(&info);
+ parser.Parse(&info);
+ return 0;
+}
diff --git a/deps/v8/test/fuzzer/parser/hello-world b/deps/v8/test/fuzzer/parser/hello-world
new file mode 100644
index 0000000000..6be02374db
--- /dev/null
+++ b/deps/v8/test/fuzzer/parser/hello-world
@@ -0,0 +1 @@
+console.log('hello world');
diff --git a/deps/v8/test/fuzzer/regexp.cc b/deps/v8/test/fuzzer/regexp.cc
new file mode 100644
index 0000000000..eb51da8ac7
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp.cc
@@ -0,0 +1,75 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "include/v8.h"
+#include "src/factory.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "src/regexp/jsregexp.h"
+#include "test/fuzzer/fuzzer-support.h"
+
+namespace i = v8::internal;
+
+void Test(v8::Isolate* isolate, i::Handle<i::JSRegExp> regexp,
+ i::Handle<i::String> subject, i::Handle<i::JSArray> results_array) {
+ v8::TryCatch try_catch(isolate);
+ USE(i::RegExpImpl::Exec(regexp, subject, 0, results_array));
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
+
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(support->GetContext());
+ v8::TryCatch try_catch(isolate);
+
+ i::FLAG_harmony_unicode_regexps = true;
+ i::FLAG_harmony_regexp_lookbehind = true;
+
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ i::Factory* factory = i_isolate->factory();
+
+ if (size > INT_MAX) return 0;
+ i::MaybeHandle<i::String> maybe_source = factory->NewStringFromOneByte(
+ i::Vector<const uint8_t>(data, static_cast<int>(size)));
+ i::Handle<i::String> source;
+ if (!maybe_source.ToHandle(&source)) return 0;
+
+ static const int kAllFlags = i::JSRegExp::kGlobal | i::JSRegExp::kIgnoreCase |
+ i::JSRegExp::kMultiline | i::JSRegExp::kSticky |
+ i::JSRegExp::kUnicode;
+
+ const uint8_t one_byte_array[6] = {'f', 'o', 'o', 'b', 'a', 'r'};
+ const i::uc16 two_byte_array[6] = {'f', 0xD83D, 0xDCA9, 'b', 'a', 0x2603};
+
+ i::Handle<i::JSArray> results_array = factory->NewJSArray(5);
+ i::Handle<i::String> one_byte =
+ factory->NewStringFromOneByte(i::Vector<const uint8_t>(one_byte_array, 6))
+ .ToHandleChecked();
+ i::Handle<i::String> two_byte =
+ factory->NewStringFromTwoByte(i::Vector<const i::uc16>(two_byte_array, 6))
+ .ToHandleChecked();
+
+ for (int flags = 0; flags <= kAllFlags; flags++) {
+ i::Handle<i::JSRegExp> regexp;
+ {
+ v8::TryCatch try_catch(isolate);
+ i::MaybeHandle<i::JSRegExp> maybe_regexp =
+ i::JSRegExp::New(source, static_cast<i::JSRegExp::Flags>(flags));
+ if (!maybe_regexp.ToHandle(&regexp)) continue;
+ }
+ Test(isolate, regexp, one_byte, results_array);
+ Test(isolate, regexp, two_byte, results_array);
+ Test(isolate, regexp, factory->empty_string(), results_array);
+ Test(isolate, regexp, source, results_array);
+ }
+
+ return 0;
+}
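
Because each `JSRegExp` flag occupies its own bit, counting `flags` from 0 up to `kAllFlags` (the OR of all five flags) enumerates all 2^5 = 32 flag combinations; every pattern that compiles is then executed against four subjects: a one-byte string, a two-byte string containing a surrogate pair, the empty string, and the pattern source itself. A small sketch of the same subset-enumeration idiom (illustrative flag values, not V8's):

```cpp
// Illustrative bit-flag enumeration: when every flag is a distinct bit,
// iterating 0..kAll visits each of the 2^N subsets exactly once.
#include <cstdio>

enum Flag { kA = 1 << 0, kB = 1 << 1, kC = 1 << 2 };
static const int kAll = kA | kB | kC;

int main() {
  for (int flags = 0; flags <= kAll; flags++) {
    std::printf("%d: A=%d B=%d C=%d\n", flags, (flags & kA) != 0,
                (flags & kB) != 0, (flags & kC) != 0);
  }
  return 0;
}
```
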
diff --git a/deps/v8/test/fuzzer/regexp/test00 b/deps/v8/test/fuzzer/regexp/test00
new file mode 100644
index 0000000000..d2a71aece8
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test00
@@ -0,0 +1 @@
+a*
diff --git a/deps/v8/test/fuzzer/regexp/test01 b/deps/v8/test/fuzzer/regexp/test01
new file mode 100644
index 0000000000..83877e955f
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test01
@@ -0,0 +1 @@
+xyz{93}?
diff --git a/deps/v8/test/fuzzer/regexp/test02 b/deps/v8/test/fuzzer/regexp/test02
new file mode 100644
index 0000000000..d27d2ce6ba
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test02
@@ -0,0 +1 @@
+(foo|bar|baz)
diff --git a/deps/v8/test/fuzzer/regexp/test03 b/deps/v8/test/fuzzer/regexp/test03
new file mode 100644
index 0000000000..da14770d64
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test03
@@ -0,0 +1 @@
+[^]
diff --git a/deps/v8/test/fuzzer/regexp/test04 b/deps/v8/test/fuzzer/regexp/test04
new file mode 100644
index 0000000000..a672b260b0
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test04
@@ -0,0 +1 @@
+[\d]
diff --git a/deps/v8/test/fuzzer/regexp/test05 b/deps/v8/test/fuzzer/regexp/test05
new file mode 100644
index 0000000000..edcff4e452
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test05
@@ -0,0 +1 @@
+\c1
diff --git a/deps/v8/test/fuzzer/regexp/test06 b/deps/v8/test/fuzzer/regexp/test06
new file mode 100644
index 0000000000..74c75c65b0
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test06
@@ -0,0 +1 @@
+[a\]c]
diff --git a/deps/v8/test/fuzzer/regexp/test07 b/deps/v8/test/fuzzer/regexp/test07
new file mode 100644
index 0000000000..2ed8704014
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test07
@@ -0,0 +1 @@
+\00011
diff --git a/deps/v8/test/fuzzer/regexp/test08 b/deps/v8/test/fuzzer/regexp/test08
new file mode 100644
index 0000000000..62d553ccfe
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test08
@@ -0,0 +1 @@
+(x)(x)(x)\2*
diff --git a/deps/v8/test/fuzzer/regexp/test09 b/deps/v8/test/fuzzer/regexp/test09
new file mode 100644
index 0000000000..291650041b
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test09
@@ -0,0 +1 @@
+(?=a)?a
diff --git a/deps/v8/test/fuzzer/regexp/test10 b/deps/v8/test/fuzzer/regexp/test10
new file mode 100644
index 0000000000..8aac6957c3
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test10
@@ -0,0 +1 @@
+\1\2(a(?<=\1(b\1\2))\2)\1
diff --git a/deps/v8/test/fuzzer/regexp/test11 b/deps/v8/test/fuzzer/regexp/test11
new file mode 100644
index 0000000000..f9101ec517
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test11
@@ -0,0 +1 @@
+\x34
diff --git a/deps/v8/test/fuzzer/regexp/test12 b/deps/v8/test/fuzzer/regexp/test12
new file mode 100644
index 0000000000..c418bbd6f4
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test12
@@ -0,0 +1 @@
+\u{12345}|\u{23456}
diff --git a/deps/v8/test/fuzzer/regexp/test13 b/deps/v8/test/fuzzer/regexp/test13
new file mode 100644
index 0000000000..6e50a55528
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test13
@@ -0,0 +1 @@
+^a
diff --git a/deps/v8/test/fuzzer/regexp/test14 b/deps/v8/test/fuzzer/regexp/test14
new file mode 100644
index 0000000000..5020b3bd00
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test14
@@ -0,0 +1 @@
+a{1,1}?
diff --git a/deps/v8/test/fuzzer/regexp/test15 b/deps/v8/test/fuzzer/regexp/test15
new file mode 100644
index 0000000000..d1c4bdef9f
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test15
@@ -0,0 +1 @@
+a\d
diff --git a/deps/v8/test/fuzzer/regexp/test16 b/deps/v8/test/fuzzer/regexp/test16
new file mode 100644
index 0000000000..c2d990e791
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test16
@@ -0,0 +1 @@
+a[\q]
diff --git a/deps/v8/test/fuzzer/regexp/test17 b/deps/v8/test/fuzzer/regexp/test17
new file mode 100644
index 0000000000..d62fa9dc3f
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test17
@@ -0,0 +1 @@
+\0
diff --git a/deps/v8/test/fuzzer/regexp/test18 b/deps/v8/test/fuzzer/regexp/test18
new file mode 100644
index 0000000000..3836c6fe54
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test18
@@ -0,0 +1 @@
+a{1z}
diff --git a/deps/v8/test/fuzzer/regexp/test19 b/deps/v8/test/fuzzer/regexp/test19
new file mode 100644
index 0000000000..7eafbfde9f
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test19
@@ -0,0 +1 @@
+{12z}
diff --git a/deps/v8/test/fuzzer/regexp/test20 b/deps/v8/test/fuzzer/regexp/test20
new file mode 100644
index 0000000000..948cf947f8
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test20
@@ -0,0 +1 @@
+|
diff --git a/deps/v8/test/fuzzer/regexp/test21 b/deps/v8/test/fuzzer/regexp/test21
new file mode 100644
index 0000000000..5d2207be4f
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test21
@@ -0,0 +1 @@
+(?:ab)*
diff --git a/deps/v8/test/fuzzer/regexp/test22 b/deps/v8/test/fuzzer/regexp/test22
new file mode 100644
index 0000000000..2ca04990db
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test22
@@ -0,0 +1 @@
+(?:a*)?
diff --git a/deps/v8/test/fuzzer/regexp/test23 b/deps/v8/test/fuzzer/regexp/test23
new file mode 100644
index 0000000000..fbe9c0cd67
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test23
@@ -0,0 +1 @@
+(?:a+){0}
diff --git a/deps/v8/test/fuzzer/regexp/test24 b/deps/v8/test/fuzzer/regexp/test24
new file mode 100644
index 0000000000..bc09447441
--- /dev/null
+++ b/deps/v8/test/fuzzer/regexp/test24
@@ -0,0 +1 @@
+a\Bc
diff --git a/deps/v8/test/fuzzer/testcfg.py b/deps/v8/test/fuzzer/testcfg.py
new file mode 100644
index 0000000000..976325a70a
--- /dev/null
+++ b/deps/v8/test/fuzzer/testcfg.py
@@ -0,0 +1,48 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+
+class FuzzerVariantGenerator(testsuite.VariantGenerator):
+  # Only run the fuzzer with the standard variant.
+ def FilterVariantsByTest(self, testcase):
+ return self.standard_variant
+
+ def GetFlagSets(self, testcase, variant):
+ return testsuite.FAST_VARIANT_FLAGS[variant]
+
+
+class FuzzerTestSuite(testsuite.TestSuite):
+ SUB_TESTS = ( 'json', 'parser', 'regexp', )
+
+ def __init__(self, name, root):
+ super(FuzzerTestSuite, self).__init__(name, root)
+
+ def ListTests(self, context):
+ tests = []
+ for subtest in FuzzerTestSuite.SUB_TESTS:
+ shell = '%s_fuzzer' % subtest
+ for fname in os.listdir(os.path.join(self.root, subtest)):
+ if not os.path.isfile(os.path.join(self.root, subtest, fname)):
+ continue
+ test = testcase.TestCase(self, '%s/%s' % (subtest, fname),
+ override_shell=shell)
+ tests.append(test)
+ tests.sort()
+ return tests
+
+ def GetFlagsForTestCase(self, testcase, context):
+ suite, name = testcase.path.split('/')
+ return [os.path.join(self.root, suite, name)]
+
+ def _VariantGeneratorFactory(self):
+ return FuzzerVariantGenerator
+
+
+def GetSuite(name, root):
+ return FuzzerTestSuite(name, root)
diff --git a/deps/v8/test/ignition.isolate b/deps/v8/test/ignition.isolate
index 9604a694b2..7e4e581a6b 100644
--- a/deps/v8/test/ignition.isolate
+++ b/deps/v8/test/ignition.isolate
@@ -2,6 +2,11 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
+ 'variables': {
+ 'command': [
+ '../tools/run-tests.py',
+ ],
+ },
'includes': [
'cctest/cctest.isolate',
'mjsunit/mjsunit.isolate',
diff --git a/deps/v8/test/message/arrow-invalid-rest-2.js b/deps/v8/test/message/arrow-invalid-rest-2.js
new file mode 100644
index 0000000000..3517803d30
--- /dev/null
+++ b/deps/v8/test/message/arrow-invalid-rest-2.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+//
+
+var f = (a, ...x = 10) => x;
+f(1, 2, 3, 4, 5);
diff --git a/deps/v8/test/message/arrow-invalid-rest-2.out b/deps/v8/test/message/arrow-invalid-rest-2.out
new file mode 100644
index 0000000000..0196483a66
--- /dev/null
+++ b/deps/v8/test/message/arrow-invalid-rest-2.out
@@ -0,0 +1,4 @@
+*%(basename)s:7: SyntaxError: Rest parameter must be an identifier or destructuring pattern
+var f = (a, ...x = 10) => x;
+ ^^^^^^^^^
+SyntaxError: Rest parameter must be an identifier or destructuring pattern
diff --git a/deps/v8/test/message/arrow-invalid-rest.js b/deps/v8/test/message/arrow-invalid-rest.js
new file mode 100644
index 0000000000..870dbe9f54
--- /dev/null
+++ b/deps/v8/test/message/arrow-invalid-rest.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+//
+
+var f = (...x = 10) => x;
+f(1, 2, 3, 4, 5);
diff --git a/deps/v8/test/message/arrow-invalid-rest.out b/deps/v8/test/message/arrow-invalid-rest.out
new file mode 100644
index 0000000000..4045f14e78
--- /dev/null
+++ b/deps/v8/test/message/arrow-invalid-rest.out
@@ -0,0 +1,4 @@
+*%(basename)s:7: SyntaxError: Rest parameter must be an identifier or destructuring pattern
+var f = (...x = 10) => x;
+ ^^^^^^^^^
+SyntaxError: Rest parameter must be an identifier or destructuring pattern
diff --git a/deps/v8/test/message/for-loop-invalid-lhs.out b/deps/v8/test/message/for-loop-invalid-lhs.out
index 1972146f87..441ba3b60c 100644
--- a/deps/v8/test/message/for-loop-invalid-lhs.out
+++ b/deps/v8/test/message/for-loop-invalid-lhs.out
@@ -2,3 +2,4 @@
function f() { for ("unassignable" in {}); }
^^^^^^^^^^^^^^
SyntaxError: Invalid left-hand side in for-loop
+
diff --git a/deps/v8/test/message/function-sent-escaped.js b/deps/v8/test/message/function-sent-escaped.js
new file mode 100644
index 0000000000..aa17258f85
--- /dev/null
+++ b/deps/v8/test/message/function-sent-escaped.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-function-sent
+
+function* f() {
+ return function.s\u0065nt;
+}
+for (var i of f()) print(i);
diff --git a/deps/v8/test/message/function-sent-escaped.out b/deps/v8/test/message/function-sent-escaped.out
new file mode 100644
index 0000000000..d9613d8ef4
--- /dev/null
+++ b/deps/v8/test/message/function-sent-escaped.out
@@ -0,0 +1,4 @@
+*%(basename)s:8: SyntaxError: 'function.sent' must not contain escaped characters
+ return function.s\u0065nt;
+ ^^^^^^^^^^^^^^^^^^
+SyntaxError: 'function.sent' must not contain escaped characters
diff --git a/deps/v8/test/message/let-lexical-name-in-array-prohibited.out b/deps/v8/test/message/let-lexical-name-in-array-prohibited.out
index e6a53dcd17..fc8181a498 100644
--- a/deps/v8/test/message/let-lexical-name-in-array-prohibited.out
+++ b/deps/v8/test/message/let-lexical-name-in-array-prohibited.out
@@ -2,3 +2,4 @@
let [let];
^^^
SyntaxError: let is disallowed as a lexically bound name
+
diff --git a/deps/v8/test/message/let-lexical-name-in-object-prohibited.out b/deps/v8/test/message/let-lexical-name-in-object-prohibited.out
index a1458f9899..c04f6bedd2 100644
--- a/deps/v8/test/message/let-lexical-name-in-object-prohibited.out
+++ b/deps/v8/test/message/let-lexical-name-in-object-prohibited.out
@@ -2,3 +2,4 @@
let {let};
^^^
SyntaxError: let is disallowed as a lexically bound name
+
diff --git a/deps/v8/test/message/let-lexical-name-prohibited.out b/deps/v8/test/message/let-lexical-name-prohibited.out
index fe423dcd77..4b2011b297 100644
--- a/deps/v8/test/message/let-lexical-name-prohibited.out
+++ b/deps/v8/test/message/let-lexical-name-prohibited.out
@@ -2,3 +2,4 @@
let let;
^^^
SyntaxError: let is disallowed as a lexically bound name
+
diff --git a/deps/v8/test/message/new-target-escaped.js b/deps/v8/test/message/new-target-escaped.js
new file mode 100644
index 0000000000..f8398bebd4
--- /dev/null
+++ b/deps/v8/test/message/new-target-escaped.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+//
+
+function f() {
+ return new.t\u0061rget;
+}
+var o = new f();
diff --git a/deps/v8/test/message/new-target-escaped.out b/deps/v8/test/message/new-target-escaped.out
new file mode 100644
index 0000000000..21b30e3513
--- /dev/null
+++ b/deps/v8/test/message/new-target-escaped.out
@@ -0,0 +1,4 @@
+*%(basename)s:8: SyntaxError: 'new.target' must not contain escaped characters
+ return new.t\u0061rget;
+ ^^^^^^^^^^^^^^^
+SyntaxError: 'new.target' must not contain escaped characters
diff --git a/deps/v8/test/message/try-catch-lexical-conflict.out b/deps/v8/test/message/try-catch-lexical-conflict.out
index 9dc1b54fd5..0a7a0ebc25 100644
--- a/deps/v8/test/message/try-catch-lexical-conflict.out
+++ b/deps/v8/test/message/try-catch-lexical-conflict.out
@@ -1,4 +1,4 @@
*%(basename)s:10: SyntaxError: Identifier 'x' has already been declared
let x;
- ^
+ ^
SyntaxError: Identifier 'x' has already been declared
diff --git a/deps/v8/test/message/try-catch-variable-conflict.out b/deps/v8/test/message/try-catch-variable-conflict.out
index c7fb8de510..be4858e2fa 100644
--- a/deps/v8/test/message/try-catch-variable-conflict.out
+++ b/deps/v8/test/message/try-catch-variable-conflict.out
@@ -1,4 +1,4 @@
*%(basename)s:9: SyntaxError: Identifier 'x' has already been declared
var x;
- ^
+ ^
SyntaxError: Identifier 'x' has already been declared
diff --git a/deps/v8/test/mjsunit/apply.js b/deps/v8/test/mjsunit/apply.js
index fdd032dab3..7ce6acccba 100644
--- a/deps/v8/test/mjsunit/apply.js
+++ b/deps/v8/test/mjsunit/apply.js
@@ -114,7 +114,7 @@ function al() {
return arguments.length + arguments[arguments.length - 1];
}
-for (var j = 1; j < 0x4000000; j <<= 1) {
+for (var j = 1; j < 0x400000; j <<= 1) {
try {
var a = %NormalizeElements([]);
a.length = j;
@@ -122,7 +122,7 @@ for (var j = 1; j < 0x4000000; j <<= 1) {
assertEquals(42 + j, al.apply(345, a));
} catch (e) {
assertTrue(e.toString().indexOf("Maximum call stack size exceeded") != -1);
- for (; j < 0x4000000; j <<= 1) {
+ for (; j < 0x400000; j <<= 1) {
var caught = false;
try {
a = %NormalizeElements([]);
diff --git a/deps/v8/test/mjsunit/arguments-opt.js b/deps/v8/test/mjsunit/arguments-opt.js
deleted file mode 100644
index b8280b4bec..0000000000
--- a/deps/v8/test/mjsunit/arguments-opt.js
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-function L0() {
- return %_ArgumentsLength();
-}
-
-function L1(a) {
- return %_ArgumentsLength();
-}
-
-function L5(a,b,c,d,e) {
- return %_ArgumentsLength();
-}
-
-
-assertEquals(0, L0());
-assertEquals(1, L0(1));
-assertEquals(2, L0(1,2));
-assertEquals(5, L0(1,2,3,4,5));
-
-assertEquals(0, L1());
-assertEquals(1, L1(1));
-assertEquals(2, L1(1,2));
-assertEquals(5, L1(1,2,3,4,5));
-
-assertEquals(0, L5());
-assertEquals(1, L5(1));
-assertEquals(2, L5(1,2));
-assertEquals(5, L5(1,2,3,4,5));
-
-
-function A(key) {
- return %_Arguments(key);
-}
-
-// Integer access.
-assertEquals(0, A(0));
-assertEquals(0, A(0,1));
-assertEquals(2, A(1,2));
-assertEquals(2, A(1,2,3,4,5));
-assertEquals(5, A(4,2,3,4,5));
-assertTrue(typeof A(1) == 'undefined');
-assertTrue(typeof A(3,2,1) == 'undefined');
-
-// Out-of-bounds integer access with and without argument
-// adaptor frames.
-assertTrue(typeof(A(-10000)) == 'undefined');
-assertTrue(typeof(A(-10000, 0)) == 'undefined');
-assertTrue(typeof(A(-1)) == 'undefined');
-assertTrue(typeof(A(-1, 0)) == 'undefined');
-assertTrue(typeof(A(10000)) == 'undefined');
-assertTrue(typeof(A(10000, 0)) == 'undefined');
-
-// String access.
-assertEquals('0', A('0'));
-assertEquals('0', A('0',1));
-assertEquals(2, A('1',2));
-assertEquals(2, A('1',2,3,4,5));
-assertEquals(5, A('4',2,3,4,5));
-assertEquals('undefined', typeof A('1'));
-assertEquals('undefined', typeof A('3',2,1));
-assertEquals(A, A('callee'));
-assertEquals(1, A('length'));
-assertEquals(2, A('length',2));
-assertEquals(5, A('length',2,3,4,5));
-assertEquals({}.toString, A('toString'));
-assertEquals({}.isPrototypeOf, A('isPrototypeOf'));
-assertEquals('undefined', typeof A('xxx'));
-
-// Object access.
-function O(key) {
- return { toString: function() { return key; } };
-}
-
-var O0 = O(0);
-assertSame(O0, A(O0));
-assertSame(O0, A(O0,1));
-assertEquals(2, A(O(1),2));
-assertEquals(2, A(O(1),2,3,4,5));
-assertEquals(5, A(O(4),2,3,4,5));
-assertTrue(typeof A(O(1)) == 'undefined');
-assertTrue(typeof A(O(3),2,1) == 'undefined');
-
-O0 = O('0');
-assertSame(O0, A(O0));
-assertSame(O0, A(O0,1));
-assertEquals(2, A(O('1'),2));
-assertEquals(2, A(O('1'),2,3,4,5));
-assertEquals(5, A(O('4'),2,3,4,5));
-assertTrue(typeof A(O('1')) == 'undefined');
-assertTrue(typeof A(O('3'),2,1) == 'undefined');
-assertEquals(A, A(O('callee')));
-assertEquals(1, A(O('length')));
-assertEquals(2, A(O('length'),2));
-assertEquals(5, A(O('length'),2,3,4,5));
-assertEquals({}.toString, A(O('toString')));
-assertEquals({}.isPrototypeOf, A(O('isPrototypeOf')));
-assertTrue(typeof A(O('xxx')) == 'undefined');
-
-// Make sure that out-of-bounds access do lookups in the
-// prototype chain.
-Object.prototype[5] = 42;
-assertEquals(42, A(5));
-Object.prototype[-5] = 87;
-assertEquals(87, A(-5));
diff --git a/deps/v8/test/mjsunit/array-sort.js b/deps/v8/test/mjsunit/array-sort.js
index beb8b95b01..ae9f6efa66 100644
--- a/deps/v8/test/mjsunit/array-sort.js
+++ b/deps/v8/test/mjsunit/array-sort.js
@@ -466,7 +466,16 @@ function TestSortToObject() {
TestSortToObject();
function TestSortOnProxy() {
- var p = new Proxy([2,1,3], {});
- assertEquals([1,2,3], p.sort());
+ {
+ var p = new Proxy([2,1,3], {});
+ assertEquals([1,2,3], p.sort());
+ }
+
+ {
+ function f() { return arguments };
+ var a = f(2,1,3);
+ a.__proto__ = new Proxy({}, {});
+ assertEquals([1,2,3], [...(Array.prototype.sort.apply(a))]);
+ }
}
TestSortOnProxy();
diff --git a/deps/v8/test/mjsunit/compiler/debug-catch-prediction.js b/deps/v8/test/mjsunit/compiler/debug-catch-prediction.js
new file mode 100644
index 0000000000..34d3afd77e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/debug-catch-prediction.js
@@ -0,0 +1,143 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug event catch prediction for thrown exceptions. We distinguish
+// between "caught" and "uncaught" based on the following assumptions:
+// 1) try-catch : Will always catch the exception.
+// 2) try-finally : Will always re-throw the exception.
+
+Debug = debug.Debug;
+
+var log = [];
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Exception) {
+ log.push([event_data.exception(), event_data.uncaught()]);
+ }
+ } catch (e) {
+ %AbortJS(e + "\n" + e.stack);
+ }
+}
+
+Debug.setBreakOnException();
+Debug.setListener(listener);
+
+(function TryCatch() {
+ log = []; // Clear log.
+ function f(a) {
+ try {
+ throw "boom" + a;
+ } catch(e) {
+ return e;
+ }
+ }
+ assertEquals("boom1", f(1));
+ assertEquals("boom2", f(2));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("boom3", f(3));
+ print("Collect log:", log);
+ assertEquals([["boom1",false], ["boom2",false], ["boom3",false]], log);
+})();
+
+(function TryFinally() {
+ log = []; // Clear log.
+ function f(a) {
+ try {
+ throw "baem" + a;
+ } finally {
+ return a + 10;
+ }
+ }
+ assertEquals(11, f(1));
+ assertEquals(12, f(2));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(13, f(3));
+ print("Collect log:", log);
+ assertEquals([["baem1",true], ["baem2",true], ["baem3",true]], log);
+})();
+
+(function TryCatchFinally() {
+ log = []; // Clear log.
+ function f(a) {
+ try {
+ throw "wosh" + a;
+ } catch(e) {
+ return e + a;
+ } finally {
+ // Nothing.
+ }
+ }
+ assertEquals("wosh11", f(1));
+ assertEquals("wosh22", f(2));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("wosh33", f(3));
+ print("Collect log:", log);
+ assertEquals([["wosh1",false], ["wosh2",false], ["wosh3",false]], log);
+})();
+
+(function TryCatchNestedFinally() {
+ log = []; // Clear log.
+ function f(a) {
+ try {
+ try {
+ throw "bang" + a;
+ } finally {
+ // Nothing.
+ }
+ } catch(e) {
+ return e + a;
+ }
+ }
+ assertEquals("bang11", f(1));
+ assertEquals("bang22", f(2));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("bang33", f(3));
+ print("Collect log:", log);
+ assertEquals([["bang1",false], ["bang2",false], ["bang3",false]], log);
+})();
+
+(function TryFinallyNestedCatch() {
+ log = []; // Clear log.
+ function f(a) {
+ try {
+ try {
+ throw "peng" + a;
+ } catch(e) {
+ return e
+ }
+ } finally {
+ return a + 10;
+ }
+ }
+ assertEquals(11, f(1));
+ assertEquals(12, f(2));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(13, f(3));
+ print("Collect log:", log);
+ assertEquals([["peng1",false], ["peng2",false], ["peng3",false]], log);
+})();
+
+(function TryFinallyNestedFinally() {
+ log = []; // Clear log.
+ function f(a) {
+ try {
+ try {
+ throw "oops" + a;
+ } finally {
+ // Nothing.
+ }
+ } finally {
+ return a + 10;
+ }
+ }
+ assertEquals(11, f(1));
+ assertEquals(12, f(2));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(13, f(3));
+ print("Collect log:", log);
+ assertEquals([["oops1",true], ["oops2",true], ["oops3",true]], log);
+})();
diff --git a/deps/v8/test/mjsunit/regress/setvalueof-deopt.js b/deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js
index 8c42c8a20b..c80e329150 100644
--- a/deps/v8/test/mjsunit/regress/setvalueof-deopt.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-materialize-accumulator.js
@@ -1,4 +1,4 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -26,17 +26,16 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
+//
+// Tests that TurboFan correctly materializes values that are in the
+// interpreter's accumulator during deopt.
-function g(x, y) {
- return y;
-}
-
-function f(deopt) {
- return g(%_SetValueOf(1, 1), deopt + 0);
+var global = 3;
+function f(a) {
+  // This will trigger a deopt since global was previously a SMI, with the
+  // accumulator holding an unboxed double which needs to be materialized.
+ global = %_MathSqrt(a);
}
-
-f(0);
-f(0);
-f(0);
%OptimizeFunctionOnNextCall(f);
-assertEquals("result0", f("result"));
+f(0.25);
+assertEquals(0.5, global);
diff --git a/deps/v8/test/mjsunit/compiler/double-array-to-global.js b/deps/v8/test/mjsunit/compiler/double-array-to-global.js
new file mode 100644
index 0000000000..e221d90358
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/double-array-to-global.js
@@ -0,0 +1,17 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = [-0, 0];
+var b;
+function foo(a) {
+ for (var i = 0; i < 2; ++i) {
+ b = a[i];
+ }
+}
+foo(a);
+foo(a);
+%OptimizeFunctionOnNextCall(foo);
+foo(a);
diff --git a/deps/v8/test/mjsunit/compiler/inline-arguments.js b/deps/v8/test/mjsunit/compiler/inline-arguments.js
index d52f31b5e9..1337ab237a 100644
--- a/deps/v8/test/mjsunit/compiler/inline-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/inline-arguments.js
@@ -309,29 +309,3 @@ test_toarr(toarr2);
delete forceDeopt.deopt;
outer();
})();
-
-
-// Test inlining of functions with %_Arguments and %_ArgumentsLength intrinsic.
-(function () {
- function inner(len,a,b,c) {
- assertSame(len, %_ArgumentsLength());
- for (var i = 1; i < len; ++i) {
- var c = String.fromCharCode(96 + i);
- assertSame(c, %_Arguments(i));
- }
- }
-
- function outer() {
- inner(1);
- inner(2, 'a');
- inner(3, 'a', 'b');
- inner(4, 'a', 'b', 'c');
- inner(5, 'a', 'b', 'c', 'd');
- inner(6, 'a', 'b', 'c', 'd', 'e');
- }
-
- outer();
- outer();
- %OptimizeFunctionOnNextCall(outer);
- outer();
-})();
diff --git a/deps/v8/test/mjsunit/compiler/minus-zero.js b/deps/v8/test/mjsunit/compiler/minus-zero.js
index c161257d77..ac66350051 100644
--- a/deps/v8/test/mjsunit/compiler/minus-zero.js
+++ b/deps/v8/test/mjsunit/compiler/minus-zero.js
@@ -37,31 +37,8 @@ assertEquals(0, add(0, 0));
assertEquals(-0, add(-0, -0));
-function test(x, y) {
- assertTrue(%_IsMinusZero(-0));
- assertTrue(%_IsMinusZero(1/(-Infinity)));
- assertTrue(%_IsMinusZero(x));
-
- assertFalse(%_IsMinusZero(0));
- assertFalse(%_IsMinusZero(1/Infinity));
- assertFalse(%_IsMinusZero(0.1));
- assertFalse(%_IsMinusZero(-0.2));
- assertFalse(%_IsMinusZero({}));
- assertFalse(%_IsMinusZero(""));
- assertFalse(%_IsMinusZero("-0"));
- assertFalse(%_IsMinusZero(function() {}));
- assertFalse(%_IsMinusZero(y));
-}
-
-test(-0, 1.2);
-test(-0, 1.2);
-%OptimizeFunctionOnNextCall(test);
-test(-0, 1.2);
-assertOptimized(test);
-
-
function testsin() {
- assertTrue(%_IsMinusZero(Math.sin(-0)));
+ assertEquals(-0, Math.sin(-0));
}
testsin();
@@ -71,8 +48,7 @@ testsin();
function testfloor() {
- assertTrue(%_IsMinusZero(Math.floor(-0)));
- assertFalse(%_IsMinusZero(Math.floor(2)));
+ assertEquals(-0, Math.floor(-0));
}
testfloor();
diff --git a/deps/v8/test/mjsunit/compiler/optimized-for-in.js b/deps/v8/test/mjsunit/compiler/optimized-for-in.js
index f3ff6beb05..d93344ea57 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-for-in.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-for-in.js
@@ -25,8 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --optimize-for-in --allow-natives-syntax
-// Flags: --no-concurrent-osr
+// Flags: --allow-natives-syntax --no-concurrent-osr
// Test for-in support in Crankshaft. For simplicity this tests assumes certain
// fixed iteration order for properties and will have to be adjusted if V8
diff --git a/deps/v8/test/mjsunit/compiler/regress-1085.js b/deps/v8/test/mjsunit/compiler/regress-1085.js
index cea587f500..533cf59c9c 100644
--- a/deps/v8/test/mjsunit/compiler/regress-1085.js
+++ b/deps/v8/test/mjsunit/compiler/regress-1085.js
@@ -33,6 +33,5 @@ function f(x) { return 1 / Math.min(1, x); }
for (var i = 0; i < 5; ++i) f(1);
%OptimizeFunctionOnNextCall(f);
-%OptimizeFunctionOnNextCall(Math.min);
assertEquals(-Infinity, f(-0));
diff --git a/deps/v8/test/mjsunit/compiler/regress-dead-throw-inlining.js b/deps/v8/test/mjsunit/compiler/regress-dead-throw-inlining.js
new file mode 100644
index 0000000000..097a20bc41
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-dead-throw-inlining.js
@@ -0,0 +1,13 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function g() { if (false) throw 0; }
+function f() { g(); }
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-max.js b/deps/v8/test/mjsunit/compiler/regress-max.js
index ee2fd587ec..7556f2f733 100644
--- a/deps/v8/test/mjsunit/compiler/regress-max.js
+++ b/deps/v8/test/mjsunit/compiler/regress-max.js
@@ -29,7 +29,6 @@
// Test Math.max with negative zero as input.
for (var i = 0; i < 5; i++) Math.max(0, 0);
-%OptimizeFunctionOnNextCall(Math.max);
Math.max(0, 0);
var r = Math.max(-0, -0);
diff --git a/deps/v8/test/mjsunit/compiler/try-catch-deopt.js b/deps/v8/test/mjsunit/compiler/try-catch-deopt.js
new file mode 100644
index 0000000000..2b6372cf28
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/try-catch-deopt.js
@@ -0,0 +1,225 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function LazyDeoptFromTryBlock() {
+ function g(dummy) {
+ %DeoptimizeFunction(f);
+ throw 42;
+ }
+
+ function f() {
+ var a = 1;
+ try {
+ var dummy = 2; // perturb the stack height.
+ g(dummy);
+ } catch (e) {
+ return e + a;
+ }
+ }
+
+ assertEquals(43, f());
+ assertEquals(43, f());
+ %NeverOptimizeFunction(g);
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(43, f());
+})();
+
+
+(function LazyDeoptDoublyNestedTryBlock() {
+ function g(dummy) {
+ %DeoptimizeFunction(f);
+ throw 42;
+ }
+
+ function f() {
+ var b;
+ try {
+ var a = 1;
+ try {
+ var dummy = 2; // perturb the stack height.
+ g(dummy);
+ } catch (e) {
+ b = e + a;
+ }
+ } catch (e) {
+ return 0;
+ }
+ return b;
+ }
+
+ assertEquals(43, f());
+ assertEquals(43, f());
+ %NeverOptimizeFunction(g);
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(43, f());
+})();
+
+(function LazyDeoptInlinedTry() {
+ function g(dummy) {
+ %DeoptimizeFunction(f);
+ %DeoptimizeFunction(h);
+ throw 42;
+ }
+
+ function h() {
+ var a = 1;
+ try {
+ var dummy = 2; // perturb the stack height.
+ g(dummy);
+ } catch (e) {
+ b = e + a;
+ }
+ return b;
+ }
+
+ function f() {
+ var c = 1;
+ return h() + 1;
+ }
+
+ assertEquals(44, f());
+ assertEquals(44, f());
+ %NeverOptimizeFunction(g);
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(44, f());
+})();
+
+(function LazyDeoptInlinedIntoTry() {
+ function g(c) {
+ %DeoptimizeFunction(f);
+ %DeoptimizeFunction(h);
+ throw c;
+ }
+
+ function h(c) {
+ return g(c);
+ }
+
+ function f() {
+ var a = 1;
+ try {
+ var c = 42; // perturb the stack height.
+ h(c);
+ } catch (e) {
+ a += e;
+ }
+ return a;
+ }
+
+ assertEquals(43, f());
+ assertEquals(43, f());
+ %NeverOptimizeFunction(g);
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(43, f());
+})();
+
+(function LazyDeoptTryBlockContextCatch() {
+ var global = 0;
+
+ function g() {
+ %DeoptimizeFunction(f);
+ throw "boom!";
+ }
+
+ function f(a) {
+ var x = a + 23
+ try {
+ let y = a + 42;
+ function capture() { return x + y }
+ g();
+ } catch(e) {
+ global = x;
+ }
+ return x;
+ }
+ assertEquals(23, f(0));
+ assertEquals(24, f(1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(25, f(2));
+ assertEquals(25, global);
+})();
+
+(function LazyDeoptTryBlockFinally() {
+ var global = 0;
+
+ function g() {
+ %DeoptimizeFunction(f);
+ throw "boom!";
+ }
+
+ function f(a) {
+ var x = a + 23
+ try {
+ let y = a + 42;
+ function capture() { return x + y }
+ g();
+ } finally {
+ global = x;
+ }
+ return x;
+ }
+ assertThrows(function() { f(0) });
+ assertThrows(function() { f(1) });
+ %OptimizeFunctionOnNextCall(f);
+ assertThrowsEquals(function() { f(2) }, "boom!");
+ assertEquals(25, global);
+})();
+
+(function LazyDeoptTryCatchContextCatch() {
+ var global = 0;
+
+ function g() {
+ %DeoptimizeFunction(f);
+ throw 5;
+ }
+
+ function f(a) {
+ var x = a + 23
+ try {
+ try {
+ throw 1;
+ } catch(e2) {
+ function capture() { return x + y }
+ g();
+ }
+ } catch(e) {
+ global = x + e;
+ }
+ return x;
+ }
+ assertEquals(23, f(0));
+ assertEquals(24, f(1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(25, f(2));
+ assertEquals(30, global);
+})();
+
+(function LazyDeoptTryWithContextCatch() {
+ var global = 0;
+
+ function g() {
+ %DeoptimizeFunction(f);
+ throw 5;
+ }
+
+ function f(a) {
+ var x = a + 23
+ try {
+ with ({ y : a + 42 }) {
+ function capture() { return x + y }
+ g();
+ }
+ } catch(e) {
+ global = x + e;
+ }
+ return x;
+ }
+ assertEquals(23, f(0));
+ assertEquals(24, f(1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(25, f(2));
+ assertEquals(30, global);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/try-context.js b/deps/v8/test/mjsunit/compiler/try-context.js
new file mode 100644
index 0000000000..4e6d9b028c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/try-context.js
@@ -0,0 +1,89 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TryBlockCatch() {
+ var global = 0;
+ function f(a) {
+ var x = a + 23
+ try {
+ let y = a + 42;
+ function capture() { return x + y }
+ throw "boom!";
+ } catch(e) {
+ global = x;
+ }
+ return x;
+ }
+ assertEquals(23, f(0));
+ assertEquals(24, f(1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(25, f(2));
+ assertEquals(25, global);
+})();
+
+(function TryBlockFinally() {
+ var global = 0;
+ function f(a) {
+ var x = a + 23
+ try {
+ let y = a + 42;
+ function capture() { return x + y }
+ throw "boom!";
+ } finally {
+ global = x;
+ }
+ return x;
+ }
+ assertThrows(function() { f(0) });
+ assertThrows(function() { f(1) });
+ %OptimizeFunctionOnNextCall(f);
+ assertThrows(function() { f(2) });
+ assertEquals(25, global);
+})();
+
+(function TryCatchCatch() {
+ var global = 0;
+ function f(a) {
+ var x = a + 23
+ try {
+ try {
+ throw "boom!";
+ } catch(e2) {
+ function capture() { return x + y }
+ throw "boom!";
+ }
+ } catch(e) {
+ global = x;
+ }
+ return x;
+ }
+ assertEquals(23, f(0));
+ assertEquals(24, f(1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(25, f(2));
+ assertEquals(25, global);
+})();
+
+(function TryWithCatch() {
+ var global = 0;
+ function f(a) {
+ var x = a + 23
+ try {
+ with ({ y : a + 42 }) {
+ function capture() { return x + y }
+ throw "boom!";
+ }
+ } catch(e) {
+ global = x;
+ }
+ return x;
+ }
+ assertEquals(23, f(0));
+ assertEquals(24, f(1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(25, f(2));
+ assertEquals(25, global);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/try-finally-deopt.js b/deps/v8/test/mjsunit/compiler/try-finally-deopt.js
new file mode 100644
index 0000000000..455bf3477f
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/try-finally-deopt.js
@@ -0,0 +1,249 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function DeoptimizeFinallyFallThrough() {
+ var global = 0;
+ function f() {
+ var a = 1;
+ try {
+ global = 1;
+ } finally {
+ global = 42;
+ %DeoptimizeNow();
+ }
+ return global + a;
+ }
+
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(43, f());
+ assertEquals(42, global);
+})();
+
+(function DeoptimizeFinallyReturn() {
+ var global = 0;
+ function f() {
+ try {
+ return 10;
+ } finally {
+ global = 42;
+ %DeoptimizeNow();
+ }
+ return 1;
+ }
+
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(10, f());
+ assertEquals(42, global);
+})();
+
+(function DeoptimizeFinallyReturnDoublyNested() {
+ var global = 0;
+ function f() {
+ try {
+ try {
+ return 10;
+ } finally {
+ global += 21;
+ %DeoptimizeNow();
+ }
+ } finally {
+ global += 21;
+ }
+ return 1;
+ }
+
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ global = 0;
+ assertEquals(10, f());
+ assertEquals(42, global);
+})();
+
+(function DeoptimizeOuterFinallyReturnDoublyNested() {
+ var global = 0;
+ function f() {
+ try {
+ try {
+ return 10;
+ } finally {
+ global += 21;
+ }
+ } finally {
+ global += 21;
+ %DeoptimizeNow();
+ }
+ return 1;
+ }
+
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ global = 0;
+ assertEquals(10, f());
+ assertEquals(42, global);
+})();
+
+(function DeoptimizeFinallyThrow() {
+ var global = 0;
+ function f() {
+ try {
+ global = 21;
+ throw 1;
+ global = 2;
+ } finally {
+ global += 21;
+ %DeoptimizeNow();
+ }
+ global = 3;
+ return 1;
+ }
+
+ try { f(); } catch(e) {}
+ try { f(); } catch(e) {}
+ %OptimizeFunctionOnNextCall(f);
+ assertThrowsEquals(f, 1);
+ assertEquals(42, global);
+})();
+
+(function DeoptimizeFinallyThrowNested() {
+ var global = 0;
+ function f() {
+ try {
+ try {
+ global = 10;
+ throw 1;
+ global = 2;
+ } finally {
+ global += 11;
+ %DeoptimizeNow();
+ }
+ global = 4;
+ } finally {
+ global += 21;
+ }
+ global = 3;
+ return 1;
+ }
+
+ try { f(); } catch(e) {}
+ try { f(); } catch(e) {}
+ %OptimizeFunctionOnNextCall(f);
+ assertThrowsEquals(f, 1);
+ assertEquals(42, global);
+})();
+
+(function DeoptimizeFinallyContinue() {
+ var global = 0;
+ function f() {
+ global = 0;
+ for (var i = 0; i < 2; i++) {
+ try {
+ if (i == 0) continue;
+ global += 10;
+ } finally {
+ global += 6;
+ %DeoptimizeNow();
+ }
+ global += 20;
+ }
+ return 1;
+ }
+
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(1, f());
+ assertEquals(42, global);
+})();
+
+(function DeoptimizeFinallyContinueNestedTry() {
+ var global = 0;
+ function f() {
+ global = 0;
+ for (var i = 0; i < 2; i++) {
+ try {
+ try {
+ if (i == 0) continue;
+ global += 5;
+ } finally {
+ global += 4;
+ %DeoptimizeNow();
+ }
+ global += 5;
+ } finally {
+ global += 2;
+ }
+ global += 20;
+ }
+ return 1;
+ }
+
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(1, f());
+ assertEquals(42, global);
+})();
+
+(function DeoptimizeFinallyBreak() {
+ var global = 0;
+ function f() {
+ global = 0;
+ for (var i = 0; i < 2; i++) {
+ try {
+ global += 20;
+ if (i == 0) break;
+ global += 5;
+ } finally {
+ global += 22;
+ %DeoptimizeNow();
+ }
+ global += 5;
+ }
+ return 1;
+ }
+
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(1, f());
+ assertEquals(42, global);
+})();
+
+(function DeoptimizeFinallyBreakNested() {
+ var global = 0;
+ function f() {
+ global = 0;
+ for (var i = 0; i < 2; i++) {
+ try {
+ try {
+ global += 20;
+ if (i == 0) break;
+ global += 5;
+ } finally {
+ global += 12;
+ %DeoptimizeNow();
+ }
+ global += 8;
+ } finally {
+ global += 10;
+ }
+ global += 5;
+ }
+ return 1;
+ }
+
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(1, f());
+ assertEquals(42, global);
+})();
diff --git a/deps/v8/test/mjsunit/constant-fold-control-instructions.js b/deps/v8/test/mjsunit/constant-fold-control-instructions.js
index eb4994591d..20900d6171 100644
--- a/deps/v8/test/mjsunit/constant-fold-control-instructions.js
+++ b/deps/v8/test/mjsunit/constant-fold-control-instructions.js
@@ -24,15 +24,8 @@ function test() {
assertTrue(%_IsArray([1]));
assertFalse(%_IsArray(function() {}));
- assertTrue(%_IsFunction(function() {}));
- assertFalse(%_IsFunction(null));
-
assertTrue(%_IsJSReceiver(new Date()));
assertFalse(%_IsJSReceiver(1));
-
- assertTrue(%_IsMinusZero(-0.0));
- assertFalse(%_IsMinusZero(1));
- assertFalse(%_IsMinusZero(""));
}
diff --git a/deps/v8/test/mjsunit/debug-changebreakpoint.js b/deps/v8/test/mjsunit/debug-changebreakpoint.js
index 897c3e3919..ad43b1749e 100644
--- a/deps/v8/test/mjsunit/debug-changebreakpoint.js
+++ b/deps/v8/test/mjsunit/debug-changebreakpoint.js
@@ -79,12 +79,6 @@ function listener(event, exec_state, event_data, data) {
testArguments(dcp, '{' + bp_str + ',"enabled":"false"}', true);
testArguments(dcp, '{' + bp_str + ',"condition":"1==2"}', true);
testArguments(dcp, '{' + bp_str + ',"condition":"false"}', true);
- testArguments(dcp, '{' + bp_str + ',"ignoreCount":7}', true);
- testArguments(dcp, '{' + bp_str + ',"ignoreCount":0}', true);
- testArguments(
- dcp,
- '{' + bp_str + ',"enabled":"true","condition":"false","ignoreCount":0}',
- true);
// Indicate that all was processed.
listenerComplete = true;
diff --git a/deps/v8/test/mjsunit/debug-conditional-breakpoints.js b/deps/v8/test/mjsunit/debug-conditional-breakpoints.js
index 6248437300..4414897099 100644
--- a/deps/v8/test/mjsunit/debug-conditional-breakpoints.js
+++ b/deps/v8/test/mjsunit/debug-conditional-breakpoints.js
@@ -53,7 +53,6 @@ break_point_hit_count = 0;
bp = Debug.setBreakPoint(f, 0, 0, '{{{');
f();
assertEquals(0, break_point_hit_count);
-assertEquals(0, Debug.findBreakPoint(bp, false).hit_count());
Debug.clearBreakPoint(bp);
// Conditional breakpoint which evaluates to false.
@@ -61,7 +60,6 @@ break_point_hit_count = 0;
bp = Debug.setBreakPoint(f, 0, 0, 'false');
f();
assertEquals(0, break_point_hit_count);
-assertEquals(0, Debug.findBreakPoint(bp, false).hit_count());
Debug.clearBreakPoint(bp);
// Conditional breakpoint which evaluates to true.
@@ -69,7 +67,6 @@ break_point_hit_count = 0;
bp = Debug.setBreakPoint(f, 0, 0, 'true');
f();
assertEquals(1, break_point_hit_count);
-assertEquals(1, Debug.findBreakPoint(bp, false).hit_count());
Debug.clearBreakPoint(bp);
// Conditional breakpoint which different types of quotes.
@@ -77,13 +74,11 @@ break_point_hit_count = 0;
bp = Debug.setBreakPoint(f, 0, 0, '"a" == "a"');
f();
assertEquals(1, break_point_hit_count);
-assertEquals(1, Debug.findBreakPoint(bp, false).hit_count());
Debug.clearBreakPoint(bp);
break_point_hit_count = 0;
bp = Debug.setBreakPoint(f, 0, 0, "'a' == 'a'");
f();
assertEquals(1, break_point_hit_count);
-assertEquals(1, Debug.findBreakPoint(bp, false).hit_count());
Debug.clearBreakPoint(bp);
// Changing condition.
@@ -91,15 +86,12 @@ break_point_hit_count = 0;
bp = Debug.setBreakPoint(f, 0, 0, '"ab".indexOf("b") > 0');
f();
assertEquals(1, break_point_hit_count);
-assertEquals(1, Debug.findBreakPoint(bp, false).hit_count());
Debug.changeBreakPointCondition(bp, 'Math.sin(Math.PI/2) > 1');
f();
assertEquals(1, break_point_hit_count);
-assertEquals(1, Debug.findBreakPoint(bp, false).hit_count());
Debug.changeBreakPointCondition(bp, '1==1');
f();
assertEquals(2, break_point_hit_count);
-assertEquals(2, Debug.findBreakPoint(bp, false).hit_count());
Debug.clearBreakPoint(bp);
// Conditional breakpoint which checks global variable.
@@ -107,11 +99,9 @@ break_point_hit_count = 0;
bp = Debug.setBreakPoint(f, 0, 0, 'x==1');
f();
assertEquals(0, break_point_hit_count);
-assertEquals(0, Debug.findBreakPoint(bp, false).hit_count());
x=1;
f();
assertEquals(1, break_point_hit_count);
-assertEquals(1, Debug.findBreakPoint(bp, false).hit_count());
Debug.clearBreakPoint(bp);
// Conditional breakpoint which checks global variable.
@@ -121,7 +111,6 @@ for (var i = 0; i < 10; i++) {
g();
}
assertEquals(5, break_point_hit_count);
-assertEquals(5, Debug.findBreakPoint(bp, false).hit_count());
Debug.clearBreakPoint(bp);
// Conditional breakpoint which checks a parameter.
@@ -131,7 +120,6 @@ for (var i = 0; i < 10; i++) {
g();
}
assertEquals(5, break_point_hit_count);
-assertEquals(5, Debug.findBreakPoint(bp, false).hit_count());
Debug.clearBreakPoint(bp);
// Conditional breakpoint which checks a local variable.
@@ -141,7 +129,6 @@ for (var i = 0; i < 10; i++) {
g();
}
assertEquals(5, break_point_hit_count);
-assertEquals(5, Debug.findBreakPoint(bp, false).hit_count());
Debug.clearBreakPoint(bp);
// Multiple conditional breakpoint which the same condition.
@@ -152,8 +139,6 @@ for (var i = 0; i < 10; i++) {
g();
}
assertEquals(5, break_point_hit_count);
-assertEquals(5, Debug.findBreakPoint(bp1, false).hit_count());
-assertEquals(5, Debug.findBreakPoint(bp2, false).hit_count());
Debug.clearBreakPoint(bp1);
Debug.clearBreakPoint(bp2);
@@ -165,7 +150,5 @@ for (var i = 0; i < 10; i++) {
g();
}
assertEquals(10, break_point_hit_count);
-assertEquals(5, Debug.findBreakPoint(bp1, false).hit_count());
-assertEquals(5, Debug.findBreakPoint(bp2, false).hit_count());
Debug.clearBreakPoint(bp1);
Debug.clearBreakPoint(bp2);
diff --git a/deps/v8/test/mjsunit/debug-ignore-breakpoints.js b/deps/v8/test/mjsunit/debug-ignore-breakpoints.js
deleted file mode 100644
index 3cb283bc48..0000000000
--- a/deps/v8/test/mjsunit/debug-ignore-breakpoints.js
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-// Simple debug event handler which just counts the number of break points hit.
-var break_point_hit_count;
-
-function listener(event, exec_state, event_data, data) {
- if (event == Debug.DebugEvent.Break) {
- break_point_hit_count++;
- }
-};
-
-// Add the debug event listener.
-Debug.setListener(listener);
-
-// Test function.
-function f() {};
-
-// This tests ignore of break points including the case with several
-// break points in the same location.
-break_point_hit_count = 0;
-
-// Set a breakpoint in f.
-bp1 = Debug.setBreakPoint(f);
-
-// Try ignore count of 1.
-Debug.changeBreakPointIgnoreCount(bp1, 1);
-f();
-assertEquals(0, break_point_hit_count);
-f();
-assertEquals(1, break_point_hit_count);
-
-// Set another breakpoint in f at the same place.
-bp2 = Debug.setBreakPoint(f);
-f();
-assertEquals(2, break_point_hit_count);
-
-// Set different ignore counts.
-Debug.changeBreakPointIgnoreCount(bp1, 2);
-Debug.changeBreakPointIgnoreCount(bp2, 4);
-f();
-assertEquals(2, break_point_hit_count);
-f();
-assertEquals(2, break_point_hit_count);
-f();
-assertEquals(3, break_point_hit_count);
-f();
-assertEquals(4, break_point_hit_count);
-
-// Set different ignore counts (opposite).
-Debug.changeBreakPointIgnoreCount(bp1, 4);
-Debug.changeBreakPointIgnoreCount(bp2, 2);
-f();
-assertEquals(4, break_point_hit_count);
-f();
-assertEquals(4, break_point_hit_count);
-f();
-assertEquals(5, break_point_hit_count);
-f();
-assertEquals(6, break_point_hit_count);
diff --git a/deps/v8/test/mjsunit/debug-negative-break-points.js b/deps/v8/test/mjsunit/debug-negative-break-points.js
new file mode 100644
index 0000000000..1eb8943a07
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-negative-break-points.js
@@ -0,0 +1,99 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var Debug = debug.Debug;
+var break_count = 0;
+var exception_count = 0;
+
+function assertCount(expected_breaks, expected_exceptions) {
+ assertEquals(expected_breaks, break_count);
+ assertEquals(expected_exceptions, exception_count);
+}
+
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ break_count++;
+ } else if (event == Debug.DebugEvent.Exception) {
+ exception_count++;
+ }
+}
+
+function f(x) {
+ debugger;
+ return x + 1;
+}
+
+function g(x) {
+ try {
+ throw x;
+ } catch (e) {
+ }
+}
+
+function h(x) {
+ var a = undefined;
+ try {
+ var x = a();
+ } catch (e) {
+ }
+}
+
+Debug.setListener(listener);
+
+assertCount(0, 0);
+f(0);
+assertCount(1, 0);
+g(0);
+assertCount(1, 0);
+
+Debug.setBreakOnException();
+f(0);
+assertCount(2, 0);
+g(0);
+assertCount(2, 1);
+
+Debug.setBreakPoint(f, 1, 0, "x == 1");
+f(1);
+assertCount(3, 1);
+f(2);
+assertCount(3, 1);
+f(1);
+assertCount(4, 1);
+
+Debug.setBreakPoint(f, 1, 0, "x > 0");
+f(1);
+assertCount(5, 1);
+f(0);
+assertCount(5, 1);
+
+Debug.setBreakPoint(g, 2, 0, "1 == 2");
+g(1);
+assertCount(5, 1);
+
+Debug.setBreakPoint(g, 2, 0, "x == 1");
+g(1);
+assertCount(6, 2);
+g(2);
+assertCount(6, 2);
+g(1);
+assertCount(7, 3);
+
+Debug.setBreakPoint(g, 2, 0, "x > 0");
+g(1);
+assertCount(8, 4);
+g(0);
+assertCount(8, 4);
+
+h(0);
+assertCount(8, 5);
+Debug.setBreakPoint(h, 3, 0, "x > 0");
+h(1);
+assertCount(9, 6);
+h(0);
+assertCount(9, 6);
+
+Debug.clearBreakOnException();
+Debug.setListener(null);
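
For context, the conditional break points this new test relies on work as follows: the fourth argument to Debug.setBreakPoint is a source expression that is re-evaluated in the frame's scope on every hit, and the Break event fires only when it evaluates to a truthy value. A minimal sketch of just that mechanism (hypothetical function name `probe`; assumes the same --expose-debug-as debug flag and the mjsunit harness for the assert helpers):

var Debug = debug.Debug;
var hits = 0;
Debug.setListener(function(event) {
  if (event == Debug.DebugEvent.Break) hits++;
});

function probe(x) {
  return x;  // Conditional break point is set on this line.
}
Debug.setBreakPoint(probe, 1, 0, "x > 1");

probe(1);  // Condition is false: no break.
assertEquals(0, hits);
probe(2);  // Condition is true: the listener fires.
assertEquals(1, hits);
Debug.setListener(null);
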
diff --git a/deps/v8/test/mjsunit/debug-script.js b/deps/v8/test/mjsunit/debug-script.js
index 8874960208..5396415087 100644
--- a/deps/v8/test/mjsunit/debug-script.js
+++ b/deps/v8/test/mjsunit/debug-script.js
@@ -73,8 +73,8 @@ for (i = 0; i < scripts.length; i++) {
 // This has to be updated if the number of native scripts changes.
assertEquals(%NativeScriptsCount(), named_native_count);
-// Only the 'gc' extension is loaded.
-assertEquals(1, extension_count);
+// The 'gc' extension and one or two 'extras' scripts are loaded.
+assertTrue(extension_count == 2 || extension_count == 3);
 // This script and mjsunit.js have been loaded. If using d8, d8 loads
// a normal script during startup too.
assertTrue(normal_count == 2 || normal_count == 3);
diff --git a/deps/v8/test/mjsunit/debug-setbreakpoint.js b/deps/v8/test/mjsunit/debug-setbreakpoint.js
index bc23021ec7..7c996e5de0 100644
--- a/deps/v8/test/mjsunit/debug-setbreakpoint.js
+++ b/deps/v8/test/mjsunit/debug-setbreakpoint.js
@@ -88,7 +88,6 @@ function listener(event, exec_state, event_data, data) {
testArguments(dcp, '{"type":"function","target":1}', false);
testArguments(dcp, '{"type":"function","target":"f","line":-1}', false);
testArguments(dcp, '{"type":"function","target":"f","column":-1}', false);
- testArguments(dcp, '{"type":"function","target":"f","ignoreCount":-1}', false);
testArguments(dcp, '{"type":"handle","target":"-1"}', false);
mirror = debug.MakeMirror(o);
testArguments(dcp, '{"type":"handle","target":' + mirror.handle() + '}', false);
@@ -101,7 +100,6 @@ function listener(event, exec_state, event_data, data) {
testArguments(dcp, '{"type":"function","target":"f","condition":"i == 1"}', true, false);
testArguments(dcp, '{"type":"function","target":"f","enabled":true}', true, false);
testArguments(dcp, '{"type":"function","target":"f","enabled":false}', true, false);
- testArguments(dcp, '{"type":"function","target":"f","ignoreCount":7}', true, false);
testArguments(dcp, '{"type":"script","target":"test"}', true, true);
testArguments(dcp, '{"type":"script","target":"test"}', true, true);
diff --git a/deps/v8/test/mjsunit/deopt-with-outer-context.js b/deps/v8/test/mjsunit/deopt-with-outer-context.js
new file mode 100644
index 0000000000..42a829d853
--- /dev/null
+++ b/deps/v8/test/mjsunit/deopt-with-outer-context.js
@@ -0,0 +1,22 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function outer(y) {
+ function inner() {
+ var x = 10;
+ (function() {
+ // Access x from inner function to force it to be context allocated.
+ x = 20;
+ %DeoptimizeFunction(inner);
+ })();
+ // Variable y should be read from the outer context.
+ return y;
+ };
+ %OptimizeFunctionOnNextCall(inner);
+ return inner();
+}
+
+assertEquals(30, outer(30));
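
The %-prefixed calls above are V8 runtime intrinsics unlocked by --allow-natives-syntax: %OptimizeFunctionOnNextCall queues optimized compilation for the next invocation, and %DeoptimizeFunction discards the optimized code even while it is still on the stack, which is what the inner closure provokes here. The bare pattern, as a sketch with a hypothetical function (same flag and the mjsunit harness assumed):

function add(a, b) { return a + b; }
add(1, 2);  // Warm-up calls collect type feedback.
add(3, 4);
%OptimizeFunctionOnNextCall(add);
assertEquals(3, add(1, 2));  // First optimized execution.
%DeoptimizeFunction(add);
assertEquals(3, add(1, 2));  // Deopt must be behavior-preserving.
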
diff --git a/deps/v8/test/mjsunit/harmony/array-concat.js b/deps/v8/test/mjsunit/es6/array-concat.js
index cabdf2df08..bc9e1a00cc 100644
--- a/deps/v8/test/mjsunit/harmony/array-concat.js
+++ b/deps/v8/test/mjsunit/es6/array-concat.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-concat-spreadable --harmony-proxies --harmony-reflect
+// Flags: --harmony-proxies --harmony-reflect
(function testArrayConcatArity() {
"use strict";
@@ -267,30 +267,22 @@ function testConcatTypedArray(type, elems, modulo) {
}
(function testConcatSmallTypedArray() {
- var max = [Math.pow(2, 8), Math.pow(2, 16), Math.pow(2, 32), false, false];
- [
- Uint8Array,
- Uint16Array,
- Uint32Array,
- Float32Array,
- Float64Array
- ].forEach(function(ctor, i) {
- testConcatTypedArray(ctor, 1, max[i]);
- });
+ var length = 1;
+ testConcatTypedArray(Uint8Array, length, Math.pow(2, 8));
+ testConcatTypedArray(Uint16Array, length, Math.pow(2, 16));
+ testConcatTypedArray(Uint32Array, length, Math.pow(2, 32));
+ testConcatTypedArray(Float32Array, length, false);
+ testConcatTypedArray(Float64Array, length, false);
})();
(function testConcatLargeTypedArray() {
- var max = [Math.pow(2, 8), Math.pow(2, 16), Math.pow(2, 32), false, false];
- [
- Uint8Array,
- Uint16Array,
- Uint32Array,
- Float32Array,
- Float64Array
- ].forEach(function(ctor, i) {
- testConcatTypedArray(ctor, 4000, max[i]);
- });
+ var length = 4000;
+ testConcatTypedArray(Uint8Array, length, Math.pow(2, 8));
+ testConcatTypedArray(Uint16Array, length, Math.pow(2, 16));
+ testConcatTypedArray(Uint32Array, length, Math.pow(2, 32));
+ testConcatTypedArray(Float32Array, length, false);
+ testConcatTypedArray(Float64Array, length, false);
})();
diff --git a/deps/v8/test/mjsunit/es6/array-length.js b/deps/v8/test/mjsunit/es6/array-length.js
index cc3b88105c..06efe00901 100644
--- a/deps/v8/test/mjsunit/es6/array-length.js
+++ b/deps/v8/test/mjsunit/es6/array-length.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-tolength
-
// Test array functions do not cause infinite loops when length is negative,
// max_value, etc.
diff --git a/deps/v8/test/mjsunit/es6/block-for.js b/deps/v8/test/mjsunit/es6/block-for.js
index c7a23e8d32..d953d376f0 100644
--- a/deps/v8/test/mjsunit/es6/block-for.js
+++ b/deps/v8/test/mjsunit/es6/block-for.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-completion
-
"use strict";
function props(x) {
diff --git a/deps/v8/test/mjsunit/es6/classes-super.js b/deps/v8/test/mjsunit/es6/classes-super.js
new file mode 100644
index 0000000000..7bdf4ba86c
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/classes-super.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+class Test {
+ m() {
+ super.length = 10;
+ }
+}
+
+var array = [];
+Test.prototype.m.call(array);
+assertEquals(10, array.length);
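
The point being pinned down: super.length = 10 performs the assignment with the current `this` as receiver rather than on the method's home object, so when no setter exists anywhere on the chain, an own data property is created on the receiver. The same rule in isolation (sketch, mjsunit harness assumed):

class A {
  m() { super.prop = 1; }  // Lookup starts at Object.prototype; receiver is `this`.
}
var o = {};
A.prototype.m.call(o);
assertEquals(1, o.prop);  // The property lands on the receiver...
assertFalse(Object.prototype.hasOwnProperty("prop"));  // ...not on the prototype chain.
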
diff --git a/deps/v8/test/mjsunit/harmony/completion.js b/deps/v8/test/mjsunit/es6/completion.js
index ceeafb2b3d..05565bfb45 100644
--- a/deps/v8/test/mjsunit/harmony/completion.js
+++ b/deps/v8/test/mjsunit/es6/completion.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-completion --harmony-sloppy-let --no-legacy-const
+// Flags: --harmony-sloppy-let --no-legacy-const
function assertUndef(x) {
diff --git a/deps/v8/test/mjsunit/es6/generators-iteration.js b/deps/v8/test/mjsunit/es6/generators-iteration.js
index faeb68380f..ae4c682e7e 100644
--- a/deps/v8/test/mjsunit/es6/generators-iteration.js
+++ b/deps/v8/test/mjsunit/es6/generators-iteration.js
@@ -101,9 +101,9 @@ function TestGenerator(g, expected_values_for_next,
testThrow(function*() { return yield* g(); });
if (g instanceof GeneratorFunction) {
- testNext(function() { return new g(); });
- testSend(function() { return new g(); });
- testThrow(function() { return new g(); });
+ testNext(g);
+ testSend(g);
+ testThrow(g);
}
}
@@ -259,18 +259,6 @@ TestGenerator(
[1, 2, undefined]);
TestGenerator(
- function g18() {
- function* g() { yield this.x; yield this.y; }
- var iter = new g;
- iter.x = 1;
- iter.y = 2;
- return iter;
- },
- [1, 2, undefined],
- "foo",
- [1, 2, undefined]);
-
-TestGenerator(
function* g19() {
var x = 1;
yield x;
@@ -409,39 +397,17 @@ TestGenerator(
"foo",
[42, undefined]);
-// Test that yield* re-yields received results without re-boxing.
-function TestDelegatingYield() {
- function results(results) {
- var i = 0;
- function next() {
- return results[i++];
- }
- var iter = { next: next };
- var ret = {};
- ret[Symbol.iterator] = function() { return iter; };
- return ret;
- }
- function* yield_results(expected) {
- return yield* results(expected);
- }
- function collect_results(iterable) {
- var iter = iterable[Symbol.iterator]();
- var ret = [];
- var result;
- do {
- result = iter.next();
- ret.push(result);
- } while (!result.done);
- return ret;
- }
- // We have to put a full result for the end, because the return will re-box.
- var expected = [{value: 1}, 13, "foo", {value: 34, done: true}];
-
- // Sanity check.
- assertEquals(expected, collect_results(results(expected)));
- assertEquals(expected, collect_results(yield_results(expected)));
+// Test that yield* validates iterator results.
+function TestDelegatingYield(junk) {
+ var iterator = {next: () => junk};
+ var iterable = {[Symbol.iterator]: () => iterator};
+ function* g() { return yield* iterable };
+ assertThrows(() => g().next(), TypeError);
}
TestDelegatingYield();
+TestDelegatingYield(null);
+TestDelegatingYield(42);
+TestDelegatingYield(true);
function TestTryCatch(instantiate) {
function* g() { yield 1; try { yield 2; } catch (e) { yield e; } yield 3; }
@@ -693,3 +659,16 @@ function TestRecursion() {
assertThrows(TestThrowRecursion, Error);
}
TestRecursion();
+
+
+// Test yield* on non-iterable objects.
+function* g(junk) { return yield* junk }
+var non_iterables = [
+ 42,
+ {[Symbol.iterator]: 42},
+ {[Symbol.iterator]: () => 42},
+ {[Symbol.iterator]: () => ({next: 42})},
+];
+for (let junk of non_iterables) {
+ assertThrows(() => g(junk).next(), TypeError);
+}
diff --git a/deps/v8/test/mjsunit/es6/generators-objects.js b/deps/v8/test/mjsunit/es6/generators-objects.js
index 9390776761..f304738841 100644
--- a/deps/v8/test/mjsunit/es6/generators-objects.js
+++ b/deps/v8/test/mjsunit/es6/generators-objects.js
@@ -59,18 +59,12 @@ function TestGeneratorObject() {
assertEquals("[object Generator]", String(iter));
assertEquals([], Object.getOwnPropertyNames(iter));
assertTrue(iter !== g());
-
- // g() is the same as new g().
- iter = new g();
- assertSame(g.prototype, Object.getPrototypeOf(iter));
- assertTrue(iter instanceof g);
- assertEquals("Generator", %_ClassOf(iter));
- assertEquals("[object Generator]", String(iter));
assertEquals("[object Generator]", Object.prototype.toString.call(iter));
var gf = iter.__proto__.constructor;
assertEquals("[object GeneratorFunction]", Object.prototype.toString.call(gf));
- assertEquals([], Object.getOwnPropertyNames(iter));
- assertTrue(iter !== new g());
+
+  // Generators are not constructable.
+ assertThrows(()=>new g());
}
TestGeneratorObject();
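
The deleted assertions date from an earlier ES6 draft in which new g() behaved like g(); in the final semantics a generator function has no [[Construct]], so new throws while an ordinary call still returns a fresh generator. In short (sketch, mjsunit harness assumed):

function* gen() { yield 1; }
assertThrows(() => new gen(), TypeError);  // No [[Construct]] on generator functions.
assertEquals(1, gen().next().value);  // A plain call still works.
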
diff --git a/deps/v8/test/mjsunit/es6/generators-runtime.js b/deps/v8/test/mjsunit/es6/generators-runtime.js
index 98015b7f7c..5c426b21fd 100644
--- a/deps/v8/test/mjsunit/es6/generators-runtime.js
+++ b/deps/v8/test/mjsunit/es6/generators-runtime.js
@@ -99,7 +99,7 @@ function TestGeneratorObjectPrototype() {
assertSame(GeneratorObjectPrototype,
Object.getPrototypeOf((function*(){yield 1}).prototype));
- var expected_property_names = ["next", "throw", "constructor"];
+ var expected_property_names = ["next", "return", "throw", "constructor"];
var found_property_names =
Object.getOwnPropertyNames(GeneratorObjectPrototype);
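
The newly expected name is the return method ES6 adds to the generator prototype: it resumes the suspended generator with a return completion, running any pending finally blocks and leaving the generator closed. Sketch (mjsunit harness assumed):

function* g() { yield 1; yield 2; }
var it = g();
assertEquals({value: 1, done: false}, it.next());
assertEquals({value: 42, done: true}, it.return(42));  // Injects a return completion.
assertEquals({value: undefined, done: true}, it.next());  // Generator stays closed.
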
diff --git a/deps/v8/test/mjsunit/es6/generators-states.js b/deps/v8/test/mjsunit/es6/generators-states.js
index 0a2173a919..4e8c58029a 100644
--- a/deps/v8/test/mjsunit/es6/generators-states.js
+++ b/deps/v8/test/mjsunit/es6/generators-states.js
@@ -25,6 +25,7 @@ function* throwGenerator() { yield iter.throw(new Bar); }
// Throw on a suspendedStart iterator.
iter = nextGenerator();
assertThrows(function() { iter.throw(new Foo) }, Foo)
+assertIteratorIsClosed(iter);
assertThrows(function() { iter.throw(new Foo) }, Foo)
assertIteratorIsClosed(iter);
@@ -65,3 +66,29 @@ iter = (function* () {
assertIteratorResult(3, false, iter.next());
assertIteratorResult(4, false, iter.next());
assertIteratorIsClosed(iter);
+
+
+// A return that doesn't close.
+{
+ let g = function*() { try {return 42} finally {yield 43} };
+
+ let x = g();
+ assertEquals({value: 43, done: false}, x.next());
+ assertEquals({value: 42, done: true}, x.next());
+}
+{
+ let x;
+ let g = function*() { try {return 42} finally {x.throw(666)} };
+
+ x = g();
+ assertThrows(() => x.next(), TypeError); // Still executing.
+}
+{
+ let x;
+ let g = function*() {
+ try {return 42} finally {try {x.throw(666)} catch(e) {}}
+ };
+
+ x = g();
+ assertEquals({value: 42, done: true}, x.next());
+}
diff --git a/deps/v8/test/mjsunit/es6/hasinstance-symbol.js b/deps/v8/test/mjsunit/es6/hasinstance-symbol.js
new file mode 100644
index 0000000000..6783d8deef
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/hasinstance-symbol.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Verify that the well-known @@hasInstance symbol is installed on
+// Function.prototype. Test262 provides deeper coverage.
+
+(function TestHasInstance() {
+ var a = Array();
+ assertTrue(Array[Symbol.hasInstance](a));
+ assertFalse(Function.prototype[Symbol.hasInstance].call());
+})();
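
Function.prototype[Symbol.hasInstance] is the hook instanceof now dispatches through, which also makes the operator overridable per constructor. A sketch of the override path (hypothetical class, mjsunit harness assumed):

class Even {
  static [Symbol.hasInstance](x) { return typeof x == "number" && x % 2 == 0; }
}
assertTrue(4 instanceof Even);  // Dispatches to the custom @@hasInstance.
assertFalse(3 instanceof Even);
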
diff --git a/deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js b/deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js
index b56a4b56dd..035627c4d4 100644
--- a/deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js
+++ b/deps/v8/test/mjsunit/es6/no-unicode-regexp-flag.js
@@ -7,7 +7,7 @@
// mjsunit/es6/regexp-flags tests that the property is there when the
// flag is on.
-// Flags: --harmony-regexp
+// Flags: --harmony-regexps --no-harmony-unicode-regexps
'use strict';
diff --git a/deps/v8/test/mjsunit/es6/object-assign.js b/deps/v8/test/mjsunit/es6/object-assign.js
index d56cb0d1cf..1fec766dd1 100644
--- a/deps/v8/test/mjsunit/es6/object-assign.js
+++ b/deps/v8/test/mjsunit/es6/object-assign.js
@@ -138,3 +138,36 @@ assertSame(Object.assign(o, {}), o);
assertThrows(function() { return Object.assign(target, source); }, ErrorB);
assertEquals(log, "b");
})();
+
+(function add_to_source() {
+ var target = {set k1(v) { source.k3 = 100; }};
+ var source = {k1:10};
+ Object.defineProperty(source, "k2",
+ {value: 20, enumerable: false, configurable: true});
+ Object.assign(target, source);
+ assertEquals(undefined, target.k2);
+ assertEquals(undefined, target.k3);
+})();
+
+(function reconfigure_enumerable_source() {
+ var target = {set k1(v) {
+ Object.defineProperty(source, "k2", {value: 20, enumerable: true});
+ }};
+ var source = {k1:10};
+ Object.defineProperty(source, "k2",
+ {value: 20, enumerable: false, configurable: true});
+ Object.assign(target, source);
+ assertEquals(20, target.k2);
+})();
+
+(function propagate_assign_failure() {
+ var target = {set k1(v) { throw "fail" }};
+ var source = {k1:10};
+ assertThrows(()=>Object.assign(target, source));
+})();
+
+(function propagate_read_failure() {
+ var target = {};
+ var source = {get k1() { throw "fail" }};
+ assertThrows(()=>Object.assign(target, source));
+})();
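
All four new cases follow from Object.assign's single pass: it snapshots the source's own keys up front, then for each key re-checks enumerability, performs a [[Get]] on the source and a [[Set]] on the target, and lets any exception propagate immediately. That interleaving is observable with accessors (sketch, mjsunit harness assumed):

var log = [];
var source = { get a() { log.push("get a"); return 1; } };
var target = { set a(v) { log.push("set a=" + v); } };
Object.assign(target, source);
assertEquals(["get a", "set a=1"], log);  // [[Get]] then [[Set]], key by key.
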
diff --git a/deps/v8/test/mjsunit/es6/object-literals-method.js b/deps/v8/test/mjsunit/es6/object-literals-method.js
index e4527cb776..90bc51ec03 100644
--- a/deps/v8/test/mjsunit/es6/object-literals-method.js
+++ b/deps/v8/test/mjsunit/es6/object-literals-method.js
@@ -239,16 +239,14 @@ function assertIteratorResult(value, done, result) {
})();
-(function TestGeneratorConstructable() {
+(function TestGeneratorNotConstructable() {
var object = {
*method() {
yield 1;
}
};
- var g = new object.method();
- assertIteratorResult(1, false, g.next());
- assertIteratorResult(undefined, true, g.next());
+ assertThrows(()=>new object.method());
})();
diff --git a/deps/v8/test/mjsunit/es6/regexp-tolength.js b/deps/v8/test/mjsunit/es6/regexp-tolength.js
index d9e967ba27..f7cfe928af 100644
--- a/deps/v8/test/mjsunit/es6/regexp-tolength.js
+++ b/deps/v8/test/mjsunit/es6/regexp-tolength.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-tolength
-
'use strict';
let regexp = /x/g;
diff --git a/deps/v8/test/mjsunit/es6/regexp-tostring.js b/deps/v8/test/mjsunit/es6/regexp-tostring.js
new file mode 100644
index 0000000000..3deeeb7ed8
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regexp-tostring.js
@@ -0,0 +1,46 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var log = [];
+
+var fake =
+ {
+ get source() {
+ log.push("p");
+ return {
+ toString: function() {
+ log.push("ps");
+ return "pattern";
+ }
+ };
+ },
+ get flags() {
+ log.push("f");
+ return {
+ toString: function() {
+ log.push("fs");
+ return "flags";
+ }
+ };
+ }
+ }
+
+function testThrows(x) {
+ try {
+ RegExp.prototype.toString.call(x);
+ } catch (e) {
+ assertTrue(/incompatible receiver/.test(e.message));
+ return;
+ }
+ assertUnreachable();
+}
+
+testThrows(1);
+testThrows(null);
+Number.prototype.source = "a";
+Number.prototype.flags = "b";
+testThrows(1);
+
+assertEquals("/pattern/flags", RegExp.prototype.toString.call(fake));
+assertEquals(["p", "ps", "f", "fs"], log);
diff --git a/deps/v8/test/mjsunit/es6/symbols.js b/deps/v8/test/mjsunit/es6/symbols.js
index d502a83681..38338575a0 100644
--- a/deps/v8/test/mjsunit/es6/symbols.js
+++ b/deps/v8/test/mjsunit/es6/symbols.js
@@ -441,8 +441,9 @@ TestGetOwnPropertySymbolsWithProto()
function TestWellKnown() {
var symbols = [
+ "hasInstance",
// TODO(rossberg): reactivate once implemented.
- // "hasInstance", "isConcatSpreadable", "isRegExp",
+ // "isConcatSpreadable", "isRegExp",
"iterator", /* "toStringTag", */ "unscopables"
]
diff --git a/deps/v8/test/mjsunit/es6/tail-call-megatest.js b/deps/v8/test/mjsunit/es6/tail-call-megatest.js
new file mode 100644
index 0000000000..005796195a
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/tail-call-megatest.js
@@ -0,0 +1,292 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls --no-turbo-inlining
+
+
+Error.prepareStackTrace = (error, stack) => {
+ error.strace = stack;
+ return error.message + "\n at " + stack.join("\n at ");
+}
+
+
+function CheckStackTrace(expected) {
+ var e = new Error();
+ e.stack; // prepare stack trace
+ var stack = e.strace;
+ assertEquals("CheckStackTrace", stack[0].getFunctionName());
+ for (var i = 0; i < expected.length; i++) {
+ assertEquals(expected[i].name, stack[i + 1].getFunctionName());
+ }
+}
+%NeverOptimizeFunction(CheckStackTrace);
+
+
+function CheckArguments(expected, args) {
+ args = Array.prototype.slice.call(args);
+ assertEquals(expected, args);
+}
+%NeverOptimizeFunction(CheckArguments);
+
+
+var CAN_INLINE_COMMENT = "// Let it be inlined.";
+var DONT_INLINE_COMMENT = (function() {
+ var line = "// Don't inline. Don't inline. Don't inline. Don't inline.";
+ for (var i = 0; i < 4; i++) {
+ line += "\n " + line;
+ }
+ return line;
+})();
+
+
+function ident_source(source, ident) {
+ ident = " ".repeat(ident);
+ return ident + source.replace(/\n/gi, "\n" + ident);
+}
+
+var global = Function('return this')();
+var the_receiver = {receiver: 1};
+
+function run_tests() {
+ function inlinable_comment(inlinable) {
+ return inlinable ? CAN_INLINE_COMMENT : DONT_INLINE_COMMENT;
+ }
+
+ var f_cfg_sloppy = {
+ func_name: 'f',
+ source_template: function(cfg) {
+ var receiver = cfg.f_receiver != undefined ? cfg.f_receiver
+ : "global";
+ var lines = [
+ `function f(a) {`,
+ ` ${inlinable_comment(cfg.f_inlinable)}`,
+ ` assertEquals(${receiver}, this);`,
+ ` CheckArguments([${cfg.f_args}], arguments);`,
+ ` CheckStackTrace([f, test]);`,
+ ` %DeoptimizeNow();`,
+ ` CheckArguments([${cfg.f_args}], arguments);`,
+ ` CheckStackTrace([f, test]);`,
+ ` return 42;`,
+ `}`,
+ ];
+ return lines.join("\n");
+ },
+ };
+
+ var f_cfg_strict = {
+ func_name: 'f',
+ source_template: function(cfg) {
+ var receiver = cfg.f_receiver != undefined ? cfg.f_receiver
+ : "undefined";
+ var lines = [
+ `function f(a) {`,
+ ` "use strict";`,
+ ` ${inlinable_comment(cfg.f_inlinable)}`,
+ ` assertEquals(${receiver}, this);`,
+ ` CheckArguments([${cfg.f_args}], arguments);`,
+ ` CheckStackTrace([f, test]);`,
+ ` %DeoptimizeNow();`,
+ ` CheckArguments([${cfg.f_args}], arguments);`,
+ ` CheckStackTrace([f, test]);`,
+ ` return 42;`,
+ `}`,
+ ];
+ return lines.join("\n");
+ },
+ };
+
+ var f_cfg_possibly_eval = {
+ func_name: 'eval',
+ source_template: function(cfg) {
+ var receiver = cfg.f_receiver != undefined ? cfg.f_receiver
+ : "global";
+ var lines = [
+ `function f(a) {`,
+ ` ${inlinable_comment(cfg.f_inlinable)}`,
+ ` assertEquals(${receiver}, this);`,
+ ` CheckArguments([${cfg.f_args}], arguments);`,
+ ` CheckStackTrace([f, test]);`,
+ ` %DeoptimizeNow();`,
+ ` CheckArguments([${cfg.f_args}], arguments);`,
+ ` CheckStackTrace([f, test]);`,
+ ` return 42;`,
+ `}`,
+ `var eval = f;`,
+ ];
+ return lines.join("\n");
+ },
+ };
+
+ var f_cfg_bound = {
+ func_name: 'bound',
+ source_template: function(cfg) {
+ var lines = [
+ `function f(a) {`,
+ ` "use strict";`,
+ ` ${inlinable_comment(cfg.f_inlinable)}`,
+ ` assertEquals(receiver, this);`,
+ ` CheckArguments([${cfg.f_args}], arguments);`,
+ ` CheckStackTrace([f, test]);`,
+ ` %DeoptimizeNow();`,
+ ` CheckArguments([${cfg.f_args}], arguments);`,
+ ` CheckStackTrace([f, test]);`,
+ ` return 42;`,
+ `}`,
+ `var receiver = {a: 153};`,
+ `var bound = f.bind(receiver);`,
+ ];
+ return lines.join("\n");
+ },
+ };
+
+ var f_cfg_proxy = {
+ func_name: 'p',
+ source_template: function(cfg) {
+ var receiver = cfg.f_receiver != undefined ? cfg.f_receiver
+ : "global";
+ var lines = [
+ `function f(a) {`,
+ ` ${inlinable_comment(cfg.f_inlinable)}`,
+ ` assertEquals(${receiver}, this);`,
+ ` CheckArguments([${cfg.f_args}], arguments);`,
+ ` CheckStackTrace([f, test]);`,
+ ` %DeoptimizeNow();`,
+ ` CheckArguments([${cfg.f_args}], arguments);`,
+ ` CheckStackTrace([f, test]);`,
+ ` return 42;`,
+ `}`,
+ `var p = new Proxy(f, {});`,
+ ];
+ return lines.join("\n");
+ },
+ };
+
+ var g_cfg_normal = {
+ receiver: undefined,
+ source_template: function(cfg) {
+ var lines = [
+ `function g(a) {`,
+ ` "use strict";`,
+ ` ${inlinable_comment(cfg.g_inlinable)}`,
+ ` CheckArguments([${cfg.g_args}], arguments);`,
+ ` return ${cfg.f_name}(${cfg.f_args});`,
+ `}`,
+ ];
+ return lines.join("\n");
+ },
+ };
+
+
+ var g_cfg_function_apply = {
+ receiver: "the_receiver",
+ source_template: function(cfg) {
+ var lines = [
+ `function g(a) {`,
+ ` "use strict";`,
+ ` ${inlinable_comment(cfg.g_inlinable)}`,
+ ` CheckArguments([${cfg.g_args}], arguments);`,
+ ` return ${cfg.f_name}.apply(the_receiver, [${cfg.f_args}]);`,
+ `}`,
+ ];
+ return lines.join("\n");
+ },
+ };
+
+
+ var g_cfg_function_call = {
+ receiver: "the_receiver",
+ source_template: function(cfg) {
+ var f_args = "the_receiver";
+ if (cfg.f_args !== "") f_args += ", ";
+ f_args += cfg.f_args;
+
+ var lines = [
+ `function g(a) {`,
+ ` "use strict";`,
+ ` ${inlinable_comment(cfg.g_inlinable)}`,
+ ` CheckArguments([${cfg.g_args}], arguments);`,
+ ` return ${cfg.f_name}.call(${f_args});`,
+ `}`,
+ ];
+ return lines.join("\n");
+ },
+ };
+
+
+ function test_template(cfg) {
+ var f_source = cfg.f_source_template(cfg);
+ var g_source = cfg.g_source_template(cfg);
+ f_source = ident_source(f_source, 2);
+ g_source = ident_source(g_source, 2);
+
+ var lines = [
+ `(function() {`,
+ f_source,
+ g_source,
+ ` function test() {`,
+ ` "use strict";`,
+ ` assertEquals(42, g(${cfg.g_args}));`,
+ ` }`,
+ ` ${cfg.f_inlinable ? "%SetForceInlineFlag(f)" : ""};`,
+ ` ${cfg.g_inlinable ? "%SetForceInlineFlag(g)" : ""};`,
+ ``,
+ ` test();`,
+ ` %OptimizeFunctionOnNextCall(test);`,
+ ` %OptimizeFunctionOnNextCall(f);`,
+ ` %OptimizeFunctionOnNextCall(g);`,
+ ` test();`,
+ `})();`,
+ ``,
+ ];
+ var source = lines.join("\n");
+ return source;
+ }
+
+ // TODO(v8:4698), TODO(ishell): support all commented cases.
+ var f_args_variants = ["", "1", "1, 2"];
+ var g_args_variants = [/*"",*/ "10", /*"10, 20"*/];
+ var f_inlinable_variants = [/*true,*/ false];
+ var g_inlinable_variants = [true, false];
+ var f_variants = [
+ f_cfg_sloppy,
+ f_cfg_strict,
+ f_cfg_bound,
+ f_cfg_proxy,
+ f_cfg_possibly_eval,
+ ];
+ var g_variants = [
+ g_cfg_normal,
+ g_cfg_function_call,
+ g_cfg_function_apply,
+ ];
+
+ f_variants.forEach((f_cfg) => {
+ g_variants.forEach((g_cfg) => {
+ f_args_variants.forEach((f_args) => {
+ g_args_variants.forEach((g_args) => {
+ f_inlinable_variants.forEach((f_inlinable) => {
+ g_inlinable_variants.forEach((g_inlinable) => {
+ var cfg = {
+ f_source_template: f_cfg.source_template,
+ f_inlinable,
+ f_args,
+ f_name: f_cfg.func_name,
+ f_receiver: g_cfg.receiver,
+ g_source_template: g_cfg.source_template,
+ g_inlinable,
+ g_args,
+ };
+ var source = test_template(cfg);
+ print("====================");
+ print(source);
+ eval(source);
+ });
+ });
+ });
+ });
+ });
+ });
+}
+
+run_tests();
diff --git a/deps/v8/test/mjsunit/es6/tail-call-proxies.js b/deps/v8/test/mjsunit/es6/tail-call-proxies.js
new file mode 100644
index 0000000000..25f9fcfbe7
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/tail-call-proxies.js
@@ -0,0 +1,97 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls --harmony-proxies
+"use strict";
+
+Error.prepareStackTrace = (e, s) => s;
+
+function CheckStackTrace(expected) {
+ var stack = (new Error()).stack;
+ assertEquals("CheckStackTrace", stack[0].getFunctionName());
+ for (var i = 0; i < expected.length; i++) {
+ assertEquals(expected[i].name, stack[i + 1].getFunctionName());
+ }
+}
+
+
+// Tail call proxy function when caller does not have an arguments
+// adaptor frame.
+(function test() {
+ // Caller and callee have same number of arguments.
+ function f1(a) {
+ CheckStackTrace([f1, test]);
+ return 10 + a;
+ }
+ var p1 = new Proxy(f1, {});
+ function g1(a) { return p1(2); }
+ assertEquals(12, g1(1));
+
+ // Caller has more arguments than callee.
+ function f2(a) {
+ CheckStackTrace([f2, test]);
+ return 10 + a;
+ }
+ var p2 = new Proxy(f2, {});
+ function g2(a, b, c) { return p2(2); }
+ assertEquals(12, g2(1, 2, 3));
+
+ // Caller has less arguments than callee.
+ function f3(a, b, c) {
+ CheckStackTrace([f3, test]);
+ return 10 + a + b + c;
+ }
+ var p3 = new Proxy(f3, {});
+ function g3(a) { return p3(2, 3, 4); }
+ assertEquals(19, g3(1));
+
+ // Callee has arguments adaptor frame.
+ function f4(a, b, c) {
+ CheckStackTrace([f4, test]);
+ return 10 + a;
+ }
+ var p4 = new Proxy(f4, {});
+ function g4(a) { return p4(2); }
+ assertEquals(12, g4(1));
+})();
+
+
+// Tail call proxy function when caller has an arguments adaptor frame.
+(function test() {
+ // Caller and callee have same number of arguments.
+ function f1(a) {
+ CheckStackTrace([f1, test]);
+ return 10 + a;
+ }
+ var p1 = new Proxy(f1, {});
+ function g1(a) { return p1(2); }
+ assertEquals(12, g1());
+
+ // Caller has more arguments than callee.
+ function f2(a) {
+ CheckStackTrace([f2, test]);
+ return 10 + a;
+ }
+ var p2 = new Proxy(f2, {});
+ function g2(a, b, c) { return p2(2); }
+ assertEquals(12, g2());
+
+ // Caller has less arguments than callee.
+ function f3(a, b, c) {
+ CheckStackTrace([f3, test]);
+ return 10 + a + b + c;
+ }
+ var p3 = new Proxy(f3, {});
+ function g3(a) { return p3(2, 3, 4); }
+ assertEquals(19, g3());
+
+ // Callee has arguments adaptor frame.
+ function f4(a, b, c) {
+ CheckStackTrace([f4, test]);
+ return 10 + a;
+ }
+ var p4 = new Proxy(f4, {});
+ function g4(a) { return p4(2); }
+ assertEquals(12, g4());
+})();
diff --git a/deps/v8/test/mjsunit/es6/tail-call-simple.js b/deps/v8/test/mjsunit/es6/tail-call-simple.js
new file mode 100644
index 0000000000..d2890b0212
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/tail-call-simple.js
@@ -0,0 +1,107 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls --stack-size=100
+
+//
+// Tail calls work only in strict mode.
+//
+(function() {
+ function f(n) {
+ if (n <= 0) {
+ return "foo";
+ }
+ return f(n - 1);
+ }
+ assertThrows(()=>{ f(1e5) });
+ %OptimizeFunctionOnNextCall(f);
+ assertThrows(()=>{ f(1e5) });
+})();
+
+
+//
+// Tail call normal functions.
+//
+(function() {
+ "use strict";
+ function f(n) {
+ if (n <= 0) {
+ return "foo";
+ }
+ return f(n - 1);
+ }
+ assertEquals("foo", f(1e5));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("foo", f(1e5));
+})();
+
+
+(function() {
+ "use strict";
+ function f(n){
+ if (n <= 0) {
+ return "foo";
+ }
+ return g(n - 1);
+ }
+ function g(n){
+ if (n <= 0) {
+ return "bar";
+ }
+ return f(n - 1);
+ }
+ assertEquals("foo", f(1e5));
+ assertEquals("bar", f(1e5 + 1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("foo", f(1e5));
+ assertEquals("bar", f(1e5 + 1));
+})();
+
+
+//
+// Tail call bound functions.
+//
+(function() {
+ "use strict";
+ function f0(n) {
+ if (n <= 0) {
+ return "foo";
+ }
+ return f_bound(n - 1);
+ }
+ var f_bound = f0.bind({});
+ function f(n) {
+ return f_bound(n);
+ }
+ assertEquals("foo", f(1e5));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("foo", f(1e5));
+})();
+
+
+(function() {
+ "use strict";
+ function f0(n){
+ if (n <= 0) {
+ return "foo";
+ }
+ return g_bound(n - 1);
+ }
+ function g0(n){
+ if (n <= 0) {
+ return "bar";
+ }
+ return f_bound(n - 1);
+ }
+ var f_bound = f0.bind({});
+ var g_bound = g0.bind({});
+ function f(n) {
+ return f_bound(n);
+ }
+ assertEquals("foo", f(1e5));
+ assertEquals("bar", f(1e5 + 1));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals("foo", f(1e5));
+ assertEquals("bar", f(1e5 + 1));
+})();
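
The --stack-size=100 flag is what gives these tests teeth: recursion 1e5 deep only survives because, in strict mode under --harmony-tailcalls, a call in tail position replaces the caller's frame instead of pushing a new one. The essence (sketch, same flags and mjsunit harness assumed):

"use strict";
function count(n, acc) {
  if (n === 0) return acc;
  return count(n - 1, acc + 1);  // Tail position: the frame is reused.
}
assertEquals(1e5, count(1e5, 0));  // Would overflow the stack without tail calls.
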
diff --git a/deps/v8/test/mjsunit/es6/tail-call.js b/deps/v8/test/mjsunit/es6/tail-call.js
new file mode 100644
index 0000000000..e9539c37ba
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/tail-call.js
@@ -0,0 +1,386 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tailcalls
+"use strict";
+
+Error.prepareStackTrace = (error, stack) => {
+ error.strace = stack;
+ return error.message + "\n at " + stack.join("\n at ");
+}
+
+
+function CheckStackTrace(expected) {
+ var e = new Error();
+ e.stack; // prepare stack trace
+ var stack = e.strace;
+ assertEquals("CheckStackTrace", stack[0].getFunctionName());
+ for (var i = 0; i < expected.length; i++) {
+ assertEquals(expected[i].name, stack[i + 1].getFunctionName());
+ }
+}
+
+function f(expected_call_stack, a, b) {
+ CheckStackTrace(expected_call_stack);
+ return a;
+}
+
+function f_153(expected_call_stack, a) {
+ CheckStackTrace(expected_call_stack);
+ return 153;
+}
+
+
+// Tail call when caller does not have an arguments adaptor frame.
+(function() {
+ // Caller and callee have same number of arguments.
+ function f1(a) {
+ CheckStackTrace([f1, test]);
+ return 10 + a;
+ }
+ function g1(a) { return f1(2); }
+
+ // Caller has more arguments than callee.
+ function f2(a) {
+ CheckStackTrace([f2, test]);
+ return 10 + a;
+ }
+ function g2(a, b, c) { return f2(2); }
+
+ // Caller has less arguments than callee.
+ function f3(a, b, c) {
+ CheckStackTrace([f3, test]);
+ return 10 + a + b + c;
+ }
+ function g3(a) { return f3(2, 3, 4); }
+
+ // Callee has arguments adaptor frame.
+ function f4(a, b, c) {
+ CheckStackTrace([f4, test]);
+ return 10 + a;
+ }
+ function g4(a) { return f4(2); }
+
+ function test() {
+ assertEquals(12, g1(1));
+ assertEquals(12, g2(1, 2, 3));
+ assertEquals(19, g3(1));
+ assertEquals(12, g4(1));
+ }
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail call when caller has an arguments adaptor frame.
+(function() {
+ // Caller and callee have same number of arguments.
+ function f1(a) {
+ CheckStackTrace([f1, test]);
+ return 10 + a;
+ }
+ function g1(a) { return f1(2); }
+
+ // Caller has more arguments than callee.
+ function f2(a) {
+ CheckStackTrace([f2, test]);
+ return 10 + a;
+ }
+ function g2(a, b, c) { return f2(2); }
+
+ // Caller has less arguments than callee.
+ function f3(a, b, c) {
+ CheckStackTrace([f3, test]);
+ return 10 + a + b + c;
+ }
+ function g3(a) { return f3(2, 3, 4); }
+
+ // Callee has arguments adaptor frame.
+ function f4(a, b, c) {
+ CheckStackTrace([f4, test]);
+ return 10 + a;
+ }
+ function g4(a) { return f4(2); }
+
+ function test() {
+ assertEquals(12, g1());
+ assertEquals(12, g2());
+ assertEquals(19, g3());
+ assertEquals(12, g4());
+ }
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail call bound function when caller does not have an arguments
+// adaptor frame.
+(function() {
+ // Caller and callee have same number of arguments.
+ function f1(a) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f1, test]);
+ return 10 + a;
+ }
+ var b1 = f1.bind({a: 153});
+ function g1(a) { return b1(2); }
+
+ // Caller has more arguments than callee.
+ function f2(a) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f2, test]);
+ return 10 + a;
+ }
+ var b2 = f2.bind({a: 153});
+ function g2(a, b, c) { return b2(2); }
+
+ // Caller has less arguments than callee.
+ function f3(a, b, c) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f3, test]);
+ return 10 + a + b + c;
+ }
+ var b3 = f3.bind({a: 153});
+ function g3(a) { return b3(2, 3, 4); }
+
+ // Callee has arguments adaptor frame.
+ function f4(a, b, c) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f4, test]);
+ return 10 + a;
+ }
+ var b4 = f4.bind({a: 153});
+ function g4(a) { return b4(2); }
+
+ function test() {
+ assertEquals(12, g1(1));
+ assertEquals(12, g2(1, 2, 3));
+ assertEquals(19, g3(1));
+ assertEquals(12, g4(1));
+ }
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail call bound function when caller has an arguments adaptor frame.
+(function() {
+ // Caller and callee have same number of arguments.
+ function f1(a) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f1, test]);
+ return 10 + a;
+ }
+ var b1 = f1.bind({a: 153});
+ function g1(a) { return b1(2); }
+
+ // Caller has more arguments than callee.
+ function f2(a) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f2, test]);
+ return 10 + a;
+ }
+ var b2 = f2.bind({a: 153});
+ function g2(a, b, c) { return b2(2); }
+
+ // Caller has less arguments than callee.
+ function f3(a, b, c) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f3, test]);
+ return 10 + a + b + c;
+ }
+ var b3 = f3.bind({a: 153});
+ function g3(a) { return b3(2, 3, 4); }
+
+ // Callee has arguments adaptor frame.
+ function f4(a, b, c) {
+ assertEquals(153, this.a);
+ CheckStackTrace([f4, test]);
+ return 10 + a;
+ }
+ var b4 = f4.bind({a: 153});
+ function g4(a) { return b4(2); }
+
+ function test() {
+ assertEquals(12, g1());
+ assertEquals(12, g2());
+ assertEquals(19, g3());
+ assertEquals(12, g4());
+ }
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Tail calling via various expressions.
+(function() {
+ function g1(a) {
+ return f([f, g1, test], false) || f([f, test], true);
+ }
+
+ function g2(a) {
+ return f([f, g2, test], true) && f([f, test], true);
+ }
+
+ function g3(a) {
+ return f([f, g3, test], 13), f([f, test], 153);
+ }
+
+ function test() {
+ assertEquals(true, g1());
+ assertEquals(true, g2());
+ assertEquals(153, g3());
+ }
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Test tail calls from try-catch constructs.
+(function() {
+ function tc1(a) {
+ try {
+ f_153([f_153, tc1, test]);
+ return f_153([f_153, tc1, test]);
+ } catch(e) {
+ f_153([f_153, tc1, test]);
+ }
+ }
+
+ function tc2(a) {
+ try {
+ f_153([f_153, tc2, test]);
+ throw new Error("boom");
+ } catch(e) {
+ f_153([f_153, tc2, test]);
+ return f_153([f_153, test]);
+ }
+ }
+
+ function tc3(a) {
+ try {
+ f_153([f_153, tc3, test]);
+ throw new Error("boom");
+ } catch(e) {
+ f_153([f_153, tc3, test]);
+ }
+ f_153([f_153, tc3, test]);
+ return f_153([f_153, test]);
+ }
+
+ function test() {
+ assertEquals(153, tc1());
+ assertEquals(153, tc2());
+ assertEquals(153, tc3());
+ }
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Test tail calls from try-finally constructs.
+(function() {
+ function tf1(a) {
+ try {
+ f_153([f_153, tf1, test]);
+ return f_153([f_153, tf1, test]);
+ } finally {
+ f_153([f_153, tf1, test]);
+ }
+ }
+
+ function tf2(a) {
+ try {
+ f_153([f_153, tf2, test]);
+ throw new Error("boom");
+ } finally {
+ f_153([f_153, tf2, test]);
+ return f_153([f_153, test]);
+ }
+ }
+
+ function tf3(a) {
+ try {
+ f_153([f_153, tf3, test]);
+ } finally {
+ f_153([f_153, tf3, test]);
+ }
+ return f_153([f_153, test]);
+ }
+
+ function test() {
+ assertEquals(153, tf1());
+ assertEquals(153, tf2());
+ assertEquals(153, tf3());
+ }
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
+
+
+// Test tail calls from try-catch-finally constructs.
+(function() {
+ function tcf1(a) {
+ try {
+ f_153([f_153, tcf1, test]);
+ return f_153([f_153, tcf1, test]);
+ } catch(e) {
+ } finally {
+ f_153([f_153, tcf1, test]);
+ }
+ }
+
+ function tcf2(a) {
+ try {
+ f_153([f_153, tcf2, test]);
+ throw new Error("boom");
+ } catch(e) {
+ f_153([f_153, tcf2, test]);
+ return f_153([f_153, tcf2, test]);
+ } finally {
+ f_153([f_153, tcf2, test]);
+ }
+ }
+
+ function tcf3(a) {
+ try {
+ f_153([f_153, tcf3, test]);
+ throw new Error("boom");
+ } catch(e) {
+ f_153([f_153, tcf3, test]);
+ } finally {
+ f_153([f_153, tcf3, test]);
+ return f_153([f_153, test]);
+ }
+ }
+
+ function tcf4(a) {
+ try {
+ f_153([f_153, tcf4, test]);
+ throw new Error("boom");
+ } catch(e) {
+ f_153([f_153, tcf4, test]);
+ } finally {
+ f_153([f_153, tcf4, test]);
+ }
+ return f_153([f_153, test]);
+ }
+
+ function test() {
+ assertEquals(153, tcf1());
+ assertEquals(153, tcf2());
+ assertEquals(153, tcf3());
+ assertEquals(153, tcf4());
+ }
+ test();
+ %OptimizeFunctionOnNextCall(test);
+ test();
+})();
diff --git a/deps/v8/test/mjsunit/es6/typedarray.js b/deps/v8/test/mjsunit/es6/typedarray.js
index c43ba1c4bf..e6a949ca59 100644
--- a/deps/v8/test/mjsunit/es6/typedarray.js
+++ b/deps/v8/test/mjsunit/es6/typedarray.js
@@ -529,6 +529,8 @@ function TestTypedArraySet() {
assertThrows(function() { a.set(0); }, TypeError);
assertThrows(function() { a.set(0, 1); }, TypeError);
+
+ assertEquals(1, a.set.length);
}
TestTypedArraySet();
@@ -672,7 +674,6 @@ function TestDataViewConstructor() {
// error cases
assertThrows(function() { new DataView(ab, -1); }, RangeError);
- assertThrows(function() { new DataView(ab, 1, -1); }, RangeError);
assertThrows(function() { new DataView(); }, TypeError);
assertThrows(function() { new DataView([]); }, TypeError);
assertThrows(function() { new DataView(ab, 257); }, RangeError);
@@ -693,6 +694,19 @@ function TestDataViewPropertyTypeChecks() {
CheckProperty("buffer");
CheckProperty("byteOffset");
CheckProperty("byteLength");
+
+ function CheckGetSetLength(name) {
+ assertEquals(1, DataView.prototype["get" + name].length);
+ assertEquals(2, DataView.prototype["set" + name].length);
+ }
+ CheckGetSetLength("Int8");
+ CheckGetSetLength("Uint8");
+ CheckGetSetLength("Int16");
+ CheckGetSetLength("Uint16");
+ CheckGetSetLength("Int32");
+ CheckGetSetLength("Uint32");
+ CheckGetSetLength("Float32");
+ CheckGetSetLength("Float64");
}
diff --git a/deps/v8/test/mjsunit/for-in-opt.js b/deps/v8/test/mjsunit/for-in-opt.js
index e458e1d537..8f73539382 100644
--- a/deps/v8/test/mjsunit/for-in-opt.js
+++ b/deps/v8/test/mjsunit/for-in-opt.js
@@ -28,13 +28,14 @@ var deopt_has = false;
var deopt_enum = false;
var handler = {
- enumerate(target) {
+ ownKeys() {
if (deopt_enum) {
%DeoptimizeFunction(f2);
deopt_enum = false;
}
- return keys[Symbol.iterator]();
+ return keys;
},
+ getOwnPropertyDescriptor() { return { enumerable: true, configurable: true }},
has(target, k) {
if (deopt_has) {
@@ -42,7 +43,7 @@ var handler = {
deopt_has = false;
}
has_keys.push(k);
- return {value: 10, configurable: true, writable: false, enumerable: true};
+ return true;
}
};
@@ -67,7 +68,7 @@ function check_f2() {
check_f2();
check_f2();
-// Test lazy deopt after GetPropertyNamesFast
+// Test lazy deopt after ForInEnumerate
%OptimizeFunctionOnNextCall(f2);
deopt_enum = true;
check_f2();
@@ -136,14 +137,13 @@ function listener(event, exec_state, event_data, data) {
}
var handler3 = {
- enumerate(target) {
- return ["a", "b"][Symbol.iterator]();
- },
+ ownKeys() { return ["a", "b"] },
+ getOwnPropertyDescriptor() { return { enumerable: true, configurable: true }},
has(target, k) {
if (k == "a") count++;
if (x) %ScheduleBreak();
- return {value: 10, configurable: true, writable: false, enumerable: true};
+ return true;
}
};
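
This rewrite tracks the removal of the [[Enumerate]] internal method (and its proxy trap) from the proposal: for-in over a proxy now obtains the key list from the ownKeys trap, filters each key for enumerability through getOwnPropertyDescriptor, and (in this V8 version) consults has per iteration to confirm the key is still present. Distilled (sketch, mjsunit harness assumed):

var p = new Proxy({}, {
  ownKeys() { return ["a", "b"]; },
  getOwnPropertyDescriptor() { return {enumerable: true, configurable: true}; },
  has() { return true; }
});
var seen = [];
for (var k in p) seen.push(k);
assertEquals(["a", "b"], seen);
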
diff --git a/deps/v8/test/mjsunit/function-caller.js b/deps/v8/test/mjsunit/function-caller.js
index a2c54bbfd3..84f3cbed2f 100644
--- a/deps/v8/test/mjsunit/function-caller.js
+++ b/deps/v8/test/mjsunit/function-caller.js
@@ -47,7 +47,8 @@ f(null);
eval('f(null)');
// Check called from strict builtin functions.
-[null, null].sort(f);
+// [null, null].sort(f); // Does not work because sort tail-calls the comparator.
+[null].forEach(f, null);
// Check called from sloppy builtin functions.
"abel".replace(/b/g, function h() {
diff --git a/deps/v8/test/mjsunit/function-names.js b/deps/v8/test/mjsunit/function-names.js
index 5ed0b794e8..6df31b900a 100644
--- a/deps/v8/test/mjsunit/function-names.js
+++ b/deps/v8/test/mjsunit/function-names.js
@@ -65,10 +65,11 @@ var datePrototypeFunctions = [
"getTimezoneOffset", "setTime", "setMilliseconds",
"setUTCMilliseconds", "setSeconds", "setUTCSeconds", "setMinutes",
"setUTCMinutes", "setHours", "setUTCHours", "setDate", "setUTCDate",
- "setMonth", "setUTCMonth", "setFullYear", "setUTCFullYear", "toGMTString",
+ "setMonth", "setUTCMonth", "setFullYear", "setUTCFullYear",
"toUTCString", "getYear", "setYear"];
TestFunctionNames(Date.prototype, datePrototypeFunctions);
+assertEquals(Date.prototype.toGMTString, Date.prototype.toUTCString);
// Function.prototype functions.
diff --git a/deps/v8/test/mjsunit/harmony/array-species-constructor-delete.js b/deps/v8/test/mjsunit/harmony/array-species-constructor-delete.js
new file mode 100644
index 0000000000..e61d0ddebf
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-species-constructor-delete.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-species
+
+// Deleting Array.prototype.constructor invalidates the @@species protector
+
+let x = [];
+
+assertEquals(Array, x.map(()=>{}).constructor);
+assertEquals(Array, x.filter(()=>{}).constructor);
+assertEquals(Array, x.slice().constructor);
+assertEquals(Array, x.splice().constructor);
+assertEquals(Array, x.concat([1]).constructor);
+assertEquals(1, x.concat([1])[0]);
+
+class MyArray extends Array { }
+
+Object.prototype.constructor = MyArray;
+delete Array.prototype.constructor;
+
+assertEquals(MyArray, x.map(()=>{}).constructor);
+assertEquals(MyArray, x.filter(()=>{}).constructor);
+assertEquals(MyArray, x.slice().constructor);
+assertEquals(MyArray, x.splice().constructor);
+assertEquals(MyArray, x.concat([1]).constructor);
+assertEquals(1, x.concat([1])[0]);
diff --git a/deps/v8/test/mjsunit/harmony/array-species-constructor.js b/deps/v8/test/mjsunit/harmony/array-species-constructor.js
new file mode 100644
index 0000000000..d4eeefa010
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-species-constructor.js
@@ -0,0 +1,27 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-species
+
+// Overwriting the constructor of an instance updates the protector
+
+let x = [];
+
+assertEquals(Array, x.map(()=>{}).constructor);
+assertEquals(Array, x.filter(()=>{}).constructor);
+assertEquals(Array, x.slice().constructor);
+assertEquals(Array, x.splice().constructor);
+assertEquals(Array, x.concat([1]).constructor);
+assertEquals(1, x.concat([1])[0]);
+
+class MyArray extends Array { }
+
+x.constructor = MyArray;
+
+assertEquals(MyArray, x.map(()=>{}).constructor);
+assertEquals(MyArray, x.filter(()=>{}).constructor);
+assertEquals(MyArray, x.slice().constructor);
+assertEquals(MyArray, x.splice().constructor);
+assertEquals(MyArray, x.concat([1]).constructor);
+assertEquals(1, x.concat([1])[0]);
diff --git a/deps/v8/test/mjsunit/harmony/array-species-delete.js b/deps/v8/test/mjsunit/harmony/array-species-delete.js
new file mode 100644
index 0000000000..bccf3a4df9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-species-delete.js
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-species
+
+// Deleting Array[Symbol.species] invalidates the @@species protector
+
+let x = [];
+
+assertEquals(Array, x.map(()=>{}).constructor);
+assertEquals(Array, x.filter(()=>{}).constructor);
+assertEquals(Array, x.slice().constructor);
+assertEquals(Array, x.splice().constructor);
+assertEquals(Array, x.concat([1]).constructor);
+assertEquals(1, x.concat([1])[0]);
+
+class MyArray extends Array { }
+
+Object.prototype[Symbol.species] = MyArray;
+delete Array[Symbol.species];
+
+assertEquals(MyArray, x.map(()=>{}).constructor);
+assertEquals(MyArray, x.filter(()=>{}).constructor);
+assertEquals(MyArray, x.slice().constructor);
+assertEquals(MyArray, x.splice().constructor);
+assertEquals(MyArray, x.concat([1]).constructor);
+assertEquals(1, x.concat([1])[0]);
diff --git a/deps/v8/test/mjsunit/harmony/array-species-modified.js b/deps/v8/test/mjsunit/harmony/array-species-modified.js
new file mode 100644
index 0000000000..b5c5c16d7b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-species-modified.js
@@ -0,0 +1,27 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-species
+
+// Overwriting Array[Symbol.species] updates the protector
+
+let x = [];
+
+assertEquals(Array, x.map(()=>{}).constructor);
+assertEquals(Array, x.filter(()=>{}).constructor);
+assertEquals(Array, x.slice().constructor);
+assertEquals(Array, x.splice().constructor);
+assertEquals(Array, x.concat([1]).constructor);
+assertEquals(1, x.concat([1])[0]);
+
+class MyArray extends Array { }
+
+Object.defineProperty(Array, Symbol.species, {value: MyArray});
+
+assertEquals(MyArray, x.map(()=>{}).constructor);
+assertEquals(MyArray, x.filter(()=>{}).constructor);
+assertEquals(MyArray, x.slice().constructor);
+assertEquals(MyArray, x.splice().constructor);
+assertEquals(MyArray, x.concat([1]).constructor);
+assertEquals(1, x.concat([1])[0]);
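
Every file in this group pivots on the same lookup: Array.prototype.map, filter, slice, splice, and now concat construct their result via new (receiver.constructor[Symbol.species])(...); the protector cell only lets V8 skip that lookup on the fast path, so invalidating it must never change what is observable. The lookup itself (sketch, same --harmony-species flag and mjsunit harness assumed):

class MyArray extends Array {
  static get [Symbol.species]() { return Array; }  // Redirect derived results.
}
var result = new MyArray(1, 2, 3).map(x => x);
assertEquals(Array, result.constructor);
assertFalse(result instanceof MyArray);
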
diff --git a/deps/v8/test/mjsunit/harmony/array-species-parent-constructor.js b/deps/v8/test/mjsunit/harmony/array-species-parent-constructor.js
new file mode 100644
index 0000000000..8ea59bcfe4
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-species-parent-constructor.js
@@ -0,0 +1,27 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-species
+
+// Overwriting Array.prototype.constructor updates the protector
+
+let x = [];
+
+assertEquals(Array, x.map(()=>{}).constructor);
+assertEquals(Array, x.filter(()=>{}).constructor);
+assertEquals(Array, x.slice().constructor);
+assertEquals(Array, x.splice().constructor);
+assertEquals(Array, x.concat([1]).constructor);
+assertEquals(1, x.concat([1])[0]);
+
+class MyArray extends Array { }
+
+Array.prototype.constructor = MyArray;
+
+assertEquals(MyArray, x.map(()=>{}).constructor);
+assertEquals(MyArray, x.filter(()=>{}).constructor);
+assertEquals(MyArray, x.slice().constructor);
+assertEquals(MyArray, x.splice().constructor);
+assertEquals(MyArray, x.concat([1]).constructor);
+assertEquals(1, x.concat([1])[0]);
diff --git a/deps/v8/test/mjsunit/harmony/array-species-proto.js b/deps/v8/test/mjsunit/harmony/array-species-proto.js
new file mode 100644
index 0000000000..077b3f5a17
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-species-proto.js
@@ -0,0 +1,27 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-species
+
+// Overwriting an array instance's __proto__ updates the protector
+
+let x = [];
+
+assertEquals(Array, x.map(()=>{}).constructor);
+assertEquals(Array, x.filter(()=>{}).constructor);
+assertEquals(Array, x.slice().constructor);
+assertEquals(Array, x.splice().constructor);
+assertEquals(Array, x.concat([1]).constructor);
+assertEquals(1, x.concat([1])[0]);
+
+class MyArray extends Array { }
+
+x.__proto__ = MyArray.prototype;
+
+assertEquals(MyArray, x.map(()=>{}).constructor);
+assertEquals(MyArray, x.filter(()=>{}).constructor);
+assertEquals(MyArray, x.slice().constructor);
+assertEquals(MyArray, x.splice().constructor);
+assertEquals(MyArray, x.concat([1]).constructor);
+assertEquals(1, x.concat([1])[0]);
diff --git a/deps/v8/test/mjsunit/harmony/array-species.js b/deps/v8/test/mjsunit/harmony/array-species.js
index 75a45aaf59..3cef50cc4c 100644
--- a/deps/v8/test/mjsunit/harmony/array-species.js
+++ b/deps/v8/test/mjsunit/harmony/array-species.js
@@ -16,6 +16,8 @@ assertEquals(MyArray, new MyArray().map(()=>{}).constructor);
assertEquals(MyArray, new MyArray().filter(()=>{}).constructor);
assertEquals(MyArray, new MyArray().slice().constructor);
assertEquals(MyArray, new MyArray().splice().constructor);
+assertEquals(MyArray, new MyArray().concat([1]).constructor);
+assertEquals(1, new MyArray().concat([1])[0]);
 // Subclasses can override @@species to return another class
@@ -27,6 +29,7 @@ assertEquals(MyArray, new MyOtherArray().map(()=>{}).constructor);
assertEquals(MyArray, new MyOtherArray().filter(()=>{}).constructor);
assertEquals(MyArray, new MyOtherArray().slice().constructor);
assertEquals(MyArray, new MyOtherArray().splice().constructor);
+assertEquals(MyArray, new MyOtherArray().concat().constructor);
// Array methods on non-arrays return arrays
@@ -44,11 +47,15 @@ assertEquals(MyObject,
Array.prototype.slice.call(new MyNonArray()).constructor);
assertEquals(MyObject,
Array.prototype.splice.call(new MyNonArray()).constructor);
+assertEquals(MyObject,
+ Array.prototype.concat.call(new MyNonArray()).constructor);
assertEquals(undefined,
Array.prototype.map.call(new MyNonArray(), ()=>{}).length);
assertEquals(undefined,
Array.prototype.filter.call(new MyNonArray(), ()=>{}).length);
+assertEquals(undefined,
+ Array.prototype.concat.call(new MyNonArray(), ()=>{}).length);
 // slice and splice explicitly define the length for some reason
assertEquals(0, Array.prototype.slice.call(new MyNonArray()).length);
assertEquals(0, Array.prototype.splice.call(new MyNonArray()).length);
@@ -61,6 +68,9 @@ assertEquals(Array,
Realm.eval(realm, "[]"), ()=>{}).constructor);
assertFalse(Array === Realm.eval(realm, "[]").map(()=>{}).constructor);
assertFalse(Array === Realm.eval(realm, "[].map(()=>{}).constructor"));
+assertEquals(Array,
+ Array.prototype.concat.call(
+ Realm.eval(realm, "[]")).constructor);
// Defaults when constructor or @@species is missing or non-constructor
@@ -74,6 +84,7 @@ assertEquals(MyOtherDefaultArray,
new MyOtherDefaultArray().map(()=>{}).constructor);
MyOtherDefaultArray.prototype.constructor = undefined;
assertEquals(Array, new MyOtherDefaultArray().map(()=>{}).constructor);
+assertEquals(Array, new MyOtherDefaultArray().concat().constructor);
// Exceptions propagated when getting constructor @@species throws
@@ -100,6 +111,7 @@ assertThrows(() => new FrozenArray([1]).map(()=>0), TypeError);
assertThrows(() => new FrozenArray([1]).filter(()=>true), TypeError);
assertThrows(() => new FrozenArray([1]).slice(0, 1), TypeError);
assertThrows(() => new FrozenArray([1]).splice(0, 1), TypeError);
+assertThrows(() => new FrozenArray([]).concat([1]), TypeError);
// Verify call counts and constructor parameters
@@ -133,17 +145,22 @@ assertArrayEquals([0], params);
count = 0;
params = undefined;
assertEquals(MyObservedArray,
+ new MyObservedArray().concat().constructor);
+assertEquals(1, count);
+assertArrayEquals([0], params);
+
+count = 0;
+params = undefined;
+assertEquals(MyObservedArray,
new MyObservedArray().slice().constructor);
-// TODO(littledan): Should be 1
-assertEquals(2, count);
+assertEquals(1, count);
assertArrayEquals([0], params);
count = 0;
params = undefined;
assertEquals(MyObservedArray,
new MyObservedArray().splice().constructor);
-// TODO(littledan): Should be 1
-assertEquals(2, count);
+assertEquals(1, count);
assertArrayEquals([0], params);
// @@species constructor can be a Proxy, and the realm access doesn't
diff --git a/deps/v8/test/mjsunit/harmony/block-for-sloppy.js b/deps/v8/test/mjsunit/harmony/block-for-sloppy.js
index e9e960504b..051d2b16ea 100644
--- a/deps/v8/test/mjsunit/harmony/block-for-sloppy.js
+++ b/deps/v8/test/mjsunit/harmony/block-for-sloppy.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
-// Flags: --harmony-completion
function props(x) {
var array = [];
diff --git a/deps/v8/test/mjsunit/harmony/debug-stepin-proxies.js b/deps/v8/test/mjsunit/harmony/debug-stepin-proxies.js
index 0689801a4f..8595f404f0 100644
--- a/deps/v8/test/mjsunit/harmony/debug-stepin-proxies.js
+++ b/deps/v8/test/mjsunit/harmony/debug-stepin-proxies.js
@@ -36,13 +36,6 @@ var handler = {
set: function(target, name, value) {
return false; // l
}, // m
- enumerate: function(target) {
- function* keys() { // n
- yield "foo"; // o
- yield "bar"; // p
- } // q
- return keys(); // r
- }, // s
}
var proxy = new Proxy(target, handler);
@@ -52,9 +45,6 @@ debugger; // a
var has = "step" in proxy; // b
var get = proxy.step; // c
proxy.step = 43; // d
-for (var i in proxy) { // e
- log.push(i); // f
-}
Debug.setListener(null); // g
@@ -67,12 +57,5 @@ assertEquals([
"b0", "h4b20", "i2b20", // [[Has]]
"c0", "j4c15", "k2c15", // [[Get]]
"d0", "l4d11", "m2d11", // [[Set]]
- "e14", "r4e14", "q4r11e14", "s2e14", // for-in [[Enumerate]]
- "o6e14", "q4e14", "p6e14", "q4e14", "q4e14", // exhaust iterator
- "e9", // for-in-body
- "h4e9","i2e9", // [[Has]] property
- "f2","foo", "e9", // for-in-body
- "h4e9","i2e9", // [[Has]]property
- "f2","bar", "e9", // for-in-body
"g0"
], log);
diff --git a/deps/v8/test/mjsunit/harmony/destructuring.js b/deps/v8/test/mjsunit/harmony/destructuring.js
index 50f27857ec..b6eb6eab09 100644
--- a/deps/v8/test/mjsunit/harmony/destructuring.js
+++ b/deps/v8/test/mjsunit/harmony/destructuring.js
@@ -1061,8 +1061,8 @@
(function TestForInOfTDZ() {
- assertThrows("'use strict'; let x = {}; for (let [x, y] of {x});", ReferenceError);
- assertThrows("'use strict'; let x = {}; for (let [y, x] of {x});", ReferenceError);
+ assertThrows("'use strict'; let x = {}; for (let [x, y] of [x]);", ReferenceError);
+ assertThrows("'use strict'; let x = {}; for (let [y, x] of [x]);", ReferenceError);
assertThrows("'use strict'; let x = {}; for (let [x, y] in {x});", ReferenceError);
assertThrows("'use strict'; let x = {}; for (let [y, x] in {x});", ReferenceError);
}());
diff --git a/deps/v8/test/mjsunit/harmony/do-expressions.js b/deps/v8/test/mjsunit/harmony/do-expressions.js
index e7e513a230..3aace577d5 100644
--- a/deps/v8/test/mjsunit/harmony/do-expressions.js
+++ b/deps/v8/test/mjsunit/harmony/do-expressions.js
@@ -4,7 +4,6 @@
// Flags: --harmony-do-expressions --harmony-sloppy-let --allow-natives-syntax
// Flags: --harmony-default-parameters --harmony-destructuring-bind
-// Flags: --harmony-completion
function returnValue(v) { return v; }
function MyError() {}
diff --git a/deps/v8/test/mjsunit/harmony/function-name.js b/deps/v8/test/mjsunit/harmony/function-name.js
index 8ca5d8209a..7bb1f6ae01 100644
--- a/deps/v8/test/mjsunit/harmony/function-name.js
+++ b/deps/v8/test/mjsunit/harmony/function-name.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
//
// Flags: --harmony-function-name
+// Flags: --harmony-destructuring-bind --harmony-destructuring-assignment
(function testVariableDeclarationsFunction() {
'use strict';
@@ -89,36 +90,59 @@
assertEquals('set 44', descriptor.set.name);
})();
-// TODO(adamk): Make computed property names work.
(function testComputedProperties() {
'use strict';
var a = 'a';
+ var b = 'b';
var sym1 = Symbol('1');
var sym2 = Symbol('2');
+ var sym3 = Symbol('3');
+ var symNoDescription = Symbol();
var obj = {
[a]: function() {},
[sym1]: function() {},
[sym2]: function withName() {},
+ [symNoDescription]: function() {},
+
+ get [sym3]() {},
+ set [b](val) {},
};
- // Should be 'a'
- assertEquals('', obj[a].name);
- // Should be '[1]'
- assertEquals('', obj[sym1].name);
+ assertEquals('a', obj[a].name);
+ assertEquals('[1]', obj[sym1].name);
assertEquals('withName', obj[sym2].name);
+ assertEquals('', obj[symNoDescription].name);
+
+ assertEquals('get [3]', Object.getOwnPropertyDescriptor(obj, sym3).get.name);
+ assertEquals('set b', Object.getOwnPropertyDescriptor(obj, 'b').set.name);
+
+ var objMethods = {
+ [a]() {},
+ [sym1]() {},
+ [symNoDescription]: function() {},
+ };
+
+ assertEquals('a', objMethods[a].name);
+ assertEquals('[1]', objMethods[sym1].name);
+ assertEquals('', objMethods[symNoDescription].name);
class C {
[a]() { }
[sym1]() { }
static [sym2]() { }
+ [symNoDescription]() { }
+
+ get [sym3]() { }
+ static set [b](val) { }
}
- // Should be 'a'
- assertEquals('', C.prototype[a].name);
- // Should be '[1]'
- assertEquals('', C.prototype[sym1].name);
- // Should be '[2]'
- assertEquals('', C[sym2].name);
+ assertEquals('a', C.prototype[a].name);
+ assertEquals('[1]', C.prototype[sym1].name);
+ assertEquals('[2]', C[sym2].name);
+ assertEquals('', C.prototype[symNoDescription].name);
+
+ assertEquals('get [3]', Object.getOwnPropertyDescriptor(C.prototype, sym3).get.name);
+ assertEquals('set b', Object.getOwnPropertyDescriptor(C, 'b').set.name);
})();
@@ -159,3 +183,191 @@
classLit = class { constructor() {} static get ['name']() { return true; } };
assertTrue(classLit.name);
})();
+
+(function testObjectBindingPattern() {
+ var {
+ a = function() {},
+ b = () => {},
+ x = function withName() { },
+ y = class { },
+ z = class ClassName { },
+ q = class { static name() { return 42 } },
+ foo: bar = function() {},
+ inParens = (() => {}),
+ inManyParens = ((((() => {})))),
+ } = {};
+ assertEquals('a', a.name);
+ assertEquals('b', b.name);
+ assertEquals('withName', x.name);
+ assertEquals('y', y.name);
+ assertEquals('ClassName', z.name);
+ assertEquals('function', typeof q.name);
+ assertEquals('bar', bar.name);
+ assertEquals('inParens', inParens.name)
+ assertEquals('inManyParens', inManyParens.name)
+})();
+
+(function testArrayBindingPattern() {
+ var [
+ a = function() {},
+ b = () => {},
+ x = function withName() { },
+ y = class { },
+ z = class ClassName { },
+ q = class { static name() { return 42 } },
+ inParens = (() => {}),
+ inManyParens = ((((() => {})))),
+ ] = [];
+ assertEquals('a', a.name);
+ assertEquals('b', b.name);
+ assertEquals('withName', x.name);
+ assertEquals('y', y.name);
+ assertEquals('ClassName', z.name);
+ assertEquals('function', typeof q.name);
+ assertEquals('inParens', inParens.name)
+ assertEquals('inManyParens', inManyParens.name)
+})();
+
+(function testObjectAssignmentPattern() {
+ var a, b, x, y, z, q;
+ ({
+ a = function() {},
+ b = () => {},
+ x = function withName() { },
+ y = class { },
+ z = class ClassName { },
+ q = class { static name() { return 42 } },
+ foo: bar = function() {},
+ inParens = (() => {}),
+ inManyParens = ((((() => {})))),
+ } = {});
+ assertEquals('a', a.name);
+ assertEquals('b', b.name);
+ assertEquals('withName', x.name);
+ assertEquals('y', y.name);
+ assertEquals('ClassName', z.name);
+ assertEquals('function', typeof q.name);
+ assertEquals('bar', bar.name);
+ assertEquals('inParens', inParens.name)
+ assertEquals('inManyParens', inManyParens.name)
+})();
+
+(function testArrayAssignmentPattern() {
+ var a, b, x, y, z, q;
+ [
+ a = function() {},
+ b = () => {},
+ x = function withName() { },
+ y = class { },
+ z = class ClassName { },
+ q = class { static name() { return 42 } },
+ inParens = (() => {}),
+ inManyParens = ((((() => {})))),
+ ] = [];
+ assertEquals('a', a.name);
+ assertEquals('b', b.name);
+ assertEquals('withName', x.name);
+ assertEquals('y', y.name);
+ assertEquals('ClassName', z.name);
+ assertEquals('function', typeof q.name);
+ assertEquals('inParens', inParens.name)
+ assertEquals('inManyParens', inManyParens.name)
+})();
+
+(function testParameterDestructuring() {
+ (function({ a = function() {},
+ b = () => {},
+ x = function withName() { },
+ y = class { },
+ z = class ClassName { },
+ q = class { static name() { return 42 } },
+ foo: bar = function() {},
+ inParens = (() => {}),
+ inManyParens = ((((() => {})))) }) {
+ assertEquals('a', a.name);
+ assertEquals('b', b.name);
+ assertEquals('withName', x.name);
+ assertEquals('y', y.name);
+ assertEquals('ClassName', z.name);
+ assertEquals('function', typeof q.name);
+ assertEquals('bar', bar.name);
+ assertEquals('inParens', inParens.name)
+ assertEquals('inManyParens', inManyParens.name)
+ })({});
+
+ (function([ a = function() {},
+ b = () => {},
+ x = function withName() { },
+ y = class { },
+ z = class ClassName { },
+ q = class { static name() { return 42 } },
+ inParens = (() => {}),
+ inManyParens = ((((() => {})))) ]) {
+ assertEquals('a', a.name);
+ assertEquals('b', b.name);
+ assertEquals('withName', x.name);
+ assertEquals('y', y.name);
+ assertEquals('ClassName', z.name);
+ assertEquals('function', typeof q.name);
+ assertEquals('inParens', inParens.name)
+ assertEquals('inManyParens', inManyParens.name)
+ })([]);
+})();
+
+(function testDefaultParameters() {
+ (function(a = function() {},
+ b = () => {},
+ x = function withName() { },
+ y = class { },
+ z = class ClassName { },
+ q = class { static name() { return 42 } },
+ inParens = (() => {}),
+ inManyParens = ((((() => {}))))) {
+ assertEquals('a', a.name);
+ assertEquals('b', b.name);
+ assertEquals('withName', x.name);
+ assertEquals('y', y.name);
+ assertEquals('ClassName', z.name);
+ assertEquals('function', typeof q.name);
+ assertEquals('inParens', inParens.name)
+ assertEquals('inManyParens', inManyParens.name)
+ })();
+})();
+
+(function testComputedNameNotShared() {
+ function makeClass(propName) {
+ return class {
+ static [propName]() {}
+ }
+ }
+
+ var sym1 = Symbol('1');
+ var sym2 = Symbol('2');
+ var class1 = makeClass(sym1);
+ assertEquals('[1]', class1[sym1].name);
+ var class2 = makeClass(sym2);
+ assertEquals('[2]', class2[sym2].name);
+ assertEquals('[1]', class1[sym1].name);
+})();
+
+
+(function testComputedNamesOnlyAppliedSyntactically() {
+ function factory() { return () => {}; }
+
+ var obj = { ['foo']: factory() };
+ assertEquals('', obj.foo.name);
+})();
+
+
+(function testNameNotReflectedInToString() {
+ var f = function() {};
+ var g = function*() {};
+ var obj = {
+ ['h']: function() {},
+ i: () => {}
+ };
+ assertEquals('function () {}', f.toString());
+ assertEquals('function* () {}', g.toString());
+ assertEquals('function () {}', obj.h.toString());
+ assertEquals('() => {}', obj.i.toString());
+})();
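
The suite above exercises the ES2015 name-assignment rules from many angles; the thread tying them together is that a name is only ever copied syntactically, as testComputedNamesOnlyAppliedSyntactically pins down. A minimal sketch of that rule (variable names here are illustrative, not from the patch):

    const direct = () => {};             // anonymous literal on the RHS: direct.name === "direct"
    const viaCall = (() => () => {})();  // value produced by a call: viaCall.name === ""
    const obj = { ['k']: viaCall };      // non-literal value behind a computed key: obj.k.name stays ""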
diff --git a/deps/v8/test/mjsunit/harmony/function-sent.js b/deps/v8/test/mjsunit/harmony/function-sent.js
new file mode 100644
index 0000000000..b3cd644dd9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/function-sent.js
@@ -0,0 +1,90 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-function-sent
+
+
+{
+ function* g() { return function.sent }
+ assertEquals({value: 42, done: true}, g().next(42));
+}
+
+
+{
+ function* g() {
+ try {
+ yield function.sent;
+ } finally {
+ yield function.sent;
+ return function.sent;
+ }
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next(1));
+ assertEquals({value: 2, done: false}, x.next(2));
+ assertEquals({value: 3, done: true}, x.next(3));
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next(1));
+ assertEquals({value: 2, done: false}, x.throw(2));
+ assertEquals({value: 3, done: true}, x.next(3));
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next(1));
+ assertEquals({value: 2, done: false}, x.return(2));
+ assertEquals({value: 3, done: true}, x.next(3));
+ }
+}
+
+
+{
+ function* inner() {
+ try {
+ yield function.sent;
+ } finally {
+ return 666;
+ }
+ }
+
+ function* g() {
+ yield function.sent;
+ yield* inner();
+ return function.sent;
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next(1));
+ assertEquals({value: undefined, done: false}, x.next(2));
+ assertEquals({value: 3, done: true}, x.next(3));
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next(1));
+ assertEquals({value: undefined, done: false}, x.next(2));
+ assertEquals({value: 42, done: true}, x.throw(42));
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next(1));
+ assertEquals({value: undefined, done: false}, x.next(2));
+ assertEquals({value: 42, done: true}, x.return(42));
+ }
+}
+
+
+assertThrows("function f() { return function.sent }", SyntaxError);
+assertThrows("() => { return function.sent }", SyntaxError);
+assertThrows("() => { function.sent }", SyntaxError);
+assertThrows("() => function.sent", SyntaxError);
+assertThrows("({*f() { function.sent }})", SyntaxError);
+assertDoesNotThrow("({*f() { return function.sent }})");
diff --git a/deps/v8/test/mjsunit/harmony/generators.js b/deps/v8/test/mjsunit/harmony/generators.js
new file mode 100644
index 0000000000..5b045049e9
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/generators.js
@@ -0,0 +1,252 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+{ // yield in try-catch
+
+ let g = function*() {
+ try {yield 1} catch (error) {assertEquals("caught", error)}
+ };
+
+ assertThrowsEquals(() => g().throw("not caught"), "not caught");
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next());
+ assertEquals({value: undefined, done: true}, x.throw("caught"));
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next());
+ assertEquals({value: undefined, done: true}, x.next());
+ assertThrowsEquals(() => x.throw("not caught"), "not caught");
+ }
+}
+
+
+{ // return that doesn't close
+ let g = function*() { try {return 42} finally {yield 43} };
+
+ {
+ let x = g();
+ assertEquals({value: 43, done: false}, x.next());
+ assertEquals({value: 42, done: true}, x.next());
+ }
+}
+
+
+{ // re-entering a generator that is still executing throws
+ let x;
+ let g = function*() { try {return 42} finally {x.throw(666)} };
+
+ {
+ x = g();
+ assertThrows(() => x.next(), TypeError); // still executing
+ }
+}
+
+
+{ // yield in try-finally, finally clause performs return
+
+ let g = function*() { try {yield 42} finally {return 13} };
+
+ { // "return" closes at suspendedStart
+ let x = g();
+ assertEquals({value: 666, done: true}, x.return(666));
+ assertEquals({value: undefined, done: true}, x.next(42));
+ assertThrowsEquals(() => x.throw(43), 43);
+ assertEquals({value: 42, done: true}, x.return(42));
+ }
+
+ { // "throw" closes at suspendedStart
+ let x = g();
+ assertThrowsEquals(() => x.throw(666), 666);
+ assertEquals({value: undefined, done: true}, x.next(42));
+ assertEquals({value: 43, done: true}, x.return(43));
+ assertThrowsEquals(() => x.throw(44), 44);
+ }
+
+ { // "next" closes at suspendedYield
+ let x = g();
+ assertEquals({value: 42, done: false}, x.next());
+ assertEquals({value: 13, done: true}, x.next(666));
+ assertEquals({value: undefined, done: true}, x.next(666));
+ assertThrowsEquals(() => x.throw(666), 666);
+ }
+
+ { // "return" closes at suspendedYield
+ let x = g();
+ assertEquals({value: 42, done: false}, x.next());
+ assertEquals({value: 13, done: true}, x.return(666));
+ assertEquals({value: undefined, done: true}, x.next(666));
+ assertEquals({value: 666, done: true}, x.return(666));
+ }
+
+ { // "throw" closes at suspendedYield
+ let x = g();
+ assertEquals({value: 42, done: false}, x.next());
+ assertEquals({value: 13, done: true}, x.throw(666));
+ assertThrowsEquals(() => x.throw(666), 666);
+ assertEquals({value: undefined, done: true}, x.next(666));
+ }
+}
+
+
+{ // yield in try-finally, finally clause doesn't perform return
+
+ let g = function*() { try {yield 42} finally {13} };
+
+ { // "return" closes at suspendedStart
+ let x = g();
+ assertEquals({value: 666, done: true}, x.return(666));
+ assertEquals({value: undefined, done: true}, x.next(42));
+ assertThrowsEquals(() => x.throw(43), 43);
+ assertEquals({value: 42, done: true}, x.return(42));
+ }
+
+ { // "throw" closes at suspendedStart
+ let x = g();
+ assertThrowsEquals(() => x.throw(666), 666);
+ assertEquals({value: undefined, done: true}, x.next(42));
+ assertEquals({value: 43, done: true}, x.return(43));
+ assertThrowsEquals(() => x.throw(44), 44);
+ }
+
+ { // "next" closes at suspendedYield
+ let x = g();
+ assertEquals({value: 42, done: false}, x.next());
+ assertEquals({value: undefined, done: true}, x.next(666));
+ assertEquals({value: undefined, done: true}, x.next(666));
+ assertThrowsEquals(() => x.throw(666), 666);
+ assertEquals({value: 42, done: true}, x.return(42));
+ }
+
+ { // "return" closes at suspendedYield
+ let x = g();
+ assertEquals({value: 42, done: false}, x.next());
+ assertEquals({value: 666, done: true}, x.return(666));
+ assertEquals({value: undefined, done: true}, x.next(666));
+ assertThrowsEquals(() => x.throw(44), 44);
+ assertEquals({value: 42, done: true}, x.return(42));
+ }
+
+ { // "throw" closes at suspendedYield
+ let x = g();
+ assertEquals({value: 42, done: false}, x.next());
+ assertThrowsEquals(() => x.throw(666), 666);
+ assertEquals({value: undefined, done: true}, x.next(666));
+ assertThrowsEquals(() => x.throw(666), 666);
+ assertEquals({value: 42, done: true}, x.return(42));
+ }
+}
+
+
+{ // yield in try-finally, finally clause yields and performs return
+
+ let g = function*() { try {yield 42} finally {yield 43; return 13} };
+
+ {
+ let x = g();
+ assertEquals({value: 42, done: false}, x.next());
+ assertEquals({value: 43, done: false}, x.return(666));
+ assertEquals({value: 13, done: true}, x.next());
+ assertEquals({value: 666, done: true}, x.return(666));
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 666, done: true}, x.return(666));
+ assertEquals({value: undefined, done: true}, x.next());
+ assertEquals({value: 666, done: true}, x.return(666));
+ }
+}
+
+
+{ // yield in try-finally, finally clause yields and doesn't perform return
+
+ let g = function*() { try {yield 42} finally {yield 43; 13} };
+
+ {
+ let x = g();
+ assertEquals({value: 42, done: false}, x.next());
+ assertEquals({value: 43, done: false}, x.return(666));
+ assertEquals({value: 666, done: true}, x.next());
+ assertEquals({value: 5, done: true}, x.return(5));
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 666, done: true}, x.return(666));
+ assertEquals({value: undefined, done: true}, x.next());
+ assertEquals({value: 666, done: true}, x.return(666));
+ }
+}
+
+
+{ // yield*, finally clause performs return
+
+ let h = function*() { try {yield 42} finally {yield 43; return 13} };
+ let g = function*() { yield 1; yield yield* h(); };
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next());
+ assertEquals({value: 42, done: false}, x.next());
+ assertEquals({value: 43, done: false}, x.next(666));
+ assertEquals({value: 13, done: false}, x.next());
+ assertEquals({value: undefined, done: true}, x.next());
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next());
+ assertEquals({value: 42, done: false}, x.next());
+ assertEquals({value: 43, done: false}, x.return(666));
+ assertEquals({value: 13, done: false}, x.next());
+ assertEquals({value: undefined, done: true}, x.next());
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next());
+ assertEquals({value: 42, done: false}, x.next());
+ assertEquals({value: 43, done: false}, x.throw(666));
+ assertEquals({value: 13, done: false}, x.next());
+ assertEquals({value: undefined, done: true}, x.next());
+ }
+}
+
+
+{ // yield*, finally clause does not perform return
+
+ let h = function*() { try {yield 42} finally {yield 43; 13} };
+ let g = function*() { yield 1; yield yield* h(); };
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next());
+ assertEquals({value: 42, done: false}, x.next());
+ assertEquals({value: 43, done: false}, x.next(666));
+ assertEquals({value: undefined, done: false}, x.next());
+ assertEquals({value: undefined, done: true}, x.next());
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next());
+ assertEquals({value: 42, done: false}, x.next());
+ assertEquals({value: 43, done: false}, x.return(666));
+ assertEquals({value: undefined, done: false}, x.next());
+ assertEquals({value: undefined, done: true}, x.next());
+ }
+
+ {
+ let x = g();
+ assertEquals({value: 1, done: false}, x.next());
+ assertEquals({value: 42, done: false}, x.next());
+ assertEquals({value: 43, done: false}, x.throw(666));
+ assertThrowsEquals(() => x.next(), 666);
+ }
+}
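
Most of these cases reduce to one rule: return() and throw() resume execution inside an enclosing finally, and a yield there suspends the close before the pending completion is delivered. The hand-off in miniature (consistent with the "finally clause yields" blocks above):

    function* g() { try { yield 1; } finally { yield 2; } }
    const it = g();
    it.next();      // {value: 1, done: false}  suspended inside the try
    it.return(99);  // {value: 2, done: false}  the finally intercepts the close
    it.next();      // {value: 99, done: true}  the pending return now completes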
diff --git a/deps/v8/test/mjsunit/harmony/instanceof-es6.js b/deps/v8/test/mjsunit/harmony/instanceof-es6.js
new file mode 100644
index 0000000000..60e7ee2c39
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/instanceof-es6.js
@@ -0,0 +1,50 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-instanceof
+
+// Make sure it's an error if @@hasInstance isn't a function.
+(function() {
+ var F = {};
+ F[Symbol.hasInstance] = null;
+ assertThrows(function() { 0 instanceof F; }, TypeError);
+})();
+
+// Make sure the result is coerced to boolean.
+(function() {
+ var F = {};
+ F[Symbol.hasInstance] = function() { return undefined; };
+ assertEquals(0 instanceof F, false);
+ F[Symbol.hasInstance] = function() { return null; };
+ assertEquals(0 instanceof F, false);
+ F[Symbol.hasInstance] = function() { return true; };
+ assertEquals(0 instanceof F, true);
+})();
+
+// Make sure if @@hasInstance throws, we catch it.
+(function() {
+ var F = {};
+ F[Symbol.hasInstance] = function() { throw new Error("always throws"); }
+ try {
+ 0 instanceof F;
+ } catch (e) {
+ assertEquals(e.message, "always throws");
+ }
+})();
+
+// @@hasInstance works for bound functions.
+(function() {
+ var BC = function() {};
+ var bc = new BC();
+ var bound = BC.bind();
+ assertEquals(bound[Symbol.hasInstance](bc), true);
+ assertEquals(bound[Symbol.hasInstance]([]), false);
+})();
+
+// OrdinaryHasInstance returns false when its receiver is not callable.
+assertEquals(Function.prototype[Symbol.hasInstance].call(Array, []), true);
+assertEquals(Function.prototype[Symbol.hasInstance].call({}, {}), false);
+
+// OrdinaryHasInstance passed a non-object argument returns false.
+assertEquals(Function.prototype[Symbol.hasInstance].call(Array, 0), false);
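
What these cases collectively probe is the dispatch order of the ES2015 instanceof operator. A rough, self-contained sketch of that order follows; the function names are mine, and the bound-function unwrapping that the fourth test relies on is elided:

    function ordinaryHasInstance(C, O) {
      if (typeof C !== "function") return false;    // non-callable receiver
      if (O === null || typeof O !== "object") return false;
      return C.prototype.isPrototypeOf(O);          // prototype-chain walk
    }

    function instanceOf(V, target) {
      if (target === null ||
          (typeof target !== "object" && typeof target !== "function")) {
        throw new TypeError("Right-hand side of instanceof is not an object");
      }
      const m = target[Symbol.hasInstance];
      if (m !== undefined && m !== null) {
        return Boolean(m.call(target, V));          // result coerced to boolean
      }
      if (typeof target !== "function") {
        throw new TypeError("Right-hand side of instanceof is not callable");
      }
      return ordinaryHasInstance(target, V);
    }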
diff --git a/deps/v8/test/mjsunit/harmony/iterator-close.js b/deps/v8/test/mjsunit/harmony/iterator-close.js
new file mode 100644
index 0000000000..94785de51f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/iterator-close.js
@@ -0,0 +1,364 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-iterator-close
+
+function* g() { yield 42; return 88 };
+
+
+// Return method is "undefined".
+{
+ g.prototype.return = null;
+
+ assertEquals(undefined, (() => {
+ for (let x of g()) { break; }
+ })());
+
+ assertEquals(undefined, (() => {
+ for (x of g()) { break; }
+ })());
+
+ assertThrowsEquals(() => {
+ for (let x of g()) { throw 42; }
+ }, 42);
+
+ assertThrowsEquals(() => {
+ for (x of g()) { throw 42; }
+ }, 42);
+
+ assertEquals(42, (() => {
+ for (let x of g()) { return 42; }
+ })());
+
+ assertEquals(42, (() => {
+ for (x of g()) { return 42; }
+ })());
+
+ assertEquals(42, eval('for (let x of g()) { x; }'));
+
+  assertEquals(42, eval('for (x of g()) { x; }'));
+}
+
+
+// Return method is not callable.
+{
+ g.prototype.return = 666;
+
+ assertThrows(() => {
+ for (let x of g()) { break; }
+ }, TypeError);
+
+ assertThrows(() => {
+ for (x of g()) { break; }
+ }, TypeError);
+
+ assertThrows(() => {
+ for (let x of g()) { throw 666; }
+ }, TypeError);
+
+ assertThrows(() => {
+ for (x of g()) { throw 666; }
+ }, TypeError);
+
+ assertThrows(() => {
+ for (let x of g()) { return 666; }
+ }, TypeError);
+
+ assertThrows(() => {
+ for (x of g()) { return 666; }
+ }, TypeError);
+
+ assertEquals(42, eval('for (let x of g()) { x; }'));
+
+  assertEquals(42, eval('for (x of g()) { x; }'));
+}
+
+
+// Return method does not return an object.
+{
+ g.prototype.return = () => 666;
+
+ assertThrows(() => {
+ for (let x of g()) { break; }
+ }, TypeError);
+
+ assertThrows(() => {
+ for (x of g()) { break; }
+ }, TypeError);
+
+ assertThrows(() => {
+ for (let x of g()) { throw 666; }
+ }, TypeError);
+
+ assertThrows(() => {
+ for (x of g()) { throw 666; }
+ }, TypeError);
+
+ assertThrows(() => {
+ for (let x of g()) { return 666; }
+ }, TypeError);
+
+ assertThrows(() => {
+ for (x of g()) { return 666; }
+ }, TypeError);
+
+ assertEquals(42, eval('for (let x of g()) { x; }'));
+
+ assertEquals(42, eval('for (x of g()) { x; }'));
+}
+
+
+// Return method returns an object.
+{
+ let log = [];
+ g.prototype.return = (...args) => { log.push(args); return {} };
+
+ log = [];
+ for (let x of g()) { break; }
+ assertEquals([[]], log);
+
+ log = [];
+ for (x of g()) { break; }
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (let x of g()) { throw 42; }
+ }, 42);
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (x of g()) { throw 42; }
+ }, 42);
+ assertEquals([[]], log);
+
+ log = [];
+ assertEquals(42, (() => {
+ for (let x of g()) { return 42; }
+ })());
+ assertEquals([[]], log);
+
+ log = [];
+ assertEquals(42, (() => {
+ for (x of g()) { return 42; }
+ })());
+ assertEquals([[]], log);
+
+ log = [];
+ assertEquals(42, eval('for (let x of g()) { x; }'));
+ assertEquals([], log);
+
+ log = [];
+ assertEquals(42, eval('for (x of g()) { x; }'));
+ assertEquals([], log);
+}
+
+
+// Return method throws.
+{
+ let log = [];
+ g.prototype.return = (...args) => { log.push(args); throw 23 };
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (let x of g()) { break; }
+ }, 23);
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (x of g()) { break; }
+ }, 23);
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (let x of g()) { throw 42; }
+ }, 42);
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (x of g()) { throw 42; }
+ }, 42);
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (let x of g()) { return 42; }
+ }, 23);
+ assertEquals([[]], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (x of g()) { return 42; }
+ }, 23);
+ assertEquals([[]], log);
+
+ log = [];
+ assertEquals(42, eval('for (let x of g()) { x; }'));
+ assertEquals([], log);
+
+ log = [];
+ assertEquals(42, eval('for (x of g()) { x; }'));
+ assertEquals([], log);
+}
+
+
+// Next method throws.
+{
+ g.prototype.next = () => { throw 666; };
+ g.prototype.return = () => { assertUnreachable() };
+
+ assertThrowsEquals(() => {
+ for (let x of g()) {}
+ }, 666);
+
+ assertThrowsEquals(() => {
+ for (x of g()) {}
+ }, 666);
+}
+
+
+// Nested loops.
+{
+ function* g1() { yield 1; yield 2; throw 3; }
+ function* g2() { yield -1; yield -2; throw -3; }
+
+ assertDoesNotThrow(() => {
+ for (let x of g1()) {
+ for (let y of g2()) {
+ if (y == -2) break;
+ }
+ if (x == 2) break;
+ }
+  });
+
+ assertThrowsEquals(() => {
+ for (let x of g1()) {
+ for (let y of g2()) {
+ }
+ }
+ }, -3);
+
+ assertThrowsEquals(() => {
+ for (let x of g1()) {
+ for (let y of g2()) {
+ if (y == -2) break;
+ }
+ }
+ }, 3);
+
+ assertDoesNotThrow(() => {
+ l: for (let x of g1()) {
+ for (let y of g2()) {
+ if (y == -2) break l;
+ }
+ }
+ });
+
+ assertThrowsEquals(() => {
+ for (let x of g1()) {
+ for (let y of g2()) {
+ throw 4;
+ }
+ }
+ }, 4);
+
+ assertThrowsEquals(() => {
+ for (let x of g1()) {
+ for (let y of g2()) {
+ if (y == -2) throw 4;
+ }
+ }
+ }, 4);
+
+ let log = [];
+ g1.prototype.return = () => { log.push(1); throw 5 };
+ g2.prototype.return = () => { log.push(2); throw -5 };
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (let x of g1()) {
+ for (let y of g2()) {
+ if (y == -2) break;
+ }
+ if (x == 2) break;
+ }
+ }, -5);
+ assertEquals([2, 1], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (let x of g1()) {
+ for (let y of g2()) {
+ }
+ }
+ }, -3);
+ assertEquals([1], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (let x of g1()) {
+ for (let y of g2()) {
+ if (y == -2) break;
+ }
+ }
+ }, -5);
+ assertEquals([2, 1], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ l: for (let x of g1()) {
+ for (let y of g2()) {
+ if (y == -2) break l;
+ }
+ }
+ }, -5);
+ assertEquals([2, 1], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (let x of g1()) {
+ for (let y of g2()) {
+ throw 4;
+ }
+ }
+ }, 4);
+ assertEquals([2, 1], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (let x of g1()) {
+ for (let y of g2()) {
+ if (y == -2) throw 4;
+ }
+ }
+ }, 4);
+ assertEquals([2, 1], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (let x of g1()) {
+ try {
+ for (let y of g2()) {
+ }
+ } catch (_) {}
+ }
+ }, 3);
+ assertEquals([], log);
+
+ log = [];
+ assertThrowsEquals(() => {
+ for (let x of g1()) {
+ try {
+ for (let y of g2()) {
+ }
+ } catch (_) {}
+ if (x == 2) break;
+ }
+ }, 5);
+ assertEquals([1], log);
+}
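
Every "break" row above funnels through the spec's IteratorClose step. A minimal sketch of just that step on an early, non-throw exit — the error ordering in the throw rows follows the assertions above, not this sketch:

    function closeIterator(it) {
      const ret = it.return;
      if (ret === undefined || ret === null) return;  // nothing to close
      if (typeof ret !== "function") {
        throw new TypeError("return method is not callable");
      }
      const result = ret.call(it);                    // exceptions propagate on break
      if (Object(result) !== result) {
        throw new TypeError("iterator result is not an object");
      }
    }

Conceptually, `for (let x of g()) { break; }` performs this call on the suspended iterator before control leaves the loop, which is where the TypeErrors and the thrown 23 in the tests come from.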
diff --git a/deps/v8/test/mjsunit/harmony/object-entries.js b/deps/v8/test/mjsunit/harmony/object-entries.js
new file mode 100644
index 0000000000..58af4d6f33
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/object-entries.js
@@ -0,0 +1,249 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-object-values-entries --harmony-proxies --harmony-reflect
+// Flags: --allow-natives-syntax
+
+function TestMeta() {
+ assertEquals(1, Object.entries.length);
+ assertEquals(Function.prototype, Object.getPrototypeOf(Object.entries));
+ assertEquals("entries", Object.entries.name);
+
+ var descriptor = Object.getOwnPropertyDescriptor(Object, "entries");
+ assertTrue(descriptor.writable);
+ assertFalse(descriptor.enumerable);
+ assertTrue(descriptor.configurable);
+
+ assertThrows(() => new Object.entries({}), TypeError);
+}
+TestMeta();
+
+
+function TestBasic() {
+ var x = 16;
+ var O = {
+ d: 1,
+ c: 3,
+ [Symbol.iterator]: void 0,
+ 0: 123,
+ 1000: 456,
+ [x * x]: "ducks",
+ [`0x${(x * x).toString(16)}`]: "quack"
+ };
+ O.a = 2;
+ O.b = 4;
+ Object.defineProperty(O, "HIDDEN", { enumerable: false, value: NaN });
+ assertEquals([
+ ["0", 123],
+ ["256", "ducks"],
+ ["1000", 456],
+ ["d", 1],
+ ["c", 3],
+ ["0x100", "quack"],
+ ["a", 2],
+ ["b", 4]
+ ], Object.entries(O));
+ assertEquals(Object.entries(O), Object.keys(O).map(key => [key, O[key]]));
+
+ assertTrue(Array.isArray(Object.entries({})));
+ assertEquals(0, Object.entries({}).length);
+}
+TestBasic();
+
+
+function TestToObject() {
+ assertThrows(function() { Object.entries(); }, TypeError);
+ assertThrows(function() { Object.entries(null); }, TypeError);
+ assertThrows(function() { Object.entries(void 0); }, TypeError);
+}
+TestToObject();
+
+
+function TestOrder() {
+ var O = {
+ a: 1,
+ [Symbol.iterator]: null
+ };
+ O[456] = 123;
+ Object.defineProperty(O, "HIDDEN", { enumerable: false, value: NaN });
+ var priv = %CreatePrivateSymbol("Secret");
+ O[priv] = 56;
+
+ var log = [];
+ var P = new Proxy(O, {
+ ownKeys(target) {
+ log.push("[[OwnPropertyKeys]]");
+ return Reflect.ownKeys(target);
+ },
+ get(target, name) {
+ log.push(`[[Get]](${JSON.stringify(name)})`);
+ return Reflect.get(target, name);
+ },
+ getOwnPropertyDescriptor(target, name) {
+ log.push(`[[GetOwnProperty]](${JSON.stringify(name)})`);
+ return Reflect.getOwnPropertyDescriptor(target, name);
+ },
+ set(target, name, value) {
+ assertUnreachable();
+ }
+ });
+
+ assertEquals([["456", 123], ["a", 1]], Object.entries(P));
+ assertEquals([
+ "[[OwnPropertyKeys]]",
+ "[[GetOwnProperty]](\"456\")",
+ "[[Get]](\"456\")",
+ "[[GetOwnProperty]](\"a\")",
+ "[[Get]](\"a\")",
+ "[[GetOwnProperty]](\"HIDDEN\")"
+ ], log);
+}
+TestOrder();
+
+
+function TestOrderWithDuplicates() {
+ var O = {
+ a: 1,
+ [Symbol.iterator]: null
+ };
+ O[456] = 123;
+ Object.defineProperty(O, "HIDDEN", { enumerable: false, value: NaN });
+ var priv = %CreatePrivateSymbol("Secret");
+ O[priv] = 56;
+
+ var log = [];
+ var P = new Proxy(O, {
+ ownKeys(target) {
+ log.push("[[OwnPropertyKeys]]");
+ return ["a", Symbol.iterator, "a", "456", "HIDDEN", "HIDDEN", "456"];
+ },
+ get(target, name) {
+ log.push(`[[Get]](${JSON.stringify(name)})`);
+ return Reflect.get(target, name);
+ },
+ getOwnPropertyDescriptor(target, name) {
+ log.push(`[[GetOwnProperty]](${JSON.stringify(name)})`);
+ return Reflect.getOwnPropertyDescriptor(target, name);
+ },
+ set(target, name, value) {
+ assertUnreachable();
+ }
+ });
+
+ assertEquals([
+ ["a", 1],
+ ["a", 1],
+ ["456", 123],
+ ["456", 123]
+ ], Object.entries(P));
+ assertEquals([
+ "[[OwnPropertyKeys]]",
+ "[[GetOwnProperty]](\"a\")",
+ "[[Get]](\"a\")",
+ "[[GetOwnProperty]](\"a\")",
+ "[[Get]](\"a\")",
+ "[[GetOwnProperty]](\"456\")",
+ "[[Get]](\"456\")",
+ "[[GetOwnProperty]](\"HIDDEN\")",
+ "[[GetOwnProperty]](\"HIDDEN\")",
+ "[[GetOwnProperty]](\"456\")",
+ "[[Get]](\"456\")"
+ ], log);
+}
+TestOrderWithDuplicates();
+
+
+function TestPropertyFilter() {
+ var object = { prop3: 30 };
+ object[2] = 40;
+ object["prop4"] = 50;
+ Object.defineProperty(object, "prop5", { value: 60, enumerable: true });
+ Object.defineProperty(object, "prop6", { value: 70, enumerable: false });
+ Object.defineProperty(object, "prop7", {
+ enumerable: true, get() { return 80; }});
+ var sym = Symbol("prop8");
+ object[sym] = 90;
+
+ values = Object.entries(object);
+ assertEquals(5, values.length);
+ assertEquals([
+ [ "2", 40 ],
+ [ "prop3", 30 ],
+ [ "prop4", 50 ],
+ [ "prop5", 60 ],
+ [ "prop7", 80 ]
+ ], values);
+}
+TestPropertyFilter();
+
+
+function TestWithProxy() {
+ var obj1 = {prop1:10};
+ var proxy1 = new Proxy(obj1, { });
+ assertEquals([ [ "prop1", 10 ] ], Object.entries(proxy1));
+
+ var obj2 = {};
+ Object.defineProperty(obj2, "prop2", { value: 20, enumerable: true });
+ Object.defineProperty(obj2, "prop3", {
+ get() { return 30; }, enumerable: true });
+ var proxy2 = new Proxy(obj2, {
+ getOwnPropertyDescriptor(target, name) {
+ return Reflect.getOwnPropertyDescriptor(target, name);
+ }
+ });
+ assertEquals([ [ "prop2", 20 ], [ "prop3", 30 ] ], Object.entries(proxy2));
+
+ var obj3 = {};
+ var count = 0;
+ var proxy3 = new Proxy(obj3, {
+ get(target, property, receiver) {
+ return count++ * 5;
+ },
+ getOwnPropertyDescriptor(target, property) {
+ return { configurable: true, enumerable: true };
+ },
+ ownKeys(target) {
+ return [ "prop0", "prop1", Symbol("prop2"), Symbol("prop5") ];
+ }
+ });
+ assertEquals([ [ "prop0", 0 ], [ "prop1", 5 ] ], Object.entries(proxy3));
+}
+TestWithProxy();
+
+
+function TestMutateDuringEnumeration() {
+ var aDeletesB = {
+ get a() {
+ delete this.b;
+ return 1;
+ },
+ b: 2
+ };
+ assertEquals([ [ "a", 1 ] ], Object.entries(aDeletesB));
+
+ var aRemovesB = {
+ get a() {
+ Object.defineProperty(this, "b", { enumerable: false });
+ return 1;
+ },
+ b: 2
+ };
+ assertEquals([ [ "a", 1 ] ], Object.entries(aRemovesB));
+
+ var aAddsB = { get a() { this.b = 2; return 1; } };
+ assertEquals([ [ "a", 1 ] ], Object.entries(aAddsB));
+
+ var aMakesBEnumerable = {};
+ Object.defineProperty(aMakesBEnumerable, "a", {
+ get() {
+ Object.defineProperty(this, "b", { enumerable: true });
+ return 1;
+ },
+ enumerable: true
+ });
+ Object.defineProperty(aMakesBEnumerable, "b", {
+ value: 2, configurable:true, enumerable: false });
+ assertEquals([ [ "a", 1 ], [ "b", 2 ] ], Object.entries(aMakesBEnumerable));
+}
+TestMutateDuringEnumeration();
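
TestOrder and TestOrderWithDuplicates fix the trap sequence exactly: one [[OwnPropertyKeys]], then per string key a [[GetOwnProperty]] and, only for enumerable properties, a [[Get]]. An illustrative shape of Object.entries consistent with that log — not V8's implementation — is:

    function objectEntries(o) {
      if (o === null || o === undefined) {
        throw new TypeError("cannot convert to object");         // ToObject
      }
      const obj = Object(o);
      const entries = [];
      for (const key of Reflect.ownKeys(obj)) {                  // [[OwnPropertyKeys]] once
        if (typeof key !== "string") continue;                   // symbols are filtered out
        const desc = Reflect.getOwnPropertyDescriptor(obj, key); // [[GetOwnProperty]]
        if (desc !== undefined && desc.enumerable) {
          entries.push([key, obj[key]]);                         // [[Get]] only if enumerable
        }
      }
      return entries;
    }

Because the ownKeys result is walked as-is, duplicated keys yield duplicated entries, matching TestOrderWithDuplicates.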
diff --git a/deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js b/deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js
new file mode 100644
index 0000000000..b23e7d6e02
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/object-get-own-property-descriptors.js
@@ -0,0 +1,206 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-object-own-property-descriptors
+// Flags: --harmony-proxies --harmony-reflect --allow-natives-syntax
+
+function DataDescriptor(value) {
+ return { "enumerable": true, "configurable": true, "writable": true, value };
+}
+
+
+function TestMeta() {
+ assertEquals(1, Object.getOwnPropertyDescriptors.length);
+ assertEquals(Function.prototype,
+ Object.getPrototypeOf(Object.getOwnPropertyDescriptors));
+ assertEquals(
+ 'getOwnPropertyDescriptors', Object.getOwnPropertyDescriptors.name);
+ var desc = Reflect.getOwnPropertyDescriptor(
+ Object, 'getOwnPropertyDescriptors');
+ assertFalse(desc.enumerable);
+ assertTrue(desc.writable);
+ assertTrue(desc.configurable);
+}
+TestMeta();
+
+
+function TestToObject() {
+ assertThrows(function() {
+ Object.getOwnPropertyDescriptors(null);
+ }, TypeError);
+
+ assertThrows(function() {
+ Object.getOwnPropertyDescriptors(undefined);
+ }, TypeError);
+
+ assertThrows(function() {
+ Object.getOwnPropertyDescriptors();
+ }, TypeError);
+}
+TestToObject();
+
+
+function TestPrototypeProperties() {
+ function F() {};
+ F.prototype.a = "A";
+ F.prototype.b = "B";
+
+ var F2 = new F();
+ Object.defineProperties(F2, {
+ "b": {
+ enumerable: false,
+ configurable: true,
+ writable: false,
+ value: "Shadowed 'B'"
+ },
+ "c": {
+ enumerable: false,
+ configurable: true,
+ writable: false,
+ value: "C"
+ }
+ });
+
+ assertEquals({
+ "b": {
+ enumerable: false,
+ configurable: true,
+ writable: false,
+ value: "Shadowed 'B'"
+ },
+ "c": {
+ enumerable: false,
+ configurable: true,
+ writable: false,
+ value: "C"
+ }
+ }, Object.getOwnPropertyDescriptors(F2));
+}
+TestPrototypeProperties();
+
+
+function TestTypeFilteringAndOrder() {
+ var log = [];
+ var sym = Symbol("foo");
+ var psym = %CreatePrivateSymbol("private");
+ var O = {
+ 0: 0,
+ [sym]: 3,
+ "a": 2,
+ [psym]: 4,
+ 1: 1,
+ };
+ var P = new Proxy(O, {
+ ownKeys(target) {
+ log.push("ownKeys()");
+ return Reflect.ownKeys(target);
+ },
+ getOwnPropertyDescriptor(target, name) {
+ log.push(`getOwnPropertyDescriptor(${String(name)})`);
+ return Reflect.getOwnPropertyDescriptor(target, name);
+ },
+ get(target, name) { assertUnreachable(); },
+ set(target, name, value) { assertUnreachable(); },
+ deleteProperty(target, name) { assertUnreachable(); },
+ defineProperty(target, name, desc) { assertUnreachable(); }
+ });
+
+ var result1 = Object.getOwnPropertyDescriptors(O);
+ assertEquals({
+ 0: DataDescriptor(0),
+ 1: DataDescriptor(1),
+ "a": DataDescriptor(2),
+ [sym]: DataDescriptor(3)
+ }, result1);
+
+ var result2 = Object.getOwnPropertyDescriptors(P);
+ assertEquals([
+ "ownKeys()",
+ "getOwnPropertyDescriptor(0)",
+ "getOwnPropertyDescriptor(1)",
+ "getOwnPropertyDescriptor(a)",
+ "getOwnPropertyDescriptor(Symbol(foo))"
+ ], log);
+ assertEquals({
+ 0: DataDescriptor(0),
+ 1: DataDescriptor(1),
+ "a": DataDescriptor(2),
+ [sym]: DataDescriptor(3)
+ }, result2);
+}
+TestTypeFilteringAndOrder();
+
+
+function TestDuplicateKeys() {
+ var i = 0;
+ var log = [];
+ var P = new Proxy({}, {
+ ownKeys() {
+ log.push(`ownKeys()`);
+ return ["A", "A"];
+ },
+ getOwnPropertyDescriptor(t, name) {
+ log.push(`getOwnPropertyDescriptor(${name})`);
+ if (i++) return;
+ return {
+ configurable: true,
+ writable: false,
+ value: "VALUE"
+ };
+ },
+ get(target, name) { assertUnreachable(); },
+ set(target, name, value) { assertUnreachable(); },
+ deleteProperty(target, name) { assertUnreachable(); },
+ defineProperty(target, name, desc) { assertUnreachable(); }
+ });
+
+ var result = Object.getOwnPropertyDescriptors(P);
+ assertEquals({ "A": undefined }, result);
+ assertTrue(result.hasOwnProperty("A"));
+ assertEquals([
+ "ownKeys()",
+ "getOwnPropertyDescriptor(A)",
+ "getOwnPropertyDescriptor(A)"
+ ], log);
+}
+TestDuplicateKeys();
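
The duplicate-keys case is the subtle one here: the result keeps an own property for a key even when its descriptor lookup comes back undefined. A sketch consistent with these assertions (illustrative of the proposal-era semantics the tests encode, not V8's code):

    function getOwnPropertyDescriptors(o) {
      if (o === null || o === undefined) {
        throw new TypeError("cannot convert to object");
      }
      const obj = Object(o);
      const result = {};
      for (const key of Reflect.ownKeys(obj)) {  // strings and symbols alike
        // defineProperty rather than assignment, so a later duplicate key can
        // overwrite an earlier descriptor with `undefined` (see TestDuplicateKeys)
        Object.defineProperty(result, key, {
          value: Reflect.getOwnPropertyDescriptor(obj, key),
          writable: true, enumerable: true, configurable: true
        });
      }
      return result;
    }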
diff --git a/deps/v8/test/mjsunit/harmony/object-values.js b/deps/v8/test/mjsunit/harmony/object-values.js
new file mode 100644
index 0000000000..f56fe8a7b3
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/object-values.js
@@ -0,0 +1,229 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-object-values-entries --harmony-proxies --harmony-reflect
+// Flags: --allow-natives-syntax
+
+function TestMeta() {
+ assertEquals(1, Object.values.length);
+ assertEquals(Function.prototype, Object.getPrototypeOf(Object.values));
+ assertEquals("values", Object.values.name);
+
+ var descriptor = Object.getOwnPropertyDescriptor(Object, "values");
+ assertTrue(descriptor.writable);
+ assertFalse(descriptor.enumerable);
+ assertTrue(descriptor.configurable);
+
+ assertThrows(() => new Object.values({}), TypeError);
+}
+TestMeta();
+
+
+function TestBasic() {
+ var x = 16;
+ var O = {
+ d: 1,
+ c: 3,
+ [Symbol.iterator]: void 0,
+ 0: 123,
+ 1000: 456,
+ [x * x]: "ducks",
+ [`0x${(x * x).toString(16)}`]: "quack"
+ };
+ O.a = 2;
+ O.b = 4;
+ Object.defineProperty(O, "HIDDEN", { enumerable: false, value: NaN });
+ assertEquals([123, "ducks", 456, 1, 3, "quack", 2, 4], Object.values(O));
+ assertEquals(Object.values(O), Object.keys(O).map(key => O[key]));
+
+ assertTrue(Array.isArray(Object.values({})));
+ assertEquals(0, Object.values({}).length);
+}
+TestBasic();
+
+
+function TestToObject() {
+ assertThrows(function() { Object.values(); }, TypeError);
+ assertThrows(function() { Object.values(null); }, TypeError);
+ assertThrows(function() { Object.values(void 0); }, TypeError);
+}
+TestToObject();
+
+
+function TestOrder() {
+ var O = {
+ a: 1,
+ [Symbol.iterator]: null
+ };
+ O[456] = 123;
+ Object.defineProperty(O, "HIDDEN", { enumerable: false, value: NaN });
+ var priv = %CreatePrivateSymbol("Secret");
+ O[priv] = 56;
+
+ var log = [];
+ var P = new Proxy(O, {
+ ownKeys(target) {
+ log.push("[[OwnPropertyKeys]]");
+ return Reflect.ownKeys(target);
+ },
+ get(target, name) {
+ log.push(`[[Get]](${JSON.stringify(name)})`);
+ return Reflect.get(target, name);
+ },
+ getOwnPropertyDescriptor(target, name) {
+ log.push(`[[GetOwnProperty]](${JSON.stringify(name)})`);
+ return Reflect.getOwnPropertyDescriptor(target, name);
+ },
+ set(target, name, value) {
+ assertUnreachable();
+ }
+ });
+
+ assertEquals([123, 1], Object.values(P));
+ assertEquals([
+ "[[OwnPropertyKeys]]",
+ "[[GetOwnProperty]](\"456\")",
+ "[[Get]](\"456\")",
+ "[[GetOwnProperty]](\"a\")",
+ "[[Get]](\"a\")",
+ "[[GetOwnProperty]](\"HIDDEN\")"
+ ], log);
+}
+TestOrder();
+
+
+function TestOrderWithDuplicates() {
+ var O = {
+ a: 1,
+ [Symbol.iterator]: null
+ };
+ O[456] = 123;
+ Object.defineProperty(O, "HIDDEN", { enumerable: false, value: NaN });
+  var priv = %CreatePrivateSymbol("private");
+  O[priv] = 56;
+
+ var log = [];
+ var P = new Proxy(O, {
+ ownKeys(target) {
+ log.push("[[OwnPropertyKeys]]");
+ return [ "a", Symbol.iterator, "a", "456", "HIDDEN", "HIDDEN", "456" ];
+ },
+ get(target, name) {
+ log.push(`[[Get]](${JSON.stringify(name)})`);
+ return Reflect.get(target, name);
+ },
+ getOwnPropertyDescriptor(target, name) {
+ log.push(`[[GetOwnProperty]](${JSON.stringify(name)})`);
+ return Reflect.getOwnPropertyDescriptor(target, name);
+ },
+ set(target, name, value) {
+ assertUnreachable();
+ }
+ });
+
+ assertEquals([1, 1, 123, 123], Object.values(P));
+ assertEquals([
+ "[[OwnPropertyKeys]]",
+ "[[GetOwnProperty]](\"a\")",
+ "[[Get]](\"a\")",
+ "[[GetOwnProperty]](\"a\")",
+ "[[Get]](\"a\")",
+ "[[GetOwnProperty]](\"456\")",
+ "[[Get]](\"456\")",
+ "[[GetOwnProperty]](\"HIDDEN\")",
+ "[[GetOwnProperty]](\"HIDDEN\")",
+ "[[GetOwnProperty]](\"456\")",
+ "[[Get]](\"456\")",
+ ], log);
+}
+TestOrderWithDuplicates();
+
+
+function TestPropertyFilter() {
+ var object = { prop3: 30 };
+ object[2] = 40;
+ object["prop4"] = 50;
+ Object.defineProperty(object, "prop5", { value: 60, enumerable: true });
+ Object.defineProperty(object, "prop6", { value: 70, enumerable: false });
+ Object.defineProperty(object, "prop7", {
+ enumerable: true, get() { return 80; }});
+ var sym = Symbol("prop8");
+ object[sym] = 90;
+
+ values = Object.values(object);
+ assertEquals(5, values.length);
+ assertEquals([40,30,50,60,80], values);
+}
+TestPropertyFilter();
+
+
+function TestWithProxy() {
+ var obj1 = {prop1:10};
+ var proxy1 = new Proxy(obj1, { });
+ assertEquals([10], Object.values(proxy1));
+
+ var obj2 = {};
+ Object.defineProperty(obj2, "prop2", { value: 20, enumerable: true });
+ Object.defineProperty(obj2, "prop3", {
+ get() { return 30; }, enumerable: true });
+ var proxy2 = new Proxy(obj2, {
+ getOwnPropertyDescriptor(target, name) {
+ return Reflect.getOwnPropertyDescriptor(target, name);
+ }
+ });
+ assertEquals([20, 30], Object.values(proxy2));
+
+ var obj3 = {};
+ var count = 0;
+ var proxy3 = new Proxy(obj3, {
+ get(target, property, receiver) {
+ return count++ * 5;
+ },
+ getOwnPropertyDescriptor(target, property) {
+ return { configurable: true, enumerable: true };
+ },
+ ownKeys(target) {
+ return [ "prop0", "prop1", Symbol("prop2"), Symbol("prop5") ];
+ }
+ });
+ assertEquals([0, 5], Object.values(proxy3));
+}
+TestWithProxy();
+
+
+function TestMutateDuringEnumeration() {
+ var aDeletesB = {
+ get a() {
+ delete this.b;
+ return 1;
+ },
+ b: 2
+ };
+ assertEquals([1], Object.values(aDeletesB));
+
+ var aRemovesB = {
+ get a() {
+ Object.defineProperty(this, "b", { enumerable: false });
+ return 1;
+ },
+ b: 2
+ };
+ assertEquals([1], Object.values(aRemovesB));
+
+ var aAddsB = { get a() { this.b = 2; return 1; } };
+ assertEquals([1], Object.values(aAddsB));
+
+ var aMakesBEnumerable = {};
+ Object.defineProperty(aMakesBEnumerable, "a", {
+ get() {
+ Object.defineProperty(this, "b", { enumerable: true });
+ return 1;
+ },
+ enumerable: true
+ });
+ Object.defineProperty(aMakesBEnumerable, "b", {
+ value: 2, configurable:true, enumerable: false });
+ assertEquals([1, 2], Object.values(aMakesBEnumerable));
+}
+TestMutateDuringEnumeration();
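
Object.values shares the entries algorithm trap-for-trap; the same loop shape, projecting only the value, reproduces the log asserted by TestOrder here (again an illustrative sketch rather than V8's code):

    function objectValues(o) {
      if (o === null || o === undefined) {
        throw new TypeError("cannot convert to object");
      }
      const obj = Object(o);
      const values = [];
      for (const key of Reflect.ownKeys(obj)) {
        if (typeof key !== "string") continue;
        const desc = Reflect.getOwnPropertyDescriptor(obj, key);
        if (desc !== undefined && desc.enumerable) values.push(obj[key]);
      }
      return values;
    }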
diff --git a/deps/v8/test/mjsunit/harmony/private-symbols.js b/deps/v8/test/mjsunit/harmony/private-symbols.js
index 369c222897..18a2e4cf07 100644
--- a/deps/v8/test/mjsunit/harmony/private-symbols.js
+++ b/deps/v8/test/mjsunit/harmony/private-symbols.js
@@ -16,7 +16,6 @@ for (var key of Object.keys(object)) assertUnreachable();
for (var key of Object.getOwnPropertySymbols(object)) assertUnreachable();
for (var key of Object.getOwnPropertyNames(object)) assertUnreachable();
for (var key of Reflect.ownKeys(object)) assertUnreachable();
-for (var key of Reflect.enumerate(object)) assertUnreachable();
for (var key in object) assertUnreachable();
var object2 = {__proto__: object};
@@ -24,7 +23,6 @@ for (var key of Object.keys(object2)) assertUnreachable();
for (var key of Object.getOwnPropertySymbols(object2)) assertUnreachable();
for (var key of Object.getOwnPropertyNames(object2)) assertUnreachable();
for (var key of Reflect.ownKeys(object2)) assertUnreachable();
-for (var key of Reflect.enumerate(object2)) assertUnreachable();
for (var key in object2) assertUnreachable();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-apply.js b/deps/v8/test/mjsunit/harmony/proxies-apply.js
index 4ddffe73b8..dae362ac61 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-apply.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-apply.js
@@ -87,3 +87,36 @@
assertTrue(called_target);
assertTrue(called_handler);
})();
+
+
+(function testCallProxyNonCallableTarget() {
+ var values = [NaN, 1.5, 100, /RegExp/, "string", {}, [], Symbol(),
+ new Map(), new Set(), new WeakMap(), new WeakSet()];
+ values.forEach(target => {
+ target = Object(target);
+ var proxy = new Proxy(target, { apply() { assertUnreachable(); } });
+ assertThrows(() => { proxy(); }, TypeError);
+ assertThrows(() => { ({ proxy }).proxy(); }, TypeError);
+ assertThrows(() => { Reflect.apply(proxy, null, []); }, TypeError);
+ assertThrows(() => { Reflect.apply(proxy, { proxy }, []); }, TypeError);
+ assertThrows(() => {
+ Function.prototype.call.apply(proxy, [null]);
+ }, TypeError);
+ assertThrows(() => {
+ Function.prototype.apply.apply(proxy, [null, []]);
+ }, TypeError);
+
+ var proxy_to_proxy = new Proxy(proxy, { apply() { assertUnreachable(); } });
+ assertThrows(() => { proxy_to_proxy(); }, TypeError);
+ assertThrows(() => { ({ proxy_to_proxy }).proxy_to_proxy(); }, TypeError);
+ assertThrows(() => { Reflect.apply(proxy_to_proxy, null, []); }, TypeError);
+ assertThrows(() => { Reflect.apply(proxy_to_proxy, { proxy }, []); },
+ TypeError);
+ assertThrows(() => {
+ Function.prototype.call.apply(proxy_to_proxy, [null]);
+ }, TypeError);
+ assertThrows(() => {
+ Function.prototype.apply.apply(proxy_to_proxy, [null, []]);
+ }, TypeError);
+ });
+})();
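
The new test encodes that callability is a property of the proxy's target, decided before any trap is consulted. In brief:

    const p = new Proxy({}, { apply() { return 42; } });
    console.log(typeof p);  // "object": a non-callable target makes a non-callable proxy
    // p() would throw a TypeError here without ever invoking the apply trap

    const q = new Proxy(function () {}, { apply() { return 42; } });
    console.log(q());       // 42: callable target, so the trap runs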
diff --git a/deps/v8/test/mjsunit/harmony/proxies-enumerate.js b/deps/v8/test/mjsunit/harmony/proxies-enumerate.js
deleted file mode 100644
index 82464d0c7f..0000000000
--- a/deps/v8/test/mjsunit/harmony/proxies-enumerate.js
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-proxies
-
-var target = {
- "target_one": 1
-};
-target.__proto__ = {
- "target_two": 2
-};
-var handler = {
- enumerate: function(target) {
- function* keys() {
- yield "foo";
- yield "bar";
- }
- return keys();
- },
- // For-in calls "has" on every iteration, so for TestForIn() below to
- // detect all results of the "enumerate" trap, "has" must return true.
- has: function(target, name) {
- return true;
- }
-}
-
-var proxy = new Proxy(target, handler);
-
-function TestForIn(receiver, expected) {
- var result = [];
- for (var k in receiver) {
- result.push(k);
- }
- assertEquals(expected, result);
-}
-
-TestForIn(proxy, ["foo", "bar"]);
-
-// Test revoked proxy.
-var pair = Proxy.revocable(target, handler);
-TestForIn(pair.proxy, ["foo", "bar"]);
-pair.revoke();
-assertThrows(()=>{ TestForIn(pair.proxy, ["foo", "bar"]) }, TypeError);
-
-// Properly call traps on proxies on the prototype chain.
-var receiver = {
- "receiver_one": 1
-};
-receiver.__proto__ = proxy;
-TestForIn(receiver, ["receiver_one", "foo", "bar"]);
-
-// Fall through to default behavior when trap is undefined.
-handler.enumerate = undefined;
-TestForIn(proxy, ["target_one", "target_two"]);
-delete handler.enumerate;
-TestForIn(proxy, ["target_one", "target_two"]);
-
-// Non-string keys must be filtered.
-function TestNonStringKey(key) {
- handler.enumerate = function(target) {
- function* keys() { yield key; }
- return keys();
- }
- assertThrows("for (var k in proxy) {}", TypeError);
-}
-
-TestNonStringKey(1);
-TestNonStringKey(3.14);
-TestNonStringKey(Symbol("foo"));
-TestNonStringKey({bad: "value"});
-TestNonStringKey(null);
-TestNonStringKey(undefined);
-TestNonStringKey(true);
-
-(function testProtoProxyEnumerate() {
- var keys = ['a', 'b', 'c', 'd'];
- var handler = {
- enumerate() { return keys[Symbol.iterator]() },
- has(target, key) { return false }
- };
- var proxy = new Proxy({}, handler);
- var seen_keys = [];
- for (var i in proxy) {
- seen_keys.push(i);
- }
- assertEquals([], seen_keys);
-
- handler.has = function(target, key) { return true };
- for (var i in proxy) {
- seen_keys.push(i);
- }
- assertEquals(keys, seen_keys);
-
- o = {__proto__:proxy};
- handler.has = function(target, key) { return false };
- seen_keys = [];
- for (var i in o) {
- seen_keys.push(i);
- }
- assertEquals([], seen_keys);
-
- handler.has = function(target, key) { return true };
- seen_keys = [];
- for (var i in o) {
- seen_keys.push(i);
- }
- assertEquals(keys, seen_keys);
-})();
diff --git a/deps/v8/test/mjsunit/harmony/proxies-for.js b/deps/v8/test/mjsunit/harmony/proxies-for.js
index aea9bd6c21..e52ee43031 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-for.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-for.js
@@ -27,21 +27,15 @@
// Flags: --harmony-proxies
-
// Helper.
function TestWithProxies(test, x, y, z) {
test(function(h){ return new Proxy({}, h) }, x, y, z)
- test(function(h) {
- return new Proxy(function() {}, h)
- }, x, y, z)
}
// Iterate over a proxy.
-Array.prototype.values = function() { return this[Symbol.iterator]() }
-
function TestForIn(properties, handler) {
TestWithProxies(TestForIn2, properties, handler)
}
@@ -54,23 +48,18 @@ function TestForIn2(create, properties, handler) {
}
TestForIn(["0", "a"], {
- enumerate() { return ["0", "a"].values() },
- has(target, property) { return true }
+ ownKeys() { return ["0", "a"] },
+ has(target, property) { return true },
+ getOwnPropertyDescriptor() { return { enumerable: true, configurable: true }}
})
TestForIn(["null", "a"], {
- enumerate() { return this.enumerate2() },
- enumerate2() { return ["null", "a"].values() },
- has(target, property) { return true }
+ ownKeys() { return this.enumerate() },
+ enumerate() { return ["null", "a"] },
+ has(target, property) { return true },
+ getOwnPropertyDescriptor() { return { enumerable: true, configurable: true }}
})
-TestForIn(["b", "a", "0", "c"], new Proxy({}, {
- get: function(pr, pk) {
- return function() { return ["b", "a", "0", "c"].values() }
- }
-}))
-
-
// Iterate over an object with a proxy prototype.
@@ -94,19 +83,21 @@ function TestForInDerived2(create, properties, handler) {
}
TestForInDerived(["0", "a"], {
- enumerate: function() { return ["0", "a"].values() },
- has: function(t, k) { return k == "0" || k == "a" }
+ ownKeys: function() { return ["0", "a"] },
+ has: function(t, k) { return k == "0" || k == "a" },
+ getOwnPropertyDescriptor() { return { enumerable: true, configurable: true }}
})
TestForInDerived(["null", "a"], {
- enumerate: function() { return this.enumerate2() },
- enumerate2: function() { return ["null", "a"].values() },
- has: function(t, k) { return k == "null" || k == "a" }
+ ownKeys: function() { return this.enumerate() },
+ enumerate: function() { return ["null", "a"] },
+ has: function(t, k) { return k == "null" || k == "a" },
+ getOwnPropertyDescriptor() { return { enumerable: true, configurable: true }}
})
-// Throw exception in enumerate trap.
+// Throw exception in ownKeys trap.
function TestForInThrow(handler) {
TestWithProxies(TestForInThrow2, handler)
@@ -120,12 +111,12 @@ function TestForInThrow2(create, handler) {
}
TestForInThrow({
- enumerate: function() { throw "myexn" }
+ ownKeys: function() { throw "myexn" }
})
TestForInThrow({
- enumerate: function() { return this.enumerate2() },
- enumerate2: function() { throw "myexn" }
+ ownKeys: function() { return this.enumerate() },
+ enumerate: function() { throw "myexn" }
})
TestForInThrow(new Proxy({}, {
@@ -135,7 +126,7 @@ TestForInThrow(new Proxy({}, {
}));
(function() {
- var p = new Proxy({}, {enumerate:function() { return ["0"].values(); }});
+ var p = new Proxy({}, {ownKeys:function() { return ["0"]; }});
var o = [0];
o.__proto__ = p;
var keys = [];
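
With the enumerate trap gone, for-in over a proxy is driven by ownKeys plus a per-key enumerability check through getOwnPropertyDescriptor — which is exactly what the rewritten handlers above supply. A small standalone illustration:

    const seen = [];
    const proxy = new Proxy({}, {
      ownKeys() { return ["a", "b"]; },
      getOwnPropertyDescriptor(target, key) {
        // configurable: true keeps the invariant checks happy for an empty target
        return { value: 1, enumerable: key === "a", configurable: true };
      }
    });
    for (const k in proxy) seen.push(k);
    console.log(seen);  // ["a"]: "b" was reported but filtered out as non-enumerable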
diff --git a/deps/v8/test/mjsunit/harmony/proxies-ownkeys.js b/deps/v8/test/mjsunit/harmony/proxies-ownkeys.js
index 6a7ae64d78..88350cca02 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-ownkeys.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-ownkeys.js
@@ -56,6 +56,10 @@ assertEquals(["a", "b", "c"], Reflect.ownKeys(proxy));
keys.length = Math.pow(2, 33);
assertThrows("Reflect.ownKeys(proxy)", RangeError);
+// Check that we allow duplicated keys.
+keys = ['a', 'a', 'a']
+assertEquals(keys, Reflect.ownKeys(proxy));
+
// Non-Name results throw.
keys = [1];
assertThrows("Reflect.ownKeys(proxy)", TypeError);
@@ -73,6 +77,10 @@ assertThrows("Reflect.ownKeys(proxy)", TypeError);
keys = ["nonconf"];
assertEquals(keys, Reflect.ownKeys(proxy));
+// Check that we allow duplicated keys.
+keys = ['nonconf', 'nonconf', 'nonconf']
+assertEquals(keys, Reflect.ownKeys(proxy));
+
// Step 19a: The trap result must all keys of a non-extensible target.
Object.preventExtensions(target);
assertThrows("Reflect.ownKeys(proxy)", TypeError);
@@ -82,3 +90,7 @@ assertEquals(keys, Reflect.ownKeys(proxy));
// Step 20: The trap result must not add keys to a non-extensible target.
keys = ["nonconf", "target_one", "fantasy"];
assertThrows("Reflect.ownKeys(proxy)", TypeError);
+
+// Check that we allow duplicated keys.
+keys = ['nonconf', 'target_one', 'nonconf', 'nonconf', 'target_one',]
+assertEquals(keys, Reflect.ownKeys(proxy));
diff --git a/deps/v8/test/mjsunit/harmony/proxies-set-prototype-of.js b/deps/v8/test/mjsunit/harmony/proxies-set-prototype-of.js
index bc60ff492c..810c219533 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-set-prototype-of.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-set-prototype-of.js
@@ -120,3 +120,11 @@ assertEquals({a:5}, seen_prototype);
prototype = [5];
assertThrows(() => {Reflect.setPrototypeOf(proxy2, prototype)}, TypeError);
})();
+
+(function testProxyTrapReturnsFalse() {
+ var handler = {};
+ handler.setPrototypeOf = () => false;
+ var target = new Proxy({}, {isExtensible: () => assertUnreachable()});
+ var object = new Proxy(target, handler);
+ assertFalse(Reflect.setPrototypeOf(object, {}));
+})();
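
The added case distinguishes the two entry points: Reflect.setPrototypeOf surfaces the trap's boolean verbatim, while Object.setPrototypeOf converts a false result into a TypeError. Illustration:

    const obj = new Proxy({}, { setPrototypeOf() { return false; } });
    console.log(Reflect.setPrototypeOf(obj, null));  // false
    // Object.setPrototypeOf(obj, null) would throw a TypeError instead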
diff --git a/deps/v8/test/mjsunit/harmony/reflect-construct.js b/deps/v8/test/mjsunit/harmony/reflect-construct.js
index f2dfc15366..c136957df0 100644
--- a/deps/v8/test/mjsunit/harmony/reflect-construct.js
+++ b/deps/v8/test/mjsunit/harmony/reflect-construct.js
@@ -279,10 +279,7 @@
(function() {
function* f() { yield 1; yield 2; }
function* g() { yield 3; yield 4; }
- var o = Reflect.construct(f, [], g);
- assertEquals([1, 2], [...o]);
- assertTrue(o.__proto__ === g.prototype);
- assertTrue(o.__proto__ !== f.prototype);
+ assertThrows(()=>Reflect.construct(f, [], g));
})();
(function () {
diff --git a/deps/v8/test/mjsunit/harmony/reflect-enumerate-delete.js b/deps/v8/test/mjsunit/harmony/reflect-enumerate-delete.js
deleted file mode 100644
index 1137d8a0a4..0000000000
--- a/deps/v8/test/mjsunit/harmony/reflect-enumerate-delete.js
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2010-2015 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Test that properties deleted during an enumeration do not show up in
-// the enumeration. This is adapted from mjsunit/for-in-delete.js.
-
-// Flags: --harmony-reflect
-
-
-function f(o, expected, del) {
- var index = 0;
- for (p of Reflect.enumerate(o)) {
- if (del) delete o[del];
- assertEquals(expected[index], p);
- index++;
- }
- assertEquals(expected.length, index);
-}
-
-var o = {}
-o.a = 1;
-o.b = 2;
-o.c = 3;
-o.d = 3;
-
-f(o, ['a', 'b', 'c', 'd']);
-f(o, ['a', 'b', 'c', 'd']);
-f(o, ['a', 'c', 'd'], 'b');
-f(o, ['a', 'c'], 'd');
diff --git a/deps/v8/test/mjsunit/harmony/reflect-enumerate-opt.js b/deps/v8/test/mjsunit/harmony/reflect-enumerate-opt.js
deleted file mode 100644
index ccd1845c78..0000000000
--- a/deps/v8/test/mjsunit/harmony/reflect-enumerate-opt.js
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is adapted from mjsunit/for-in-opt.js.
-
-// Flags: --harmony-proxies --harmony-reflect --allow-natives-syntax
-
-
-"use strict";
-
-function f(o) {
- var result = [];
- for (var i of Reflect.enumerate(Object(o))) {
- result.push(i);
- }
- return result;
-}
-
-assertEquals(["0"], f("a"));
-assertEquals(["0"], f("a"));
-%OptimizeFunctionOnNextCall(f);
-assertEquals(["0","1","2"], f("bla"));
-
-// Test the lazy deopt points.
-var keys = ["a", "b", "c", "d"];
-var has_keys = [];
-var deopt_has = false;
-var deopt_enum = false;
-
-var handler = {
- enumerate: function(target) {
- if (deopt_enum) {
- %DeoptimizeFunction(f2);
- deopt_enum = false;
- }
- return keys;
- },
-
- getPropertyDescriptor: function(k) {
- if (deopt_has) {
- %DeoptimizeFunction(f2);
- deopt_has = false;
- }
- has_keys.push(k);
- return {value: 10, configurable: true, writable: false, enumerable: true};
- }
-};
-
-// TODO(neis,cbruni): Enable once the enumerate proxy trap is properly
-// implemented.
-// var proxy = new Proxy({}, handler);
-// var o = {__proto__: proxy};
-//
-// function f2(o) {
-// var result = [];
-// for (var i of Reflect.enumerate(o)) {
-// result.push(i);
-// }
-// return result;
-// }
-//
-// function check_f2() {
-// assertEquals(keys, f2(o));
-// assertEquals(keys, has_keys);
-// has_keys.length = 0;
-// }
-//
-// check_f2();
-// check_f2();
-// Test lazy deopt after GetPropertyNamesFast
-// %OptimizeFunctionOnNextCall(f2);
-// deopt_enum = true;
-// check_f2();
-// Test lazy deopt after FILTER_KEY
-// %OptimizeFunctionOnNextCall(f2);
-// deopt_has = true;
-// check_f2();
diff --git a/deps/v8/test/mjsunit/harmony/reflect-enumerate-special-cases.js b/deps/v8/test/mjsunit/harmony/reflect-enumerate-special-cases.js
deleted file mode 100644
index 234a3e3e0d..0000000000
--- a/deps/v8/test/mjsunit/harmony/reflect-enumerate-special-cases.js
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2008-2015 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This is adapted from mjsunit/for-in-special-cases.js.
-
-// Flags: --harmony-reflect
-
-
-function Accumulate(x) {
- var accumulator = "";
- for (var i of Reflect.enumerate(Object(x))) {
- accumulator += i;
- }
- return accumulator;
-}
-
-for (var i = 0; i < 3; ++i) {
- var elements = Accumulate("abcd");
- // We do not assume that enumerate enumerates elements in order.
- assertTrue(-1 != elements.indexOf("0"));
- assertTrue(-1 != elements.indexOf("1"));
- assertTrue(-1 != elements.indexOf("2"));
- assertTrue(-1 != elements.indexOf("3"));
- assertEquals(4, elements.length);
-}
-
-function for_in_string_prototype() {
-
- var x = new String("abc");
- x.foo = 19;
- function B() {
- this.bar = 5;
- this[7] = 4;
- }
- B.prototype = x;
-
- var y = new B();
- y.gub = 13;
-
- var elements = Accumulate(y);
- var elements1 = Accumulate(y);
- // If enumerate returns elements in a different order on multiple calls, this
- // assert will fail. If that happens, consider if that behavior is OK.
- assertEquals(elements, elements1, "Enumeration not the same both times.");
- // We do not assume that enumerate enumerates elements in order.
- assertTrue(-1 != elements.indexOf("0"));
- assertTrue(-1 != elements.indexOf("1"));
- assertTrue(-1 != elements.indexOf("2"));
- assertTrue(-1 != elements.indexOf("7"));
- assertTrue(-1 != elements.indexOf("foo"));
- assertTrue(-1 != elements.indexOf("bar"));
- assertTrue(-1 != elements.indexOf("gub"));
- assertEquals(13, elements.length);
-
- elements = Accumulate(x);
- assertTrue(-1 != elements.indexOf("0"));
- assertTrue(-1 != elements.indexOf("1"));
- assertTrue(-1 != elements.indexOf("2"));
- assertTrue(-1 != elements.indexOf("foo"));
- assertEquals(6, elements.length);
-}
-
-for_in_string_prototype();
-for_in_string_prototype();
diff --git a/deps/v8/test/mjsunit/harmony/reflect-enumerate.js b/deps/v8/test/mjsunit/harmony/reflect-enumerate.js
deleted file mode 100644
index bbc364e7b9..0000000000
--- a/deps/v8/test/mjsunit/harmony/reflect-enumerate.js
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2008-2015 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// This is adapted from mjsunit/for-in.js.
-
-// Flags: --harmony-reflect
-
-
-function props(x) {
- var array = [];
- for (var p of Reflect.enumerate(x)) array.push(p);
- return array.sort();
-}
-
-assertEquals(0, props({}).length, "olen0");
-assertEquals(1, props({x:1}).length, "olen1");
-assertEquals(2, props({x:1, y:2}).length, "olen2");
-
-assertArrayEquals(["x"], props({x:1}), "x");
-assertArrayEquals(["x", "y"], props({x:1, y:2}), "xy");
-assertArrayEquals(["x", "y", "zoom"], props({x:1, y:2, zoom:3}), "xyzoom");
-
-assertEquals(0, props([]).length, "alen0");
-assertEquals(1, props([1]).length, "alen1");
-assertEquals(2, props([1,2]).length, "alen2");
-
-assertArrayEquals(["0"], props([1]), "0");
-assertArrayEquals(["0", "1"], props([1,2]), "01");
-assertArrayEquals(["0", "1", "2"], props([1,2,3]), "012");
-
-var o = {};
-var a = [];
-for (var i = 0x0020; i < 0x01ff; i+=2) {
- var s = 'char:' + String.fromCharCode(i);
- a.push(s);
- o[s] = i;
-}
-assertArrayEquals(a, props(o), "charcodes");
-
-var a = [];
-assertEquals(0, props(a).length, "proplen0");
-a[Math.pow(2,30)-1] = 0;
-assertEquals(1, props(a).length, "proplen1");
-a[Math.pow(2,31)-1] = 0;
-assertEquals(2, props(a).length, "proplen2");
-a[1] = 0;
-assertEquals(3, props(a).length, "proplen3");
-
-var result = '';
-for (var p of Reflect.enumerate({a : [0], b : 1})) { result += p; }
-assertEquals('ab', result, "ab");
-
-var result = '';
-for (var p of Reflect.enumerate({a : {v:1}, b : 1})) { result += p; }
-assertEquals('ab', result, "ab-nodeep");
-
-var result = '';
-for (var p of Reflect.enumerate({ get a() {}, b : 1})) { result += p; }
-assertEquals('ab', result, "abget");
-
-var result = '';
-for (var p of Reflect.enumerate({ get a() {}, set a(x) {}, b : 1})) {
- result += p;
-}
-assertEquals('ab', result, "abgetset");
-
-(function() {
- var large_key = 2147483650;
- var o = {__proto__: {}};
- o[large_key] = 1;
- o.__proto__[large_key] = 1;
- var keys = [];
- for (var k of Reflect.enumerate(o)) {
- keys.push(k);
- }
- assertEquals(["2147483650"], keys);
-})();
diff --git a/deps/v8/test/mjsunit/harmony/reflect.js b/deps/v8/test/mjsunit/harmony/reflect.js
index 8ee1227a44..6449eb8259 100644
--- a/deps/v8/test/mjsunit/harmony/reflect.js
+++ b/deps/v8/test/mjsunit/harmony/reflect.js
@@ -486,27 +486,6 @@ function prepare(target) {
})();
-
-////////////////////////////////////////////////////////////////////////////////
-// Reflect.enumerate
-
-
-(function testReflectEnumerateArity() {
- assertEquals(1, Reflect.enumerate.length);
-})();
-
-
-(function testReflectEnumerateOnNonObject() {
- assertThrows(function() { Reflect.enumerate(); }, TypeError);
- assertThrows(function() { Reflect.enumerate(42); }, TypeError);
- assertThrows(function() { Reflect.enumerate(null); }, TypeError);
-})();
-
-
-// See reflect-enumerate*.js for further tests.
-
-
-
////////////////////////////////////////////////////////////////////////////////
// Reflect.getOwnPropertyDescriptor
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4696.js b/deps/v8/test/mjsunit/harmony/regress/regress-4696.js
new file mode 100644
index 0000000000..82969f9fbe
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-4696.js
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function testSpreadIndex() {
+ var result = [...[17, 42]][1];
+ assertEquals(result, 42);
+})();
+
+(function testSpreadProperty() {
+ var result = [...[17, 42]].length;
+ assertEquals(result, 2);
+})();
+
+(function testSpreadMethodCall() {
+ var result = [...[17, 42]].join("+");
+ assertEquals(result, "17+42");
+})();
+
+(function testSpreadSavedMethodCall() {
+ var x = [...[17, 42]];
+ var method = x.join;
+ var result = method.call(x, "+");
+ assertEquals(result, "17+42");
+})();
+
+(function testSpreadAsTemplateTag() {
+ assertThrows(function() { [...[17, 42]] `foo`; }, TypeError);
+})();
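
The last case generalizes: a tagged template calls its tag as a function, and an
array (spread or otherwise) is not callable, so the TypeError falls out of
ordinary call semantics rather than anything spread-specific. Sketch:

    try {
      [...[17, 42]] `foo`;        // the array value is invoked as a template tag
    } catch (e) {
      e instanceof TypeError;     // true: arrays are not callable
    }
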
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4755.js b/deps/v8/test/mjsunit/harmony/regress/regress-4755.js
new file mode 100644
index 0000000000..2a0df9dba4
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-4755.js
@@ -0,0 +1,45 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-do-expressions
+
+(function DoTryCatchInsideBinop() {
+ function f(a, b) {
+ return a + do { try { throw "boom" } catch(e) { b } }
+ }
+ assertEquals(3, f(1, 2));
+ assertEquals(3, f(1, 2));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(3, f(1, 2));
+})();
+
+(function DoTryCatchInsideCall() {
+ function f(a, b) {
+ return Math.max(a, do { try { throw a } catch(e) { e + b } })
+ }
+ assertEquals(3, f(1, 2));
+ assertEquals(3, f(1, 2));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(3, f(1, 2));
+})();
+
+(function DoTryCatchInsideTry() {
+ function f(a, b) {
+ try { return do { try { throw a } catch(e) { e + b } } } catch(e) {}
+ }
+ assertEquals(3, f(1, 2));
+ assertEquals(3, f(1, 2));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(3, f(1, 2));
+})();
+
+(function DoTryCatchInsideFinally() {
+ function f(a, b) {
+ try {} finally { return do { try { throw a } catch(e) { e + b } } }
+ }
+ assertEquals(3, f(1, 2));
+ assertEquals(3, f(1, 2));
+ %OptimizeFunctionOnNextCall(f);
+ assertEquals(3, f(1, 2));
+})();
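
These regressions lean on do-expressions, a non-standard proposal gated behind
--harmony-do-expressions: a do { ... } block in expression position evaluates to
the completion value of its body, which is what lets a try/catch appear inside a
binop, a call argument, or a return. A minimal illustration (only runs with the
flag):

    // Flags: --harmony-do-expressions
    var x = 1 + do { try { throw "boom" } catch (e) { 2 } };
    x;  // 3: the catch block's completion value is the do-expression's value
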
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-578038.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-578038.js
new file mode 100644
index 0000000000..42774b84ed
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-578038.js
@@ -0,0 +1,16 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-do-expressions
+
+(function testWithoutOtherLiteral() {
+ var result = ((x = [...[42]]) => x)();
+ assertEquals(result, [42]);
+})();
+
+(function testWithSomeOtherLiteral() {
+ []; // important: an array literal before the arrow function
+ var result = ((x = [...[42]]) => x)(); // will core dump if not fixed.
+ assertEquals(result, [42]);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/simd.js b/deps/v8/test/mjsunit/harmony/simd.js
index 6330ac8338..ff81506afe 100644
--- a/deps/v8/test/mjsunit/harmony/simd.js
+++ b/deps/v8/test/mjsunit/harmony/simd.js
@@ -407,7 +407,7 @@ function TestSameValue(type, lanes) {
var simdFn = SIMD[type];
var instance = createInstance(type);
var sameValue = Object.is
- var sameValueZero = natives.ImportNow("SameValueZero");
+ var sameValueZero = function(x, y) { return %SameValueZero(x, y); }
// SIMD values should not be the same as instances of different types.
checkTypeMatrix(type, function(other) {
diff --git a/deps/v8/test/mjsunit/harmony/string-replace.js b/deps/v8/test/mjsunit/harmony/string-replace.js
new file mode 100644
index 0000000000..208c483fd0
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/string-replace.js
@@ -0,0 +1,19 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-subclass
+
+var pattern = {
+ [Symbol.replace]: (string, newValue) => string + newValue
+};
+// Check object coercible fails.
+assertThrows(() => String.prototype.replace.call(null, pattern, "x"),
+ TypeError);
+// Override is called.
+assertEquals("abcdex", "abcde".replace(pattern, "x"));
+// Non-callable override.
+pattern[Symbol.replace] = "dumdidum";
+assertThrows(() => "abcde".replace(pattern, "x"), TypeError);
+
+assertEquals("[Symbol.replace]", RegExp.prototype[Symbol.replace].name);
diff --git a/deps/v8/test/mjsunit/harmony/unicode-character-ranges.js b/deps/v8/test/mjsunit/harmony/unicode-character-ranges.js
new file mode 100644
index 0000000000..e4f5247c15
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/unicode-character-ranges.js
@@ -0,0 +1,158 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-unicode-regexps --harmony-regexp-lookbehind
+
+function execl(expectation, regexp, subject) {
+ if (regexp instanceof String) regexp = new RegExp(regexp, "u");
+ assertEquals(expectation, regexp.exec(subject));
+}
+
+function execs(expectation, regexp_source, subject) {
+ execl(expectation, new RegExp(regexp_source, "u"), subject);
+}
+
+// Character ranges.
+execl(["A"], /[A-D]/u, "A");
+execs(["A"], "[A-D]", "A");
+execl(["ABCD"], /[A-D]+/u, "ZABCDEF");
+execs(["ABCD"], "[A-D]+", "ZABCDEF");
+
+execl(["\u{12345}"], /[\u1234-\u{12345}]/u, "\u{12345}");
+execs(["\u{12345}"], "[\u1234-\u{12345}]", "\u{12345}");
+execl(null, /[^\u1234-\u{12345}]/u, "\u{12345}");
+execs(null, "[^\u1234-\u{12345}]", "\u{12345}");
+
+execl(["\u{1234}"], /[\u1234-\u{12345}]/u, "\u{1234}");
+execs(["\u{1234}"], "[\u1234-\u{12345}]", "\u{1234}");
+execl(null, /[^\u1234-\u{12345}]/u, "\u{1234}");
+execs(null, "[^\u1234-\u{12345}]", "\u{1234}");
+
+execl(null, /[\u1234-\u{12345}]/u, "\u{1233}");
+execs(null, "[\u1234-\u{12345}]", "\u{1233}");
+execl(["\u{1233}"], /[^\u1234-\u{12345}]/u, "\u{1233}");
+execs(["\u{1233}"], "[^\u1234-\u{12345}]", "\u{1233}");
+
+execl(["\u{12346}"], /[^\u1234-\u{12345}]/u, "\u{12346}");
+execs(["\u{12346}"], "[^\u1234-\u{12345}]", "\u{12346}");
+execl(null, /[\u1234-\u{12345}]/u, "\u{12346}");
+execs(null, "[\u1234-\u{12345}]", "\u{12346}");
+
+execl(["\u{12342}"], /[\u{12340}-\u{12345}]/u, "\u{12342}");
+execs(["\u{12342}"], "[\u{12340}-\u{12345}]", "\u{12342}");
+execl(["\u{12342}"], /[\ud808\udf40-\ud808\udf45]/u, "\u{12342}");
+execs(["\u{12342}"], "[\ud808\udf40-\ud808\udf45]", "\u{12342}");
+execl(null, /[^\u{12340}-\u{12345}]/u, "\u{12342}");
+execs(null, "[^\u{12340}-\u{12345}]", "\u{12342}");
+execl(null, /[^\ud808\udf40-\ud808\udf45]/u, "\u{12342}");
+execs(null, "[^\ud808\udf40-\ud808\udf45]", "\u{12342}");
+
+execl(["\u{ffff}"], /[\u{ff80}-\u{12345}]/u, "\u{ffff}");
+execs(["\u{ffff}"], "[\u{ff80}-\u{12345}]", "\u{ffff}");
+execl(["\u{ffff}"], /[\u{ff80}-\ud808\udf45]/u, "\u{ffff}");
+execs(["\u{ffff}"], "[\u{ff80}-\ud808\udf45]", "\u{ffff}");
+execl(null, /[^\u{ff80}-\u{12345}]/u, "\u{ffff}");
+execs(null, "[^\u{ff80}-\u{12345}]", "\u{ffff}");
+execl(null, /[^\u{ff80}-\ud808\udf45]/u, "\u{ffff}");
+execs(null, "[^\u{ff80}-\ud808\udf45]", "\u{ffff}");
+
+// Lone surrogate
+execl(["\ud800"], /[^\u{ff80}-\u{12345}]/u, "\uff99\u{d800}A");
+execs(["\udc00"], "[^\u{ff80}-\u{12345}]", "\uff99\u{dc00}A");
+execl(["\udc01"], /[\u0100-\u{10ffff}]/u, "A\udc01");
+execl(["\udc03"], /[\udc01-\udc03]/u, "\ud801\udc02\udc03");
+execl(["\ud801"], /[\ud801-\ud803]/u, "\ud802\udc01\ud801");
+
+// Paired surrogate.
+execl(null, /[^\u{ff80}-\u{12345}]/u, "\u{d800}\u{dc00}");
+execs(null, "[^\u{ff80}-\u{12345}]", "\u{d800}\u{dc00}");
+execl(["\ud800\udc00"], /[\u{ff80}-\u{12345}]/u, "\u{d800}\u{dc00}");
+execs(["\ud800\udc00"], "[\u{ff80}-\u{12345}]", "\u{d800}\u{dc00}");
+execl(["foo\u{10e6d}bar"], /foo\ud803\ude6dbar/u, "foo\u{10e6d}bar");
+
+// Lone surrogates
+execl(["\ud801\ud801"], /\ud801+/u, "\ud801\udc01\ud801\ud801");
+execl(["\udc01\udc01"], /\udc01+/u, "\ud801\ud801\udc01\udc01\udc01");
+
+execl(["\udc02\udc03A"], /\W\WA/u, "\ud801\udc01A\udc02\udc03A");
+execl(["\ud801\ud802"], /\ud801./u, "\ud801\udc01\ud801\ud802");
+execl(["\udc02\udc03A"], /[\ud800-\udfff][\ud800-\udfff]A/u,
+ "\ud801\udc01A\udc02\udc03A");
+
+// Character classes
+execl(null, /\w/u, "\ud801\udc01");
+execl(["\ud801"], /[^\w]/, "\ud801\udc01");
+execl(["\ud801\udc01"], /[^\w]/u, "\ud801\udc01");
+execl(["\ud801"], /\W/, "\ud801\udc01");
+execl(["\ud801\udc01"], /\W/u, "\ud801\udc01");
+
+execl(["\ud800X"], /.X/u, "\ud800XaX");
+execl(["aX"], /.(?<!\ud800)X/u, "\ud800XaX");
+execl(["aX"], /.(?<![\ud800-\ud900])X/u, "\ud800XaX");
+
+execl(null, /[]/u, "\u1234");
+execl(["0abc"], /[^]abc/u, "0abc");
+execl(["\u1234abc"], /[^]abc/u, "\u1234abc");
+execl(["\u{12345}abc"], /[^]abc/u, "\u{12345}abc");
+
+execl(null, /[\u{0}-\u{1F444}]/u, "\ud83d\udfff");
+
+// Backward matches of lone surrogates.
+execl(["B", "\ud803A"], /(?<=([\ud800-\ud900]A))B/u,
+ "\ud801\udc00AB\udc00AB\ud802\ud803AB");
+execl(["B", "\udc00A"], /(?<=([\ud800-\u{10300}]A))B/u,
+ "\ud801\udc00AB\udc00AB\ud802\ud803AB");
+execl(["B", "\udc11A"], /(?<=([\udc00-\udd00]A))B/u,
+ "\ud801\udc00AB\udc11AB\ud802\ud803AB");
+execl(["X", "\ud800C"], /(?<=(\ud800\w))X/u,
+ "\ud800\udc00AX\udc11BX\ud800\ud800CX");
+execl(["C", "\ud800\ud800"], /(?<=(\ud800.))\w/u,
+ "\ud800\udc00AX\udc11BX\ud800\ud800CX");
+execl(["X", "\udc01C"], /(?<=(\udc01\w))X/u,
+ "\ud800\udc01AX\udc11BX\udc01\udc01CX");
+execl(["C", "\udc01\udc01"], /(?<=(\udc01.))./u,
+ "\ud800\udc01AX\udc11BX\udc01\udc01CX");
+
+var L = "\ud800";
+var T = "\udc00";
+var X = "X";
+
+// Test string contains only match.
+function testw(expect, src, subject) {
+ var re = new RegExp("^" + src + "$", "u");
+ assertEquals(expect, re.test(subject));
+}
+
+// Test string starts with match.
+function tests(expect, src, subject) {
+ var re = new RegExp("^" + src, "u");
+ assertEquals(expect, re.test(subject));
+}
+
+testw(true, X, X);
+testw(true, L, L);
+testw(true, T, T);
+testw(true, L + T, L + T);
+testw(true, T + L, T + L);
+testw(false, T, L + T);
+testw(false, L, L + T);
+testw(true, ".(?<=" + L + ")", L);
+testw(true, ".(?<=" + T + ")", T);
+testw(true, ".(?<=" + L + T + ")", L + T);
+testw(true, ".(?<=" + L + T + ")", L + T);
+tests(true, ".(?<=" + T + ")", T + L);
+tests(false, ".(?<=" + L + ")", L + T);
+tests(false, ".(?<=" + T + ")", L + T);
+tests(true, "..(?<=" + T + ")", T + T + L);
+tests(true, "..(?<=" + T + ")", X + T + L);
+tests(true, "...(?<=" + L + ")", X + T + L);
+tests(false, "...(?<=" + T + ")", X + L + T)
+tests(true, "..(?<=" + L + T + ")", X + L + T)
+tests(true, "..(?<=" + L + T + "(?<=" + L + T + "))", X + L + T);
+tests(false, "..(?<=" + L + "(" + T + "))", X + L + T);
+tests(false, ".*" + L, X + L + T);
+tests(true, ".*" + L, X + L + L + T);
+tests(false, ".*" + L, X + L + T + L + T);
+tests(false, ".*" + T, X + L + T + L + T);
+tests(true, ".*" + T, X + L + T + T + L + T);
diff --git a/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js b/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js
index f591dac930..895e0c6722 100644
--- a/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js
+++ b/deps/v8/test/mjsunit/harmony/unicode-escapes-in-regexps.js
@@ -252,6 +252,36 @@ assertFalse(/(\u{12345}|\u{23456}).\1/u.test("\u{12345}b\u{23456}"));
assertTrue(new RegExp("\u{12345}{3}", "u").test("\u{12345}\u{12345}\u{12345}"));
assertTrue(/\u{12345}{3}/u.test("\u{12345}\u{12345}\u{12345}"));
assertTrue(new RegExp("\u{12345}{3}").test("\u{12345}\udf45\udf45"));
-assertTrue(/\ud808\udf45{3}/u.test("\u{12345}\udf45\udf45"));
+assertFalse(/\ud808\udf45{3}/u.test("\u{12345}\udf45\udf45"));
+assertTrue(/\ud808\udf45{3}/u.test("\u{12345}\u{12345}\u{12345}"));
assertFalse(new RegExp("\u{12345}{3}", "u").test("\u{12345}\udf45\udf45"));
assertFalse(/\u{12345}{3}/u.test("\u{12345}\udf45\udf45"));
+
+// Literal surrogates.
+assertEquals(["\u{10000}\u{10000}"],
+ new RegExp("\ud800\udc00+", "u").exec("\u{10000}\u{10000}"));
+assertEquals(["\u{10000}\u{10000}"],
+ new RegExp("\\ud800\\udc00+", "u").exec("\u{10000}\u{10000}"));
+
+assertEquals(["\u{10003}\u{50001}"],
+ new RegExp("[\\ud800\\udc03-\\ud900\\udc01\]+", "u").exec(
+ "\u{10003}\u{50001}"));
+assertEquals(["\u{10003}\u{50001}"],
+ new RegExp("[\ud800\udc03-\u{50001}\]+", "u").exec(
+ "\u{10003}\u{50001}"));
+
+// Unicode escape sequences to represent a non-BMP character cannot have
+// mixed notation, and must follow the rules for RegExpUnicodeEscapeSequence.
+assertThrows(() => new RegExp("[\\ud800\udc03-\ud900\\udc01\]+", "u"));
+assertThrows(() => new RegExp("[\\ud800\udc03-\ud900\\udc01\]+", "u"));
+assertNull(new RegExp("\\ud800\udc00+", "u").exec("\u{10000}\u{10000}"));
+assertNull(new RegExp("\ud800\\udc00+", "u").exec("\u{10000}\u{10000}"));
+
+assertNull(new RegExp("[\\ud800\udc00]", "u").exec("\u{10000}"));
+assertNull(new RegExp("[\\{ud800}\udc00]", "u").exec("\u{10000}"));
+assertNull(new RegExp("[\ud800\\udc00]", "u").exec("\u{10000}"));
+assertNull(new RegExp("[\ud800\\{udc00}]", "u").exec("\u{10000}"));
+
+assertNull(/\u{d800}\u{dc00}+/u.exec("\ud800\udc00\udc00"));
+assertNull(/\ud800\u{dc00}+/u.exec("\ud800\udc00\udc00"));
+assertNull(/\u{d800}\udc00+/u.exec("\ud800\udc00\udc00"));
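
"Mixed notation" above means pairing a literal surrogate with an escaped one:
under the u flag only two halves written in the same notation combine into a
single code point, so a mixed pattern degenerates into two lone-surrogate atoms
that can never match a well-formed pair. Sketch:

    // Both halves escaped (or both literal): one code point, U+10000.
    new RegExp("\\ud800\\udc00", "u").test("\u{10000}");  // true
    // Mixed notation: the halves stay separate lone surrogates.
    new RegExp("\\ud800\udc00", "u").test("\u{10000}");   // false
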
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-backrefs.js b/deps/v8/test/mjsunit/harmony/unicode-regexp-backrefs.js
new file mode 100644
index 0000000000..e02301be1e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/unicode-regexp-backrefs.js
@@ -0,0 +1,53 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-unicode-regexps --harmony-regexp-lookbehind
+
+// Back reference does not end in the middle of a surrogate pair.
+function replace(string) {
+ return string.replace(/L/g, "\ud800")
+ .replace(/l/g, "\ud801")
+ .replace(/T/g, "\udc00")
+ .replace(/\./g, "[^]");
+}
+
+function test(expectation, regexp_source, subject) {
+ if (expectation !== null) expectation = expectation.map(replace);
+ subject = replace(subject);
+ regexp_source = replace(regexp_source);
+ assertEquals(expectation, new RegExp(regexp_source, "u").exec(subject));
+}
+
+// Back reference does not end in the middle of a surrogate pair.
+test(null, "(L)\\1", "LLT");
+test(["LLTLl", "L", "l"], "(L).*\\1(.)", "LLTLl");
+test(null, "(aL).*\\1", "aLaLT");
+test(["aLaLTaLl", "aL", "l"], "(aL).*\\1(.)", "aLaLTaLl");
+
+var s = "TabcLxLTabcLxTabcLTyTabcLz";
+test([s, "TabcL", "z"], "([^x]+).*\\1(.)", s);
+
+// Back reference does not start in the middle of a surrogate pair.
+test(["TLTabTc", "T", "c"], "(T).*\\1(.)", "TLTabTc");
+
+// Lookbehinds.
+test(null, "(?<=\\1(T)x)", "LTTx");
+test(["", "b", "T"], "(?<=(.)\\2.*(T)x)", "bTaLTTx");
+test(null, "(?<=\\1.*(L)x)", "LTLx");
+test(["", "b", "L"], "(?<=(.)\\2.*(L)x)", "bLaLTLx");
+
+
+test(null, "([^x]+)x*\\1", "LxLT");
+test(null, "([^x]+)x*\\1", "TxLT");
+test(null, "([^x]+)x*\\1", "LTxL");
+test(null, "([^x]+)x*\\1", "LTxT");
+test(null, "([^x]+)x*\\1", "xLxLT");
+test(null, "([^x]+)x*\\1", "xTxLT");
+test(null, "([^x]+)x*\\1", "xLTxL");
+test(null, "([^x]+)x*\\1", "xLTxT");
+test(null, "([^x]+)x*\\1", "xxxLxxLTxx");
+test(null, "([^x]+)x*\\1", "xxxTxxLTxx");
+test(null, "([^x]+)x*\\1", "xxxLTxxLxx");
+test(null, "([^x]+)x*\\1", "xxxLTxxTxx");
+test(["LTTxxLTT", "LTT"], "([^x]+)x*\\1", "xxxLTTxxLTTxx");
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-ignore-case-noi18n.js b/deps/v8/test/mjsunit/harmony/unicode-regexp-ignore-case-noi18n.js
new file mode 100644
index 0000000000..a4cb9dc337
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/unicode-regexp-ignore-case-noi18n.js
@@ -0,0 +1,59 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-unicode-regexps
+
+// Non-unicode uses toUpperCase mappings.
+assertFalse(/[\u00e5]/i.test("\u212b"));
+assertFalse(/[\u212b]/i.test("\u00e5\u1234"));
+assertFalse(/[\u212b]/i.test("\u00e5"));
+
+assertTrue("\u212b".toLowerCase() == "\u00e5");
+assertTrue("\u00c5".toLowerCase() == "\u00e5");
+assertTrue("\u00e5".toUpperCase() == "\u00c5");
+
+// Unicode uses case folding mappings.
+assertFalse(/\u00e5/ui.test("\u212b"));
+assertTrue(/\u00e5/ui.test("\u00c5"));
+assertTrue(/\u00e5/ui.test("\u00e5"));
+assertFalse(/\u00e5/ui.test("\u212b"));
+assertTrue(/\u00c5/ui.test("\u00e5"));
+assertFalse(/\u00c5/ui.test("\u212b"));
+assertTrue(/\u00c5/ui.test("\u00c5"));
+assertFalse(/\u212b/ui.test("\u00c5"));
+assertFalse(/\u212b/ui.test("\u00e5"));
+assertTrue(/\u212b/ui.test("\u212b"));
+
+// Non-BMP.
+assertFalse(/\u{10400}/i.test("\u{10428}"));
+assertFalse(/\u{10400}/ui.test("\u{10428}"));
+assertFalse(/\ud801\udc00/ui.test("\u{10428}"));
+assertFalse(/[\u{10428}]/ui.test("\u{10400}"));
+assertFalse(/[\ud801\udc28]/ui.test("\u{10400}"));
+assertEquals(["\uff21\u{10400}"],
+ /[\uff40-\u{10428}]+/ui.exec("\uff21\u{10400}abc"));
+assertEquals(["abc"], /[^\uff40-\u{10428}]+/ui.exec("\uff21\u{10400}abc\uff23"));
+assertEquals(["\uff53\u24bb"],
+ /[\u24d5-\uff33]+/ui.exec("\uff54\uff53\u24bb\u24ba"));
+
+// Full mappings are ignored.
+assertFalse(/\u00df/ui.test("SS"));
+assertFalse(/\u1f8d/ui.test("\u1f05\u03b9"));
+
+// Simple mappings.
+assertFalse(/\u1f8d/ui.test("\u1f85"));
+
+// Common mappings.
+assertTrue(/\u1f6b/ui.test("\u1f63"));
+
+// Back references.
+assertNull(/(.)\1\1/ui.exec("\u00e5\u212b\u00c5"));
+assertNull(/(.)\1/ui.exec("\u{118aa}\u{118ca}"));
+
+
+// Non-Latin1 maps to Latin1.
+assertNull(/^\u017F/ui.exec("s"));
+assertNull(/^\u017F/ui.exec("s\u1234"));
+assertNull(/^a[\u017F]/ui.exec("as"));
+assertNull(/^a[\u017F]/ui.exec("as\u1234"));
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-ignore-case.js b/deps/v8/test/mjsunit/harmony/unicode-regexp-ignore-case.js
new file mode 100644
index 0000000000..291b8662ff
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/unicode-regexp-ignore-case.js
@@ -0,0 +1,64 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-unicode-regexps
+
+// Non-unicode uses toUpperCase mappings.
+assertFalse(/[\u00e5]/i.test("\u212b"));
+assertFalse(/[\u212b]/i.test("\u00e5\u1234"));
+assertFalse(/[\u212b]/i.test("\u00e5"));
+
+assertTrue("\u212b".toLowerCase() == "\u00e5");
+assertTrue("\u00c5".toLowerCase() == "\u00e5");
+assertTrue("\u00e5".toUpperCase() == "\u00c5");
+
+// Unicode uses case folding mappings.
+assertTrue(/\u00e5/ui.test("\u212b"));
+assertTrue(/\u00e5/ui.test("\u00c5"));
+assertTrue(/\u00e5/ui.test("\u00e5"));
+assertTrue(/\u00e5/ui.test("\u212b"));
+assertTrue(/\u00c5/ui.test("\u00e5"));
+assertTrue(/\u00c5/ui.test("\u212b"));
+assertTrue(/\u00c5/ui.test("\u00c5"));
+assertTrue(/\u212b/ui.test("\u00c5"));
+assertTrue(/\u212b/ui.test("\u00e5"));
+assertTrue(/\u212b/ui.test("\u212b"));
+
+// Non-BMP.
+assertFalse(/\u{10400}/i.test("\u{10428}"));
+assertTrue(/\u{10400}/ui.test("\u{10428}"));
+assertTrue(/\ud801\udc00/ui.test("\u{10428}"));
+assertTrue(/[\u{10428}]/ui.test("\u{10400}"));
+assertTrue(/[\ud801\udc28]/ui.test("\u{10400}"));
+assertEquals(["\uff21\u{10400}"],
+ /[\uff40-\u{10428}]+/ui.exec("\uff21\u{10400}abc"));
+assertEquals(["abc"], /[^\uff40-\u{10428}]+/ui.exec("\uff21\u{10400}abc\uff23"));
+assertEquals(["\uff53\u24bb"],
+ /[\u24d5-\uff33]+/ui.exec("\uff54\uff53\u24bb\u24ba"));
+
+// Full mappings are ignored.
+assertFalse(/\u00df/ui.test("SS"));
+assertFalse(/\u1f8d/ui.test("\u1f05\u03b9"));
+
+// Simple mappings work.
+assertTrue(/\u1f8d/ui.test("\u1f85"));
+
+// Common mappings work.
+assertTrue(/\u1f6b/ui.test("\u1f63"));
+
+// Back references.
+assertEquals(["\u00e5\u212b\u00c5", "\u00e5"],
+ /(.)\1\1/ui.exec("\u00e5\u212b\u00c5"));
+assertEquals(["\u{118aa}\u{118ca}", "\u{118aa}"],
+ /(.)\1/ui.exec("\u{118aa}\u{118ca}"));
+
+// Misc.
+assertTrue(/\u00e5\u00e5\u00e5/ui.test("\u212b\u00e5\u00c5"));
+assertTrue(/AB\u{10400}/ui.test("ab\u{10428}"));
+
+// Non-Latin1 maps to Latin1.
+assertEquals(["s"], /^\u017F/ui.exec("s"));
+assertEquals(["s"], /^\u017F/ui.exec("s\u1234"));
+assertEquals(["as"], /^a[\u017F]/ui.exec("as"));
+assertEquals(["as"], /^a[\u017F]/ui.exec("as\u1234"));
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-last-index.js b/deps/v8/test/mjsunit/harmony/unicode-regexp-last-index.js
new file mode 100644
index 0000000000..4a075d4380
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/unicode-regexp-last-index.js
@@ -0,0 +1,104 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-unicode-regexps --harmony-regexp-lookbehind
+
+var r = /./ug;
+assertEquals(["\ud800\udc00"], r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(2, r.lastIndex);
+r.lastIndex = 1;
+assertEquals(["\ud800\udc00"], r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(2, r.lastIndex);
+assertEquals(["\ud801\udc01"], r.exec("\ud800\udc00\ud801\udc01"));
+r.lastIndex = 3;
+assertEquals(["\ud801\udc01"], r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(4, r.lastIndex);
+r.lastIndex = 4;
+assertNull(r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(0, r.lastIndex);
+r.lastIndex = 5;
+assertNull(r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(0, r.lastIndex);
+
+r.lastIndex = 3;
+assertEquals(["\ud802"], r.exec("\ud800\udc00\ud801\ud802"));
+r.lastIndex = 4;
+assertNull(r.exec("\ud800\udc00\ud801\ud802"));
+
+r = /./g;
+assertEquals(["\ud800"], r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(1, r.lastIndex);
+assertEquals(["\udc00"], r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(2, r.lastIndex);
+assertEquals(["\ud801"], r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(3, r.lastIndex);
+assertEquals(["\udc01"], r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(4, r.lastIndex);
+assertNull(r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(0, r.lastIndex);
+r.lastIndex = 1;
+assertEquals(["\udc00"], r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(2, r.lastIndex);
+
+// ------------------------
+
+r = /^./ug;
+assertEquals(["\ud800\udc00"], r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(2, r.lastIndex);
+r.lastIndex = 1;
+assertEquals(["\ud800\udc00"], r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(2, r.lastIndex);
+assertNull(r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(0, r.lastIndex);
+r.lastIndex = 3;
+assertNull(r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(0, r.lastIndex);
+r.lastIndex = 4;
+assertNull(r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(0, r.lastIndex);
+r.lastIndex = 5;
+assertNull(r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(0, r.lastIndex);
+
+r = /^./g;
+assertEquals(["\ud800"], r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(1, r.lastIndex);
+assertNull(r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(0, r.lastIndex);
+r.lastIndex = 3;
+assertNull(r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(0, r.lastIndex);
+
+// ------------------------
+
+r = /(?:(^.)|.)/ug;
+assertEquals(["\ud800\udc00", "\ud800\udc00"],
+ r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(2, r.lastIndex);
+r.lastIndex = 1;
+assertEquals(["\ud800\udc00", "\ud800\udc00"],
+ r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(2, r.lastIndex);
+assertEquals(["\ud801\udc01", undefined], r.exec("\ud800\udc00\ud801\udc01"));
+r.lastIndex = 3;
+assertEquals(["\ud801\udc01", undefined], r.exec("\ud800\udc00\ud801\udc01"));
+r.lastIndex = 4;
+assertNull(r.exec("\ud800\udc00\ud801\udc01"));
+r.lastIndex = 5;
+assertNull(r.exec("\ud800\udc00\ud801\udc01"));
+
+r.lastIndex = 3;
+assertEquals(["\ud802", undefined], r.exec("\ud800\udc00\ud801\ud802"));
+r.lastIndex = 4;
+assertNull(r.exec("\ud800\udc00\ud801\ud802"));
+
+r = /(?:(^.)|.)/g;
+assertEquals(["\ud800", "\ud800"],
+ r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(1, r.lastIndex);
+assertEquals(["\udc00", undefined], r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(2, r.lastIndex);
+r.lastIndex = 3;
+assertEquals(["\udc01", undefined], r.exec("\ud800\udc00\ud801\udc01"));
+assertEquals(4, r.lastIndex);
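
The invariant being tested: a global u-flag regexp treats lastIndex as a code
point boundary, so a value pointing into the middle of a surrogate pair is
effectively rewound to the pair's lead surrogate, and a successful match leaves
lastIndex past the whole code point. Distilled:

    var re = /./ug;
    re.lastIndex = 1;                  // inside the first surrogate pair
    var m = re.exec("\ud800\udc00X");
    m[0];                              // "\ud800\udc00": match starts at the pair
    re.lastIndex;                      // 2: advanced past the full code point
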
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-property-class.js b/deps/v8/test/mjsunit/harmony/unicode-regexp-property-class.js
new file mode 100644
index 0000000000..323873ab7f
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/unicode-regexp-property-class.js
@@ -0,0 +1,64 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexp-property --harmony-unicode-regexps
+
+assertThrows("/\\p/u");
+assertThrows("/\\p{garbage}/u");
+assertThrows("/\\p{}/u");
+assertThrows("/\\p{/u");
+assertThrows("/\\p}/u");
+assertThrows("/\p{Math}/u");
+assertThrows("/\p{Bidi_M}/u");
+assertThrows("/\p{Hex}/u");
+
+assertTrue(/\p{Ll}/u.test("a"));
+assertFalse(/\P{Ll}/u.test("a"));
+assertTrue(/\P{Ll}/u.test("A"));
+assertFalse(/\p{Ll}/u.test("A"));
+assertTrue(/\p{Ll}/u.test("\u{1D7BE}"));
+assertFalse(/\P{Ll}/u.test("\u{1D7BE}"));
+assertFalse(/\p{Ll}/u.test("\u{1D5E3}"));
+assertTrue(/\P{Ll}/u.test("\u{1D5E3}"));
+
+assertTrue(/\p{Ll}/iu.test("a"));
+assertTrue(/\p{Ll}/iu.test("\u{118D4}"));
+assertTrue(/\p{Ll}/iu.test("A"));
+assertTrue(/\p{Ll}/iu.test("\u{118B4}"));
+assertFalse(/\P{Ll}/iu.test("a"));
+assertFalse(/\P{Ll}/iu.test("\u{118D4}"));
+assertFalse(/\P{Ll}/iu.test("A"));
+assertFalse(/\P{Ll}/iu.test("\u{118B4}"));
+
+assertTrue(/\p{Lu}/u.test("A"));
+assertFalse(/\P{Lu}/u.test("A"));
+assertTrue(/\P{Lu}/u.test("a"));
+assertFalse(/\p{Lu}/u.test("a"));
+assertTrue(/\p{Lu}/u.test("\u{1D5E3}"));
+assertFalse(/\P{Lu}/u.test("\u{1D5E3}"));
+assertFalse(/\p{Lu}/u.test("\u{1D7BE}"));
+assertTrue(/\P{Lu}/u.test("\u{1D7BE}"));
+
+assertTrue(/\p{Lu}/iu.test("a"));
+assertTrue(/\p{Lu}/iu.test("\u{118D4}"));
+assertTrue(/\p{Lu}/iu.test("A"));
+assertTrue(/\p{Lu}/iu.test("\u{118B4}"));
+assertFalse(/\P{Lu}/iu.test("a"));
+assertFalse(/\P{Lu}/iu.test("\u{118D4}"));
+assertFalse(/\P{Lu}/iu.test("A"));
+assertFalse(/\P{Lu}/iu.test("\u{118B4}"));
+
+assertTrue(/\p{Sm}/u.test("+"));
+assertFalse(/\P{Sm}/u.test("+"));
+assertTrue(/\p{Sm}/u.test("\u{1D6C1}"));
+assertFalse(/\P{Sm}/u.test("\u{1D6C1}"));
+
+assertTrue(/\pL/u.test("a"));
+assertFalse(/\PL/u.test("a"));
+assertFalse(/\pL/u.test("1"));
+assertTrue(/\PL/u.test("1"));
+assertTrue(/\pL/u.test("\u1FAB"));
+assertFalse(/\PL/u.test("\u1FAB"));
+assertFalse(/\p{L}/u.test("\uA6EE"));
+assertTrue(/\P{L}/u.test("\uA6EE"));
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-restricted-syntax.js b/deps/v8/test/mjsunit/harmony/unicode-regexp-restricted-syntax.js
new file mode 100644
index 0000000000..d129cc340e
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/unicode-regexp-restricted-syntax.js
@@ -0,0 +1,44 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-unicode-regexps
+
+// test262/data/test/language/literals/regexp/u-dec-esc
+assertThrows("/\\1/u", SyntaxError);
+// test262/language/literals/regexp/u-invalid-char-range-a
+assertThrows("/[\\w-a]/u", SyntaxError);
+// test262/language/literals/regexp/u-invalid-char-range-b
+assertThrows("/[a-\\w]/u", SyntaxError);
+// test262/language/literals/regexp/u-invalid-char-esc
+assertThrows("/\\c/u", SyntaxError);
+assertThrows("/\\c0/u", SyntaxError);
+// test262/built-ins/RegExp/unicode_restricted_quantifiable_assertion
+assertThrows("/(?=.)*/u", SyntaxError);
+// test262/built-ins/RegExp/unicode_restricted_octal_escape
+assertThrows("/[\\1]/u", SyntaxError);
+assertThrows("/\\00/u", SyntaxError);
+assertThrows("/\\09/u", SyntaxError);
+// test262/built-ins/RegExp/unicode_restricted_identity_escape_alpha
+assertThrows("/[\\c]/u", SyntaxError);
+// test262/built-ins/RegExp/unicode_restricted_identity_escape_c
+assertThrows("/[\\c0]/u", SyntaxError);
+// test262/built-ins/RegExp/unicode_restricted_incomplete_quantifier
+assertThrows("/a{/u", SyntaxError);
+assertThrows("/a{1,/u", SyntaxError);
+assertThrows("/{/u", SyntaxError);
+assertThrows("/}/u", SyntaxError);
+// test262/data/test/built-ins/RegExp/unicode_restricted_brackets
+assertThrows("/]/u", SyntaxError);
+// test262/built-ins/RegExp/unicode_identity_escape
+/\//u;
+
+// escaped \0 is allowed inside a character class.
+assertEquals(["\0"], /[\0]/u.exec("\0"));
+// unless it is followed by another digit.
+assertThrows("/[\\00]/u", SyntaxError);
+assertThrows("/[\\01]/u", SyntaxError);
+assertThrows("/[\\09]/u", SyntaxError);
+assertEquals(["\u{0}1\u{0}a\u{0}"], /[1\0a]+/u.exec("b\u{0}1\u{0}a\u{0}2"));
+// escaped \- is allowed inside a character class.
+assertEquals(["-"], /[a\-z]/u.exec("12-34"));
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-unanchored-advance.js b/deps/v8/test/mjsunit/harmony/unicode-regexp-unanchored-advance.js
new file mode 100644
index 0000000000..97960e1cd3
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/unicode-regexp-unanchored-advance.js
@@ -0,0 +1,8 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-unicode-regexps
+
+var s = "a".repeat(1E7) + "\u1234";
+assertEquals(["\u1234", "\u1234"], /(\u1234)/u.exec(s));
diff --git a/deps/v8/test/mjsunit/harmony/unicode-regexp-zero-length.js b/deps/v8/test/mjsunit/harmony/unicode-regexp-zero-length.js
new file mode 100644
index 0000000000..bbc17dc2d5
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/unicode-regexp-zero-length.js
@@ -0,0 +1,58 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-unicode-regexps
+
+var L = "\ud800";
+var T = "\udc00";
+var x = "x";
+
+var r = /()/g; // Global, but not unicode.
+// Zero-length matches do not advance lastIndex.
+assertEquals(["", ""], r.exec(L + T + L + T));
+assertEquals(0, r.lastIndex);
+r.lastIndex = 1;
+assertEquals(["", ""], r.exec(L + T + L + T));
+assertEquals(1, r.lastIndex);
+
+var u = /()/ug; // Global and unicode.
+// Zero-length matches do not advance lastIndex.
+assertEquals(["", ""], u.exec(L + T + L + T));
+assertEquals(0, u.lastIndex);
+u.lastIndex = 1;
+assertEquals(["", ""], u.exec(L + T + L + T));
+assertEquals(0, u.lastIndex);
+
+// However, with repeating matches, lastIndex does not matter.
+// We do advance from match to match.
+r.lastIndex = 2;
+assertEquals(x + L + x + T + x + L + x + T + x,
+ (L + T + L + T).replace(r, "x"));
+
+// With unicode flag, we advance code point by code point.
+u.lastIndex = 3;
+assertEquals(x + L + T + x + L + T + x,
+ (L + T + L + T).replace(u, "x"));
+
+// Test that exhausting the global match cache is fine.
+assertEquals((x + L + T).repeat(1000) + x,
+ (L + T).repeat(1000).replace(u, "x"));
+
+// Same thing for RegExp.prototype.match.
+r.lastIndex = 1;
+assertEquals(["","","","",""], (L + T + L + T).match(r));
+r.lastIndex = 2;
+assertEquals(["","","","",""], (L + T + L + T).match(r));
+
+u.lastIndex = 1;
+assertEquals(["","",""], (L + T + L + T).match(u));
+u.lastIndex = 2;
+assertEquals(["","",""], (L + T + L + T).match(u));
+
+var expected = [];
+for (var i = 0; i <= 1000; i++) expected.push("");
+assertEquals(expected, (L + T).repeat(1000).match(u));
+
+// Also test RegExp.prototype.@@split.
+assertEquals(["\u{12345}"], "\u{12345}".split(/(?:)/u));
diff --git a/deps/v8/test/mjsunit/ignition/dead-code-source-position.js b/deps/v8/test/mjsunit/ignition/dead-code-source-position.js
new file mode 100644
index 0000000000..95bb9183b8
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/dead-code-source-position.js
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() {
+ for (f(x) in []) { f(new f()) }
+}
+
+f();
diff --git a/deps/v8/test/mjsunit/ignition/debug-break-on-stack.js b/deps/v8/test/mjsunit/ignition/debug-break-on-stack.js
new file mode 100644
index 0000000000..d2577b38de
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/debug-break-on-stack.js
@@ -0,0 +1,48 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var Debug = debug.Debug;
+
+var break_count = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ break_count++;
+ var line = exec_state.frame(0).sourceLineText();
+ print(line);
+ assertTrue(line.indexOf(`B${break_count}`) > 0);
+ } catch (e) {
+ exception = e;
+ }
+}
+
+
+function g() {
+ setbreaks();
+ throw 1; // B1
+}
+
+function f() {
+ try {
+ g();
+ } catch (e) {}
+ return 2; // B2
+}
+
+function setbreaks() {
+ Debug.setListener(listener);
+ Debug.setBreakPoint(g, 2, 0);
+ Debug.setBreakPoint(f, 4, 0);
+}
+
+f();
+
+assertEquals(2, break_count);
+assertNull(exception);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/ignition/debug-break.js b/deps/v8/test/mjsunit/ignition/debug-break.js
new file mode 100644
index 0000000000..8237d4a552
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/debug-break.js
@@ -0,0 +1,46 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var Debug = debug.Debug;
+
+var break_count = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ break_count++;
+ var line = exec_state.frame(0).sourceLineText();
+ assertTrue(line.indexOf(`B${break_count}`) > 0);
+ } catch (e) {
+ exception = e;
+ }
+}
+
+Debug.setListener(listener);
+
+function g() {
+ throw 1;
+}
+
+function f() {
+ try {
+ g(); // B1
+ } catch (e) {}
+ assertEquals(2, break_count); // B2
+ return 1; // B3
+}
+
+Debug.setBreakPoint(f, 2, 0);
+Debug.setBreakPoint(f, 4, 1);
+Debug.setBreakPoint(f, 5, 1);
+
+f();
+
+assertEquals(3, break_count);
+assertNull(exception);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/ignition/debugger-statement.js b/deps/v8/test/mjsunit/ignition/debugger-statement.js
new file mode 100644
index 0000000000..9c2204e4d2
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/debugger-statement.js
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --ignition-filter=f --expose-debug-as debug
+
+var Debug = debug.Debug;
+
+var break_count = 0;
+
+function f() {
+ debugger;
+}
+
+function listener(event, exec_data) {
+ if (event != Debug.DebugEvent.Break) return;
+ break_count++;
+}
+
+f();
+assertEquals(0, break_count);
+
+Debug.setListener(listener);
+
+f();
+assertEquals(1, break_count);
+
+Debug.setListener(null);
+
+f();
+assertEquals(1, break_count);
diff --git a/deps/v8/test/mjsunit/ignition/stack-trace-source-position.js b/deps/v8/test/mjsunit/ignition/stack-trace-source-position.js
new file mode 100644
index 0000000000..ce236c398c
--- /dev/null
+++ b/deps/v8/test/mjsunit/ignition/stack-trace-source-position.js
@@ -0,0 +1,21 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --ignition-filter=f
+// Flags: --no-turbo
+
+// TODO(yangguo): fix for turbofan
+
+function f(x) {
+ if (x == 0) {
+ return new Error().stack;
+ }
+ return f(x - 1);
+}
+
+var stack_lines = f(2).split("\n");
+
+assertTrue(/at f \(.*?:12:12\)/.test(stack_lines[1]));
+assertTrue(/at f \(.*?:14:10\)/.test(stack_lines[2]));
+assertTrue(/at f \(.*?:14:10\)/.test(stack_lines[3]));
diff --git a/deps/v8/test/mjsunit/messages.js b/deps/v8/test/mjsunit/messages.js
index 8da7e6bd7b..7deef02615 100644
--- a/deps/v8/test/mjsunit/messages.js
+++ b/deps/v8/test/mjsunit/messages.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --stack-size=100 --harmony --harmony-reflect --harmony-regexps
-// Flags: --harmony-simd --strong-mode
+// Flags: --harmony-simd
function test(f, expected, type) {
try {
@@ -150,7 +150,7 @@ TypeError);
// kInstanceofFunctionExpected
test(function() {
1 instanceof 1;
-}, "Expecting a function in instanceof check, but got 1", TypeError);
+}, "Expecting an object in instanceof check", TypeError);
// kInstanceofNonobjectProto
test(function() {
@@ -305,12 +305,6 @@ test(function() {
(1).a = 1;
}, "Cannot create property 'a' on number '1'", TypeError);
-// kStrongImplicitCast
-test(function() {
- "use strong";
- "a" + 1;
-}, "In strong mode, implicit conversions are deprecated", TypeError);
-
// kSymbolToString
test(function() {
"" + Symbol();
@@ -345,36 +339,35 @@ test(function() {
eval("/a/x.test(\"a\");");
}, "Invalid regular expression flags", SyntaxError);
-// kMalformedRegExp
-test(function() {
- /(/.test("a");
-}, "Invalid regular expression: /(/: Unterminated group", SyntaxError);
-
-// kParenthesisInArgString
-test(function() {
- new Function(")", "");
-}, "Function arg string contains parenthesis", SyntaxError);
-
-// kUnexpectedEOS
+// kJsonParseUnexpectedEOS
test(function() {
JSON.parse("{")
-}, "Unexpected end of input", SyntaxError);
+}, "Unexpected end of JSON input", SyntaxError);
-// kUnexpectedToken
+// kJsonParseUnexpectedTokenAt
test(function() {
JSON.parse("/")
-}, "Unexpected token /", SyntaxError);
+}, "Unexpected token / in JSON at position 0", SyntaxError);
-// kUnexpectedTokenNumber
+// kJsonParseUnexpectedTokenNumberAt
test(function() {
JSON.parse("{ 1")
-}, "Unexpected number", SyntaxError);
+}, "Unexpected number in JSON at position 2", SyntaxError);
-// kUnexpectedTokenString
+// kJsonParseUnexpectedTokenStringAt
test(function() {
JSON.parse('"""')
-}, "Unexpected string", SyntaxError);
+}, "Unexpected string in JSON at position 2", SyntaxError);
+
+// kMalformedRegExp
+test(function() {
+ /(/.test("a");
+}, "Invalid regular expression: /(/: Unterminated group", SyntaxError);
+// kParenthesisInArgString
+test(function() {
+ new Function(")", "");
+}, "Function arg string contains parenthesis", SyntaxError);
// === ReferenceError ===
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 95e8da1cb2..e638f5645b 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -43,6 +43,9 @@
# This test non-deterministically runs out of memory on Windows ia32.
'regress/regress-crbug-160010': [SKIP],
+ # Issue 4698: not fully supported by Turbofan yet
+ 'es6/tail-call': [PASS, NO_VARIANTS],
+
# Issue 3389: deopt_every_n_garbage_collections is unsafe
'regress/regress-2653': [SKIP],
@@ -118,7 +121,18 @@
'debug-listbreakpoints': [PASS, NO_VARIANTS], # arm64 nosnap with turbofan
'debug-enable-disable-breakpoints': [PASS, NO_VARIANTS], # arm64 nosnap with turbofan.
- # TODO(rossberg)
+ # Issue 3956: Strong mode is being deprecated. The expectations inside the
+ # following tests should be updated once deprecation is complete.
+ 'strong/destructuring': [SKIP],
+ 'strong/implicit-conversions': [SKIP],
+ 'strong/implicit-conversions-count': [SKIP],
+ 'strong/implicit-conversions-inlining': [SKIP],
+ 'strong/load-builtins': [SKIP],
+ 'strong/load-element': [SKIP],
+ 'strong/load-element-mutate-backing-store': [SKIP],
+ 'strong/load-property': [SKIP],
+ 'strong/load-property-mutate-backing-store': [SKIP],
+ 'strong/load-super': [SKIP],
'strong/literals': [SKIP], # Rest arguments do not respect strongness in Turbofan.
# Issue 4035: unexpected frame->context() in debugger
@@ -193,7 +207,8 @@
'regress/regress-crbug-491062': [PASS, NO_VARIANTS],
# Issue 488: this test sometimes times out.
- 'array-constructor': [PASS, TIMEOUT],
+ # TODO(arm): This seems to flush out a bug on arm with simulator.
+ 'array-constructor': [PASS, TIMEOUT, ['arch == arm and simulator == True', SKIP]],
# Issue 4413: this test sometimes times out with TSAN because we trigger
# the slow path in C++ with holey arrays in Function.prototype.apply.
@@ -281,11 +296,16 @@
'readonly': [PASS, SLOW],
'regress/regress-1200351': [PASS, ['mode == debug', SLOW]],
'regress/regress-crbug-474297': [PASS, ['mode == debug', SLOW]],
- 'strong/implicit-conversions': [PASS, SLOW],
- 'strong/load-element-mutate-backing-store': [PASS, SLOW],
- # TODO(bradnelson): Enable tests in a separate change.
- 'wasm/*': [SKIP],
+ # TODO(titzer): correct WASM adapter frame alignment on arm64
+ 'wasm/*': [PASS, ['arch == arm64', SKIP]],
+ 'wasm/asm-wasm': [PASS, ['arch == arm or arch == arm64', SKIP]],
+
+ # Case-insensitive unicode regexp relies on case mapping provided by ICU.
+ 'harmony/unicode-regexp-ignore-case': [PASS, ['no_i18n == True', FAIL]],
+ 'harmony/unicode-regexp-ignore-case-noi18n': [FAIL, ['no_i18n == True', PASS]],
+ # Desugaring regexp property class relies on ICU.
+ 'harmony/unicode-regexp-property-class': [PASS, ['no_i18n == True', FAIL]],
}], # ALWAYS
['novfp3 == True', {
@@ -362,6 +382,9 @@
# BUG(v8:3097)
'debug-references': [SKIP],
+
+ # BUG(v8:4754).
+ 'debug-referenced-by': [PASS, NO_VARIANTS],
}], # 'gc_stress == True'
##############################################################################
@@ -376,6 +399,8 @@
'asm/embenchen/*': [SKIP],
'asm/poppler/*': [SKIP],
'asm/sqlite3/*': [SKIP],
+ # TODO(mips-team): Fix Wasm for big-endian.
+ 'wasm/*': [SKIP],
}], # 'byteorder == big'
##############################################################################
@@ -578,6 +603,7 @@
['arch == x87', {
# Turbofan will hit the known issue that x87 changes sNaN to qNaN by default.
'regress/regress-undefined-nan': [SKIP],
+ 'regress/regress-crbug-242924': [SKIP],
}], # 'arch == x87'
##############################################################################
@@ -727,6 +753,7 @@
# Skip tests that are known to be non-deterministic.
'd8-worker-sharedarraybuffer': [SKIP],
+ 'd8-os': [SKIP],
}], # 'predictable == True'
##############################################################################
@@ -737,377 +764,150 @@
}], # 'arch == ppc and simulator_run == True'
['ignition == True', {
- 'const*': [SKIP],
- 'debug-*': [SKIP],
- 'es6/*': [SKIP],
- 'es7/*': [SKIP],
+ # Skip strong mode tests since strong mode is unsupported on ignition.
'strong/*': [SKIP],
- 'harmony/*': [SKIP],
+
+ # TODO(yangguo,4690): Requires debugger support.
+ 'es6/debug*': [SKIP],
+ 'harmony/debug*': [SKIP],
'regress/debug*': [SKIP],
'regress/regress-debug*': [SKIP],
- # TODO(bradnelson): Figure out why these tests fail with ignition.
- 'wasm/*': [SKIP],
-
- 'allocation-folding': [SKIP],
- 'api-call-after-bypassed-exception': [SKIP],
- 'apply-arguments-gc-safepoint': [SKIP],
- 'arguments-load-across-eval': [SKIP],
- 'arguments-read-and-assignment': [SKIP],
- 'array-bounds-check-removal': [SKIP],
- 'array-elements-from-array-prototype-chain': [SKIP],
- 'array-functions-prototype-misc': [SKIP],
- 'array-join': [SKIP],
- 'array-literal-feedback': [SKIP],
+ # TODO(yangguo,4690): assertion failures in debugger tests.
+ 'debug-allscopes-on-debugger': [FAIL],
+ 'debug-liveedit-restart-frame': [FAIL],
+ 'debug-return-value': [FAIL],
+ 'debug-liveedit-literals': [FAIL],
+ 'debug-liveedit-3': [FAIL],
+ 'debug-liveedit-1': [FAIL],
+ 'debug-step-into-json': [FAIL],
+ 'debug-liveedit-patch-positions-replace': [FAIL],
+ 'debug-step-into-valueof': [FAIL],
+ 'debug-liveedit-patch-positions': [FAIL],
+ 'debug-liveedit-stepin': [FAIL],
+ 'debug-step-4': [FAIL],
+ 'debug-liveedit-newsource': [FAIL],
+ 'debug-liveedit-stack-padding': [FAIL],
+ 'debug-stepframe': [FAIL],
+ 'debug-negative-break-points': [FAIL],
+ 'debug-stepin-accessor': [FAIL],
+ 'debug-step-stub-callfunction': [FAIL],
+ 'debug-liveedit-breakpoints': [FAIL],
+ 'debug-stepin-accessor-ic': [FAIL],
+ 'debug-stepin-builtin': [FAIL],
+ 'debug-stepin-foreach': [FAIL],
+ 'debug-stepnext-do-while': [FAIL],
+ 'debug-stepin-builtin-callback-opt': [FAIL],
+ 'debug-stepin-function-call': [FAIL],
+
+ # TODO(yangguo,4690): Check failure in debug.cc BreakLocation::SetBreakPoint
+ # DCHECK(IsDebugBreak() || IsDebuggerStatement());
+ 'regress/regress-1523': [FAIL],
+ 'regress/regress-102153': [FAIL],
+ 'regress/regress-2825': [FAIL],
+ 'regress/regress-crbug-119800': [FAIL],
+ 'regress/regress-crbug-467180': [FAIL],
+ 'regress/regress-opt-after-debug-deopt': [FAIL],
+
+ # TODO(rmcilroy,4681): Requires support for generators.
+ 'messages': [FAIL],
+ 'es6/array-from': [FAIL],
+ 'regress-3225': [FAIL],
+ 'es6/classes-subclass-builtins': [FAIL],
+ 'es6/computed-property-names-classes': [FAIL],
+ 'es6/computed-property-names-object-literals-methods': [FAIL],
+ 'es6/function-length-configurable': [FAIL],
+ 'es6/generators-poisoned-properties': [FAIL],
+ 'es6/generators-runtime': [FAIL],
+ 'es6/generators-objects': [FAIL],
+ 'es6/generators-parsing': [FAIL],
+ 'es6/generators-iteration': [FAIL],
+ 'es6/generators-states': [FAIL],
+ 'es6/iteration-semantics': [FAIL],
+ 'es6/iterator-prototype': [FAIL],
+ 'es6/generators-mirror': [FAIL],
+ 'es6/object-literals-method': [FAIL],
+ 'es6/object-literals-super': [FAIL],
+ 'es6/generators-relocation': [FAIL],
+ 'es6/spread-array': [FAIL],
+ 'es6/generators-debug-liveedit': [FAIL],
+ 'es6/spread-call': [FAIL],
+ 'es6/typedarray-from': [FAIL],
+ 'es6/typedarray': [FAIL],
+ 'es6/regress/regress-2681': [FAIL],
+ 'es6/regress/regress-2691': [FAIL],
+ 'es6/regress/regress-3280': [FAIL],
+ 'harmony/destructuring-assignment': [FAIL],
+ 'harmony/function-sent': [FAIL],
+ 'harmony/reflect-enumerate-delete': [FAIL],
+ 'harmony/reflect-enumerate-special-cases': [FAIL],
+ 'harmony/proxies-enumerate': [FAIL],
+ 'harmony/reflect-enumerate-opt': [FAIL],
+ 'harmony/reflect-enumerate': [FAIL],
+ 'harmony/destructuring': [FAIL],
+ 'harmony/regress/regress-4482': [FAIL],
+ 'harmony/generators': [FAIL],
+ 'harmony/iterator-close': [FAIL],
+ 'harmony/reflect-construct': [FAIL],
+ 'es6/promises': [FAIL],
+
+ # TODO(rmcilroy,4680): Check failed in
+ # BytecodeGenerator::VisitFunctionLiteral - !shared_info.is_null().
+ 'regress/regress-crbug-429159': [FAIL],
+
+ # TODO(rmcilroy,4680): Pass on debug, fail on release.
+ 'compiler/regress-stacktrace-methods': [PASS, ['mode == release', FAIL]],
+
+ # TODO(rmcilroy,4680): Test assert failures.
+ 'array-literal-feedback': [FAIL],
+ 'undetectable-compare': [FAIL],
+ 'debug-liveedit-2': [FAIL],
+ 'compiler/deopt-tonumber-compare': [FAIL],
+ 'es6/string-search': [FAIL],
+ 'es6/mirror-collections': [FAIL],
+ 'es6/regress/regress-468661': [FAIL],
+ 'harmony/string-replace': [FAIL],
+ 'harmony/string-match': [FAIL],
+ 'harmony/string-split': [FAIL],
+ 'regress/regress-2618': [FAIL],
+ 'regress/regress-4121': [FAIL],
+ 'regress/regress-4266': [FAIL],
+ 'harmony/simd': [FAIL],
+ 'regress/regress-crbug-109362': [FAIL],
+ 'regress/regress-crbug-568477-2': [FAIL],
+ 'regress/regress-crbug-568477-3': [FAIL],
+ 'regress/regress-crbug-568477-1': [FAIL],
+ 'regress/regress-2318': [FAIL],
+
+ # TODO(rmcilroy, 4680): new ES6 instanceof support
+ 'harmony/instanceof-es6': [SKIP],
+
+ # TODO(rmcilroy,4680): Test timeouts.
'array-literal-transitions': [SKIP],
- 'array-tostring': [SKIP],
- 'break': [SKIP],
- 'call-runtime-tail': [SKIP],
- 'compiler/compare-map-elim2': [SKIP],
- 'compiler/deopt-inlined-smi': [SKIP],
- 'compiler/deopt-tonumber-compare': [SKIP],
- 'compiler/escape-analysis-arguments': [SKIP],
- 'compiler/escape-analysis': [SKIP],
- 'compiler/expression-trees': [SKIP],
- 'compiler/inline-arguments': [SKIP],
- 'compiler/inline-arity-mismatch': [SKIP],
- 'compiler/inline-construct': [SKIP],
- 'compiler/lazy-deopt-in-literal': [SKIP],
- 'compiler/manual-concurrent-recompile': [SKIP],
- 'compiler/optimized-for-in': [SKIP],
- 'compiler/optimized-function-calls': [SKIP],
- 'compiler/optimize_max': [SKIP],
- 'compiler/optimize_min': [SKIP],
- 'compiler/opt-next-call-turbo': [SKIP],
- 'compiler/osr-forof': [SKIP],
- 'compiler/property-refs': [SKIP],
- 'compiler/regress-3786': [SKIP],
- 'compiler/regress-446647': [SKIP],
- 'compiler/regress-447567': [SKIP],
- 'compiler/regress-469089': [SKIP],
- 'compiler/regress-96989': [SKIP],
- 'compiler/regress-const': [SKIP],
- 'compiler/regress-funarguments': [SKIP],
- 'compiler/regress-stacktrace-methods': [SKIP],
- 'compiler/regress-variable-liveness': [SKIP],
- 'compiler/rotate': [SKIP],
- 'compiler/safepoint': [SKIP],
- 'compiler/try-deopt': [SKIP],
- 'compiler/try-osr': [SKIP],
- 'compiler/uint32': [SKIP],
- 'compiler/variables': [SKIP],
- 'context-calls-maintained': [SKIP],
- 'contextual-calls': [SKIP],
- 'cross-realm-filtering': [SKIP],
- 'cyclic-array-to-string': [SKIP],
- 'd8-worker-sharedarraybuffer': [SKIP],
- 'delete-in-with': [SKIP],
- 'deopt-minus-zero': [SKIP],
- 'deserialize-optimize-inner': [SKIP],
- 'double-equals': [SKIP],
- 'eval-enclosing-function-name': [SKIP],
- 'eval-stack-trace': [SKIP],
- 'fast-prototype': [SKIP],
- 'field-type-tracking': [SKIP],
- 'for-in-opt': [SKIP],
- 'for-in-special-cases': [SKIP],
- 'function-call': [SKIP],
- 'get-caller-js-function': [SKIP],
- 'get-prototype-of': [SKIP],
- 'getter-in-prototype': [SKIP],
- 'global-hash': [SKIP],
- 'global-load-from-eval-in-with': [SKIP],
- 'global-vars-with': [SKIP],
- 'instanceof-2': [SKIP],
- 'json-replacer-number-wrapper-tostring': [SKIP],
- 'json-replacer-order': [SKIP],
- 'json': [SKIP],
- 'keyed-load-with-symbol-key': [SKIP],
- 'local-load-from-eval': [SKIP],
- 'math-min-max': [SKIP],
- 'messages': [SKIP],
- 'mirror-object': [SKIP],
- 'object-literal-gc': [SKIP],
- 'osr-elements-kind': [SKIP],
- 'property-load-across-eval': [SKIP],
- 'proto-accessor': [SKIP],
- 'readonly': [SKIP],
- 'receiver-in-with-calls': [SKIP],
- 'regress-3225': [SKIP],
- 'regress/clear-keyed-call': [SKIP],
- 'regress/poly_count_operation': [SKIP],
- 'regress/regress-102153': [SKIP],
- 'regress/regress-1030466': [SKIP],
- 'regress/regress-1079': [SKIP],
- 'regress/regress-109195': [SKIP],
- 'regress/regress-1114040': [SKIP],
- 'regress/regress-1125': [SKIP],
- 'regress/regress-1129': [SKIP],
- 'regress/regress-1170187': [SKIP],
- 'regress/regress-117409': [SKIP],
- 'regress/regress-1177809': [SKIP],
- 'regress/regress-119609': [SKIP],
- 'regress/regress-123919': [SKIP],
- 'regress/regress-124594': [SKIP],
- 'regress/regress-125515': [SKIP],
- 'regress/regress-128018': [SKIP],
- 'regress/regress-131994': [SKIP],
- 'regress/regress-133211b': [SKIP],
- 'regress/regress-1365': [SKIP],
- 'regress/regress-1369': [SKIP],
- 'regress/regress-1403': [SKIP],
- 'regress/regress-1412': [SKIP],
- 'regress/regress-1436': [SKIP],
- 'regress/regress-1493017': [SKIP],
- 'regress/regress-1523': [SKIP],
- 'regress/regress-1560': [SKIP],
- 'regress/regress-1586': [SKIP],
- 'regress/regress-1639-2': [SKIP],
- 'regress/regress-1639': [SKIP],
- 'regress/regress-166553': [SKIP],
- 'regress/regress-1708': [SKIP],
- 'regress/regress-1757': [SKIP],
- 'regress/regress-1790': [SKIP],
- 'regress/regress-1853': [SKIP],
- 'regress/regress-1980': [SKIP],
- 'regress/regress-2054': [SKIP],
- 'regress/regress-2071': [SKIP],
- 'regress/regress-2163': [SKIP],
- 'regress/regress-220': [SKIP],
- 'regress/regress-2318': [SKIP],
- 'regress/regress-2339': [SKIP],
- 'regress/regress-2374': [SKIP],
- 'regress/regress-2593': [SKIP],
- 'regress/regress-2618': [SKIP],
- 'regress/regress-263': [SKIP],
- 'regress/regress-265': [SKIP],
- 'regress/regress-269': [SKIP],
- 'regress/regress-2790': [SKIP],
- 'regress/regress-2825': [SKIP],
- 'regress/regress-3135': [SKIP],
- 'regress/regress-3138': [SKIP],
- 'regress/regress-318420': [SKIP],
- 'regress/regress-320532': [SKIP],
- 'regress/regress-3281': [SKIP],
- 'regress/regress-331444': [SKIP],
- 'regress/regress-343609': [SKIP],
- 'regress/regress-347530': [SKIP],
- 'regress/regress-347914': [SKIP],
- 'regress/regress-351261': [SKIP],
- 'regress/regress-352982': [SKIP],
- 'regress/regress-353551': [SKIP],
- 'regress/regress-354357': [SKIP],
- 'regress/regress-356053': [SKIP],
- 'regress/regress-357105': [SKIP],
- 'regress/regress-359441': [SKIP],
- 'regress/regress-361025': [SKIP],
- 'regress/regress-3621': [SKIP],
- 'regress/regress-365172-3': [SKIP],
- 'regress/regress-370827': [SKIP],
- 'regress/regress-377290': [SKIP],
- 'regress/regress-3859': [SKIP],
- 'regress/regress-3884': [SKIP],
- 'regress/regress-3926': [SKIP],
- 'regress/regress-3960': [SKIP],
- 'regress/regress-3969': [SKIP],
- 'regress/regress-3985': [SKIP],
- 'regress/regress-4023': [SKIP],
- 'regress/regress-4027': [SKIP],
- 'regress/regress-403292': [SKIP],
- 'regress/regress-410912': [SKIP],
- 'regress/regress-4121': [SKIP],
- 'regress/regress-419663': [SKIP],
- 'regress/regress-4255-4': [SKIP],
- 'regress/regress-430201b': [SKIP],
- 'regress/regress-430201': [SKIP],
- 'regress/regress-4309-3': [SKIP],
- 'regress/regress-4320': [SKIP],
- 'regress/regress-4325': [SKIP],
- 'regress/regress-436893': [SKIP],
- 'regress/regress-4374': [SKIP],
- 'regress/regress-4388': [SKIP],
- 'regress/regress-444805': [SKIP],
- 'regress/regress-446389': [SKIP],
- 'regress/regress-447756': [SKIP],
- 'regress/regress-4515': [SKIP],
- 'regress/regress-4521': [SKIP],
- 'regress/regress-4525': [SKIP],
- 'regress/regress-453481': [SKIP],
- 'regress/regress-4534': [SKIP],
- 'regress/regress-454725': [SKIP],
- 'regress/regress-457935': [SKIP],
- 'regress/regress-470804': [SKIP],
- 'regress/regress-476488': [SKIP],
- 'regress/regress-503565': [SKIP],
- 'regress/regress-514362': [SKIP],
- 'regress/regress-520029': [SKIP],
- 'regress/regress-542100': [SKIP],
- 'regress/regress-544991': [SKIP],
- 'regress/regress-568765': [SKIP],
- 'regress/regress-572589': [SKIP],
- 'regress/regress-580': [SKIP],
- 'regress/regress-618': [SKIP],
- 'regress/regress-69': [SKIP],
- 'regress/regress-70066': [SKIP],
- 'regress/regress-747': [SKIP],
- 'regress/regress-753': [SKIP],
- 'regress/regress-799761': [SKIP],
- 'regress/regress-806473': [SKIP],
- 'regress/regress-842017': [SKIP],
- 'regress/regress-84234': [SKIP],
- 'regress/regress-88858': [SKIP],
- 'regress/regress-94425': [SKIP],
- 'regress/regress-94873': [SKIP],
- 'regress/regress-95485': [SKIP],
- 'regress/regress-97116b': [SKIP],
- 'regress/regress-97116': [SKIP],
- 'regress/regress-974': [SKIP],
- 'regress/regress-99167': [SKIP],
- 'regress/regress-998565': [SKIP],
- 'regress/regress-arg-materialize-store': [SKIP],
- 'regress/regress-arguments-gc': [SKIP],
- 'regress/regress-assignment-in-test-context': [SKIP],
- 'regress/regress-bce-underflow': [SKIP],
- 'regress/regress-cnlt-elements': [SKIP],
- 'regress/regress-cnlt-enum-indices': [SKIP],
- 'regress/regress-cntl-descriptors-enum': [SKIP],
- 'regress/regress-conditional-position': [SKIP],
- 'regress/regress-convert-enum': [SKIP],
- 'regress/regress-crbug-109362': [SKIP],
- 'regress/regress-crbug-119800': [SKIP],
- 'regress/regress-crbug-163530': [SKIP],
- 'regress/regress-crbug-229923': [SKIP],
- 'regress/regress-crbug-242502': [SKIP],
- 'regress/regress-crbug-242924': [SKIP],
- 'regress/regress-crbug-245480': [SKIP],
- 'regress/regress-crbug-350864': [SKIP],
- 'regress/regress-crbug-351262': [SKIP],
- 'regress/regress-crbug-352058': [SKIP],
- 'regress/regress-crbug-357137': [SKIP],
- 'regress/regress-crbug-385002': [SKIP],
- 'regress/regress-crbug-387599': [SKIP],
- 'regress/regress-crbug-405517': [SKIP],
- 'regress/regress-crbug-405922': [SKIP],
- 'regress/regress-crbug-409614': [SKIP],
- 'regress/regress-crbug-410033': [SKIP],
- 'regress/regress-crbug-412208': [SKIP],
- 'regress/regress-crbug-416558': [SKIP],
- 'regress/regress-crbug-424142': [SKIP],
- 'regress/regress-crbug-429159': [SKIP],
- 'regress/regress-crbug-431602': [SKIP],
- 'regress/regress-crbug-432493': [SKIP],
- 'regress/regress-crbug-450642': [SKIP],
- 'regress/regress-crbug-455644': [SKIP],
- 'regress/regress-crbug-465298': [SKIP],
- 'regress/regress-crbug-467180': [SKIP],
- 'regress/regress-crbug-467531': [SKIP],
- 'regress/regress-crbug-474297': [SKIP],
- 'regress/regress-crbug-480819': [SKIP],
- 'regress/regress-crbug-481896': [SKIP],
- 'regress/regress-crbug-485548-1': [SKIP],
- 'regress/regress-crbug-485548-2': [SKIP],
- 'regress/regress-crbug-487289': [SKIP],
- 'regress/regress-crbug-489293': [SKIP],
- 'regress/regress-crbug-489597': [SKIP],
- 'regress/regress-crbug-498142': [SKIP],
- 'regress/regress-crbug-501809': [SKIP],
- 'regress/regress-crbug-506443': [SKIP],
- 'regress/regress-crbug-507070': [SKIP],
'regress/regress-crbug-517592': [SKIP],
- 'regress/regress-crbug-522895': [SKIP],
- 'regress/regress-crbug-527364': [SKIP],
- 'regress/regress-crbug-546968': [SKIP],
- 'regress/regress-crbug-568477-1': [SKIP],
- 'regress/regress-crbug-568477-2': [SKIP],
- 'regress/regress-crbug-568477-3': [SKIP],
'regress/regress-crbug-568477-4': [SKIP],
- 'regress/regress-crbug-572590': [SKIP],
- 'regress/regress-crbug-573857': [SKIP],
- 'regress/regress-crbug-575080': [SKIP],
- 'regress/regress-deopt-gcb': [SKIP],
- 'regress/regress-deopt-gc': [SKIP],
- 'regress/regress-deopt-in-array-literal-spread': [SKIP],
- 'regress/regress-embedded-cons-string': [SKIP],
- 'regress/regress-existing-shared-function-info': [SKIP],
- 'regress/regress-fast-literal-transition': [SKIP],
- 'regress/regress-function-constructor-receiver': [SKIP],
- 'regress/regress-handle-illegal-redeclaration': [SKIP],
- 'regress/regress-inline-class-constructor': [SKIP],
- 'regress/regress-inlining-function-literal-context': [SKIP],
- 'regress/regress-latin-1': [SKIP],
- 'regress/regress-lazy-deopt-reloc': [SKIP],
- 'regress/regress-opt-after-debug-deopt': [SKIP],
- 'regress/regress-osr-in-case-label': [SKIP],
- 'regress/regress-osr-in-literal': [SKIP],
- 'regress/regress-prepare-break-while-recompile': [SKIP],
- 'regress/regress-put-prototype-transition': [SKIP],
- 'regress/regress-sliced-external-cons-regexp': [SKIP],
- 'regress/regress-store-heapobject': [SKIP],
- 'regress/regress-transcendental': [SKIP],
- 'regress/regress-typedarray-length': [SKIP],
- 'regress/splice-missing-wb': [SKIP],
- 'setter-on-constructor-prototype': [SKIP],
- 'shift-for-integer-div': [SKIP],
- 'simple-constructor': [SKIP],
- 'sparse-array-reverse': [SKIP],
- 'stack-traces': [SKIP],
- 'strict-mode': [SKIP],
- 'string-case': [SKIP],
- 'string-external-cached': [SKIP],
- 'string-externalize': [SKIP],
- 'string-natives': [SKIP],
- 'string-replace-with-empty': [SKIP],
- 'string-slices': [SKIP],
- 'tools/profile': [SKIP],
- 'tools/profviz': [SKIP],
- 'try-finally-continue': [SKIP],
- 'try': [SKIP],
- 'undetectable-compare': [SKIP],
- 'unused-context-in-with': [SKIP],
- 'value-wrapper': [SKIP],
- 'with-function-expression': [SKIP],
- 'with-parameter-access': [SKIP],
- 'with-prototype': [SKIP],
- 'with-readonly': [SKIP],
- 'with-value': [SKIP],
- 'regress/regress-builtinbust-7': [SKIP],
- 'regress/regress-crbug-451770': [SKIP],
- 'regress/regress-crbug-503968': [SKIP],
- 'regress/regress-crbug-504729': [SKIP],
+ 'regress/regress-crbug-409614': [SKIP],
+ 'regress/regress-crbug-42414': [SKIP],
+ 'regress/regress-1853': [SKIP],
+ 'regress/regress-crbug-424142': [SKIP],
}], # ignition == True
+['ignition == True and arch == arm64', {
+ # TODO(rmcilroy,4680): Fails on Arm64 due to expecting to take less than 3
+ # seconds.
+ 'regress/regress-165637': [FAIL],
+}], # ignition == True and arch == arm64
+
['ignition == True and (arch == arm or arch == arm64)', {
- 'array-constructor': [SKIP],
- 'array-sort': [SKIP],
- 'array-store-and-grow': [SKIP],
- 'compiler/division-by-constant': [SKIP],
- 'compiler/osr-big': [SKIP],
+ # TODO(rmcilroy,4680): Arm / Arm64 specific timeouts.
+ 'asm/construct-double': [SKIP],
'compiler/osr-nested': [SKIP],
'compiler/osr-one': [SKIP],
'compiler/osr-two': [SKIP],
- 'mul-exhaustive-part*': [SKIP],
'regress/regress-1257': [SKIP],
'regress/regress-165637': [SKIP],
- 'regress/regress-319722-ArrayBuffer': [SKIP],
- 'regress/regress-411210': [SKIP],
+ 'regress/regress-2185': [SKIP],
'regress/regress-91008': [SKIP],
- 'regress/regress-crbug-347903': [SKIP],
- 'regress/regress-crbug-500497': [SKIP],
- 'regress/regress-crbug-505007-1': [SKIP],
- 'regress/regress-crbug-505007-2': [SKIP],
- 'regress/regress-2193': [SKIP],
- 'regress/regress-3158': [SKIP],
- 'regress/regress-347904': [SKIP],
- 'regress/regress-380092': [SKIP],
- 'regress/regress-4173': [SKIP],
- 'regress/regress-copy-hole-to-field': [SKIP],
- 'regress/regress-crbug-315252': [SKIP],
- 'regress/regress-crbug-412215': [SKIP],
- 'regress/regress-crbug-513507': [SKIP],
- 'regress/regress-deep-proto': [SKIP],
- 'regress/regress-deopt-store-effect': [SKIP],
- 'regress/regress-undefined-store-keyed-fast-element': [SKIP],
- 'stack-traces-overflow': [SKIP],
'unicodelctest': [SKIP],
'unicodelctest-no-optimization': [SKIP],
}], # ignition == True and (arch == arm or arch == arm64)
@@ -1118,6 +918,9 @@
'array-functions-prototype-misc': [SKIP],
'strong/implicit-conversions': [SKIP],
'strong/load-element-mutate-backing-store': [SKIP],
+
+ # Stack overflow.
+ 'big-array-literal': [SKIP],
}], # 'gcov_coverage'
]
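For context, the test-status entries above all use the same Python-literal schema: a guard expression over build variables, followed by a dict mapping test paths to outcome lists. A hedged sketch of that schema (the guard and test names below are illustrative, not part of this patch):

    ['arch == arm and mode == debug', {
      # Single outcomes: the test is expected to fail, or is skipped outright.
      'some/known-failure': [FAIL],
      'some/slow-test': [SKIP],
      # Conditional outcome: passes by default but may fail when the inner
      # condition holds, as in the ignition entries above.
      'some/flaky-test': [PASS, ['mode == release', FAIL]],
      # NO_VARIANTS pins the test to the default testing variant.
      'some/variant-sensitive-test': [PASS, NO_VARIANTS],
    }],  # arch == arm and mode == debug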
diff --git a/deps/v8/test/mjsunit/random-bit-correlations.js b/deps/v8/test/mjsunit/random-bit-correlations.js
index 8322cfac4c..2ac84c54a7 100644
--- a/deps/v8/test/mjsunit/random-bit-correlations.js
+++ b/deps/v8/test/mjsunit/random-bit-correlations.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --random-seed=12 --nostress-opt --noalways-opt --predictable
+// Flags: --random-seed=20 --nostress-opt --noalways-opt --predictable
(function() {
var kHistory = 2;
diff --git a/deps/v8/test/mjsunit/regexp-compile.js b/deps/v8/test/mjsunit/regexp-compile.js
index 6a24325e16..92c3f7b3dd 100644
--- a/deps/v8/test/mjsunit/regexp-compile.js
+++ b/deps/v8/test/mjsunit/regexp-compile.js
@@ -40,3 +40,5 @@ assertEquals(["x", "x"], re.exec("axyb"));
re.compile("(y)");
assertEquals(["y", "y"], re.exec("axyb"));
+
+assertEquals(2, re.compile.length);
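The new assertion pins down the declared arity of RegExp.prototype.compile, which takes (pattern, flags). As a quick illustration (not part of the patch), a function's length property reports its declared parameter count:

    // compileLike is a hypothetical stand-in with the same two-parameter
    // signature as RegExp.prototype.compile.
    function compileLike(pattern, flags) {}
    assertEquals(2, compileLike.length);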
diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js
index b6f019ea26..1a5de2addf 100644
--- a/deps/v8/test/mjsunit/regexp.js
+++ b/deps/v8/test/mjsunit/regexp.js
@@ -719,9 +719,6 @@ assertThrows("RegExp.prototype.toString.call(0)", TypeError);
assertThrows("RegExp.prototype.toString.call('')", TypeError);
assertThrows("RegExp.prototype.toString.call(false)", TypeError);
assertThrows("RegExp.prototype.toString.call(true)", TypeError);
-assertThrows("RegExp.prototype.toString.call([])", TypeError);
-assertThrows("RegExp.prototype.toString.call({})", TypeError);
-assertThrows("RegExp.prototype.toString.call(function(){})", TypeError);
// Test mutually recursive capture and backreferences.
assertEquals(["b", "", ""], /(\2)b(\1)/.exec("aba"));
diff --git a/deps/v8/test/mjsunit/regress-587004.js b/deps/v8/test/mjsunit/regress-587004.js
new file mode 100644
index 0000000000..9dc6052c43
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress-587004.js
@@ -0,0 +1,31 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --min-semi-space-size=32
+
+// We need to set --min-semi-space-size to enable allocation site pretenuring.
+
+function foo(i) {
+ with({}) {};
+ x = {};
+ x.a = 0.23;
+ x.b = 0.3;
+ return x;
+}
+
+var all = [];
+function step() {
+ for (var i = 0; i < 100; i++) {
+ var z = foo(i);
+ // Write unboxed double in object slack.
+ z.c = 0.1 + z.b
+ all.push(z);
+ }
+ gc(1);
+ gc(1);
+}
+
+step();
+// Now foo will allocate objects in old space.
+step();
diff --git a/deps/v8/test/mjsunit/regress/math-min.js b/deps/v8/test/mjsunit/regress/math-min.js
new file mode 100644
index 0000000000..942e9d0b7d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/math-min.js
@@ -0,0 +1,66 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = new Float64Array(4);
+a[2] *= -1;
+a[3] *= -1;
+assertEquals(0, a[0]);
+assertEquals(0, a[1]);
+assertEquals(-0, a[2]);
+assertEquals(-0, a[3]);
+
+function f1() {
+ var z = a[0];
+ // Same register.
+ assertEquals(0, Math.min(z, z));
+}
+
+function f2() {
+ // Different registers.
+ assertEquals(0, Math.min(a[0], a[1]));
+}
+
+function f3() {
+ // Zero and minus zero.
+ assertEquals(-0, Math.min(a[1], a[2]));
+}
+
+function f4() {
+ // Zero and minus zero, reversed order.
+ assertEquals(-0, Math.min(a[2], a[1]));
+}
+
+function f5() {
+ // Minus zero, same register.
+ var m_z = a[2];
+ assertEquals(-0, Math.min(m_z, m_z));
+}
+
+function f6() {
+ // Minus zero, different registers.
+ assertEquals(-0, Math.min(a[2], a[3]));
+}
+
+for (var i = 0; i < 3; i++) {
+ f1();
+ f2();
+ f3();
+ f4();
+ f5();
+ f6();
+}
+%OptimizeFunctionOnNextCall(f1);
+%OptimizeFunctionOnNextCall(f2);
+%OptimizeFunctionOnNextCall(f3);
+%OptimizeFunctionOnNextCall(f4);
+%OptimizeFunctionOnNextCall(f5);
+%OptimizeFunctionOnNextCall(f6);
+f1();
+f2();
+f3();
+f4();
+f5();
+f6();
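The six cases above pin down Math.min's handling of signed zeros across different register-allocation shapes. A minimal illustration of the underlying semantics (not part of the patch): Math.min orders -0 below +0, even though === cannot distinguish the two.

    // Object.is is used because === conflates +0 and -0.
    assertTrue(Object.is(-0, Math.min(0, -0)));  // -0 is the smaller zero
    assertTrue(Object.is(0, Math.max(0, -0)));   // +0 is the larger zero
    assertTrue(0 === -0);                        // === cannot tell them apart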
diff --git a/deps/v8/test/mjsunit/regress/regress-3650-1.js b/deps/v8/test/mjsunit/regress/regress-3650-1.js
new file mode 100644
index 0000000000..db91ec2d4e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3650-1.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --deopt-every-n-times=55
+// Flags: --nodead-code-elimination
+
+function f(t) {
+ var result = [];
+ for (var i in t) {
+ for (var j in t) {
+ result.push(i + j + t[i] + t[j]);
+ continue;
+ }
+ }
+ return result.join('');
+}
+
+var t = {a: "1", b: "2"};
+assertEquals("aa11ab12ba21bb22", f(t));
+%OptimizeFunctionOnNextCall(f);
+assertEquals("aa11ab12ba21bb22", f(t));
diff --git a/deps/v8/test/mjsunit/regress/regress-3650-2.js b/deps/v8/test/mjsunit/regress/regress-3650-2.js
new file mode 100644
index 0000000000..aaa6d55b68
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3650-2.js
@@ -0,0 +1,23 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = {}
+var b = {}
+a.x = 1;
+a.y = 1;
+b.x = 1;
+
+function foo(c) {
+ var s = 0;
+ for (var p in c) { s++; }
+ return s;
+}
+
+assertEquals(2, foo(a));
+assertEquals(1, foo(b));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(2, foo(a));
+assertEquals(1, foo(b));
diff --git a/deps/v8/test/mjsunit/regress/regress-3650-3.js b/deps/v8/test/mjsunit/regress/regress-3650-3.js
new file mode 100644
index 0000000000..013e4df283
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3650-3.js
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(a) {
+ for (var d in a) {
+ delete a[1];
+ }
+}
+
+foo([1,2,3]);
+foo([2,3,4]);
+%OptimizeFunctionOnNextCall(foo);
+foo([1,2,3]);
+assertOptimized(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-4267.js b/deps/v8/test/mjsunit/regress/regress-4267.js
new file mode 100644
index 0000000000..f8cf746723
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4267.js
@@ -0,0 +1,16 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+"use strict";
+
+var a = [];
+Object.defineProperty(a, "0", {configurable: false, value: 10});
+assertEquals(1, a.length);
+var setter = ()=>{ a.length = 0; };
+assertThrows(setter);
+assertThrows(setter);
+%OptimizeFunctionOnNextCall(setter);
+assertThrows(setter);
diff --git a/deps/v8/test/mjsunit/regress/regress-4509-Class-constructor-typeerror-realm.js b/deps/v8/test/mjsunit/regress/regress-4509-Class-constructor-typeerror-realm.js
new file mode 100644
index 0000000000..bc83a11802
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4509-Class-constructor-typeerror-realm.js
@@ -0,0 +1,25 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+var realm = Realm.create();
+var OtherTypeError = Realm.eval(realm, 'TypeError');
+
+class Derived extends Object {
+ constructor() {
+ return null;
+ }
+}
+
+assertThrows(() => { new Derived() }, TypeError);
+
+var OtherDerived = Realm.eval(realm,
+ "'use strict';" +
+ "class Derived extends Object {" +
+ "constructor() {" +
+ "return null;" +
+ "}};");
+
+// Before throwing the TypeError we have to switch to the caller context.
+assertThrows(() => { new OtherDerived() }, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-4654.js b/deps/v8/test/mjsunit/regress/regress-4654.js
new file mode 100644
index 0000000000..eb08b1126e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4654.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals('hello\u0000foobar', 'hello\u0000foobar'.normalize('NFC'));
diff --git a/deps/v8/test/mjsunit/regress/regress-4659.js b/deps/v8/test/mjsunit/regress/regress-4659.js
new file mode 100644
index 0000000000..ff436bec1b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4659.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-function-name
+
+var obj = {
+ get longerName(){
+ return 42;
+ }
+};
+assertEquals(42, obj.longerName);
diff --git a/deps/v8/test/mjsunit/regress/regress-4665-2.js b/deps/v8/test/mjsunit/regress/regress-4665-2.js
new file mode 100644
index 0000000000..b94301eea8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4665-2.js
@@ -0,0 +1,33 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-species
+
+// First test case
+
+function FirstBuffer () {}
+FirstBuffer.prototype.__proto__ = Uint8Array.prototype
+FirstBuffer.__proto__ = Uint8Array
+
+var buf = new Uint8Array(10)
+buf.__proto__ = FirstBuffer.prototype
+
+var buf2 = buf.subarray(2)
+assertEquals(8, buf2.length);
+
+// Second test case
+
+function SecondBuffer (arg) {
+ var arr = new Uint8Array(arg)
+ arr.__proto__ = SecondBuffer.prototype
+ return arr
+}
+SecondBuffer.prototype.__proto__ = Uint8Array.prototype
+SecondBuffer.__proto__ = Uint8Array
+
+var buf3 = new SecondBuffer(10)
+
+var buf4 = buf3.subarray(2)
+
+assertEquals(8, buf4.length);
diff --git a/deps/v8/test/mjsunit/regress/regress-4693.js b/deps/v8/test/mjsunit/regress/regress-4693.js
index ed832e65da..6145964607 100644
--- a/deps/v8/test/mjsunit/regress/regress-4693.js
+++ b/deps/v8/test/mjsunit/regress/regress-4693.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-sloppy-function
+// Flags: --harmony-sloppy-function --nolegacy-const
// In sloppy mode we allow function redeclarations within blocks for webcompat.
(function() {
@@ -27,3 +27,54 @@ assertThrows(`
}
})();
`, SyntaxError);
+
+// Conflicts between let and function still throw
+assertThrows(`
+ (function() {
+ if (true) {
+ let f;
+ function f() { return 2 }
+ }
+ })();
+`, SyntaxError);
+
+assertThrows(`
+ (function() {
+ if (true) {
+ function f() { return 2 }
+ let f;
+ }
+ })();
+`, SyntaxError);
+
+// Conflicts between const and function still throw
+assertThrows(`
+ (function() {
+ if (true) {
+ const f;
+ function f() { return 2 }
+ }
+ })();
+`, SyntaxError);
+
+assertThrows(`
+ (function() {
+ if (true) {
+ function f() { return 2 }
+ const f;
+ }
+ })();
+`, SyntaxError);
+
+// Annex B redefinition semantics still apply with more blocks
+(function() {
+ assertEquals(undefined, f); // Annex B
+ if (true) {
+ assertEquals(undefined, f);
+ { function f() { return 1 } }
+ assertEquals(1, f());
+ { function f() { return 2 } }
+ assertEquals(2, f());
+ }
+ assertEquals(2, f()); // Annex B
+})();
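The new block at the end exercises the Annex B webcompat rule directly. A hedged recap (illustrative code, not part of the patch): in sloppy mode, a function declared inside a block also gets a var-like binding in the enclosing function scope, assigned when the declaration is evaluated.

    (function() {
      // g is hoisted as a function-scoped binding per Annex B 3.3.
      { function g() { return 1 } }
      assertEquals(1, g());  // still visible after the block in sloppy mode
    })();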
diff --git a/deps/v8/test/mjsunit/regress/regress-4715.js b/deps/v8/test/mjsunit/regress/regress-4715.js
new file mode 100644
index 0000000000..0e38cdc27d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4715.js
@@ -0,0 +1,48 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --dead-code-elimination --expose-gc
+
+var training = {};
+training.a = "nop";
+training.slow = "nop";
+delete training.slow; // Dictionary-mode properties => slow-mode for-in.
+
+var keepalive = {};
+keepalive.a = "nop"; // Keep a map early in the transition chain alive.
+
+function GetReal() {
+ var r = {};
+ r.a = "nop";
+ r.b = "nop";
+ r.c = "dictionarize",
+ r.d = "gc";
+ r.e = "result";
+ return r;
+};
+
+function SideEffect(object, action) {
+ if (action === "dictionarize") {
+ delete object.a;
+ } else if (action === "gc") {
+ gc();
+ }
+}
+
+function foo(object) {
+ for (var key in object) {
+ SideEffect(object, object[key]);
+ }
+ return key;
+}
+
+// Collect type feedback for slow-mode for-in.
+foo(training);
+SideEffect({a: 0}, "dictionarize");
+SideEffect({}, "gc");
+
+// Compile for slow-mode objects...
+%OptimizeFunctionOnNextCall(foo);
+// ...and pass in a fast-mode object.
+assertEquals("e", foo(GetReal()));
diff --git a/deps/v8/test/mjsunit/regress/regress-575364.js b/deps/v8/test/mjsunit/regress/regress-575364.js
index f1dc49e073..73136c5538 100644
--- a/deps/v8/test/mjsunit/regress/regress-575364.js
+++ b/deps/v8/test/mjsunit/regress/regress-575364.js
@@ -9,4 +9,4 @@ function f() {
}
assertFalse(_WASMEXP_ == undefined);
-assertThrows(function() { _WASMEXP_.asmCompileRun(f.toString()); });
+assertThrows(function() { _WASMEXP_.instantiateModuleFromAsm(f.toString()); });
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-516775.js b/deps/v8/test/mjsunit/regress/regress-crbug-516775.js
index 25d4d0103d..df190c149b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-516775.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-516775.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-concat-spreadable
+// Flags: --allow-natives-syntax
function arguments_with_length_getter(f) {
arguments.__defineGetter__('length', f);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-577112.js b/deps/v8/test/mjsunit/regress/regress-crbug-577112.js
new file mode 100644
index 0000000000..504f921a33
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-577112.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+Array.prototype.__proto__ = null;
+var prototype = Array.prototype;
+function f() {
+ prototype.lastIndexOf({});
+}
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-580506.js b/deps/v8/test/mjsunit/regress/regress-crbug-580506.js
new file mode 100644
index 0000000000..fb036982cf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-580506.js
@@ -0,0 +1,22 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ 'use strict';
+ class A extends Function {
+ constructor(...args) {
+ super(...args);
+ this.a = 42;
+ }
+ }
+ var v1 = new A("'use strict';");
+ function f(func) {
+ func.__defineSetter__('a', function() { });
+ }
+ var v2 = new A();
+ f(v2);
+ f(v1);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-580584.js b/deps/v8/test/mjsunit/regress/regress-crbug-580584.js
new file mode 100644
index 0000000000..cb6776f54c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-580584.js
@@ -0,0 +1,19 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() { return arguments }
+
+// Reconfiguring function.name should update both the attributes and the value.
+Object.defineProperty(f, "name", {
+ writable: true, configurable: true, value: 10});
+assertEquals({value: 10, writable: true, enumerable: false, configurable: true},
+ Object.getOwnPropertyDescriptor(f, "name"));
+
+var args = f();
+
+// Setting a value for arguments[Symbol.iterator] should not affect the
+// attributes.
+args[Symbol.iterator] = 10;
+assertEquals({value: 10, writable: true, configurable: true, enumerable: false},
+ Object.getOwnPropertyDescriptor(args, Symbol.iterator));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-581577.js b/deps/v8/test/mjsunit/regress/regress-crbug-581577.js
new file mode 100644
index 0000000000..d95ada5f5a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-581577.js
@@ -0,0 +1,5 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals("", RegExp.prototype.flags);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-582051.js b/deps/v8/test/mjsunit/regress/regress-crbug-582051.js
new file mode 100644
index 0000000000..93f4e70dfb
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-582051.js
@@ -0,0 +1,44 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var test_y = false;
+
+function foo(a = 1) {
+ var x = 2;
+ debugger;
+ eval("var y = 3");
+ test_y = true;
+ debugger;
+}
+
+var exception = null;
+var break_count = 0;
+var Debug = debug.Debug;
+var ScopeType = debug.ScopeType;
+
+function listener(event, exec_state) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var scopes = exec_state.frame(0).allScopes();
+ var expectation = [ ScopeType.Block,
+ ScopeType.Local,
+ ScopeType.Script,
+ ScopeType.Global ];
+ assertEquals(expectation, scopes.map(x => x.scopeType()));
+ assertEquals(2, scopes[0].scopeObject().value().x);
+ if (test_y) assertEquals(3, scopes[0].scopeObject().value().y);
+ assertEquals(1, scopes[1].scopeObject().value().a);
+ break_count++;
+ } catch (e) {
+ print(e);
+ exception = e;
+ }
+}
+Debug.setListener(listener);
+foo();
+
+assertNull(exception);
+assertEquals(2, break_count);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-582703.js b/deps/v8/test/mjsunit/regress/regress-crbug-582703.js
new file mode 100644
index 0000000000..792266f54c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-582703.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+%FunctionGetScript({});
+%FunctionGetSourceCode({});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-583257.js b/deps/v8/test/mjsunit/regress/regress-crbug-583257.js
new file mode 100644
index 0000000000..85a08c7c81
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-583257.js
@@ -0,0 +1,27 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.defineProperty(String.prototype, "0", { __v_1: 1});
+Object.defineProperty(String.prototype, "3", { __v_1: 1});
+
+(function () {
+ var s = new String();
+ function set(object, index, value) { object[index] = value; }
+ set(s, 10, "value");
+ set(s, 1073741823, "value");
+})();
+
+function __f_11() {
+ Object.preventExtensions(new String());
+}
+__f_11();
+__f_11();
+
+(function() {
+ var i = 10;
+ var a = new String("foo");
+ for (var j = 0; j < i; j++) {
+ a[j] = {};
+ }
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-584188.js b/deps/v8/test/mjsunit/regress/regress-crbug-584188.js
new file mode 100644
index 0000000000..f54e5593b7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-584188.js
@@ -0,0 +1,10 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var x = {};
+try {
+Object.defineProperty(String.prototype, "3", { x: function() { x = v; }});
+string = "bla";
+} catch(e) {; }
+assertThrows("Array.prototype.sort.call(string);", TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-590989-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-590989-1.js
new file mode 100644
index 0000000000..73118eb20e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-590989-1.js
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {}
+var p = {foo: 1.5}
+
+function g(x) { return x.foo === +x.foo; }
+
+assertEquals(false, g(o));
+assertEquals(false, g(o));
+%OptimizeFunctionOnNextCall(g);
+assertEquals(false, g(o)); // Still fine here.
+assertEquals(true, g(p));
+%OptimizeFunctionOnNextCall(g);
+assertEquals(false, g(o)); // Confused by type feedback.
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-590989-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-590989-2.js
new file mode 100644
index 0000000000..cae1d9db5b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-590989-2.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(x) { return x === +x; }
+
+assertEquals(false, f(undefined));
+assertEquals(false, f(undefined));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(false, f(undefined)); // Interestingly this fails right away.
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-592343.js b/deps/v8/test/mjsunit/regress/regress-crbug-592343.js
new file mode 100644
index 0000000000..c98e921b00
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-592343.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var r = /[^\u{1}-\u{1000}\u{1002}-\u{2000}]/u;
+assertTrue(r.test("\u{0}"));
+assertFalse(r.test("\u{1}"));
+assertFalse(r.test("\u{1000}"));
+assertTrue(r.test("\u{1001}"));
+assertFalse(r.test("\u{1002}"));
+assertFalse(r.test("\u{2000}"));
+assertTrue(r.test("\u{2001}"));
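This test covers negated character classes whose range endpoints are \u{...} code-point escapes. As an illustration (not part of the patch), such escapes are only valid range endpoints under the u flag:

    // Matches 'a' through 'c' via code-point escapes; /u is required.
    var simple = /[\u{61}-\u{63}]/u;
    assertTrue(simple.test("b"));
    assertFalse(simple.test("d"));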
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-593282.js b/deps/v8/test/mjsunit/regress/regress-crbug-593282.js
new file mode 100644
index 0000000000..85c4e10a12
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-593282.js
@@ -0,0 +1,38 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --stack-size=120
+
+var __v_11 = {};
+function __f_2(depth) {
+ try {
+ __f_5(depth, __v_11);
+ return true;
+ } catch (e) {
+ gc();
+ }
+}
+function __f_5(n, __v_4) {
+ if (--n == 0) {
+ __f_1(__v_4);
+ return;
+ }
+ __f_5(n, __v_4);
+}
+function __f_1(__v_4) {
+ var __v_5 = new RegExp(__v_4);
+}
+function __f_4() {
+ var __v_1 = 100;
+ var __v_8 = 100000;
+ while (__v_1 < __v_8 - 1) {
+ var __v_3 = Math.floor((__v_1 + __v_8) / 2);
+ if (__f_2(__v_3)) {
+ __v_1 = __v_3;
+ } else {
+ __v_8 = __v_3;
+ }
+ }
+}
+__f_4();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-595657.js b/deps/v8/test/mjsunit/regress/regress-crbug-595657.js
new file mode 100644
index 0000000000..653259781b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-595657.js
@@ -0,0 +1,15 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=100
+
+function test() {
+ try {
+ test();
+ } catch(e) {
+ /(\2)(a)/.test("");
+ }
+}
+
+test();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-599003.js b/deps/v8/test/mjsunit/regress/regress-crbug-599003.js
new file mode 100644
index 0000000000..da29455661
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-599003.js
@@ -0,0 +1,39 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc --verify-heap
+
+function A() {}
+
+function g1() {
+ var obj = new A();
+ obj.v0 = 0;
+ obj.v1 = 0;
+ obj.v2 = 0;
+ obj.v3 = 0;
+ obj.v4 = 0;
+ obj.v5 = 0;
+ obj.v6 = 0;
+ obj.v7 = 0;
+ obj.v8 = 0;
+ obj.v9 = 0;
+ return obj;
+}
+
+function g2() {
+ return new A();
+}
+
+var o = g1();
+%OptimizeFunctionOnNextCall(g2);
+g2();
+o = null;
+gc();
+
+for (var i = 0; i < 20; i++) {
+ var o = new A();
+}
+g2();
+
+gc(); // Boom!
diff --git a/deps/v8/test/mjsunit/regress/regress-integer-indexed-element.js b/deps/v8/test/mjsunit/regress/regress-integer-indexed-element.js
new file mode 100644
index 0000000000..1bae2d06f5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-integer-indexed-element.js
@@ -0,0 +1,12 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o = {__proto__:new Int32Array(100)};
+Object.prototype[1.3] = 10;
+assertEquals(undefined, o[1.3]);
+
+var o = new Int32Array(100);
+var o2 = new Int32Array(200);
+o.__proto__ = o2;
+assertEquals(undefined, Reflect.get(o, 1.3, o2));
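Both assertions rely on typed arrays being integer-indexed exotic objects: a non-integer numeric key such as 1.3 is resolved by the typed array itself and never reaches the prototype chain. A minimal illustration (not part of the patch):

    var ta = new Int32Array(4);
    assertEquals(undefined, ta[1.3]);  // no integer-indexed element: undefined
    ta[1.3] = 42;                      // store is silently dropped
    assertEquals(undefined, ta[1.3]);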
diff --git a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
index 1fd8d810b3..444fe4beb4 100644
--- a/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
+++ b/deps/v8/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
@@ -35,18 +35,18 @@ function ToNumber(x) {
// Reduced version of String.fromCharCode;
// does not actually do the same calculation but exhibits untagging bug.
function StringFromCharCode(code) {
- var n = %_ArgumentsLength();
+ var n = arguments.length;
var one_byte = %NewString(n, true);
var i;
for (i = 0; i < n; i++) {
- var code = %_Arguments(i);
+ var code = arguments[i];
if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
if (code > 0xff) break;
}
var two_byte = %NewString(n - i, false);
for (var j = 0; i < n; i++, j++) {
- var code = %_Arguments(i);
+ var code = arguments[i];
%_TwoByteSeqStringSetChar(j, code, two_byte);
}
return one_byte + two_byte;
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-4839.js b/deps/v8/test/mjsunit/regress/regress-v8-4839.js
new file mode 100644
index 0000000000..120685b1de
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-4839.js
@@ -0,0 +1,62 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function dummy() { }
+
+(function InlinedFunctionTestContext() {
+ var f = function() { }
+
+ function g() {
+ var s = "hey";
+ dummy(); // Force a deopt point.
+ if (f()) return s;
+ }
+
+ g();
+ g();
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ f = function() { return true; }
+ assertEquals("hey", g());
+})();
+
+(function InlinedConstructorReturnTestContext() {
+ function c() { return 1; }
+
+ var f = function() { return !(new c()); }
+
+ function g() {
+ var s = "hey";
+ dummy(); // Force a deopt point.
+ if (f()) return s;
+ }
+
+ g();
+ g();
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ f = function() { return true; }
+ assertEquals("hey", g());
+})();
+
+(function InlinedConstructorNoReturnTestContext() {
+ function c() { }
+
+ var f = function() { return !(new c()); }
+
+ function g() {
+ var s = "hey";
+ dummy(); // Force a deopt point.
+ if (f()) return s;
+ }
+
+ g();
+ g();
+ g();
+ %OptimizeFunctionOnNextCall(g);
+ f = function() { return true; }
+ assertEquals("hey", g());
+})();
diff --git a/deps/v8/test/mjsunit/samevalue.js b/deps/v8/test/mjsunit/samevalue.js
index 038fd68eb9..356e888016 100644
--- a/deps/v8/test/mjsunit/samevalue.js
+++ b/deps/v8/test/mjsunit/samevalue.js
@@ -26,14 +26,14 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-natives-as natives
+// Flags: --expose-natives-as natives --allow-natives-syntax
// Test the SameValue and SameValueZero internal methods.
var obj1 = {x: 10, y: 11, z: "test"};
var obj2 = {x: 10, y: 11, z: "test"};
var sameValue = Object.is;
-var sameValueZero = natives.ImportNow("SameValueZero");
+var sameValueZero = function(x, y) { return %SameValueZero(x, y); }
// Calls SameValue and SameValueZero and checks that their results match.
function sameValueBoth(a, b) {
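The replacement swaps the removed natives import for the %SameValueZero runtime call, which is why --allow-natives-syntax is added to the flags. An illustration of the semantic difference under test (not part of the patch):

    assertTrue(Object.is(NaN, NaN));       // SameValue: NaN equals itself
    assertFalse(Object.is(0, -0));         // SameValue: zeros differ by sign
    assertTrue(%SameValueZero(NaN, NaN));  // SameValueZero: NaN equals itself
    assertTrue(%SameValueZero(0, -0));     // SameValueZero: signed zeros equal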
diff --git a/deps/v8/test/mjsunit/strict-mode.js b/deps/v8/test/mjsunit/strict-mode.js
index 6beb9c667a..63dc9d0bda 100644
--- a/deps/v8/test/mjsunit/strict-mode.js
+++ b/deps/v8/test/mjsunit/strict-mode.js
@@ -1149,7 +1149,9 @@ function CheckArgumentsPillDescriptor(func, name) {
function strict() {
"use strict";
- return return_my_caller();
+ // Returning result via local variable to avoid tail call optimization.
+ var res = return_my_caller();
+ return res;
}
assertSame(null, strict());
@@ -1163,7 +1165,9 @@ function CheckArgumentsPillDescriptor(func, name) {
(function TestNonStrictFunctionCallerPill() {
function strict(n) {
"use strict";
- return non_strict(n);
+ // Returning result via local variable to avoid tail call optimization.
+ var res = non_strict(n);
+ return res;
}
function recurse(n, then) {
@@ -1191,7 +1195,9 @@ function CheckArgumentsPillDescriptor(func, name) {
(function TestNonStrictFunctionCallerDescriptorPill() {
function strict(n) {
"use strict";
- return non_strict(n);
+ // Returning result via local variable to avoid tail call optimization.
+ var res = non_strict(n);
+ return res;
}
function recurse(n, then) {
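All three rewrites use the same trick: a call in `return f();` position is a proper tail call in ES6 strict code, so the calling frame may already be gone by the time the callee inspects the stack. Routing the result through a local keeps the call out of tail position. A hedged sketch (inspectCaller is a hypothetical stand-in for the helpers in this file):

    function inspectCaller() { return null; }  // stand-in helper
    function tail() { "use strict"; return inspectCaller(); }  // tail call
    function nonTail() {
      "use strict";
      var res = inspectCaller();  // not in tail position
      return res;
    }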
diff --git a/deps/v8/test/mjsunit/strong/declaration-after-use.js b/deps/v8/test/mjsunit/strong/declaration-after-use.js
deleted file mode 100644
index 3530105f2b..0000000000
--- a/deps/v8/test/mjsunit/strong/declaration-after-use.js
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-
-// Note that it's essential for these tests that the reference is inside dead
-// code (because we already produce ReferenceErrors for run-time unresolved
-// variables and don't want to confuse those with strong mode errors). But the
-// errors should *not* be inside lazy, unexecuted functions, since lazy parsing
-// doesn't produce strong mode scoping errors).
-
-// In addition, assertThrows will call eval and that changes variable binding
-// types (see e.g., UNBOUND_EVAL_SHADOWED). We can avoid unwanted side effects
-// by wrapping the code to be tested inside an outer function.
-function assertThrowsHelper(code) {
- "use strict";
- let prologue = "(function outer() { if (false) { ";
- let epilogue = " } })();";
-
- assertThrows("'use strong'; " + prologue + code + epilogue, ReferenceError);
-
- // Make sure the error happens only in strong mode (note that we need strict
- // mode here because of let).
- assertDoesNotThrow("'use strict'; " + prologue + code + epilogue);
-}
-
-(function DeclarationAfterUse() {
- // Note that these tests only test cases where the declaration is found but is
- // after the use. In particular, we cannot yet detect cases where the use can
- // possibly bind to a global variable.
- assertThrowsHelper("x; let x = 0;");
- assertThrowsHelper("function f() { x; let x = 0; }");
- assertThrowsHelper("function f() { x; } let x = 0;");
-
- assertThrowsHelper("x; const x = 0;");
- assertThrowsHelper("function f() { x; const x = 0; }");
- assertThrowsHelper("function f() { x; } const x = 0;");
-
- // These tests needs to be done a bit more manually, since var is not allowed
- // in strong mode:
- assertThrows(
- `(function outer() {
- function f() { 'use strong'; if (false) { x; } } var x = 0; f();
- })()`,
- ReferenceError);
- assertDoesNotThrow(
- "(function outer() {\n" +
- " function f() { if (false) { x; } } var x = 0; f(); \n" +
- "})()");
-
- assertThrows(
- "(function outer() {\n" +
- " function f() { 'use strong'; if (false) { x; } } var x; f(); \n" +
- "})()",
- ReferenceError);
- assertDoesNotThrow(
- "(function outer() {\n" +
- " function f() { if (false) { x; } } var x; f(); \n" +
- "})()");
-
- // Use occurring in the initializer of the declaration:
- assertThrowsHelper("let x = x + 1;");
- assertThrowsHelper("let x = x;");
- assertThrowsHelper("let x = y, y = 4;");
- assertThrowsHelper("let x = function() { x; }");
- assertThrowsHelper("let x = a => { x; }");
- assertThrowsHelper("function f(x) { return x; }; let x = f(x);");
- assertThrowsHelper("const x = x;");
- assertThrowsHelper("const x = function() { x; }");
- assertThrowsHelper("const x = a => { x; }");
- assertThrowsHelper("function f(x) {return x}; const x = f(x);");
-
- assertThrowsHelper("for (let x = x; ; ) { }");
- assertThrowsHelper("for (const x = x; ; ) { }");
- assertThrowsHelper("for (let x = y, y; ; ) { }");
- assertThrowsHelper("for (const x = y, y = 0; ; ) { }");
-
- // Computed property names
- assertThrowsHelper("let o = { 'a': 'b', [o.a]: 'c'};");
-})();
-
-
-(function DeclarationAfterUseInClasses() {
- // Referring to a variable declared later
- assertThrowsHelper("class C { m() { x; } } let x = 0;");
- assertThrowsHelper("class C { static m() { x; } } let x = 0;");
- assertThrowsHelper("class C { [x]() { } } let x = 0;");
-
- assertThrowsHelper("class C { m() { x; } } const x = 0;");
- assertThrowsHelper("class C { static m() { x; } } const x = 0;");
- assertThrowsHelper("class C { [x]() { } } const x = 0;");
-
- // Referring to the class name.
- assertThrowsHelper("class C extends C { }");
- assertThrowsHelper("let C = class C2 extends C { }");
- assertThrowsHelper("let C = class C2 extends C2 { }");
-
- assertThrowsHelper("let C = class C2 { constructor() { C; } }");
- assertThrowsHelper("let C = class C2 { method() { C; } }");
- assertThrowsHelper("let C = class C2 { *generator_method() { C; } }");
-
- assertThrowsHelper(
- `let C = class C2 {
- static a() { return 'A'; }
- [C.a()]() { return 'B'; }
- };`);
-
- assertThrowsHelper(
- `let C = class C2 {
- static a() { return 'A'; }
- [C2.a()]() { return 'B'; }
- };`);
-
- assertThrowsHelper(
- `let C = class C2 {
- [(function() { C; return 'A';})()]() { return 'B'; }
- };`);
-
- // The reference to C or C2 is inside a function, but not a method.
- assertThrowsHelper(
- `let C = class C2 {
- [(function() { C2; return 'A';})()]() { return 'B'; }
- };`);
-
- assertThrowsHelper(
- `let C = class C2 {
- [(function() { C; return 'A';})()]() { return 'B'; }
- };`);
-
- // The reference to C or C2 is inside a method, but it's not a method of the
- // relevant class (C2).
- assertThrowsHelper(
- `let C = class C2 {
- [(new (class D { m() { C2; return 'A'; } })).m()]() {
- return 'B';
- }
- }`);
-
- assertThrowsHelper(
- `let C = class C2 {
- [(new (class D { m() { C; return 'A'; } })).m()]() {
- return 'B';
- }
- }`);
-
- assertThrowsHelper(
- `let C = class C2 {
- [({m() { C2; return 'A'; }}).m()]() { return 'B'; }
- }`);
-
- assertThrowsHelper(
- `let C = class C2 {
- [({m() { C; return 'A'; }}).m()]() { return 'B'; }
- }`);
-
- assertThrowsHelper(
- `class COuter {
- m() {
- class CInner {
- [({ m() { CInner; return 'A'; } }).m()]() {
- return 'B';
- }
- }
- }
- }`);
-})();
-
-
-(function UsesWhichAreFine() {
- "use strong";
-
- let var1 = 0;
- var1;
-
- let var2a = 0, var2b = var2a + 1, var2c = 2 + var2b;
-
- for (let var3 = 0; var3 < 1; var3++) {
- var3;
- }
-
- for (let var4a = 0, var4b = var4a; var4a + var4b < 4; var4a++, var4b++) {
- var4a;
- var4b;
- }
-
- let var5 = 5;
- for (; var5 < 10; ++var5) { }
-
- let arr = [1, 2];
- for (let i of arr) {
- i;
- }
-
- try {
- throw "error";
- } catch (e) {
- e;
- }
-
- function func1() { func1; this; }
- func1();
- func1;
-
- function * func2() { func2; this; }
- func2();
- func2;
-
- function func4(p, ...rest) { p; rest; this; func2; }
- // TODO(arv): The arity checking is not correct with rest parameters.
- func4(1, 2);
-
- let func5 = (p1, p2) => { p1; p2; };
- func5(1, 2);
-
- let func5b = p1 => p1;
- func5b(1);
-
- function func6() {
- var1, var2a, var2b, var2c;
- }
-
- class C1 { constructor() { C1; } }; new C1();
- let C2 = class C3 { constructor() { C3; } }; new C2();
-
- class C4 { method() { C4; } *generator_method() { C4; } }; new C4();
- let C5 = class C6 { method() { C6; } *generator_method() { C6; } }; new C5();
-
- class C7 { static method() { C7; } }; new C7();
- let C8 = class C9 { static method() { C9; } }; new C8();
-
- class C10 { get x() { C10; } }; new C10();
- let C11 = class C12 { get x() { C12; } }; new C11();
-
- // Regression test for unnamed classes.
- let C13 = class { m() { var1; } };
-
- class COuter {
- m() {
- class CInner {
- // Here we can refer to COuter but not to CInner (see corresponding
- // assertion test):
- [({ m() { COuter; return 'A'; } }).m()]() { return 'B'; }
- // And here we can refer to both:
- n() { COuter; CInner; }
- }
- return new CInner();
- }
- }
- (new COuter()).m().n();
-
- // Making sure the check which is supposed to prevent "object literal inside
- // computed property name references the class name" is not too generic:
- class C14 { m() { let obj = { n() { C14 } }; obj.n(); } }; (new C14()).m();
-})();
diff --git a/deps/v8/test/mjsunit/strong/for-in.js b/deps/v8/test/mjsunit/strong/for-in.js
index 8fa9010202..641248c408 100644
--- a/deps/v8/test/mjsunit/strong/for-in.js
+++ b/deps/v8/test/mjsunit/strong/for-in.js
@@ -9,9 +9,3 @@
assertThrows("'use strong'; for (let x in []) {}", SyntaxError);
assertThrows("'use strong'; for (const x in []) {}", SyntaxError);
})();
-
-(function ForOfStatement() {
- assertTrue(eval("'use strong'; for (x of []) {} true"));
- assertTrue(eval("'use strong'; for (let x of []) {} true"));
- assertTrue(eval("'use strong'; for (const x of []) {} true"));
-})();
diff --git a/deps/v8/test/mjsunit/strong/mutually-recursive-classes.js b/deps/v8/test/mjsunit/strong/mutually-recursive-classes.js
deleted file mode 100644
index 204c3964de..0000000000
--- a/deps/v8/test/mjsunit/strong/mutually-recursive-classes.js
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --strong-mode
-"use strict"
-
-let prologue_dead = "(function outer() { if (false) { ";
-let epilogue_dead = " } })();";
-
-let prologue_live = "(function outer() { ";
-let epilogue_live = "})();";
-
-// For code which already throws a run-time error in non-strong mode; we assert
-// that we now get the error already compilation time.
-function assertLateErrorsBecomeEarly(code) {
- assertThrows("'use strong'; " + prologue_dead + code + epilogue_dead,
- ReferenceError);
-
- // Make sure the error happens only in strong mode (note that we need strict
- // mode here because of let).
- assertDoesNotThrow("'use strict'; " + prologue_dead + code + epilogue_dead);
-
- // But if we don't put the references inside a dead code, it throws a run-time
- // error (also in strict mode).
- assertThrows("'use strong'; " + prologue_live + code + epilogue_live,
- ReferenceError);
- assertThrows("'use strict'; " + prologue_live + code + epilogue_live,
- ReferenceError);
-}
-
-// For code which doesn't throw an error at all in non-strong mode.
-function assertNonErrorsBecomeEarly(code) {
- assertThrows("'use strong'; " + prologue_dead + code + epilogue_dead,
- ReferenceError);
- assertDoesNotThrow("'use strict'; " + prologue_dead + code + epilogue_dead);
-
- assertThrows("'use strong'; " + prologue_live + code + epilogue_live,
- ReferenceError);
- assertDoesNotThrow("'use strict'; " + prologue_live + code + epilogue_live,
- ReferenceError);
-}
-
-(function InitTimeReferenceForward() {
- // It's never OK to have an init time reference to a class which hasn't been
- // declared.
- assertLateErrorsBecomeEarly(
- `class A extends B { }
- class B {}`);
-
- assertLateErrorsBecomeEarly(
- `class A {
- [B.sm()]() { }
- }
- class B {
- static sm() { return 0; }
- }`);
-})();
-
-(function InitTimeReferenceBackward() {
- // Backwards is of course fine.
- "use strong";
- class A {
- static sm() { return 0; }
- }
- let i = "making these classes non-consecutive";
- class B extends A {};
- "by inserting statements and declarations in between";
- class C {
- [A.sm()]() { }
- };
-})();
-
-(function BasicMutualRecursion() {
- "use strong";
- class A {
- m() { B; }
- static sm() { B; }
- }
- // No statements or declarations between the classes.
- class B {
- m() { A; }
- static sm() { A; }
- }
-})();
-
-(function MutualRecursionWithMoreClasses() {
- "use strong";
- class A {
- m() { B; C; }
- static sm() { B; C; }
- }
- class B {
- m() { A; C; }
- static sm() { A; C; }
- }
- class C {
- m() { A; B; }
- static sm() { A; B; }
- }
-})();
-
-(function ReferringForwardInDeeperScopes() {
- "use strong";
-
- function foo() {
- class A1 {
- m() { B1; }
- }
- class B1 { }
- }
-
- class Outer {
- m() {
- class A2 {
- m() { B2; }
- }
- class B2 { }
- }
- }
-
- for (let i = 0; i < 1; ++i) {
- class A3 {
- m() { B3; }
- }
- class B3 { }
- }
-
- (a, b) => {
- class A4 {
- m() { B4; }
- }
- class B4 { }
- }
-})();
-
-(function ReferringForwardButClassesNotConsecutive() {
- assertNonErrorsBecomeEarly(
- `class A {
- m() { B; }
- }
- ;
- class B {}`);
-
- assertNonErrorsBecomeEarly(
- `let A = class {
- m() { B; }
- }
- class B {}`);
-
- assertNonErrorsBecomeEarly(
- `class A {
- m() { B1; } // Just a normal use-before-declaration.
- }
- let B1 = class B2 {}`);
-
- assertNonErrorsBecomeEarly(
- `class A {
- m() { B; }
- }
- let i = 0;
- class B {}`);
-
- assertNonErrorsBecomeEarly(
- `class A {
- m() { B; }
- }
- function foo() {}
- class B {}`);
-
- assertNonErrorsBecomeEarly(
- `function foo() {
- class A {
- m() { B; }
- }
- }
- class B {}`);
-
- assertNonErrorsBecomeEarly(
- `class A extends class B { m() { C; } } {
- }
- class C { }`);
-
- assertLateErrorsBecomeEarly(
- `class A extends class B { [C.sm()]() { } } {
- }
- class C { static sm() { return 'a';} }`);
-
- assertLateErrorsBecomeEarly(
- `class A extends class B extends C { } {
- }
- class C { }`);
-})();
-
-
-(function RegressionForClassResolution() {
- assertNonErrorsBecomeEarly(
- `let A = class B {
- m() { C; }
- }
- ;;;;
- class C {}
- class B {}`);
-})();
-
-
-(function TestMultipleMethodScopes() {
- "use strong";
-
- // Test cases where the reference is inside multiple method scopes.
- class A1 {
- m() {
- class C1 {
- m() { B1; }
- }
- }
- }
- class B1 { }
-
- ;
-
- class A2 {
- m() {
- class C2 extends B2 {
- }
- }
- }
- class B2 { }
-})();
diff --git a/deps/v8/test/mjsunit/to_number_order.js b/deps/v8/test/mjsunit/to_number_order.js
index 50e4bc762e..c20ec5e147 100644
--- a/deps/v8/test/mjsunit/to_number_order.js
+++ b/deps/v8/test/mjsunit/to_number_order.js
@@ -56,6 +56,13 @@ x = "";
assertEquals(1, Math.pow(v, w));
assertEquals("hestfisk", x, "pow");
+x = "";
+var a = {valueOf: function() { x += "hest"; return 1/0; }};
+var b = {valueOf: function() { x += "fisk"; return 1}};
+assertEquals(1/0, Math.hypot(a, b));
+assertEquals("hestfisk", x, "hypot");
+
+
var year = { valueOf: function() { x += 1; return 2007; } };
var month = { valueOf: function() { x += 2; return 2; } };
var date = { valueOf: function() { x += 3; return 4; } };
diff --git a/deps/v8/test/mjsunit/typeof.js b/deps/v8/test/mjsunit/typeof.js
index 8aa0ab1c3d..864f1cfdb6 100644
--- a/deps/v8/test/mjsunit/typeof.js
+++ b/deps/v8/test/mjsunit/typeof.js
@@ -35,3 +35,5 @@ assertFalse(typeof r == 'function');
function test(x, y) { return x == y; }
assertTrue(test('object', typeof r));
+
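+// typeof null is "object", so it is never "undefined".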
+assertFalse(typeof null == "undefined");
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm.js b/deps/v8/test/mjsunit/wasm/asm-wasm.js
index 8dfe85aee1..3f936f5f21 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm.js
@@ -15,7 +15,9 @@ function EmptyTest() {
return {caller: caller};
}
-assertEquals(11, _WASMEXP_.asmCompileRun(EmptyTest.toString()));
+assertEquals(11, _WASMEXP_.instantiateModuleFromAsm(
+ EmptyTest.toString()).caller());
+
function IntTest() {
"use asm";
@@ -24,7 +26,7 @@ function IntTest() {
b = b|0;
var c = (b + 1)|0
var d = 3.0;
- var e = d | 0; // double conversion
+      var e = ~~d; // double conversion: ~~ truncates; |0 is not valid on doubles in asm.js
return (a + c + 1)|0;
}
@@ -35,7 +37,9 @@ function IntTest() {
return {caller: caller};
}
-assertEquals(101, _WASMEXP_.asmCompileRun(IntTest.toString()));
+assertEquals(101, _WASMEXP_.instantiateModuleFromAsm(
+ IntTest.toString()).caller());
+
function Float64Test() {
"use asm";
@@ -59,7 +63,9 @@ function Float64Test() {
return {caller: caller};
}
-assertEquals(1, _WASMEXP_.asmCompileRun(Float64Test.toString()));
+assertEquals(1, _WASMEXP_.instantiateModuleFromAsm(
+ Float64Test.toString()).caller());
+
function BadModule() {
"use asm";
@@ -78,9 +84,10 @@ function BadModule() {
}
assertThrows(function() {
- _WASMEXP_.asmCompileRun(BadModule.toString())
+ _WASMEXP_.instantiateModuleFromAsm(BadModule.toString()).caller();
});
+
function TestReturnInBlock() {
"use asm";
@@ -98,7 +105,9 @@ function TestReturnInBlock() {
return {caller: caller};
}
-assertEquals(1, _WASMEXP_.asmCompileRun(TestReturnInBlock.toString()));
+assertEquals(1, _WASMEXP_.instantiateModuleFromAsm(
+ TestReturnInBlock.toString()).caller());
+
function TestWhileSimple() {
"use asm";
@@ -114,7 +123,9 @@ function TestWhileSimple() {
return {caller: caller};
}
-assertEquals(5, _WASMEXP_.asmCompileRun(TestWhileSimple.toString()));
+assertEquals(5, _WASMEXP_.instantiateModuleFromAsm(
+ TestWhileSimple.toString()).caller());
+
function TestWhileWithoutBraces() {
"use asm";
@@ -129,7 +140,9 @@ function TestWhileWithoutBraces() {
return {caller: caller};
}
-assertEquals(4, _WASMEXP_.asmCompileRun(TestWhileWithoutBraces.toString()));
+assertEquals(4, _WASMEXP_.instantiateModuleFromAsm(
+ TestWhileWithoutBraces.toString()).caller());
+
function TestReturnInWhile() {
"use asm";
@@ -146,7 +159,9 @@ function TestReturnInWhile() {
return {caller: caller};
}
-assertEquals(6, _WASMEXP_.asmCompileRun(TestReturnInWhile.toString()));
+assertEquals(6, _WASMEXP_.instantiateModuleFromAsm(
+ TestReturnInWhile.toString()).caller());
+
function TestReturnInWhileWithoutBraces() {
"use asm";
@@ -161,7 +176,10 @@ function TestReturnInWhileWithoutBraces() {
return {caller: caller};
}
-assertEquals(7, _WASMEXP_.asmCompileRun(TestReturnInWhileWithoutBraces.toString()));
+assertEquals(
+ 7, _WASMEXP_.instantiateModuleFromAsm(
+ TestReturnInWhileWithoutBraces.toString()).caller());
+
function TestBreakInWhile() {
"use asm";
@@ -176,7 +194,9 @@ function TestBreakInWhile() {
return {caller: caller};
}
-assertEquals(8, _WASMEXP_.asmCompileRun(TestBreakInWhile.toString()));
+assertEquals(8, _WASMEXP_.instantiateModuleFromAsm(
+ TestBreakInWhile.toString()).caller());
+
function TestBreakInNestedWhile() {
"use asm";
@@ -198,7 +218,9 @@ function TestBreakInNestedWhile() {
return {caller: caller};
}
-assertEquals(9, _WASMEXP_.asmCompileRun(TestBreakInNestedWhile.toString()));
+assertEquals(9, _WASMEXP_.instantiateModuleFromAsm(
+ TestBreakInNestedWhile.toString()).caller());
+
function TestBreakInBlock() {
"use asm";
@@ -218,7 +240,9 @@ function TestBreakInBlock() {
return {caller: caller};
}
-assertEquals(10, _WASMEXP_.asmCompileRun(TestBreakInBlock.toString()));
+assertEquals(10, _WASMEXP_.instantiateModuleFromAsm(
+ TestBreakInBlock.toString()).caller());
+
function TestBreakInNamedWhile() {
"use asm";
@@ -237,7 +261,9 @@ function TestBreakInNamedWhile() {
return {caller: caller};
}
-assertEquals(11, _WASMEXP_.asmCompileRun(TestBreakInNamedWhile.toString()));
+assertEquals(11, _WASMEXP_.instantiateModuleFromAsm(
+ TestBreakInNamedWhile.toString()).caller());
+
function TestContinue() {
"use asm";
@@ -258,7 +284,9 @@ function TestContinue() {
return {caller: caller};
}
-assertEquals(-5, _WASMEXP_.asmCompileRun(TestContinue.toString()));
+assertEquals(-5, _WASMEXP_.instantiateModuleFromAsm(
+ TestContinue.toString()).caller());
+
function TestContinueInNamedWhile() {
"use asm";
@@ -284,7 +312,9 @@ function TestContinueInNamedWhile() {
return {caller: caller};
}
-assertEquals(20, _WASMEXP_.asmCompileRun(TestContinueInNamedWhile.toString()));
+assertEquals(20, _WASMEXP_.instantiateModuleFromAsm(
+ TestContinueInNamedWhile.toString()).caller());
+
function TestNot() {
"use asm";
@@ -297,7 +327,9 @@ function TestNot() {
return {caller:caller};
}
-assertEquals(1, _WASMEXP_.asmCompileRun(TestNot.toString()));
+assertEquals(1, _WASMEXP_.instantiateModuleFromAsm(
+ TestNot.toString()).caller());
+
function TestNotEquals() {
"use asm";
@@ -313,7 +345,9 @@ function TestNotEquals() {
return {caller:caller};
}
-assertEquals(21, _WASMEXP_.asmCompileRun(TestNotEquals.toString()));
+assertEquals(21, _WASMEXP_.instantiateModuleFromAsm(
+ TestNotEquals.toString()).caller());
+
function TestUnsignedComparison() {
"use asm";
@@ -329,7 +363,9 @@ function TestUnsignedComparison() {
return {caller:caller};
}
-assertEquals(22, _WASMEXP_.asmCompileRun(TestUnsignedComparison.toString()));
+assertEquals(22, _WASMEXP_.instantiateModuleFromAsm(
+ TestUnsignedComparison.toString()).caller());
+
function TestMixedAdd() {
"use asm";
@@ -350,7 +386,9 @@ function TestMixedAdd() {
return {caller:caller};
}
-assertEquals(23, _WASMEXP_.asmCompileRun(TestMixedAdd.toString()));
+assertEquals(23, _WASMEXP_.instantiateModuleFromAsm(
+ TestMixedAdd.toString()).caller());
+
function TestInt32HeapAccess(stdlib, foreign, buffer) {
"use asm";
@@ -368,27 +406,49 @@ function TestInt32HeapAccess(stdlib, foreign, buffer) {
return {caller: caller};
}
-assertEquals(7, _WASMEXP_.asmCompileRun(TestInt32HeapAccess.toString()));
+assertEquals(7, _WASMEXP_.instantiateModuleFromAsm(
+ TestInt32HeapAccess.toString()).caller());
+
+
+function TestInt32HeapAccessExternal() {
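+  // Pass an externally created ArrayBuffer as the heap; the store performed
+  // by caller() must be visible through the external Int32Array view.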
+ var memory = new ArrayBuffer(1024);
+ var memory_int32 = new Int32Array(memory);
+ var module = _WASMEXP_.instantiateModuleFromAsm(
+ TestInt32HeapAccess.toString(), null, memory);
+ module.__init__();
+ assertEquals(7, module.caller());
+ assertEquals(7, memory_int32[2]);
+}
+
+TestInt32HeapAccessExternal();
+
function TestHeapAccessIntTypes() {
var types = [
- ['Int8Array', '>> 0'],
- ['Uint8Array', '>> 0'],
- ['Int16Array', '>> 1'],
- ['Uint16Array', '>> 1'],
- ['Int32Array', '>> 2'],
- ['Uint32Array', '>> 2'],
+ [Int8Array, 'Int8Array', '>> 0'],
+ [Uint8Array, 'Uint8Array', '>> 0'],
+ [Int16Array, 'Int16Array', '>> 1'],
+ [Uint16Array, 'Uint16Array', '>> 1'],
+ [Int32Array, 'Int32Array', '>> 2'],
+ [Uint32Array, 'Uint32Array', '>> 2'],
];
for (var i = 0; i < types.length; i++) {
var code = TestInt32HeapAccess.toString();
- code = code.replace('Int32Array', types[i][0]);
- code = code.replace(/>> 2/g, types[i][1]);
- assertEquals(7, _WASMEXP_.asmCompileRun(code));
+ code = code.replace('Int32Array', types[i][1]);
+ code = code.replace(/>> 2/g, types[i][2]);
+ var memory = new ArrayBuffer(1024);
+ var memory_view = new types[i][0](memory);
+ var module = _WASMEXP_.instantiateModuleFromAsm(code, null, memory);
+ module.__init__();
+ assertEquals(7, module.caller());
+ assertEquals(7, memory_view[2]);
+ assertEquals(7, _WASMEXP_.instantiateModuleFromAsm(code).caller());
}
}
TestHeapAccessIntTypes();
+
function TestFloatHeapAccess(stdlib, foreign, buffer) {
"use asm";
@@ -411,7 +471,22 @@ function TestFloatHeapAccess(stdlib, foreign, buffer) {
return {caller: caller};
}
-assertEquals(1, _WASMEXP_.asmCompileRun(TestFloatHeapAccess.toString()));
+assertEquals(1, _WASMEXP_.instantiateModuleFromAsm(
+ TestFloatHeapAccess.toString()).caller());
+
+
+function TestFloatHeapAccessExternal() {
+ var memory = new ArrayBuffer(1024);
+ var memory_float64 = new Float64Array(memory);
+ var module = _WASMEXP_.instantiateModuleFromAsm(
+ TestFloatHeapAccess.toString(), null, memory);
+ module.__init__();
+ assertEquals(1, module.caller());
+ assertEquals(9.0, memory_float64[1]);
+}
+
+TestFloatHeapAccessExternal();
+
function TestConvertI32() {
"use asm";
@@ -427,7 +502,9 @@ function TestConvertI32() {
return {caller:caller};
}
-assertEquals(24, _WASMEXP_.asmCompileRun(TestConvertI32.toString()));
+assertEquals(24, _WASMEXP_.instantiateModuleFromAsm(
+ TestConvertI32.toString()).caller());
+
function TestConvertF64FromInt() {
"use asm";
@@ -443,7 +520,9 @@ function TestConvertF64FromInt() {
return {caller:caller};
}
-assertEquals(25, _WASMEXP_.asmCompileRun(TestConvertF64FromInt.toString()));
+assertEquals(25, _WASMEXP_.instantiateModuleFromAsm(
+ TestConvertF64FromInt.toString()).caller());
+
function TestConvertF64FromUnsigned() {
"use asm";
@@ -461,7 +540,9 @@ function TestConvertF64FromUnsigned() {
return {caller:caller};
}
-assertEquals(26, _WASMEXP_.asmCompileRun(TestConvertF64FromUnsigned.toString()));
+assertEquals(26, _WASMEXP_.instantiateModuleFromAsm(
+ TestConvertF64FromUnsigned.toString()).caller());
+
function TestModInt() {
"use asm";
@@ -475,7 +556,9 @@ function TestModInt() {
return {caller:caller};
}
-assertEquals(-27, _WASMEXP_.asmCompileRun(TestModInt.toString()));
+assertEquals(-27, _WASMEXP_.instantiateModuleFromAsm(
+ TestModInt.toString()).caller());
+
function TestModUnsignedInt() {
"use asm";
@@ -489,7 +572,9 @@ function TestModUnsignedInt() {
return {caller:caller};
}
-assertEquals(8, _WASMEXP_.asmCompileRun(TestModUnsignedInt.toString()));
+assertEquals(8, _WASMEXP_.instantiateModuleFromAsm(
+ TestModUnsignedInt.toString()).caller());
+
function TestModDouble() {
"use asm";
@@ -506,7 +591,9 @@ function TestModDouble() {
return {caller:caller};
}
-assertEquals(28, _WASMEXP_.asmCompileRun(TestModDouble.toString()));
+assertEquals(28, _WASMEXP_.instantiateModuleFromAsm(
+ TestModDouble.toString()).caller());
+
/*
TODO: Fix parsing of negative doubles
@@ -526,9 +613,11 @@ function TestModDoubleNegative() {
return {caller:caller};
}
-assertEquals(28, _WASMEXP_.asmCompileRun(TestModDoubleNegative.toString()));
+assertEquals(28, _WASMEXP_.instantiateModuleFromAsm(
+ TestModDoubleNegative.toString()).caller());
*/
+
function TestNamedFunctions() {
"use asm";
@@ -552,6 +641,7 @@ var module = _WASMEXP_.instantiateModuleFromAsm(TestNamedFunctions.toString());
module.init();
assertEquals(77.5, module.add());
+
function TestGlobalsWithInit() {
"use asm";
@@ -569,6 +659,7 @@ var module = _WASMEXP_.instantiateModuleFromAsm(TestGlobalsWithInit.toString());
module.__init__();
assertEquals(77.5, module.add());
+
function TestForLoop() {
"use asm"
@@ -584,7 +675,9 @@ function TestForLoop() {
return {caller:caller};
}
-assertEquals(54, _WASMEXP_.asmCompileRun(TestForLoop.toString()));
+assertEquals(54, _WASMEXP_.instantiateModuleFromAsm(
+ TestForLoop.toString()).caller());
+
function TestForLoopWithoutInit() {
"use asm"
@@ -601,7 +694,9 @@ function TestForLoopWithoutInit() {
return {caller:caller};
}
-assertEquals(100, _WASMEXP_.asmCompileRun(TestForLoopWithoutInit.toString()));
+assertEquals(100, _WASMEXP_.instantiateModuleFromAsm(
+ TestForLoopWithoutInit.toString()).caller());
+
function TestForLoopWithoutCondition() {
"use asm"
@@ -621,7 +716,9 @@ function TestForLoopWithoutCondition() {
return {caller:caller};
}
-assertEquals(66, _WASMEXP_.asmCompileRun(TestForLoopWithoutCondition.toString()));
+assertEquals(66, _WASMEXP_.instantiateModuleFromAsm(
+ TestForLoopWithoutCondition.toString()).caller());
+
function TestForLoopWithoutNext() {
"use asm"
@@ -637,7 +734,9 @@ function TestForLoopWithoutNext() {
return {caller:caller};
}
-assertEquals(41, _WASMEXP_.asmCompileRun(TestForLoopWithoutNext.toString()));
+assertEquals(41, _WASMEXP_.instantiateModuleFromAsm(
+ TestForLoopWithoutNext.toString()).caller());
+
function TestForLoopWithoutBody() {
"use asm"
@@ -652,7 +751,9 @@ function TestForLoopWithoutBody() {
return {caller:caller};
}
-assertEquals(45, _WASMEXP_.asmCompileRun(TestForLoopWithoutBody.toString()));
+assertEquals(45, _WASMEXP_.instantiateModuleFromAsm(
+ TestForLoopWithoutBody.toString()).caller());
+
function TestDoWhile() {
"use asm"
@@ -670,7 +771,9 @@ function TestDoWhile() {
return {caller:caller};
}
-assertEquals(84, _WASMEXP_.asmCompileRun(TestDoWhile.toString()));
+assertEquals(84, _WASMEXP_.instantiateModuleFromAsm(
+ TestDoWhile.toString()).caller());
+
function TestConditional() {
"use asm"
@@ -683,7 +786,9 @@ function TestConditional() {
return {caller:caller};
}
-assertEquals(41, _WASMEXP_.asmCompileRun(TestConditional.toString()));
+assertEquals(41, _WASMEXP_.instantiateModuleFromAsm(
+ TestConditional.toString()).caller());
+
function TestSwitch() {
"use asm"
@@ -710,7 +815,9 @@ function TestSwitch() {
return {caller:caller};
}
-assertEquals(23, _WASMEXP_.asmCompileRun(TestSwitch.toString()));
+assertEquals(23, _WASMEXP_.instantiateModuleFromAsm(
+ TestSwitch.toString()).caller());
+
function TestSwitchFallthrough() {
"use asm"
@@ -731,7 +838,9 @@ function TestSwitchFallthrough() {
return {caller:caller};
}
-assertEquals(42, _WASMEXP_.asmCompileRun(TestSwitchFallthrough.toString()));
+assertEquals(42, _WASMEXP_.instantiateModuleFromAsm(
+ TestSwitchFallthrough.toString()).caller());
+
function TestNestedSwitch() {
"use asm"
@@ -756,7 +865,9 @@ function TestNestedSwitch() {
return {caller:caller};
}
-assertEquals(43, _WASMEXP_.asmCompileRun(TestNestedSwitch.toString()));
+assertEquals(43, _WASMEXP_.instantiateModuleFromAsm(
+ TestNestedSwitch.toString()).caller());
+
function TestInitFunctionWithNoGlobals() {
"use asm";
@@ -771,6 +882,7 @@ var module = _WASMEXP_.instantiateModuleFromAsm(
module.__init__();
assertEquals(51, module.caller());
+
function TestExportNameDifferentFromFunctionName() {
"use asm";
function caller() {
@@ -783,3 +895,458 @@ var module = _WASMEXP_.instantiateModuleFromAsm(
TestExportNameDifferentFromFunctionName.toString());
module.__init__();
assertEquals(55, module.alt_caller());
+
+
+function TestFunctionTableSingleFunction() {
+ "use asm";
+
+ function dummy() {
+ return 71;
+ }
+
+ function caller() {
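+    // asm.js requires function table indices to be masked with the table
+    // length - 1; for this single-entry table the mask is 0.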
+ return function_table[0&0]() | 0;
+ }
+
+ var function_table = [dummy]
+
+ return {caller:caller};
+}
+
+assertEquals(71, _WASMEXP_.instantiateModuleFromAsm(
+ TestFunctionTableSingleFunction.toString()).caller());
+
+
+function TestFunctionTableMultipleFunctions() {
+ "use asm";
+
+ function inc1(x) {
+ x = x|0;
+ return (x+1)|0;
+ }
+
+ function inc2(x) {
+ x = x|0;
+ return (x+2)|0;
+ }
+
+ function caller() {
+ if (function_table[0&1](50) == 51) {
+ if (function_table[1&1](60) == 62) {
+ return 73;
+ }
+ }
+ return 0;
+ }
+
+ var function_table = [inc1, inc2]
+
+ return {caller:caller};
+}
+
+assertEquals(73, _WASMEXP_.instantiateModuleFromAsm(
+ TestFunctionTableMultipleFunctions.toString()).caller());
+
+
+function TestFunctionTable() {
+ "use asm";
+
+ function add(a, b) {
+ a = a|0;
+ b = b|0;
+ return (a+b)|0;
+ }
+
+ function sub(a, b) {
+ a = a|0;
+ b = b|0;
+ return (a-b)|0;
+ }
+
+ function inc(a) {
+ a = a|0;
+ return (a+1)|0;
+ }
+
+ function caller(table_id, fun_id, arg1, arg2) {
+ table_id = table_id|0;
+ fun_id = fun_id|0;
+ arg1 = arg1|0;
+ arg2 = arg2|0;
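+    // Each table index is masked with the table length - 1 (&3 for the
+    // 4-entry funBin table, &0 for the 1-entry fun table), so out-of-range
+    // ids wrap around.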
+ if (table_id == 0) {
+ return funBin[fun_id&3](arg1, arg2)|0;
+ } else if (table_id == 1) {
+ return fun[fun_id&0](arg1)|0;
+ }
+ return 0;
+ }
+
+ var funBin = [add, sub, sub, add];
+ var fun = [inc];
+
+ return {caller:caller};
+}
+
+var module = _WASMEXP_.instantiateModuleFromAsm(TestFunctionTable.toString());
+module.__init__();
+assertEquals(55, module.caller(0, 0, 33, 22));
+assertEquals(11, module.caller(0, 1, 33, 22));
+assertEquals(9, module.caller(0, 2, 54, 45));
+assertEquals(99, module.caller(0, 3, 54, 45));
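+// fun_id 4 masks to 0 (4 & 3), so this calls add(12, 11).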
+assertEquals(23, module.caller(0, 4, 12, 11));
+assertEquals(31, module.caller(1, 0, 30, 11));
+
+
+function TestForeignFunctions() {
+ function AsmModule(stdlib, foreign, buffer) {
+ "use asm";
+
+ var setVal = foreign.setVal;
+ var getVal = foreign.getVal;
+
+ function caller(initial_value, new_value) {
+ initial_value = initial_value|0;
+ new_value = new_value|0;
+ if ((getVal()|0) == (initial_value|0)) {
+ setVal(new_value|0);
+ return getVal()|0;
+ }
+ return 0;
+ }
+
+ return {caller:caller};
+ }
+
+ function ffi(initial_val) {
+ var val = initial_val;
+
+ function getVal() {
+ return val;
+ }
+
+ function setVal(new_val) {
+ val = new_val;
+ }
+
+ return {getVal:getVal, setVal:setVal};
+ }
+
+ var foreign = new ffi(23);
+
+ var module = _WASMEXP_.instantiateModuleFromAsm(AsmModule.toString(),
+ foreign, null);
+
+ module.__init__();
+ assertEquals(103, module.caller(23, 103));
+}
+
+TestForeignFunctions();
+
+
+function TestForeignFunctionMultipleUse() {
+ function AsmModule(stdlib, foreign, buffer) {
+ "use asm";
+
+ var getVal = foreign.getVal;
+
+ function caller(int_val, double_val) {
+ int_val = int_val|0;
+ double_val = +double_val;
+ if ((getVal()|0) == (int_val|0)) {
+ if ((+getVal()) == (+double_val)) {
+ return 89;
+ }
+ }
+ return 0;
+ }
+
+ return {caller:caller};
+ }
+
+ function ffi() {
+ function getVal() {
+ return 83.25;
+ }
+
+ return {getVal:getVal};
+ }
+
+ var foreign = new ffi();
+
+ var module = _WASMEXP_.instantiateModuleFromAsm(AsmModule.toString(),
+ foreign, null);
+
+ module.__init__();
+ assertEquals(89, module.caller(83, 83.25));
+}
+
+TestForeignFunctionMultipleUse();
+
+
+function TestForeignVariables() {
+ function AsmModule(stdlib, foreign, buffer) {
+ "use asm";
+
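+    // Imports coerced with |0 go through ToInt32, those coerced with unary +
+    // go through ToNumber; the TestCase calls below exercise both paths.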
+ var i1 = foreign.foo | 0;
+ var f1 = +foreign.bar;
+ var i2 = foreign.baz | 0;
+ var f2 = +foreign.baz;
+
+ function geti1() {
+ return i1|0;
+ }
+
+ function getf1() {
+ return +f1;
+ }
+
+ function geti2() {
+ return i2|0;
+ }
+
+ function getf2() {
+ return +f2;
+ }
+
+ return {geti1:geti1, getf1:getf1, geti2:geti2, getf2:getf2};
+ }
+
+ function TestCase(env, i1, f1, i2, f2) {
+ var module = _WASMEXP_.instantiateModuleFromAsm(
+ AsmModule.toString(), env);
+ module.__init__();
+ assertEquals(i1, module.geti1());
+ assertEquals(f1, module.getf1());
+ assertEquals(i2, module.geti2());
+ assertEquals(f2, module.getf2());
+ }
+
+ // Check normal operation.
+ TestCase({foo: 123, bar: 234.5, baz: 345.7}, 123, 234.5, 345, 345.7);
+ // Check partial operation.
+ TestCase({baz: 345.7}, 0, NaN, 345, 345.7);
+ // Check that undefined values are converted to proper defaults.
+ TestCase({qux: 999}, 0, NaN, 0, NaN);
+ // Check that an undefined ffi is ok.
+ TestCase(undefined, 0, NaN, 0, NaN);
+ // Check that true values are converted properly.
+ TestCase({foo: true, bar: true, baz: true}, 1, 1.0, 1, 1.0);
+ // Check that false values are converted properly.
+ TestCase({foo: false, bar: false, baz: false}, 0, 0, 0, 0);
+ // Check that null values are converted properly.
+ TestCase({foo: null, bar: null, baz: null}, 0, 0, 0, 0);
+ // Check that string values are converted properly.
+ TestCase({foo: 'hi', bar: 'there', baz: 'dude'}, 0, NaN, 0, NaN);
+  TestCase({foo: '0xff', bar: '234', baz: '456.1'}, 255, 234, 456, 456.1);
+ // Check that Date values are converted properly.
+ TestCase({foo: new Date(123), bar: new Date(456),
+ baz: new Date(789)}, 123, 456, 789, 789);
+ // Check that list values are converted properly.
+ TestCase({foo: [], bar: [], baz: []}, 0, 0, 0, 0);
+ // Check that object values are converted properly.
+ TestCase({foo: {}, bar: {}, baz: {}}, 0, NaN, 0, NaN);
+ // Check that getter object values are converted properly.
+ var o = {
+ get foo() {
+ return 123.4;
+ }
+ };
+ TestCase({foo: o.foo, bar: o.foo, baz: o.foo}, 123, 123.4, 123, 123.4);
+  // Check that a getter on the ffi object itself is converted properly.
+ var o = {
+ get baz() {
+ return 123.4;
+ }
+ };
+ TestCase(o, 0, NaN, 123, 123.4);
+ // Check that objects with valueOf are converted properly.
+ var o = {
+ valueOf: function() { return 99; }
+ };
+ TestCase({foo: o, bar: o, baz: o}, 99, 99, 99, 99);
+ // Check that function values are converted properly.
+ TestCase({foo: TestCase, bar: TestCase, qux: TestCase}, 0, NaN, 0, NaN);
+ // Check that a missing ffi object is safe.
+ TestCase(undefined, 0, NaN, 0, NaN);
+}
+
+TestForeignVariables();
+
+
+(function() {
+ function TestByteHeapAccessCompat(stdlib, foreign, buffer) {
+ "use asm";
+
+ var HEAP8 = new stdlib.Uint8Array(buffer);
+ var HEAP32 = new stdlib.Int32Array(buffer);
+
+ function store(i, v) {
+ i = i | 0;
+ v = v | 0;
+ HEAP32[i >> 2] = v;
+ }
+
+ function storeb(i, v) {
+ i = i | 0;
+ v = v | 0;
+ HEAP8[i | 0] = v;
+ }
+
+ function load(i) {
+ i = i | 0;
+ return HEAP8[i] | 0;
+ }
+
+ function iload(i) {
+ i = i | 0;
+ return HEAP8[HEAP32[i >> 2] | 0] | 0;
+ }
+
+ return {load: load, iload: iload, store: store, storeb: storeb};
+ }
+
+ var m = _WASMEXP_.instantiateModuleFromAsm(
+ TestByteHeapAccessCompat.toString());
+ m.store(0, 20);
+ m.store(4, 21);
+ m.store(8, 22);
+ m.storeb(20, 123);
+ m.storeb(21, 42);
+ m.storeb(22, 77);
+ assertEquals(123, m.load(20));
+ assertEquals(42, m.load(21));
+ assertEquals(77, m.load(22));
+ assertEquals(123, m.iload(0));
+ assertEquals(42, m.iload(4));
+ assertEquals(77, m.iload(8));
+})();
+
+
+(function TestGlobalBlock() {
+ function Module(stdlib, foreign, buffer) {
+ "use asm";
+
+ var x = foreign.x | 0, y = foreign.y | 0;
+
+ function test() {
+ return (x + y) | 0;
+ }
+
+ return {test: test};
+ }
+
+ var m = _WASMEXP_.instantiateModuleFromAsm(
+ Module.toString(), { x: 4, y: 11 });
+ m.__init__();
+ assertEquals(15, m.test());
+})();
+
+
+(function TestComma() {
+ function CommaModule() {
+ "use asm";
+
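+    // The comma operator evaluates both operands and yields the second one,
+    // so ifunc returns its int argument and dfunc its double argument.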
+ function ifunc(a, b) {
+ a = +a;
+ b = b | 0;
+ return (a, b) | 0;
+ }
+
+ function dfunc(a, b) {
+ a = a | 0;
+ b = +b;
+ return +(a, b);
+ }
+
+ return {ifunc: ifunc, dfunc: dfunc};
+ }
+
+ var m = _WASMEXP_.instantiateModuleFromAsm(CommaModule.toString());
+ assertEquals(123, m.ifunc(456.7, 123));
+ assertEquals(123.4, m.dfunc(456, 123.4));
+})();
+
+
+(function TestOr() {
+ function Module() {
+ "use asm";
+ function func() {
+ var x = 1;
+ var y = 2;
+ return (x | y) | 0;
+ }
+ return {func: func};
+ }
+
+ var m = _WASMEXP_.instantiateModuleFromAsm(Module.toString());
+ assertEquals(3, m.func());
+})();
+
+
+(function TestAnd() {
+ function Module() {
+ "use asm";
+ function func() {
+ var x = 3;
+ var y = 2;
+ return (x & y) | 0;
+ }
+ return {func: func};
+ }
+
+ var m = _WASMEXP_.instantiateModuleFromAsm(Module.toString());
+ assertEquals(2, m.func());
+})();
+
+
+(function TestXor() {
+ function Module() {
+ "use asm";
+ function func() {
+ var x = 3;
+ var y = 2;
+ return (x ^ y) | 0;
+ }
+ return {func: func};
+ }
+
+ var m = _WASMEXP_.instantiateModuleFromAsm(Module.toString());
+ assertEquals(1, m.func());
+})();
+
+
+(function TestIntishAssignment() {
+ function Module(stdlib, foreign, heap) {
+ "use asm";
+ var HEAP32 = new stdlib.Int32Array(heap);
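+    // asm.js allows an intish sum to be stored to an int heap view directly.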
+ function func() {
+ var a = 1;
+ var b = 2;
+ HEAP32[0] = a + b;
+ return HEAP32[0] | 0;
+ }
+ return {func: func};
+ }
+
+ var m = _WASMEXP_.instantiateModuleFromAsm(Module.toString());
+ assertEquals(3, m.func());
+})();
+
+
+(function TestFloatishAssignment() {
+ function Module(stdlib, foreign, heap) {
+ "use asm";
+ var HEAPF32 = new stdlib.Float32Array(heap);
+ var fround = stdlib.Math.fround;
+ function func() {
+ var a = fround(1.0);
+ var b = fround(2.0);
+ HEAPF32[0] = a + b;
+ return +HEAPF32[0];
+ }
+ return {func: func};
+ }
+
+ var m = _WASMEXP_.instantiateModuleFromAsm(Module.toString());
+ assertEquals(3, m.func());
+}) // TODO(bradnelson): Enable when Math.fround implementation lands.
diff --git a/deps/v8/test/mjsunit/wasm/import-table.js b/deps/v8/test/mjsunit/wasm/import-table.js
new file mode 100644
index 0000000000..33d1c3551c
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/import-table.js
@@ -0,0 +1,387 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+function testCallImport(func, check) {
+ var kBodySize = 6;
+ var kNameFunOffset = 29 + kBodySize + 1;
+ var kNameMainOffset = kNameFunOffset + 4;
+
+ var ffi = new Object();
+ ffi.fun = func;
+
+ var data = bytes(
+ // signatures
+ kDeclSignatures, 1,
+ 2, kAstI32, kAstF64, kAstF64, // (f64,f64) -> int
+ // -- main function
+ kDeclFunctions,
+ 1,
+ kDeclFunctionName | kDeclFunctionExport,
+ 0, 0,
+ kNameMainOffset, 0, 0, 0, // name offset
+ kBodySize, 0,
+ // main body
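+      // pre-order encoding: the call to import #0 comes first, then its two arguments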
+ kExprCallImport, 0, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ // imports
+ kDeclImportTable,
+ 1,
+ 0, 0, // sig index
+ 0, 0, 0, 0, // module name offset
+ kNameFunOffset, 0, 0, 0, // function name offset
+ // names
+ kDeclEnd,
+ 'f', 'u', 'n', 0, // --
+ 'm', 'a', 'i', 'n', 0 // --
+ );
+
+ var module = _WASMEXP_.instantiateModule(data, ffi);
+
+ assertEquals("function", typeof module.main);
+
+ for (var i = 0; i < 100000; i += 10003) {
+ var a = 22.5 + i, b = 10.5 + i;
+ var r = module.main(a, b);
+ check(r, a, b);
+ }
+}
+
+var global = (function() { return this; })();
+var params = [-99, -99, -99, -99];
+var was_called = false;
+var length = -1;
+
+function FOREIGN_SUB(a, b) {
+ print("FOREIGN_SUB(" + a + ", " + b + ")");
+ was_called = true;
+ params[0] = this;
+ params[1] = a;
+ params[2] = b;
+ return (a - b) | 0;
+}
+
+function check_FOREIGN_SUB(r, a, b) {
+ assertEquals(a - b | 0, r);
+ assertTrue(was_called);
+// assertEquals(global, params[0]); // sloppy mode
+ assertEquals(a, params[1]);
+ assertEquals(b, params[2]);
+ was_called = false;
+}
+
+testCallImport(FOREIGN_SUB, check_FOREIGN_SUB);
+
+
+function FOREIGN_ABCD(a, b, c, d) {
+ print("FOREIGN_ABCD(" + a + ", " + b + ", " + c + ", " + d + ")");
+ was_called = true;
+ params[0] = this;
+ params[1] = a;
+ params[2] = b;
+ params[3] = c;
+ params[4] = d;
+ return (a * b * 6) | 0;
+}
+
+function check_FOREIGN_ABCD(r, a, b) {
+ assertEquals((a * b * 6) | 0, r);
+ assertTrue(was_called);
+// assertEquals(global, params[0]); // sloppy mode.
+ assertEquals(a, params[1]);
+ assertEquals(b, params[2]);
+ assertEquals(undefined, params[3]);
+ assertEquals(undefined, params[4]);
+ was_called = false;
+}
+
+testCallImport(FOREIGN_ABCD, check_FOREIGN_ABCD);
+
+function FOREIGN_ARGUMENTS0() {
+ print("FOREIGN_ARGUMENTS0");
+ was_called = true;
+ length = arguments.length;
+ for (var i = 0; i < arguments.length; i++) {
+ params[i] = arguments[i];
+ }
+ return (arguments[0] * arguments[1] * 7) | 0;
+}
+
+function FOREIGN_ARGUMENTS1(a) {
+ print("FOREIGN_ARGUMENTS1", a);
+ was_called = true;
+ length = arguments.length;
+ for (var i = 0; i < arguments.length; i++) {
+ params[i] = arguments[i];
+ }
+ return (arguments[0] * arguments[1] * 7) | 0;
+}
+
+function FOREIGN_ARGUMENTS2(a, b) {
+ print("FOREIGN_ARGUMENTS2", a, b);
+ was_called = true;
+ length = arguments.length;
+ for (var i = 0; i < arguments.length; i++) {
+ params[i] = arguments[i];
+ }
+ return (a * b * 7) | 0;
+}
+
+function FOREIGN_ARGUMENTS3(a, b, c) {
+ print("FOREIGN_ARGUMENTS3", a, b, c);
+ was_called = true;
+ length = arguments.length;
+ for (var i = 0; i < arguments.length; i++) {
+ params[i] = arguments[i];
+ }
+ return (a * b * 7) | 0;
+}
+
+function FOREIGN_ARGUMENTS4(a, b, c, d) {
+ print("FOREIGN_ARGUMENTS4", a, b, c, d);
+ was_called = true;
+ length = arguments.length;
+ for (var i = 0; i < arguments.length; i++) {
+ params[i] = arguments[i];
+ }
+ return (a * b * 7) | 0;
+}
+
+function check_FOREIGN_ARGUMENTS(r, a, b) {
+ assertEquals((a * b * 7) | 0, r);
+ assertTrue(was_called);
+ assertEquals(2, length);
+ assertEquals(a, params[0]);
+ assertEquals(b, params[1]);
+ was_called = false;
+}
+
+// Check a bunch of uses of the arguments object.
+testCallImport(FOREIGN_ARGUMENTS0, check_FOREIGN_ARGUMENTS);
+testCallImport(FOREIGN_ARGUMENTS1, check_FOREIGN_ARGUMENTS);
+testCallImport(FOREIGN_ARGUMENTS2, check_FOREIGN_ARGUMENTS);
+testCallImport(FOREIGN_ARGUMENTS3, check_FOREIGN_ARGUMENTS);
+testCallImport(FOREIGN_ARGUMENTS4, check_FOREIGN_ARGUMENTS);
+
+function returnValue(val) {
+ return function(a, b) {
+ print("RETURN_VALUE ", val);
+ return val;
+ }
+}
+
+
+function checkReturn(expected) {
+ return function(r, a, b) { assertEquals(expected, r); }
+}
+
+// Check that returning weird values doesn't crash
+testCallImport(returnValue(undefined), checkReturn(0));
+testCallImport(returnValue(null), checkReturn(0));
+testCallImport(returnValue("0"), checkReturn(0));
+testCallImport(returnValue("-77"), checkReturn(-77));
+
+var objWithValueOf = {valueOf: function() { return 198; }}
+
+testCallImport(returnValue(objWithValueOf), checkReturn(198));
+
+
+function testCallBinopVoid(type, func, check) {
+ var kBodySize = 10;
+ var kNameFunOffset = 28 + kBodySize + 1;
+ var kNameMainOffset = kNameFunOffset + 4;
+
+ var ffi = new Object();
+
+ var passed_length = -1;
+ var passed_a = -1;
+ var passed_b = -1;
+ var args_a = -1;
+ var args_b = -1;
+
+ ffi.fun = function(a, b) {
+ passed_length = arguments.length;
+ passed_a = a;
+ passed_b = b;
+ args_a = arguments[0];
+ args_b = arguments[1];
+ }
+
+ var data = bytes(
+ // -- signatures
+ kDeclSignatures, 2,
+ 2, kAstStmt, type, type, // (type,type)->void
+ 2, kAstI32, type, type, // (type,type)->int
+ // -- foreign function
+ kDeclFunctions, 2,
+ kDeclFunctionName | kDeclFunctionImport,
+ 0, 0, // signature index
+ kNameFunOffset, 0, 0, 0, // name offset
+ // -- main function
+ kDeclFunctionName | kDeclFunctionExport,
+ 1, 0, // signature index
+ kNameMainOffset, 0, 0, 0, // name offset
+ kBodySize, 0, // body size
+ // main body
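+    // block of 2 exprs: the void call to the imported fun, then i8 const 99 as the result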
+ kExprBlock, 2, // --
+ kExprCallFunction, 0, // --
+ kExprGetLocal, 0, // --
+ kExprGetLocal, 1, // --
+ kExprI8Const, 99, // --
+ // names
+ kDeclEnd,
+ 'f', 'u', 'n', 0, // --
+ 'm', 'a', 'i', 'n', 0 // --
+ );
+
+ var module = _WASMEXP_.instantiateModule(data, ffi);
+
+ assertEquals("function", typeof module.main);
+
+ print("testCallBinopVoid", type);
+
+ for (var i = 0; i < 100000; i += 10003.1) {
+ var a = 22.5 + i, b = 10.5 + i;
+ var r = module.main(a, b);
+ assertEquals(99, r);
+ assertEquals(2, passed_length);
+ var expected_a, expected_b;
+ switch (type) {
+ case kAstI32: {
+ expected_a = a | 0;
+ expected_b = b | 0;
+ break;
+ }
+ case kAstF32: {
+ expected_a = Math.fround(a);
+ expected_b = Math.fround(b);
+ break;
+ }
+ case kAstF64: {
+ expected_a = a;
+ expected_b = b;
+ break;
+ }
+ }
+
+ assertEquals(expected_a, args_a);
+ assertEquals(expected_b, args_b);
+ assertEquals(expected_a, passed_a);
+ assertEquals(expected_b, passed_b);
+ }
+}
+
+
+testCallBinopVoid(kAstI32);
+// TODO testCallBinopVoid(kAstI64);
+testCallBinopVoid(kAstF32);
+testCallBinopVoid(kAstF64);
+
+
+
+function testCallPrint() {
+ var kBodySize = 10;
+ var kNamePrintOffset = 10 + 7 + 7 + 9 + kBodySize + 1;
+ var kNameMainOffset = kNamePrintOffset + 6;
+
+ var ffi = new Object();
+ ffi.print = print;
+
+ var data = bytes(
+ // -- signatures
+ kDeclSignatures, 2,
+ 1, kAstStmt, kAstI32, // i32->void
+ 1, kAstStmt, kAstF64, // f64->int
+ kDeclFunctions, 3,
+ // -- import print i32
+ kDeclFunctionName | kDeclFunctionImport,
+ 0, 0, // signature index
+ kNamePrintOffset, 0, 0, 0, // name offset
+ // -- import print f64
+ kDeclFunctionName | kDeclFunctionImport,
+ 1, 0, // signature index
+ kNamePrintOffset, 0, 0, 0, // name offset
+ // -- decl main
+ kDeclFunctionName | kDeclFunctionExport,
+ 1, 0, // signature index
+ kNameMainOffset, 0, 0, 0, // name offset
+ kBodySize, 0, // body size
+ // main body
+ kExprBlock, 2, // --
+ kExprCallFunction, 0, // --
+ kExprI8Const, 97, // --
+ kExprCallFunction, 1, // --
+ kExprGetLocal, 0, // --
+ // names
+ kDeclEnd,
+ 'p', 'r', 'i', 'n', 't', 0, // --
+ 'm', 'a', 'i', 'n', 0 // --
+ );
+
+ var module = _WASMEXP_.instantiateModule(data, ffi);
+
+ assertEquals("function", typeof module.main);
+
+ for (var i = -9; i < 900; i += 6.125) {
+ module.main(i);
+ }
+}
+
+testCallPrint();
+testCallPrint();
+
+
+function testCallImport2(foo, bar, expected) {
+ var kBodySize = 5;
+ var kNameFooOffset = 37 + kBodySize + 1;
+ var kNameBarOffset = kNameFooOffset + 4;
+ var kNameMainOffset = kNameBarOffset + 4;
+
+ var ffi = new Object();
+ ffi.foo = foo;
+ ffi.bar = bar;
+
+ var data = bytes(
+ // signatures
+ kDeclSignatures, 1,
+ 0, kAstI32, // void -> i32
+ // -- main function
+ kDeclFunctions,
+ 1,
+ kDeclFunctionName | kDeclFunctionExport,
+ 0, 0,
+ kNameMainOffset, 0, 0, 0, // name offset
+ kBodySize, 0,
+ // main body
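+      // pre-order: i32.add consumes the results of the two import calls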
+ kExprI32Add, // --
+ kExprCallImport, 0, // --
+ kExprCallImport, 1, // --
+ // imports
+ kDeclImportTable,
+ 2,
+ 0, 0, // sig index
+ 0, 0, 0, 0, // module name offset
+ kNameFooOffset, 0, 0, 0, // function name offset
+ 0, 0, // sig index
+ 0, 0, 0, 0, // module name offset
+ kNameBarOffset, 0, 0, 0, // function name offset
+ // names
+ kDeclEnd,
+ 'f', 'o', 'o', 0, // --
+ 'b', 'a', 'r', 0, // --
+ 'm', 'a', 'i', 'n', 0 // --
+ );
+
+ var module = _WASMEXP_.instantiateModule(data, ffi);
+
+ assertEquals("function", typeof module.main);
+
+ assertEquals(expected, module.main());
+}
+
+testCallImport2(function() { return 33; }, function () { return 44; }, 77);
diff --git a/deps/v8/test/mjsunit/wasm/compile-run-basic.js b/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js
index dbc624a2fb..b103e8f439 100644
--- a/deps/v8/test/mjsunit/wasm/compile-run-basic.js
+++ b/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js
@@ -28,4 +28,4 @@ var data = bytes(
'm', 'a', 'i', 'n', 0 // name
);
-assertEquals(kReturnValue, _WASMEXP_.compileRun(data));
+assertEquals(kReturnValue, _WASMEXP_.instantiateModule(data).main());
diff --git a/deps/v8/test/mjsunit/wasm/stack.js b/deps/v8/test/mjsunit/wasm/stack.js
new file mode 100644
index 0000000000..d4b72c0085
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/stack.js
@@ -0,0 +1,69 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+function testStack(func, check) {
+ var kBodySize = 2;
+ var kNameFunOffset = 22 + kBodySize + 1;
+ var kNameMainOffset = kNameFunOffset + 4;
+
+ var ffi = new Object();
+ ffi.fun = func;
+
+ var data = bytes(
+ // signatures
+ kDeclSignatures, 1, // --
+ 0, kAstStmt, // () -> void
+ // -- foreign function
+ kDeclFunctions, 2, // --
+ kDeclFunctionName | kDeclFunctionImport, // --
+ 0, 0, // --
+ kNameFunOffset, 0, 0, 0, // name offset
+ // -- main function
+ kDeclFunctionName | kDeclFunctionExport, // --
+ 0, 0, // --
+ kNameMainOffset, 0, 0, 0, // name offset
+ kBodySize, 0,
+ // main body
+ kExprCallFunction, 0, // --
+ // names
+ kDeclEnd, // --
+ 'f', 'u', 'n', 0, // --
+ 'm', 'a', 'i', 'n', 0 // --
+ );
+
+ var module = _WASMEXP_.instantiateModule(data, ffi);
+
+ assertEquals("function", typeof module.main);
+
+ module.main();
+ check();
+}
+
+// The stack trace contains the full file path; keep only "stack.js".
+function stripPath(s) {
+ return s.replace(/[^ (]*stack\.js/g, "stack.js");
+}
+
+var stack;
+function STACK() {
+ var e = new Error();
+ stack = e.stack;
+}
+
+function check_STACK() {
+ assertEquals(expected, stripPath(stack));
+}
+
+var expected = "Error\n" +
+  // The line numbers below will change as this test gains or loses lines.
+ " at STACK (stack.js:54:11)\n" + // --
+ " at testStack (stack.js:43:10)\n" +
+ // TODO(jfb) Add WebAssembly stack here.
+ " at stack.js:69:1";
+
+testStack(STACK, check_STACK);
diff --git a/deps/v8/test/mjsunit/wasm/start-function.js b/deps/v8/test/mjsunit/wasm/start-function.js
new file mode 100644
index 0000000000..4008efa563
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/start-function.js
@@ -0,0 +1,172 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+
+function instantiate(sig, body) {
+ var module = new Array();
+ module = module.concat([
+ // -- signatures
+ kDeclSignatures, 1,
+ ]);
+ module = module.concat(sig);
+ module = module.concat([
+ // -- functions
+ kDeclFunctions, 1,
+ 0, // decl flags
+ 0, 0, // signature
+ body.length, 0, // body size
+ ]);
+ module = module.concat(body);
+ module = module.concat([
+ // -- declare start function
+ kDeclStartFunction,
+ 0
+ ]);
+
+ var data = bytes.apply(this, module);
+ print(module);
+ print(data instanceof ArrayBuffer);
+ print(data.byteLength);
+ return _WASMEXP_.instantiateModule(data);
+}
+
+function assertFails(sig, body) {
+ try {
+ var module = instantiate(sig, body);
+ print("expected failure, but passes");
+ assertFalse(true);
+ } catch (expected) {
+ print("ok: " + expected);
+ }
+}
+
+function assertVerifies(sig, body) {
+ var module = instantiate(sig, body);
+ assertFalse(module === undefined);
+ assertFalse(module === null);
+ assertFalse(module === 0);
+ assertEquals("object", typeof module);
+ return module;
+}
+
+assertVerifies([0, kAstStmt], [kExprNop]);
+assertVerifies([0, kAstI32], [kExprI8Const, 0]);
+
+// Arguments aren't allowed for start functions.
+assertFails([1, kAstI32, kAstI32], [kExprGetLocal, 0]);
+assertFails([2, kAstI32, kAstI32, kAstF32], [kExprGetLocal, 0]);
+assertFails([3, kAstI32, kAstI32, kAstF32, kAstF64], [kExprGetLocal, 0]);
+
+(function testInvalidIndex() {
+ var kBodySize = 1;
+ var data = bytes(
+ // -- signatures
+ kDeclSignatures, 1,
+ 0, kAstStmt,
+ // -- functions
+ kDeclFunctions, 1,
+ 0, // decl flags
+ 0, 0, // signature
+ kBodySize, 0, // body size
+ kExprNop, // body
+ // -- declare start function
+ kDeclStartFunction,
+ 1
+ );
+
+ assertThrows(function() { _WASMEXP_.instantiateModule(data); });
+})();
+
+
+(function testTwoStartFuncs() {
+ var kBodySize = 1;
+ var data = bytes(
+ // -- signatures
+ kDeclSignatures, 1,
+ 0, kAstStmt,
+ // -- functions
+ kDeclFunctions, 1,
+ 0, // decl flags
+ 0, 0, // signature
+ kBodySize, 0, // body size
+ kExprNop, // body
+ // -- declare start function
+ kDeclStartFunction,
+ 0,
+ // -- declare start function
+ kDeclStartFunction,
+ 0
+ );
+
+ assertThrows(function() { _WASMEXP_.instantiateModule(data); });
+})();
+
+
+(function testRun() {
+ var kBodySize = 6;
+
+ var data = bytes(
+ kDeclMemory,
+ 12, 12, 1, // memory
+ // -- signatures
+ kDeclSignatures, 1,
+ 0, kAstStmt,
+ // -- start function
+ kDeclFunctions, 1,
+ 0, // decl flags
+ 0, 0, // signature
+ kBodySize, 0, // code size
+ // -- start body
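+    // i32 store of 77 to address 0; the start function runs at instantiation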
+ kExprI32StoreMem, 0, kExprI8Const, 0, kExprI8Const, 77,
+ // -- declare start function
+ kDeclStartFunction,
+ 0
+ );
+
+ var module = _WASMEXP_.instantiateModule(data);
+ var memory = module.memory;
+ var view = new Int8Array(memory);
+ assertEquals(77, view[0]);
+})();
+
+(function testStartFFI() {
+ var kBodySize = 2;
+ var kNameOffset = 4 + 9 + 7 + 3;
+
+ var data = bytes(
+ // -- signatures
+ kDeclSignatures, 1,
+ 0, kAstStmt,
+ // -- imported function
+ kDeclFunctions, 2,
+ kDeclFunctionImport | kDeclFunctionName, // decl flags
+ 0, 0, // signature
+ kNameOffset, 0, 0, 0,
+ // -- start function
+ 0, // decl flags
+ 0, 0, // signature
+ kBodySize, 0, // code size
+ // -- start body
+ kExprCallFunction, 0,
+ // -- declare start function
+ kDeclStartFunction,
+ 1,
+ kDeclEnd,
+ 'f', 'o', 'o', 0
+ );
+
+  var ran = false;
+ var ffi = new Object();
+ ffi.foo = function() {
+ print("we ranned at stert!");
+ ranned = true;
+ }
+ var module = _WASMEXP_.instantiateModule(data, ffi);
+ var memory = module.memory;
+ var view = new Int8Array(memory);
+  assertTrue(ran);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/wasm-constants.js b/deps/v8/test/mjsunit/wasm/wasm-constants.js
index 4b710f1037..458b51ad07 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-constants.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-constants.js
@@ -22,6 +22,8 @@ var kDeclFunctions = 0x02;
var kDeclGlobals = 0x03;
var kDeclDataSegments = 0x04;
var kDeclFunctionTable = 0x05;
+var kDeclStartFunction = 0x07;
+var kDeclImportTable = 0x08;
var kDeclEnd = 0x06;
// Function declaration flags
@@ -61,6 +63,7 @@ var kExprLoadGlobal = 0x10;
var kExprStoreGlobal = 0x11;
var kExprCallFunction = 0x12;
var kExprCallIndirect = 0x13;
+var kExprCallImport = 0x1F;
var kExprI32LoadMem8S = 0x20;
var kExprI32LoadMem8U = 0x21;
diff --git a/deps/v8/test/mjsunit/wasm/wasm-object-api.js b/deps/v8/test/mjsunit/wasm/wasm-object-api.js
index 1dfbb6522e..8912271c23 100644
--- a/deps/v8/test/mjsunit/wasm/wasm-object-api.js
+++ b/deps/v8/test/mjsunit/wasm/wasm-object-api.js
@@ -8,4 +8,5 @@ assertFalse(undefined === _WASMEXP_);
assertFalse(undefined == _WASMEXP_);
assertEquals("function", typeof _WASMEXP_.verifyModule);
assertEquals("function", typeof _WASMEXP_.verifyFunction);
-assertEquals("function", typeof _WASMEXP_.compileRun);
+assertEquals("function", typeof _WASMEXP_.instantiateModule);
+assertEquals("function", typeof _WASMEXP_.instantiateModuleFromAsm);
diff --git a/deps/v8/test/optimize_for_size.isolate b/deps/v8/test/optimize_for_size.isolate
index aa7f57036e..16b93157d3 100644
--- a/deps/v8/test/optimize_for_size.isolate
+++ b/deps/v8/test/optimize_for_size.isolate
@@ -2,6 +2,11 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
+ 'variables': {
+ 'command': [
+ '../tools/run-tests.py',
+ ],
+ },
'includes': [
'cctest/cctest.isolate',
'intl/intl.isolate',
diff --git a/deps/v8/test/perf.gyp b/deps/v8/test/perf.gyp
new file mode 100644
index 0000000000..ff846068f4
--- /dev/null
+++ b/deps/v8/test/perf.gyp
@@ -0,0 +1,27 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'perf_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'cctest/cctest.gyp:cctest_exe_run',
+ '../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../build/features.gypi',
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'perf.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/deps/v8/test/perf.isolate b/deps/v8/test/perf.isolate
new file mode 100644
index 0000000000..77f66cc67c
--- /dev/null
+++ b/deps/v8/test/perf.isolate
@@ -0,0 +1,23 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ '../tools/run_perf.py',
+ ],
+ 'files': [
+ '../tools/run_perf.py',
+      # This is often used to trigger performance bots. We include it in the
+      # isolate so that these builds are not deduped.
+ '../tools/whitespace.txt',
+ 'js-perf-test/',
+ 'memory/',
+ 'simdjs/',
+ ],
+ },
+ 'includes': [
+ 'cctest/cctest_exe.isolate',
+ '../src/d8.isolate',
+ ],
+}
diff --git a/deps/v8/test/test262/archive.py b/deps/v8/test/test262/archive.py
new file mode 100755
index 0000000000..8398e51716
--- /dev/null
+++ b/deps/v8/test/test262/archive.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import tarfile
+
+os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+def filter_git(tar_info):
+ if tar_info.name.startswith(os.path.join('data', '.git')):
+ return None
+ else:
+ return tar_info
+
+with tarfile.open('data.tar', 'w') as tar:
+ tar.add('data', filter=filter_git)
diff --git a/deps/v8/test/test262/list.py b/deps/v8/test/test262/list.py
new file mode 100755
index 0000000000..69ca62cf20
--- /dev/null
+++ b/deps/v8/test/test262/list.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import tarfile
+
+os.chdir(os.path.dirname(os.path.abspath(__file__)))
+
+for root, dirs, files in os.walk("data"):
+ dirs[:] = [d for d in dirs if not d.endswith('.git')]
+ for name in files:
+ # These names are for gyp, which expects slashes on all platforms.
+ print('/'.join(root.split(os.sep) + [name]))
diff --git a/deps/v8/test/test262/test262.gyp b/deps/v8/test/test262/test262.gyp
index 45e6bc7271..5d79adda35 100644
--- a/deps/v8/test/test262/test262.gyp
+++ b/deps/v8/test/test262/test262.gyp
@@ -19,6 +19,14 @@
'sources': [
'test262.isolate',
],
+ 'actions': [
+ {
+ 'action_name': 'archive_test262',
+ 'inputs': ['archive.py', '<!@(python list.py)'],
+ 'outputs': ['data.tar'],
+ 'action': ['python', 'archive.py'],
+ },
+ ],
},
],
}],
diff --git a/deps/v8/test/test262/test262.isolate b/deps/v8/test/test262/test262.isolate
index dbeca5e55c..0ac045af17 100644
--- a/deps/v8/test/test262/test262.isolate
+++ b/deps/v8/test/test262/test262.isolate
@@ -4,11 +4,14 @@
{
'variables': {
'files': [
- './',
+ 'data.tar',
+ 'harness-adapt.js',
+ 'test262.status',
+ 'testcfg.py',
],
},
'includes': [
'../../src/d8.isolate',
'../../tools/testrunner/testrunner.isolate',
],
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index a7f76a4e27..a926bcc92d 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -33,9 +33,6 @@
'intl402/11.2.3_b': [FAIL],
'intl402/12.2.3_b': [FAIL],
- # Unicode canonicalization is not available with i18n turned off.
- 'built-ins/String/prototype/localeCompare/15.5.4.9_CE': [['no_i18n', SKIP]],
-
###################### NEEDS INVESTIGATION #######################
# Possibly same cause as S8.5_A2.1, below: floating-point tests.
@@ -50,43 +47,10 @@
###################### MISSING ES6 FEATURES #######################
- # It's unclear what the right behavior for [[Enumerate]] is; we're awaiting
- # clarification in the spec. Currently, our for-in implementation for
- # Proxies checks all trap result values for being strings...
- 'built-ins/Proxy/enumerate/return-trap-result': [FAIL],
- # ...and our Reflect.enumerate implementation is built on for-in by wrapping
- # the iteration's results in a new generator; this postpones exceptions.
- 'built-ins/Reflect/enumerate/return-abrupt-from-result': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4093
- 'built-ins/Array/symbol-species': [FAIL],
- 'built-ins/Array/symbol-species-name': [FAIL],
- 'built-ins/ArrayBuffer/symbol-species': [FAIL],
- 'built-ins/ArrayBuffer/symbol-species-name': [FAIL],
- 'built-ins/ArrayBuffer/prototype/slice/species-constructor-is-not-object': [FAIL],
- 'built-ins/ArrayBuffer/prototype/slice/species-returns-smaller-arraybuffer': [FAIL],
- 'built-ins/ArrayBuffer/prototype/slice/species-is-not-object': [FAIL],
- 'built-ins/ArrayBuffer/prototype/slice/species-is-not-constructor': [FAIL],
- 'built-ins/ArrayBuffer/prototype/slice/species-returns-larger-arraybuffer': [FAIL],
- 'built-ins/ArrayBuffer/prototype/slice/species-returns-not-arraybuffer': [FAIL],
- 'built-ins/ArrayBuffer/prototype/slice/species-returns-same-arraybuffer': [FAIL],
- 'built-ins/ArrayBuffer/prototype/slice/species': [FAIL],
- 'built-ins/Map/symbol-species': [FAIL],
- 'built-ins/Map/symbol-species-name': [FAIL],
- 'built-ins/Promise/Symbol.species/prop-desc': [FAIL],
- 'built-ins/Promise/Symbol.species/return-value': [FAIL],
- 'built-ins/Promise/all/species-get-error': [PASS, FAIL],
- 'built-ins/Promise/prototype/then/ctor-custom': [FAIL],
- 'built-ins/Promise/race/species-get-error': [PASS, FAIL],
- 'built-ins/Promise/symbol-species': [FAIL],
- 'built-ins/Promise/symbol-species-name': [FAIL],
- 'built-ins/RegExp/symbol-species': [FAIL],
- 'built-ins/RegExp/symbol-species-name': [FAIL],
- 'built-ins/Set/symbol-species': [FAIL],
- 'built-ins/Set/symbol-species-name': [FAIL],
- 'built-ins/Symbol/species/basic': [FAIL],
- 'built-ins/Symbol/species/builtin-getter-name': [FAIL],
- 'built-ins/Symbol/species/subclassing': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4768
+ # The Reflect.enumerate trap is removed
+ 'built-ins/Reflect/enumerate/*': [SKIP],
+ 'built-ins/Proxy/enumerate/*': [SKIP],
# https://code.google.com/p/v8/issues/detail?id=4163
'built-ins/GeneratorPrototype/next/context-constructor-invocation': [FAIL],
@@ -99,6 +63,7 @@
'built-ins/Map/iterator-item-second-entry-returns-abrupt': [FAIL],
'built-ins/Map/iterator-items-are-not-object-close-iterator': [FAIL],
'built-ins/Promise/all/iter-close': [FAIL],
+ 'built-ins/Promise/race/iter-close': [PASS, FAIL],
'built-ins/Set/set-iterator-close-after-add-failure': [FAIL],
'built-ins/WeakMap/iterator-close-after-set-failure': [FAIL],
'built-ins/WeakMap/iterator-item-first-entry-returns-abrupt': [FAIL],
@@ -106,15 +71,6 @@
'built-ins/WeakMap/iterator-items-are-not-object-close-iterator': [FAIL],
'built-ins/WeakSet/iterator-close-after-add-failure': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=4119
- 'built-ins/RegExp/call_with_non_regexp_same_constructor': [FAIL],
- 'built-ins/RegExp/from-regexp-like-short-circuit': [FAIL],
- 'built-ins/RegExp/from-regexp-like': [FAIL],
- 'built-ins/RegExp/from-regexp-like-flag-override': [FAIL],
- 'built-ins/RegExp/from-regexp-like-get-source-err': [FAIL],
- 'built-ins/RegExp/from-regexp-like-get-flags-err': [FAIL],
- 'built-ins/RegExp/from-regexp-like-get-ctor-err': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=4348
'built-ins/String/prototype/Symbol.iterator/this-val-non-obj-coercible': [FAIL],
@@ -148,24 +104,6 @@
'built-ins/Array/prototype/values/iteration-mutable': [FAIL],
'built-ins/Array/prototype/Symbol.unscopables/value': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=3566
- 'built-ins/GeneratorPrototype/return/from-state-completed': [FAIL],
- 'built-ins/GeneratorPrototype/return/from-state-suspended-start': [FAIL],
- 'built-ins/GeneratorPrototype/return/property-descriptor': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-catch-before-try': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-catch-following-catch': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-catch-within-catch': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-catch-within-try': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-before-try': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-following-finally': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-catch': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-finally': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-inner-try': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-outer-try-after-nested': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-outer-try-before-nested': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-within-finally': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-within-try': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=4248
'language/expressions/compound-assignment/S11.13.2_A5.*': [FAIL],
'language/expressions/compound-assignment/S11.13.2_A6.*': [FAIL],
@@ -194,71 +132,13 @@
'language/expressions/assignment/S11.13.1_A5*': [FAIL],
'language/expressions/assignment/S11.13.1_A6*': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=3699
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4709
'built-ins/Proxy/revocable/revocation-function-name': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-init-fn-name-arrow': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-init-fn-name-class': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-init-fn-name-cover': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-init-fn-name-fn': [FAIL],
- 'language/expressions/assignment/destructuring/array-elem-init-fn-name-gen': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-init-fn-name-arrow': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-init-fn-name-class': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-init-fn-name-cover': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-init-fn-name-fn': [FAIL],
- 'language/expressions/assignment/destructuring/obj-id-init-fn-name-gen': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-elem-init-fn-name-arrow': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-elem-init-fn-name-class': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-elem-init-fn-name-cover': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-elem-init-fn-name-fn': [FAIL],
- 'language/expressions/assignment/destructuring/obj-prop-elem-init-fn-name-gen': [FAIL],
- 'language/expressions/assignment/fn-name-arrow': [FAIL],
- 'language/expressions/assignment/fn-name-class': [FAIL],
- 'language/expressions/assignment/fn-name-cover': [FAIL],
- 'language/expressions/assignment/fn-name-fn': [FAIL],
- 'language/expressions/assignment/fn-name-gen': [FAIL],
'language/expressions/assignment/fn-name-lhs-cover': [FAIL],
'language/expressions/assignment/fn-name-lhs-member': [FAIL],
'language/expressions/class/name': [FAIL],
'language/expressions/function/name': [FAIL],
- 'language/expressions/generators/implicit-name': [FAIL],
'language/expressions/generators/name': [FAIL],
- 'language/expressions/generators/name-property-descriptor': [FAIL],
- 'language/expressions/object/fn-name-accessor-get': [FAIL],
- 'language/expressions/object/fn-name-accessor-set': [FAIL],
- 'language/expressions/object/fn-name-arrow': [FAIL],
- 'language/expressions/object/fn-name-class': [FAIL],
- 'language/expressions/object/fn-name-cover': [FAIL],
- 'language/expressions/object/fn-name-fn': [FAIL],
- 'language/expressions/object/fn-name-gen': [FAIL],
- 'language/expressions/object/fn-name-lhs-cover': [FAIL],
- 'language/expressions/object/fn-name-lhs-member': [FAIL],
- 'language/expressions/object/method-definition/fn-name-accessor-get': [FAIL],
- 'language/expressions/object/method-definition/fn-name-accessor-set': [FAIL],
- 'language/expressions/object/method-definition/fn-name-arrow': [FAIL],
- 'language/expressions/object/method-definition/fn-name-class': [FAIL],
- 'language/expressions/object/method-definition/fn-name-cover': [FAIL],
- 'language/expressions/object/method-definition/fn-name-fn': [FAIL],
- 'language/expressions/object/method-definition/fn-name-gen': [FAIL],
- 'language/statements/class/definition/basics': [FAIL],
- 'language/statements/class/definition/fn-name-accessor-get': [FAIL],
- 'language/statements/class/definition/fn-name-accessor-set': [FAIL],
- 'language/statements/class/definition/fn-name-gen-method': [FAIL],
- 'language/statements/class/definition/fn-name-method': [FAIL],
- 'language/statements/const/fn-name-arrow': [FAIL],
- 'language/statements/const/fn-name-class': [FAIL],
- 'language/statements/const/fn-name-cover': [FAIL],
- 'language/statements/const/fn-name-fn': [FAIL],
- 'language/statements/const/fn-name-gen': [FAIL],
- 'language/statements/let/fn-name-arrow': [FAIL],
- 'language/statements/let/fn-name-class': [FAIL],
- 'language/statements/let/fn-name-cover': [FAIL],
- 'language/statements/let/fn-name-fn': [FAIL],
- 'language/statements/let/fn-name-gen': [FAIL],
- 'language/statements/variable/fn-name-arrow': [FAIL],
- 'language/statements/variable/fn-name-class': [FAIL],
- 'language/statements/variable/fn-name-cover': [FAIL],
- 'language/statements/variable/fn-name-fn': [FAIL],
- 'language/statements/variable/fn-name-gen': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=4251
'language/expressions/postfix-increment/S11.3.1_A5_T1': [FAIL],
@@ -273,30 +153,6 @@
# https://code.google.com/p/v8/issues/detail?id=4253
'language/asi/S7.9_A5.7_T1': [PASS, FAIL_OK],
- # https://code.google.com/p/v8/issues/detail?id=3761
- 'language/expressions/object/method-definition/generator-name-prop-symbol': [FAIL],
- 'language/expressions/object/method-definition/name-name-prop-symbol': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=2952
- 'built-ins/RegExp/prototype/exec/u-lastindex-adv': [FAIL],
- 'built-ins/RegExp/prototype/exec/u-captured-value': [FAIL],
- 'built-ins/RegExp/prototype/exec/u-lastindex-value': [FAIL],
- 'built-ins/RegExp/prototype/test/u-captured-value': [FAIL],
- 'built-ins/RegExp/prototype/test/u-lastindex-adv': [FAIL],
- 'built-ins/RegExp/prototype/test/u-lastindex-value': [FAIL],
- 'built-ins/RegExp/prototype/unicode/this-regexp': [FAIL],
- 'built-ins/RegExp/unicode_identity_escape': [FAIL],
- 'language/literals/regexp/u-unicode-esc': [FAIL],
- 'language/literals/regexp/u-surrogate-pairs': [FAIL],
- 'language/literals/regexp/u-case-mapping': [FAIL],
- 'language/literals/regexp/u-astral': [FAIL],
- 'built-ins/RegExp/valid-flags-y': [FAIL],
- 'built-ins/RegExp/prototype/unicode/length': [FAIL],
- 'built-ins/RegExp/prototype/unicode/name': [FAIL],
- 'built-ins/RegExp/prototype/unicode/prop-desc': [FAIL],
- 'built-ins/RegExp/prototype/unicode/this-invald-obj': [FAIL],
- 'built-ins/RegExp/prototype/unicode/this-non-obj': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=4602
'built-ins/RegExp/prototype/exec/get-sticky-coerce': [FAIL],
'built-ins/RegExp/prototype/exec/get-sticky-err': [FAIL],
@@ -311,36 +167,19 @@
# happens to be thrown for some other reason (e.g.,
# built-ins/RegExp/prototype/Symbol.match/builtin-failure-set-lastindex-err)
'built-ins/RegExp/prototype/Symbol.match/*': [SKIP],
- 'built-ins/Symbol/match/prop-desc': [FAIL],
'built-ins/String/prototype/endsWith/return-abrupt-from-searchstring-regexp-test': [FAIL],
'built-ins/String/prototype/includes/return-abrupt-from-searchstring-regexp-test': [FAIL],
'built-ins/String/prototype/startsWith/return-abrupt-from-searchstring-regexp-test': [FAIL],
- 'built-ins/String/prototype/match/cstm-matcher-get-err': [FAIL],
'built-ins/String/prototype/match/invoke-builtin-match': [FAIL],
- 'built-ins/String/prototype/match/cstm-matcher-invocation': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=4343
'built-ins/RegExp/prototype/Symbol.replace/*': [SKIP],
- 'built-ins/Symbol/replace/prop-desc': [FAIL],
- 'built-ins/String/prototype/replace/cstm-replace-get-err': [FAIL],
- 'built-ins/String/prototype/replace/cstm-replace-invocation': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=4344
'built-ins/RegExp/prototype/Symbol.search/*': [SKIP],
- 'built-ins/Symbol/search/prop-desc': [FAIL],
- 'built-ins/String/prototype/search/cstm-search-get-err': [FAIL],
- 'built-ins/String/prototype/search/invoke-builtin-search-searcher-undef': [FAIL],
- 'built-ins/String/prototype/search/cstm-search-invocation': [FAIL],
- 'built-ins/String/prototype/search/invoke-builtin-search': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=4345
'built-ins/RegExp/prototype/Symbol.split/*': [SKIP],
- 'built-ins/Symbol/split/prop-desc': [FAIL],
- 'built-ins/String/prototype/split/cstm-split-invocation': [FAIL],
- 'built-ins/String/prototype/split/cstm-split-get-err': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4346
- 'built-ins/RegExp/prototype/flags/u': [FAIL],
# https://code.google.com/p/v8/issues/detail?id=4360
'intl402/Collator/10.1.1_1': [FAIL],
@@ -350,13 +189,6 @@
# https://code.google.com/p/v8/issues/detail?id=4361
'intl402/Collator/10.1.1_a': [FAIL],
- # https://code.google.com/p/v8/issues/detail?id=4447
- 'built-ins/Function/prototype/Symbol.hasInstance/*': [SKIP],
- 'built-ins/Symbol/hasInstance/prop-desc': [FAIL],
- 'language/expressions/instanceof/symbol-hasinstance-get-err': [FAIL],
- 'language/expressions/instanceof/symbol-hasinstance-invocation': [FAIL],
- 'language/expressions/instanceof/symbol-hasinstance-to-boolean': [FAIL],
-
# https://code.google.com/p/v8/issues/detail?id=4476
'built-ins/String/prototype/toLocaleLowerCase/special_casing_conditional': [FAIL],
'built-ins/String/prototype/toLocaleLowerCase/supplementary_plane': [FAIL],
@@ -381,11 +213,7 @@
'built-ins/ArrayBuffer/length-is-absent': [FAIL],
'built-ins/ArrayBuffer/length-is-not-number': [FAIL],
'built-ins/ArrayBuffer/positive-integer-length': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=4630
- 'language/statements/generators/invoke-as-constructor': [FAIL],
- 'language/expressions/generators/invoke-as-constructor': [FAIL],
- 'language/expressions/object/method-definition/generator-invoke-ctor': [FAIL],
+ 'language/statements/class/subclass/builtin-objects/ArrayBuffer/regular-subclassing': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=4633
'built-ins/Promise/reject-function-name': [FAIL],
@@ -409,8 +237,20 @@
'built-ins/DataView/prototype/setUint8/index-check-before-value-conversion': [FAIL],
'built-ins/DataView/prototype/setInt8/index-check-before-value-conversion': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=4639
- 'built-ins/ArrayBuffer/allocation-limit': [SKIP],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4706
+ 'language/statements/class/subclass/builtin-objects/NativeError/EvalError-message': [FAIL],
+ 'language/statements/class/subclass/builtin-objects/NativeError/RangeError-message': [FAIL],
+ 'language/statements/class/subclass/builtin-objects/NativeError/ReferenceError-message': [FAIL],
+ 'language/statements/class/subclass/builtin-objects/NativeError/SyntaxError-message': [FAIL],
+ 'language/statements/class/subclass/builtin-objects/NativeError/TypeError-message': [FAIL],
+ 'language/statements/class/subclass/builtin-objects/NativeError/URIError-message': [FAIL],
+ 'language/statements/class/subclass/builtin-objects/Error/message-property-assignment': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4663
+ 'built-ins/object/entries/*': [SKIP],
+ 'built-ins/object/values/*': [SKIP],
+ 'built-ins/Object/entries/*': [SKIP],
+ 'built-ins/Object/values/*': [SKIP],
# https://code.google.com/p/chromium/issues/detail?id=581577
'built-ins/RegExp/prototype/source/15.10.7.1-1': [FAIL],
@@ -496,8 +336,9 @@
'built-ins/Array/prototype/indexOf/15.4.4.14-5-9': [FAIL],
'built-ins/Array/prototype/lastIndexOf/15.4.4.15-5-9': [FAIL],
- # https://github.com/tc39/test262/issues/436
- 'built-ins/RegExp/call_with_regexp_match_falsy': [FAIL],
+ # https://github.com/tc39/test262/issues/489
+ # Test will pass with a zero or negative GMT offset, but fail with a positive GMT offset
+ 'language/statements/class/subclass/builtin-objects/Date/regular-subclassing': [PASS, FAIL_OK],
############################ SKIPPED TESTS #############################
@@ -544,12 +385,18 @@
'intl402/NumberFormat/prototype/format/11.3.2_TRP': [SKIP],
}], # system == macos
-['no_i18n == True and mode == debug', {
+['no_i18n == True', {
+ # Unicode canonicalization is not available with i18n turned off.
+ 'built-ins/String/prototype/localeCompare/15.5.4.9_CE': [SKIP],
+
+ # Unicode regexp case mapping is not available with i18n turned off.
+ 'language/literals/regexp/u-case-mapping': [SKIP],
+
# BUG(v8:4437).
'built-ins/String/prototype/normalize/return-normalized-string': [SKIP],
'built-ins/String/prototype/normalize/return-normalized-string-from-coerced-form': [SKIP],
'built-ins/String/prototype/normalize/return-normalized-string-using-default-parameter': [SKIP],
-}], # no_i18n == True and mode == debug
+}], # no_i18n == True
['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64 or arch == mips64el', {
@@ -571,7 +418,13 @@
# BUG(v8:4653): Test262 tests which rely on quit() are not compatible with
# asan's --omit-quit flag.
'built-ins/Promise/prototype/then/deferred-is-resolved-value': [SKIP],
-}],
+}], # asan == True
+
+['asan == True or msan == True or tsan == True', {
+ # https://bugs.chromium.org/p/v8/issues/detail?id=4639
+ # The failed allocation causes an asan/msan/tsan error.
+ 'built-ins/ArrayBuffer/allocation-limit': [SKIP],
+}], # asan == True or msan == True or tsan == True
['ignition == True', {
'annexB/B.2.3.*': [SKIP],
@@ -579,48 +432,37 @@
'built-ins/Array/prototype/reduceRight/*': [SKIP],
'built-ins/GeneratorFunction/*': [SKIP],
'built-ins/GeneratorPrototype/*': [SKIP],
- 'built-ins/Map/*': [SKIP],
- 'built-ins/MapIteratorPrototype/*': [SKIP],
- 'built-ins/Promise/prototype/then/capability-executor-called-twice': [SKIP],
'built-ins/Promise/prototype/then/capability-executor-not-callable': [SKIP],
- 'built-ins/Promise/prototype/then/deferred-is-resolved-value': [SKIP],
- 'built-ins/Proxy/has/*': [SKIP],
'built-ins/Reflect/enumerate/*': [SKIP],
- 'built-ins/Set/*': [SKIP],
- 'built-ins/SetIteratorPrototype/*': [SKIP],
- 'built-ins/WeakMap/*': [SKIP],
- 'built-ins/WeakSet/*': [SKIP],
'language/computed-property-names/class/*': [SKIP],
'language/computed-property-names/to-name-side-effects/*': [SKIP],
'language/directive-prologue/*': [SKIP],
'language/expressions/arrow-function/*': [SKIP],
'language/expressions/assignment/destructuring/*': [SKIP],
- 'language/expressions/class/*': [SKIP],
+ 'language/expressions/class/subclass/builtin-objects/GeneratorFunction/*': [SKIP],
'language/expressions/generators/*': [SKIP],
+ 'language/expressions/instanceof/primitive-prototype-with-object': [SKIP],
+ 'language/expressions/instanceof/prototype-getter-with-object-throws': [SKIP],
+ 'language/expressions/instanceof/prototype-getter-with-object': [SKIP],
'language/expressions/object/method-definition/yield*': [SKIP],
'language/expressions/object/method-definition/generator*': [SKIP],
- 'language/expressions/object/prop-def-id-eval-error-2': [SKIP],
'language/expressions/yield/*': [SKIP],
- 'language/function-code/*': [SKIP],
- 'language/statements/class/*': [SKIP],
- 'language/statements/const/*': [SKIP],
- 'language/statements/for-in/const*': [SKIP],
- 'language/statements/for-in/let*': [SKIP],
- 'language/statements/for-of/*': [SKIP],
+ 'language/statements/class/definition/methods-gen-no-yield': [SKIP],
+ 'language/statements/class/definition/methods-gen-return': [SKIP],
+ 'language/statements/class/definition/methods-gen-yield-as-expression-with-rhs': [SKIP],
+ 'language/statements/class/definition/methods-gen-yield-as-generator-method-binding-identifier': [SKIP],
+ 'language/statements/class/definition/methods-gen-yield-as-literal-property-name': [SKIP],
+ 'language/statements/class/definition/methods-gen-yield-as-property-name': [SKIP],
+ 'language/statements/class/definition/methods-gen-yield-as-statement': [SKIP],
+ 'language/statements/class/definition/methods-gen-yield-as-expression-without-rhs': [SKIP],
+ 'language/statements/class/definition/methods-gen-yield-as-yield-operand': [SKIP],
+ 'language/statements/class/definition/methods-gen-yield-newline': [SKIP],
+ 'language/statements/class/definition/methods-gen-yield-star-before-newline': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/GeneratorFunction/*': [SKIP],
'language/statements/generators/*': [SKIP],
- 'language/statements/try/*': [SKIP],
- 'language/statements/with/*': [SKIP],
'built-ins/Array/prototype/concat/Array.prototype.concat_non-array': [SKIP],
- 'built-ins/Array/prototype/join/S15.4.4.5_A3.1_T1': [SKIP],
- 'built-ins/Array/prototype/join/S15.4.4.5_A3.1_T2': [SKIP],
- 'built-ins/Array/prototype/toString/S15.4.4.2_A1_T2': [SKIP],
- 'built-ins/Array/prototype/toString/S15.4.4.2_A1_T3': [SKIP],
- 'built-ins/Array/prototype/toString/S15.4.4.2_A1_T4': [SKIP],
- 'built-ins/Date/15.9.1.15-1': [SKIP],
'built-ins/Date/prototype/toISOString/15.9.5.43-0-13': [SKIP],
- 'built-ins/JSON/stringify/*': [SKIP],
- 'built-ins/Object/defineProperty/15.2.3.6-4-625gs': [SKIP],
'built-ins/Object/prototype/hasOwnProperty/S15.2.4.5_A12': [SKIP],
'built-ins/Object/prototype/isPrototypeOf/S15.2.4.6_A12': [SKIP],
'built-ins/Object/prototype/propertyIsEnumerable/S15.2.4.7_A12': [SKIP],
@@ -631,10 +473,8 @@
'built-ins/Object/prototype/valueOf/S15.2.4.4_A12': [SKIP],
'built-ins/Object/prototype/valueOf/S15.2.4.4_A14': [SKIP],
'built-ins/Object/prototype/valueOf/S15.2.4.4_A15': [SKIP],
- 'built-ins/Promise/all/ctx-ctor': [SKIP],
- 'built-ins/Promise/race/ctx-ctor': [SKIP],
- 'built-ins/Promise/reject/ctx-ctor': [SKIP],
- 'built-ins/Promise/resolve/ctx-ctor': [SKIP],
+ 'built-ins/Promise/all/S25.4.4.1_A4.1_T1': [SKIP],
+ 'built-ins/Promise/prototype/then/on-rejected-throw': [SKIP],
'built-ins/Promise/reject/S25.4.4.4_A3.1_T1': [SKIP],
'built-ins/String/prototype/codePointAt/this-is-undefined-throws': [SKIP],
'built-ins/String/prototype/concat/S15.5.4.6_A2': [SKIP],
@@ -643,84 +483,138 @@
'built-ins/String/prototype/repeat/this-is-undefined-throws': [SKIP],
'built-ins/String/prototype/startsWith/this-is-undefined-throws': [SKIP],
'built-ins/String/prototype/trim/15.5.4.20-1-1': [SKIP],
- 'built-ins/String/S15.5.5.1_A4_T1': [SKIP],
'language/block-scope/leave/nested-block-let-declaration-only-shadows-outer-parameter-value-1': [SKIP],
'language/block-scope/leave/nested-block-let-declaration-only-shadows-outer-parameter-value-2': [SKIP],
'language/block-scope/leave/verify-context-in-labelled-block': [SKIP],
'language/block-scope/leave/x-after-break-to-label': [SKIP],
- 'language/computed-property-names/object/accessor/getter-super': [SKIP],
- 'language/computed-property-names/object/accessor/setter-super': [SKIP],
- 'language/computed-property-names/object/method/super': [SKIP],
'language/default-parameters/class-definitions': [SKIP],
'language/default-parameters/generators': [SKIP],
- 'language/default-parameters/param-ref-uninitialized': [SKIP],
- 'language/expressions/delete/11.4.1-4.a-5': [SKIP],
- 'language/expressions/delete/11.4.1-4.a-6': [SKIP],
'language/expressions/object/method-definition/name-prop-name-yield-expr': [SKIP],
- 'language/expressions/object/method-definition/name-super-prop-param': [SKIP],
- 'language/expressions/object/method-definition/name-super-prop-body': [SKIP],
- 'language/expressions/object/prop-def-id-eval-error': [SKIP],
'language/expressions/tagged-template/call-expression-context-no-strict': [SKIP],
'language/expressions/tagged-template/call-expression-context-strict': [SKIP],
'language/expressions/template-literal/evaluation-order': [SKIP],
- 'language/expressions/this/11.1.1-1gs': [SKIP],
- 'language/identifier-resolution/S10.2.2_A1_T5': [SKIP],
- 'language/identifier-resolution/S10.2.2_A1_T6': [SKIP],
- 'language/identifier-resolution/S10.2.2_A1_T7': [SKIP],
- 'language/identifier-resolution/S10.2.2_A1_T8': [SKIP],
- 'language/identifier-resolution/S10.2.2_A1_T9': [SKIP],
+ 'language/statements/for-of/body-dstr-assign': [SKIP],
+ 'language/statements/for-of/break': [SKIP],
+ 'language/statements/for-of/break-from-catch': [SKIP],
+ 'language/statements/for-of/break-from-finally': [SKIP],
+ 'language/statements/for-of/break-from-try': [SKIP],
+ 'language/statements/for-of/break-label': [SKIP],
+ 'language/statements/for-of/break-label-from-catch': [SKIP],
+ 'language/statements/for-of/break-label-from-finally': [SKIP],
+ 'language/statements/for-of/break-label-from-try': [SKIP],
+ 'language/statements/for-of/continue': [SKIP],
+ 'language/statements/for-of/continue-from-catch': [SKIP],
+ 'language/statements/for-of/continue-from-finally': [SKIP],
+ 'language/statements/for-of/continue-from-try': [SKIP],
+ 'language/statements/for-of/continue-label': [SKIP],
+ 'language/statements/for-of/continue-label-from-catch': [SKIP],
+ 'language/statements/for-of/continue-label-from-finally': [SKIP],
+ 'language/statements/for-of/continue-label-from-try': [SKIP],
+ 'language/statements/for-of/generator': [SKIP],
+ 'language/statements/for-of/generator-next-error': [SKIP],
+ 'language/statements/for-of/nested': [SKIP],
+ 'language/statements/for-of/return': [SKIP],
+ 'language/statements/for-of/return-from-catch': [SKIP],
+ 'language/statements/for-of/return-from-finally': [SKIP],
+ 'language/statements/for-of/return-from-try': [SKIP],
+ 'language/statements/for-of/throw': [SKIP],
+ 'language/statements/for-of/throw-from-catch': [SKIP],
+ 'language/statements/for-of/throw-from-finally': [SKIP],
+ 'language/statements/for-of/yield': [SKIP],
+ 'language/statements/for-of/yield-from-catch': [SKIP],
+ 'language/statements/for-of/yield-from-finally': [SKIP],
+ 'language/statements/for-of/yield-from-try': [SKIP],
+ 'language/statements/for-of/yield-star': [SKIP],
+ 'language/statements/for-of/yield-star-from-catch': [SKIP],
+ 'language/statements/for-of/yield-star-from-finally': [SKIP],
+ 'language/statements/for-of/yield-star-from-try': [SKIP],
'language/object-literal/concise-generator': [SKIP],
- 'language/object-literal/getter': [SKIP],
- 'language/object-literal/method': [SKIP],
- 'language/object-literal/setter': [SKIP],
- 'language/rest-parameters/arrow-function': [SKIP],
- 'language/rest-parameters/expected-argument-count': [SKIP],
- 'language/rest-parameters/no-alias-arguments': [SKIP],
- 'language/rest-parameters/rest-index': [SKIP],
- 'language/rest-parameters/rest-parameters-apply': [SKIP],
- 'language/rest-parameters/rest-parameters-call': [SKIP],
- 'language/rest-parameters/rest-parameters-produce-an-array': [SKIP],
- 'language/rest-parameters/with-new-target': [SKIP],
'language/statements/do-while/S12.6.1_A4_T5': [SKIP],
- 'language/statements/function/S13.2.2_A18_T2': [SKIP],
- 'language/statements/function/S13.2.2_A19_T1': [SKIP],
- 'language/statements/function/S13.2.2_A19_T2': [SKIP],
- 'language/statements/function/S13.2.2_A19_T3': [SKIP],
- 'language/statements/function/S13.2.2_A19_T4': [SKIP],
- 'language/statements/function/S13.2.2_A19_T5': [SKIP],
- 'language/statements/function/S13.2.2_A19_T6': [SKIP],
- 'language/statements/function/S13.2.2_A19_T7': [SKIP],
- 'language/statements/function/S13.2.2_A19_T8': [SKIP],
- 'language/statements/function/S13.2.2_A18_T1': [SKIP],
- 'language/statements/function/S13.2.2_A17_T2': [SKIP],
- 'language/statements/function/S13.2.2_A17_T3': [SKIP],
- 'language/statements/let/block-local-closure-get-before-initialization': [SKIP],
- 'language/statements/let/block-local-closure-set-before-initialization': [SKIP],
- 'language/statements/let/block-local-use-before-initialization-in-declaration-statement': [SKIP],
- 'language/statements/let/block-local-use-before-initialization-in-prior-statement': [SKIP],
- 'language/statements/let/function-local-closure-get-before-initialization': [SKIP],
- 'language/statements/let/function-local-closure-set-before-initialization': [SKIP],
- 'language/statements/let/function-local-use-before-initialization-in-declaration-statement': [SKIP],
- 'language/statements/let/function-local-use-before-initialization-in-prior-statement': [SKIP],
- 'language/statements/let/global-closure-get-before-initialization': [SKIP],
- 'language/statements/let/global-closure-set-before-initialization': [SKIP],
- 'language/statements/let/global-use-before-initialization-in-declaration-statement': [SKIP],
- 'language/statements/let/global-use-before-initialization-in-prior-statement': [SKIP],
'language/statements/while/S12.6.2_A4_T5': [SKIP],
+ 'language/expressions/instanceof/symbol-hasinstance-not-callable': [SKIP],
}], # ignition == True
['ignition == True and (arch == arm or arch == arm64)', {
+ 'built-ins/Promise/all/ctx-ctor': [SKIP],
+ 'built-ins/Promise/race/ctx-ctor': [SKIP],
'built-ins/decodeURI/S15.1.3.1_A1.12_T3': [SKIP],
'built-ins/decodeURIComponent/S15.1.3.2_A1.10_T1': [SKIP],
'built-ins/decodeURIComponent/S15.1.3.2_A1.11_T2': [SKIP],
'built-ins/decodeURIComponent/S15.1.3.2_A1.12_T2': [SKIP],
'built-ins/decodeURIComponent/S15.1.3.2_A1.12_T3': [SKIP],
'intl402/9.2.2': [SKIP],
+ 'language/statements/class/arguments/default-constructor': [SKIP],
+ 'language/statements/class/definition/constructor-strict-by-default': [SKIP],
+ 'language/statements/class/definition/fn-name-accessor-get': [SKIP],
+ 'language/statements/class/definition/fn-name-accessor-set': [SKIP],
+ 'language/statements/class/definition/fn-name-gen-method': [SKIP],
+ 'language/statements/class/definition/fn-name-method': [SKIP],
+ 'language/statements/class/definition/methods-restricted-properties': [SKIP],
+ 'language/statements/class/definition/prototype-getter': [SKIP],
+ 'language/statements/class/definition/prototype-wiring': [SKIP],
+ 'language/statements/class/definition/this-access-restriction': [SKIP],
+ 'language/statements/class/definition/this-access-restriction-2': [SKIP],
+ 'language/statements/class/definition/this-check-ordering': [SKIP],
+ 'language/statements/class/name': [SKIP],
+ 'language/statements/class/restricted-properties': [SKIP],
+ 'language/statements/class/subclass/binding': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Array/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/ArrayBuffer/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Boolean/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/DataView/regular-subclassing': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/DataView/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Date/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Error/regular-subclassing': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Error/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Function/instance-length': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Function/instance-name': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Function/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Map/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/NativeError/EvalError-name': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/NativeError/EvalError-super': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/NativeError/RangeError-name': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/NativeError/RangeError-super': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/NativeError/ReferenceError-name': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/NativeError/ReferenceError-super': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/NativeError/SyntaxError-name': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/NativeError/SyntaxError-super': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/NativeError/TypeError-name': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/NativeError/TypeError-super': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/NativeError/URIError-name': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/NativeError/URIError-super': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Number/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Object/constructor-return-undefined-throws': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Object/constructor-returns-non-object': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Promise/regular-subclassing': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Promise/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/RegExp/lastIndex': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/RegExp/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Set/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/String/length': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/String/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/Symbol/new-symbol-with-super-throws': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/WeakMap/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/builtin-objects/WeakSet/super-must-be-called': [SKIP],
+ 'language/statements/class/subclass/class-definition-null-proto-missing-return-override': [SKIP],
+ 'language/statements/class/subclass/default-constructor': [SKIP],
+ 'language/statements/class/subclass/default-constructor-2': [SKIP],
+ 'language/statements/class/subclass/derived-class-return-override-with-boolean': [SKIP],
+ 'language/statements/class/subclass/derived-class-return-override-with-null': [SKIP],
+ 'language/statements/class/subclass/derived-class-return-override-with-number': [SKIP],
+ 'language/statements/class/subclass/derived-class-return-override-with-string': [SKIP],
+ 'language/statements/class/subclass/derived-class-return-override-with-symbol': [SKIP],
+ 'language/statements/const/fn-name-arrow': [SKIP],
+ 'language/statements/const/fn-name-class': [SKIP],
+ 'language/statements/const/fn-name-cover': [SKIP],
+ 'language/statements/const/fn-name-fn': [SKIP],
+ 'language/statements/const/fn-name-gen': [SKIP],
'language/statements/let/fn-name-arrow': [SKIP],
+ 'language/statements/let/fn-name-class': [SKIP],
'language/statements/let/fn-name-cover': [SKIP],
'language/statements/let/fn-name-fn': [SKIP],
'language/statements/let/fn-name-gen': [SKIP],
+ 'test-api/Regress470113': [SKIP],
}], # ignition == True and (arch == arm or arch == arm64)
]
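
(A note on the status-file format above: it is a Python-literal list of sections, where each section is either a plain dict mapping test-path patterns to outcome lists, or a ['condition', {...}] pair that only applies when the condition holds for the current variant, e.g. ignition == True or no_i18n == True. The sketch below shows how such a file could be matched against a test name; it is a simplified model for illustration, not V8's actual parser, which lives in the testrunner and supports more outcome kinds.)

    import fnmatch

    # Simplified model: a list of sections, each either a rules dict or a
    # [condition, rules] pair gated on the test variant's variables.
    STATUS = [
        {'built-ins/Array/prototype/values/iteration-mutable': ['FAIL']},
        ['no_i18n == True', {
            'language/literals/regexp/u-case-mapping': ['SKIP'],
        }],
    ]

    def outcomes_for(test, variables):
        """Collect the outcomes every applicable section assigns to `test`."""
        outcomes = []
        for section in STATUS:
            if isinstance(section, dict):
                condition, rules = 'True', section
            else:
                condition, rules = section
            if not eval(condition, {}, variables):  # conditions are Python exprs
                continue
            for pattern, result in rules.items():
                if fnmatch.fnmatch(test, pattern):  # entries may use '*' globs
                    outcomes.extend(result)
        return outcomes or ['PASS']

    print(outcomes_for('language/literals/regexp/u-case-mapping',
                       {'no_i18n': True}))  # ['SKIP']
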
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index f222e1e37d..b5ad30949d 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -39,6 +39,8 @@ from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
+ARCHIVE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data.tar")
+
TEST_262_HARNESS_FILES = ["sta.js", "assert.js"]
TEST_262_SUITE_PATH = ["data", "test"]
@@ -199,6 +201,11 @@ class Test262TestSuite(testsuite.TestSuite):
for f in archive_files:
os.remove(os.path.join(self.root, f))
+ print "Extracting archive..."
+ tar = tarfile.open(ARCHIVE)
+ tar.extractall(path=os.path.dirname(ARCHIVE))
+ tar.close()
+
def GetSuite(name, root):
return Test262TestSuite(name, root)
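
(The extraction step added above is the interesting part of this hunk: the test262 data now ships as a data.tar archive next to testcfg.py, and the runner unpacks it in place after pruning stale files. As a standalone sketch, the same logic with a context manager would look like the following; the explicit open()/close() pair in the diff does the same thing and also works on Python versions before tarfile grew context-manager support.)

    import os
    import tarfile

    ARCHIVE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data.tar")

    def extract_archive():
        # Unpack data.tar into the directory that contains it; members land
        # under the paths stored in the archive (here: data/...).
        with tarfile.open(ARCHIVE) as tar:
            tar.extractall(path=os.path.dirname(ARCHIVE))
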
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index 62abeda1b5..72cfc51d58 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -1584,7 +1584,7 @@ TEST_P(InstructionSelectorF32ComparisonTest, NegatedWithParameters) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Float32(),
MachineType::Float32());
m.Return(
- m.WordBinaryNot((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1))));
+ m.Word32BinaryNot((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1))));
Stream const s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmVcmpF32, s[0]->arch_opcode());
@@ -1667,7 +1667,7 @@ TEST_P(InstructionSelectorF64ComparisonTest, NegatedWithParameters) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Float64(),
MachineType::Float64());
m.Return(
- m.WordBinaryNot((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1))));
+ m.Word32BinaryNot((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1))));
Stream const s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmVcmpF64, s[0]->arch_opcode());
@@ -2544,8 +2544,28 @@ TEST_F(InstructionSelectorTest, Uint32ModWithParametersForSUDIVAndMLS) {
}
+TEST_F(InstructionSelectorTest, Word32ShlWord32SarForSbfx) {
+ TRACED_FORRANGE(int32_t, shl, 1, 31) {
+ TRACED_FORRANGE(int32_t, sar, shl, 31) {
+ if ((shl == sar) && (sar == 16)) continue; // Sxth.
+ if ((shl == sar) && (sar == 24)) continue; // Sxtb.
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(shl)),
+ m.Int32Constant(sar)));
+ Stream s = m.Build(ARMv7);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmSbfx, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(sar - shl, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(32 - sar, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
- TRACED_FORRANGE(int32_t, width, 1, 32) {
+ TRACED_FORRANGE(int32_t, width, 9, 23) {
+ if (width == 16) continue; // Uxth.
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Parameter(0),
m.Int32Constant(0xffffffffu >> (32 - width))));
@@ -2556,7 +2576,8 @@ TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
}
- TRACED_FORRANGE(int32_t, width, 1, 32) {
+ TRACED_FORRANGE(int32_t, width, 9, 23) {
+ if (width == 16) continue; // Uxth.
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
m.Parameter(0)));
@@ -2572,7 +2593,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
TRACED_FORRANGE(int32_t, lsb, 0, 31) {
- TRACED_FORRANGE(int32_t, width, 9, (32 - lsb) - 1) {
+ TRACED_FORRANGE(int32_t, width, 9, (24 - lsb) - 1) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(
m.Parameter(0),
@@ -2589,7 +2610,7 @@ TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
}
}
TRACED_FORRANGE(int32_t, lsb, 0, 31) {
- TRACED_FORRANGE(int32_t, width, 9, (32 - lsb) - 1) {
+ TRACED_FORRANGE(int32_t, width, 9, (24 - lsb) - 1) {
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(
m.Word32And(m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb)),
@@ -2828,8 +2849,11 @@ TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
- TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+ TRACED_FORRANGE(int32_t, lsb, 1, 31) {
TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ if (((width == 8) || (width == 16)) &&
+ ((lsb == 8) || (lsb == 16) || (lsb == 24)))
+ continue; // Uxtb/h ror.
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
m.Int32Constant(0xffffffffu >> (32 - width))));
@@ -2841,8 +2865,11 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
}
}
- TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+ TRACED_FORRANGE(int32_t, lsb, 1, 31) {
TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ if (((width == 8) || (width == 16)) &&
+ ((lsb == 8) || (lsb == 16) || (lsb == 24)))
+ continue; // Uxtb/h ror.
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
@@ -2857,6 +2884,62 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
}
+TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrAnd0xff) {
+ TRACED_FORRANGE(int32_t, shr, 1, 3) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r = m.Word32And(m.Word32Shr(p0, m.Int32Constant(shr * 8)),
+ m.Int32Constant(0xff));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUxtb, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(shr * 8, s.ToInt32(s[0]->InputAt(1)));
+ }
+ TRACED_FORRANGE(int32_t, shr, 1, 3) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r = m.Word32And(m.Int32Constant(0xff),
+ m.Word32Shr(p0, m.Int32Constant(shr * 8)));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUxtb, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(shr * 8, s.ToInt32(s[0]->InputAt(1)));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrAnd0xffff) {
+ TRACED_FORRANGE(int32_t, shr, 1, 3) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r = m.Word32And(m.Word32Shr(p0, m.Int32Constant(shr * 8)),
+ m.Int32Constant(0xffff));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUxth, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(shr * 8, s.ToInt32(s[0]->InputAt(1)));
+ }
+ TRACED_FORRANGE(int32_t, shr, 1, 3) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r = m.Word32And(m.Int32Constant(0xffff),
+ m.Word32Shr(p0, m.Int32Constant(shr * 8)));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmUxth, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(shr * 8, s.ToInt32(s[0]->InputAt(1)));
+ }
+}
+
+
TEST_F(InstructionSelectorTest, Word32Clz) {
StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
Node* const p0 = m.Parameter(0);
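
(The new ARM selector tests above lean on two bit-twiddling identities: an arithmetic (x << shl) >> sar extracts a signed bit field of width 32 - sar starting at bit sar - shl, which is exactly sbfx, and (x >> 8*k) & 0xff or & 0xffff for k in 1..3 are the rotate forms of uxtb/uxth, which is presumably why the ubfx and bfc test ranges now step around the byte and halfword widths. The following is a quick Python check of both identities on 32-bit values; it models the instruction semantics for illustration and is not V8 code.)

    def to_s32(x):
        # Reinterpret the low 32 bits of x as a signed 32-bit integer.
        x &= 0xffffffff
        return x - (1 << 32) if x & 0x80000000 else x

    def sbfx(x, lsb, width):
        # Signed bit-field extract: take `width` bits at `lsb`, sign-extend.
        field = (x >> lsb) & ((1 << width) - 1)
        return field - (1 << width) if field & (1 << (width - 1)) else field

    def shl_then_sar(x, shl, sar):
        # (x << shl) >> sar with 32-bit wrap-around and arithmetic shift.
        return to_s32(x << shl) >> sar

    def uxtb_ror(x, ror):
        # uxtb with rotation: rotate right by `ror`, keep the low byte.
        rotated = ((x >> ror) | (x << (32 - ror))) & 0xffffffff
        return rotated & 0xff

    x = 0xdeadbeef
    for shl in range(1, 32):
        for sar in range(shl, 32):
            assert shl_then_sar(x, shl, sar) == sbfx(x, sar - shl, 32 - sar)
    for k in (1, 2, 3):
        assert (x >> (8 * k)) & 0xff == uxtb_ror(x, 8 * k)
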
diff --git a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
index b088367a58..d5e12ba0db 100644
--- a/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/escape-analysis-unittest.cc
@@ -9,7 +9,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/types-inl.h"
+#include "src/types.h"
#include "src/zone-containers.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -85,6 +85,20 @@ class EscapeAnalysisTest : public GraphTest {
allocation, value, effect, control);
}
+ Node* StoreElement(const ElementAccess& access, Node* allocation, Node* index,
+ Node* value, Node* effect = nullptr,
+ Node* control = nullptr) {
+ if (!effect) {
+ effect = effect_;
+ }
+ if (!control) {
+ control = control_;
+ }
+ return effect_ =
+ graph()->NewNode(simplified()->StoreElement(access), allocation,
+ index, value, effect, control);
+ }
+
Node* Load(const FieldAccess& access, Node* from, Node* effect = nullptr,
Node* control = nullptr) {
if (!effect) {
@@ -131,12 +145,18 @@ class EscapeAnalysisTest : public GraphTest {
return control_ = graph()->NewNode(common()->Merge(2), control1, control2);
}
- FieldAccess AccessAtIndex(int offset) {
+ FieldAccess FieldAccessAtIndex(int offset) {
FieldAccess access = {kTaggedBase, offset, MaybeHandle<Name>(), Type::Any(),
MachineType::AnyTagged()};
return access;
}
+ ElementAccess MakeElementAccess(int header_size) {
+ ElementAccess access = {kTaggedBase, header_size, Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+ }
+
// ---------------------------------Assertion Helper--------------------------
void ExpectReplacement(Node* node, Node* rep) {
@@ -166,6 +186,7 @@ class EscapeAnalysisTest : public GraphTest {
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
Node* effect() { return effect_; }
+ Node* control() { return control_; }
private:
SimplifiedOperatorBuilder simplified_;
@@ -185,9 +206,9 @@ TEST_F(EscapeAnalysisTest, StraightNonEscape) {
Node* object1 = Constant(1);
BeginRegion();
Node* allocation = Allocate(Constant(kPointerSize));
- Store(AccessAtIndex(0), allocation, object1);
+ Store(FieldAccessAtIndex(0), allocation, object1);
Node* finish = FinishRegion(allocation);
- Node* load = Load(AccessAtIndex(0), finish);
+ Node* load = Load(FieldAccessAtIndex(0), finish);
Node* result = Return(load);
EndGraph();
@@ -202,13 +223,39 @@ TEST_F(EscapeAnalysisTest, StraightNonEscape) {
}
+TEST_F(EscapeAnalysisTest, StraightNonEscapeNonConstStore) {
+ Node* object1 = Constant(1);
+ Node* object2 = Constant(2);
+ BeginRegion();
+ Node* allocation = Allocate(Constant(kPointerSize));
+ Store(FieldAccessAtIndex(0), allocation, object1);
+ Node* index =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ object1, object2, control());
+ StoreElement(MakeElementAccess(0), allocation, index, object1);
+ Node* finish = FinishRegion(allocation);
+ Node* load = Load(FieldAccessAtIndex(0), finish);
+ Node* result = Return(load);
+ EndGraph();
+
+ Analysis();
+
+ ExpectEscaped(allocation);
+ ExpectReplacement(load, nullptr);
+
+ Transformation();
+
+ ASSERT_EQ(load, NodeProperties::GetValueInput(result, 0));
+}
+
+
TEST_F(EscapeAnalysisTest, StraightEscape) {
Node* object1 = Constant(1);
BeginRegion();
Node* allocation = Allocate(Constant(kPointerSize));
- Store(AccessAtIndex(0), allocation, object1);
+ Store(FieldAccessAtIndex(0), allocation, object1);
Node* finish = FinishRegion(allocation);
- Node* load = Load(AccessAtIndex(0), finish);
+ Node* load = Load(FieldAccessAtIndex(0), finish);
Node* result = Return(allocation);
EndGraph();
graph()->end()->AppendInput(zone(), load);
@@ -229,15 +276,15 @@ TEST_F(EscapeAnalysisTest, StoreLoadEscape) {
BeginRegion();
Node* allocation1 = Allocate(Constant(kPointerSize));
- Store(AccessAtIndex(0), allocation1, object1);
+ Store(FieldAccessAtIndex(0), allocation1, object1);
Node* finish1 = FinishRegion(allocation1);
BeginRegion();
Node* allocation2 = Allocate(Constant(kPointerSize));
- Store(AccessAtIndex(0), allocation2, finish1);
+ Store(FieldAccessAtIndex(0), allocation2, finish1);
Node* finish2 = FinishRegion(allocation2);
- Node* load = Load(AccessAtIndex(0), finish2);
+ Node* load = Load(FieldAccessAtIndex(0), finish2);
Node* result = Return(load);
EndGraph();
Analysis();
@@ -257,16 +304,18 @@ TEST_F(EscapeAnalysisTest, BranchNonEscape) {
Node* object2 = Constant(2);
BeginRegion();
Node* allocation = Allocate(Constant(kPointerSize));
- Store(AccessAtIndex(0), allocation, object1);
+ Store(FieldAccessAtIndex(0), allocation, object1);
Node* finish = FinishRegion(allocation);
Branch();
Node* ifFalse = IfFalse();
Node* ifTrue = IfTrue();
- Node* effect1 = Store(AccessAtIndex(0), allocation, object1, finish, ifFalse);
- Node* effect2 = Store(AccessAtIndex(0), allocation, object2, finish, ifTrue);
+ Node* effect1 =
+ Store(FieldAccessAtIndex(0), allocation, object1, finish, ifFalse);
+ Node* effect2 =
+ Store(FieldAccessAtIndex(0), allocation, object2, finish, ifTrue);
Node* merge = Merge2(ifFalse, ifTrue);
Node* phi = graph()->NewNode(common()->EffectPhi(2), effect1, effect2, merge);
- Node* load = Load(AccessAtIndex(0), finish, phi, merge);
+ Node* load = Load(FieldAccessAtIndex(0), finish, phi, merge);
Node* result = Return(load, phi);
EndGraph();
graph()->end()->AppendInput(zone(), result);
@@ -283,14 +332,81 @@ TEST_F(EscapeAnalysisTest, BranchNonEscape) {
}
+TEST_F(EscapeAnalysisTest, BranchEscapeOne) {
+ Node* object1 = Constant(1);
+ Node* object2 = Constant(2);
+ Node* index = graph()->NewNode(common()->Parameter(0), start());
+ BeginRegion();
+ Node* allocation = Allocate(Constant(kPointerSize));
+ Store(FieldAccessAtIndex(0), allocation, object1);
+ Node* finish = FinishRegion(allocation);
+ Branch();
+ Node* ifFalse = IfFalse();
+ Node* ifTrue = IfTrue();
+ Node* effect1 =
+ Store(FieldAccessAtIndex(0), allocation, object1, finish, ifFalse);
+ Node* effect2 = StoreElement(MakeElementAccess(0), allocation, index, object2,
+ finish, ifTrue);
+ Node* merge = Merge2(ifFalse, ifTrue);
+ Node* phi = graph()->NewNode(common()->EffectPhi(2), effect1, effect2, merge);
+ Node* load = Load(FieldAccessAtIndex(0), finish, phi, merge);
+ Node* result = Return(load, phi);
+ EndGraph();
+
+ Analysis();
+
+ ExpectEscaped(allocation);
+ ExpectReplacement(load, nullptr);
+
+ Transformation();
+
+ ASSERT_EQ(load, NodeProperties::GetValueInput(result, 0));
+}
+
+
+TEST_F(EscapeAnalysisTest, BranchEscapeThroughStore) {
+ Node* object1 = Constant(1);
+ Node* object2 = Constant(2);
+ BeginRegion();
+ Node* allocation = Allocate(Constant(kPointerSize));
+ Store(FieldAccessAtIndex(0), allocation, object1);
+ FinishRegion(allocation);
+ BeginRegion();
+ Node* allocation2 = Allocate(Constant(kPointerSize));
+ Store(FieldAccessAtIndex(0), allocation, object2);
+ Node* finish2 = FinishRegion(allocation2);
+ Branch();
+ Node* ifFalse = IfFalse();
+ Node* ifTrue = IfTrue();
+ Node* effect1 =
+ Store(FieldAccessAtIndex(0), allocation, allocation2, finish2, ifFalse);
+ Node* merge = Merge2(ifFalse, ifTrue);
+ Node* phi = graph()->NewNode(common()->EffectPhi(2), effect1, finish2, merge);
+ Node* load = Load(FieldAccessAtIndex(0), finish2, phi, merge);
+ Node* result = Return(allocation, phi);
+ EndGraph();
+ graph()->end()->AppendInput(zone(), load);
+
+ Analysis();
+
+ ExpectEscaped(allocation);
+ ExpectEscaped(allocation2);
+ ExpectReplacement(load, nullptr);
+
+ Transformation();
+
+ ASSERT_EQ(allocation, NodeProperties::GetValueInput(result, 0));
+}
+
+
TEST_F(EscapeAnalysisTest, DanglingLoadOrder) {
Node* object1 = Constant(1);
Node* object2 = Constant(2);
Node* allocation = Allocate(Constant(kPointerSize));
- Node* store1 = Store(AccessAtIndex(0), allocation, object1);
- Node* load1 = Load(AccessAtIndex(0), allocation);
- Node* store2 = Store(AccessAtIndex(0), allocation, object2);
- Node* load2 = Load(AccessAtIndex(0), allocation, store1);
+ Node* store1 = Store(FieldAccessAtIndex(0), allocation, object1);
+ Node* load1 = Load(FieldAccessAtIndex(0), allocation);
+ Node* store2 = Store(FieldAccessAtIndex(0), allocation, object2);
+ Node* load2 = Load(FieldAccessAtIndex(0), allocation, store1);
Node* result = Return(load2);
EndGraph();
graph()->end()->AppendInput(zone(), store2);
@@ -312,9 +428,9 @@ TEST_F(EscapeAnalysisTest, DeoptReplacement) {
Node* object1 = Constant(1);
BeginRegion();
Node* allocation = Allocate(Constant(kPointerSize));
- Store(AccessAtIndex(0), allocation, object1);
+ Store(FieldAccessAtIndex(0), allocation, object1);
Node* finish = FinishRegion(allocation);
- Node* effect1 = Store(AccessAtIndex(0), allocation, object1, finish);
+ Node* effect1 = Store(FieldAccessAtIndex(0), allocation, object1, finish);
Branch();
Node* ifFalse = IfFalse();
Node* state_values1 = graph()->NewNode(common()->StateValues(1), finish);
@@ -328,7 +444,7 @@ TEST_F(EscapeAnalysisTest, DeoptReplacement) {
Node* deopt = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
frame_state, effect1, ifFalse);
Node* ifTrue = IfTrue();
- Node* load = Load(AccessAtIndex(0), finish, effect1, ifTrue);
+ Node* load = Load(FieldAccessAtIndex(0), finish, effect1, ifTrue);
Node* result = Return(load, effect1, ifTrue);
EndGraph();
graph()->end()->AppendInput(zone(), deopt);
@@ -351,10 +467,10 @@ TEST_F(EscapeAnalysisTest, DeoptReplacementIdentity) {
Node* object1 = Constant(1);
BeginRegion();
Node* allocation = Allocate(Constant(kPointerSize * 2));
- Store(AccessAtIndex(0), allocation, object1);
- Store(AccessAtIndex(kPointerSize), allocation, allocation);
+ Store(FieldAccessAtIndex(0), allocation, object1);
+ Store(FieldAccessAtIndex(kPointerSize), allocation, allocation);
Node* finish = FinishRegion(allocation);
- Node* effect1 = Store(AccessAtIndex(0), allocation, object1, finish);
+ Node* effect1 = Store(FieldAccessAtIndex(0), allocation, object1, finish);
Branch();
Node* ifFalse = IfFalse();
Node* state_values1 = graph()->NewNode(common()->StateValues(1), finish);
@@ -368,7 +484,7 @@ TEST_F(EscapeAnalysisTest, DeoptReplacementIdentity) {
Node* deopt = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
frame_state, effect1, ifFalse);
Node* ifTrue = IfTrue();
- Node* load = Load(AccessAtIndex(0), finish, effect1, ifTrue);
+ Node* load = Load(FieldAccessAtIndex(0), finish, effect1, ifTrue);
Node* result = Return(load, effect1, ifTrue);
EndGraph();
graph()->end()->AppendInput(zone(), deopt);
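
(The two new tests here, StraightNonEscapeNonConstStore and BranchEscapeOne, pin down one rule: a StoreElement whose index is not a compile-time constant forces the allocation to escape, because the analysis can no longer say which virtual field the write hits. Below is a toy model of that rule in Python; it illustrates the decision the tests check, not V8's actual escape-analysis implementation.)

    class VirtualObject:
        """Toy stand-in for an allocation tracked field-by-field."""

        def __init__(self, size):
            self.fields = {i: None for i in range(size)}
            self.escaped = False

        def store(self, index, value):
            # A write through a statically unknown index cannot be tracked
            # per field, so the whole object must be materialized.
            if not isinstance(index, int):
                self.escaped = True
                return
            self.fields[index] = value

        def load(self, index):
            # Loads from an escaped object can no longer be folded to the
            # tracked value; the compiler must keep the real load.
            if self.escaped or not isinstance(index, int):
                return None
            return self.fields.get(index)

    obj = VirtualObject(size=1)
    obj.store(0, 'object1')              # tracked: the load below folds
    assert obj.load(0) == 'object1'
    obj.store('phi(index)', 'object1')   # unknown index: object escapes
    assert obj.escaped and obj.load(0) is None
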
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
index 89c0a654e9..16030f80d7 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -40,7 +40,7 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
instruction_blocks);
SourcePositionTable source_position_table(graph());
InstructionSelector selector(test_->zone(), node_count, &linkage, &sequence,
- schedule, &source_position_table,
+ schedule, &source_position_table, nullptr,
source_position_mode, features);
selector.SelectInstructions();
if (FLAG_trace_turbo) {
@@ -148,7 +148,7 @@ InstructionSelectorTest::StreamBuilder::GetFrameStateFunctionInfo(
int parameter_count, int local_count) {
return common()->CreateFrameStateFunctionInfo(
FrameStateType::kJavaScriptFunction, parameter_count, local_count,
- Handle<SharedFunctionInfo>(), CALL_MAINTAINS_NATIVE_CONTEXT);
+ Handle<SharedFunctionInfo>());
}
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
index fc7c144939..f1397faa06 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.h
@@ -92,7 +92,7 @@ class InstructionSelectorTest : public TestWithContext,
CallDescriptor* MakeCallDescriptor(Zone* zone, MachineType return_type) {
MachineSignature::Builder builder(zone, 1, 0);
builder.AddReturn(return_type);
- return Linkage::GetSimplifiedCDescriptor(zone, builder.Build());
+ return MakeSimpleCallDescriptor(zone, builder.Build());
}
CallDescriptor* MakeCallDescriptor(Zone* zone, MachineType return_type,
@@ -100,7 +100,7 @@ class InstructionSelectorTest : public TestWithContext,
MachineSignature::Builder builder(zone, 1, 1);
builder.AddReturn(return_type);
builder.AddParam(parameter0_type);
- return Linkage::GetSimplifiedCDescriptor(zone, builder.Build());
+ return MakeSimpleCallDescriptor(zone, builder.Build());
}
CallDescriptor* MakeCallDescriptor(Zone* zone, MachineType return_type,
@@ -110,7 +110,7 @@ class InstructionSelectorTest : public TestWithContext,
builder.AddReturn(return_type);
builder.AddParam(parameter0_type);
builder.AddParam(parameter1_type);
- return Linkage::GetSimplifiedCDescriptor(zone, builder.Build());
+ return MakeSimpleCallDescriptor(zone, builder.Build());
}
CallDescriptor* MakeCallDescriptor(Zone* zone, MachineType return_type,
@@ -122,11 +122,48 @@ class InstructionSelectorTest : public TestWithContext,
builder.AddParam(parameter0_type);
builder.AddParam(parameter1_type);
builder.AddParam(parameter2_type);
- return Linkage::GetSimplifiedCDescriptor(zone, builder.Build());
+ return MakeSimpleCallDescriptor(zone, builder.Build());
}
private:
InstructionSelectorTest* test_;
+
+ // Create a simple call descriptor for testing.
+ CallDescriptor* MakeSimpleCallDescriptor(Zone* zone,
+ MachineSignature* msig) {
+ LocationSignature::Builder locations(zone, msig->return_count(),
+ msig->parameter_count());
+
+ // Add return location(s).
+ const int return_count = static_cast<int>(msig->return_count());
+ for (int i = 0; i < return_count; i++) {
+ locations.AddReturn(LinkageLocation::ForCallerFrameSlot(-1 - i));
+ }
+
+ // Just put all parameters on the stack.
+ const int parameter_count = static_cast<int>(msig->parameter_count());
+ for (int i = 0; i < parameter_count; i++) {
+ locations.AddParam(LinkageLocation::ForCallerFrameSlot(-1 - i));
+ }
+
+ const RegList kCalleeSaveRegisters = 0;
+ const RegList kCalleeSaveFPRegisters = 0;
+
+ MachineType target_type = MachineType::Pointer();
+ LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
+ return new (zone) CallDescriptor( // --
+ CallDescriptor::kCallAddress, // kind
+ target_type, // target MachineType
+ target_loc, // target location
+ msig, // machine_sig
+ locations.Build(), // location_sig
+ 0, // stack_parameter_count
+ Operator::kNoProperties, // properties
+ kCalleeSaveRegisters, // callee-saved registers
+ kCalleeSaveFPRegisters, // callee-saved fp regs
+ CallDescriptor::kNoFlags, // flags
+ "iselect-test-call");
+ }
};
class Stream final {
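
(The new unit-test file that follows exercises Int64Lowering, the pass that rewrites 64-bit operations into pairs of 32-bit operations on 32-bit targets; note the "if (4 != kPointerSize) return;" guards, and that the high word sits at byte offset +4 in the load/store tests, i.e. a little-endian layout. The word splitting the tests assert is simple; here is a Python sketch of the constant and Word64And cases, mirroring the low_word_value/high_word_value helpers rather than the pass itself.)

    MASK32 = 0xffffffff

    def split64(v):
        # A word64 value becomes a (low, high) pair of word32 values.
        return v & MASK32, (v >> 32) & MASK32

    def lower_word64_and(a, b):
        # Word64And lowers to two independent Word32And operations.
        (a_lo, a_hi), (b_lo, b_hi) = split64(a), split64(b)
        return a_lo & b_lo, a_hi & b_hi

    v0, v1 = 0x1234567890abcdef, 0x1edcba098765432f
    lo, hi = lower_word64_and(v0, v1)
    assert (hi << 32) | lo == v0 & v1
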
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
new file mode 100644
index 0000000000..eff6d4a931
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -0,0 +1,299 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/int64-lowering.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+
+#include "src/compiler/node-properties.h"
+
+#include "src/signature.h"
+
+#include "src/wasm/wasm-module.h"
+
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Int64LoweringTest : public GraphTest {
+ public:
+ Int64LoweringTest() : GraphTest(), machine_(zone()) {
+ value_[0] = 0x1234567890abcdef;
+ value_[1] = 0x1edcba098765432f;
+ value_[2] = 0x1133557799886644;
+ }
+
+ MachineOperatorBuilder* machine() { return &machine_; }
+
+ void LowerGraph(Node* node, Signature<MachineRepresentation>* signature) {
+ Node* ret = graph()->NewNode(common()->Return(), node, graph()->start(),
+ graph()->start());
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
+
+ Int64Lowering lowering(graph(), machine(), common(), zone(), signature);
+ lowering.LowerGraph();
+ }
+
+ void LowerGraph(Node* node, MachineRepresentation return_type,
+ MachineRepresentation rep = MachineRepresentation::kWord32,
+ int num_params = 0) {
+ Signature<MachineRepresentation>::Builder sig_builder(zone(), 1,
+ num_params);
+ sig_builder.AddReturn(return_type);
+ for (int i = 0; i < num_params; i++) {
+ sig_builder.AddParam(rep);
+ }
+ LowerGraph(node, sig_builder.Build());
+ }
+
+ void CompareCallDescriptors(const CallDescriptor* lhs,
+ const CallDescriptor* rhs) {
+ EXPECT_THAT(lhs->CalleeSavedFPRegisters(), rhs->CalleeSavedFPRegisters());
+ EXPECT_THAT(lhs->CalleeSavedRegisters(), rhs->CalleeSavedRegisters());
+ EXPECT_THAT(lhs->FrameStateCount(), rhs->FrameStateCount());
+ EXPECT_THAT(lhs->InputCount(), rhs->InputCount());
+ for (size_t i = 0; i < lhs->InputCount(); i++) {
+ EXPECT_THAT(lhs->GetInputLocation(i), rhs->GetInputLocation(i));
+ EXPECT_THAT(lhs->GetInputType(i), rhs->GetInputType(i));
+ }
+ EXPECT_THAT(lhs->ReturnCount(), rhs->ReturnCount());
+ for (size_t i = 0; i < lhs->ReturnCount(); i++) {
+ EXPECT_THAT(lhs->GetReturnLocation(i), rhs->GetReturnLocation(i));
+ EXPECT_THAT(lhs->GetReturnType(i), rhs->GetReturnType(i));
+ }
+ EXPECT_THAT(lhs->flags(), rhs->flags());
+ EXPECT_THAT(lhs->kind(), rhs->kind());
+ }
+
+ int64_t value(int i) { return value_[i]; }
+
+ int32_t low_word_value(int i) {
+ return static_cast<int32_t>(value_[i] & 0xffffffff);
+ }
+
+ int32_t high_word_value(int i) {
+ return static_cast<int32_t>(value_[i] >> 32);
+ }
+
+ private:
+ MachineOperatorBuilder machine_;
+ int64_t value_[3];
+};
+
+TEST_F(Int64LoweringTest, Int64Constant) {
+ if (4 != kPointerSize) return;
+
+ LowerGraph(Int64Constant(value(0)), MachineRepresentation::kWord64);
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(high_word_value(0)), start(), start()));
+}
+
+TEST_F(Int64LoweringTest, Int64Load) {
+ if (4 != kPointerSize) return;
+
+ int32_t base = 0x1234;
+ int32_t index = 0x5678;
+
+ LowerGraph(graph()->NewNode(machine()->Load(MachineType::Int64()),
+ Int32Constant(base), Int32Constant(index),
+ start(), start()),
+ MachineRepresentation::kWord64);
+
+ Capture<Node*> high_word_load;
+ Matcher<Node*> high_word_load_matcher =
+ IsLoad(MachineType::Int32(), IsInt32Constant(base),
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4)), start(),
+ start());
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(IsLoad(MachineType::Int32(), IsInt32Constant(base),
+ IsInt32Constant(index), AllOf(CaptureEq(&high_word_load),
+ high_word_load_matcher),
+ start()),
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher),
+ start(), start()));
+}
+
+TEST_F(Int64LoweringTest, Int64Store) {
+ if (4 != kPointerSize) return;
+
+ // We have to build the TF graph explicitly here because Store does not return
+ // a value.
+
+ int32_t base = 1111;
+ int32_t index = 2222;
+ int32_t return_value = 0x5555;
+
+ Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0);
+ sig_builder.AddReturn(MachineRepresentation::kWord32);
+
+ Node* store = graph()->NewNode(
+ machine()->Store(StoreRepresentation(MachineRepresentation::kWord64,
+ WriteBarrierKind::kNoWriteBarrier)),
+ Int32Constant(base), Int32Constant(index), Int64Constant(value(0)),
+ start(), start());
+
+ Node* ret = graph()->NewNode(common()->Return(), Int32Constant(return_value),
+ store, start());
+
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
+
+ Int64Lowering lowering(graph(), machine(), common(), zone(),
+ sig_builder.Build());
+ lowering.LowerGraph();
+
+ const StoreRepresentation rep(MachineRepresentation::kWord32,
+ kNoWriteBarrier);
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn(
+ IsInt32Constant(return_value),
+ IsStore(
+ rep, IsInt32Constant(base), IsInt32Constant(index),
+ IsInt32Constant(low_word_value(0)),
+ IsStore(rep, IsInt32Constant(base),
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(4)),
+ IsInt32Constant(high_word_value(0)), start(), start()),
+ start()),
+ start()));
+}
+
+TEST_F(Int64LoweringTest, Int64And) {
+ if (4 != kPointerSize) return;
+
+ LowerGraph(graph()->NewNode(machine()->Word64And(), Int64Constant(value(0)),
+ Int64Constant(value(1))),
+ MachineRepresentation::kWord64);
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsWord32And(IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(low_word_value(1))),
+ IsWord32And(IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(high_word_value(1))),
+ start(), start()));
+}
+
+TEST_F(Int64LoweringTest, TruncateInt64ToInt32) {
+ if (4 != kPointerSize) return;
+
+ LowerGraph(graph()->NewNode(machine()->TruncateInt64ToInt32(),
+ Int64Constant(value(0))),
+ MachineRepresentation::kWord32);
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn(IsInt32Constant(low_word_value(0)), start(), start()));
+}
+
+TEST_F(Int64LoweringTest, Parameter) {
+ if (4 != kPointerSize) return;
+
+ LowerGraph(Parameter(0), MachineRepresentation::kWord64,
+ MachineRepresentation::kWord64, 1);
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsParameter(0), IsParameter(1), start(), start()));
+}
+
+TEST_F(Int64LoweringTest, Parameter2) {
+ if (4 != kPointerSize) return;
+
+ Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 5);
+ sig_builder.AddReturn(MachineRepresentation::kWord32);
+
+ sig_builder.AddParam(MachineRepresentation::kWord32);
+ sig_builder.AddParam(MachineRepresentation::kWord64);
+ sig_builder.AddParam(MachineRepresentation::kFloat64);
+ sig_builder.AddParam(MachineRepresentation::kWord64);
+ sig_builder.AddParam(MachineRepresentation::kWord32);
+
+ int start_parameter = start()->op()->ValueOutputCount();
+ LowerGraph(Parameter(4), sig_builder.Build());
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn(IsParameter(6), start(), start()));
+  // The parameter count of the start node should increase by 2, because we
+  // lowered two kWord64 parameter nodes, each into a pair of kWord32 ones.
+ EXPECT_THAT(start()->op()->ValueOutputCount(), start_parameter + 2);
+}
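
The index shift asserted above (Parameter(4) lowering to IsParameter(6)) follows a simple rule: every kWord64 parameter occupies two lowered slots, so each one shifts all later parameters right by one. A standalone sketch of that index computation (Rep is a stand-in enum, not the V8 type):

    #include <vector>

    enum class Rep { kWord32, kWord64, kFloat64 };

    // Index of a parameter after lowering: +1 for each kWord64 parameter
    // that precedes it. For the signature in Parameter2 above,
    // LoweredIndex({kWord32, kWord64, kFloat64, kWord64, kWord32}, 4) == 6.
    int LoweredIndex(const std::vector<Rep>& params, int index) {
      int lowered = index;
      for (int i = 0; i < index; ++i) {
        if (params[i] == Rep::kWord64) ++lowered;
      }
      return lowered;
    }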
+
+TEST_F(Int64LoweringTest, CallI64Return) {
+ if (4 != kPointerSize) return;
+
+ int32_t function = 0x9999;
+
+ Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 0);
+ sig_builder.AddReturn(MachineRepresentation::kWord64);
+
+ compiler::CallDescriptor* desc =
+ wasm::ModuleEnv::GetWasmCallDescriptor(zone(), sig_builder.Build());
+
+ LowerGraph(graph()->NewNode(common()->Call(desc), Int32Constant(function),
+ start(), start()),
+ MachineRepresentation::kWord64);
+
+ Capture<Node*> call;
+ Matcher<Node*> call_matcher =
+ IsCall(testing::_, IsInt32Constant(function), start(), start());
+
+ EXPECT_THAT(graph()->end()->InputAt(1),
+ IsReturn2(IsProjection(0, AllOf(CaptureEq(&call), call_matcher)),
+ IsProjection(1, AllOf(CaptureEq(&call), call_matcher)),
+ start(), start()));
+
+ CompareCallDescriptors(
+ OpParameter<const CallDescriptor*>(
+ graph()->end()->InputAt(1)->InputAt(0)->InputAt(0)),
+ wasm::ModuleEnv::GetI32WasmCallDescriptor(zone(), desc));
+}
+
+TEST_F(Int64LoweringTest, CallI64Parameter) {
+ if (4 != kPointerSize) return;
+
+ int32_t function = 0x9999;
+
+ Signature<MachineRepresentation>::Builder sig_builder(zone(), 1, 3);
+ sig_builder.AddReturn(MachineRepresentation::kWord32);
+ sig_builder.AddParam(MachineRepresentation::kWord64);
+ sig_builder.AddParam(MachineRepresentation::kWord32);
+ sig_builder.AddParam(MachineRepresentation::kWord64);
+
+ compiler::CallDescriptor* desc =
+ wasm::ModuleEnv::GetWasmCallDescriptor(zone(), sig_builder.Build());
+
+ LowerGraph(graph()->NewNode(common()->Call(desc), Int32Constant(function),
+ Int64Constant(value(0)),
+ Int32Constant(low_word_value(1)),
+ Int64Constant(value(2)), start(), start()),
+ MachineRepresentation::kWord32);
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn(IsCall(testing::_, IsInt32Constant(function),
+ IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(high_word_value(0)),
+ IsInt32Constant(low_word_value(1)),
+ IsInt32Constant(low_word_value(2)),
+ IsInt32Constant(high_word_value(2)), start(), start()),
+ start(), start()));
+
+ CompareCallDescriptors(
+ OpParameter<const CallDescriptor*>(
+ graph()->end()->InputAt(1)->InputAt(0)),
+ wasm::ModuleEnv::GetI32WasmCallDescriptor(zone(), desc));
+}
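
Both call tests assert that GetI32WasmCallDescriptor rewrites the machine signature the same way values are rewritten: each kWord64 slot, whether parameter or return, expands to a (low, high) pair of kWord32 slots. A sketch of that expansion using the same stand-in Rep enum as above (illustrative only; the real descriptor also remaps locations and flags, which CompareCallDescriptors checks):

    #include <vector>

    enum class Rep { kWord32, kWord64, kFloat64 };  // as in the earlier sketch

    // Expand a flat list of machine representations the way the lowered
    // call descriptor does: each kWord64 becomes two kWord32 slots.
    std::vector<Rep> LowerSignature(const std::vector<Rep>& sig) {
      std::vector<Rep> lowered;
      for (Rep r : sig) {
        if (r == Rep::kWord64) {
          lowered.push_back(Rep::kWord32);  // low word
          lowered.push_back(Rep::kWord32);  // high word
        } else {
          lowered.push_back(r);
        }
      }
      return lowered;
    }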
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h b/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h
deleted file mode 100644
index 15fa38b1be..0000000000
--- a/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_
-#define V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_
-
-#include "src/compiler/interpreter-assembler.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/machine-operator.h"
-#include "test/unittests/test-utils.h"
-#include "testing/gmock-support.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-using ::testing::Matcher;
-
-class InterpreterAssemblerTest : public TestWithIsolateAndZone {
- public:
- InterpreterAssemblerTest() {}
- ~InterpreterAssemblerTest() override {}
-
- class InterpreterAssemblerForTest final : public InterpreterAssembler {
- public:
- InterpreterAssemblerForTest(InterpreterAssemblerTest* test,
- interpreter::Bytecode bytecode)
- : InterpreterAssembler(test->isolate(), test->zone(), bytecode) {}
- ~InterpreterAssemblerForTest() override {}
-
- Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& index_matcher);
- Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& value_matcher);
-
- Matcher<Node*> IsBytecodeOperand(int offset);
- Matcher<Node*> IsBytecodeOperandSignExtended(int offset);
- Matcher<Node*> IsBytecodeOperandShort(int offset);
- Matcher<Node*> IsBytecodeOperandShortSignExtended(int offset);
-
- using InterpreterAssembler::call_descriptor;
- using InterpreterAssembler::graph;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(InterpreterAssemblerForTest);
- };
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_
diff --git a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
index 78e9253a17..9e14cda7fb 100644
--- a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -64,10 +64,6 @@ Type* const kIntegral32Types[] = {Type::UnsignedSmall(), Type::Negative32(),
Type::Integral32()};
-const LanguageMode kLanguageModes[] = {SLOPPY, STRICT, STRONG};
-
-
-// TODO(mstarzinger): Find a common place and unify with test-js-typed-lowering.
Type* const kNumberTypes[] = {
Type::UnsignedSmall(), Type::Negative32(), Type::Unsigned31(),
Type::SignedSmall(), Type::Signed32(), Type::Unsigned32(),
@@ -88,15 +84,13 @@ TEST_F(JSBuiltinReducerTest, MathMax0) {
Node* control = graph()->start();
Node* context = UndefinedConstant();
Node* frame_state = graph()->start();
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- Node* call = graph()->NewNode(javascript()->CallFunction(2, language_mode),
- function, UndefinedConstant(), context,
- frame_state, frame_state, effect, control);
- Reduction r = Reduce(call);
+ Node* call = graph()->NewNode(javascript()->CallFunction(2), function,
+ UndefinedConstant(), context, frame_state,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
- }
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
}
@@ -107,18 +101,15 @@ TEST_F(JSBuiltinReducerTest, MathMax1) {
Node* control = graph()->start();
Node* context = UndefinedConstant();
Node* frame_state = graph()->start();
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, language_mode),
- function, UndefinedConstant(), p0, context,
- frame_state, frame_state, effect, control);
- Reduction r = Reduce(call);
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), p0);
- }
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), p0);
}
}
@@ -130,22 +121,18 @@ TEST_F(JSBuiltinReducerTest, MathMax2) {
Node* control = graph()->start();
Node* context = UndefinedConstant();
Node* frame_state = graph()->start();
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- TRACED_FOREACH(Type*, t0, kIntegral32Types) {
- TRACED_FOREACH(Type*, t1, kIntegral32Types) {
- Node* p0 = Parameter(t0, 0);
- Node* p1 = Parameter(t1, 1);
- Node* call =
- graph()->NewNode(javascript()->CallFunction(4, language_mode),
- function, UndefinedConstant(), p0, p1, context,
- frame_state, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsSelect(MachineRepresentation::kNone,
- IsNumberLessThan(p1, p0), p0, p1));
- }
+ TRACED_FOREACH(Type*, t0, kIntegral32Types) {
+ TRACED_FOREACH(Type*, t1, kIntegral32Types) {
+ Node* p0 = Parameter(t0, 0);
+ Node* p1 = Parameter(t1, 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSelect(MachineRepresentation::kNone,
+ IsNumberLessThan(p1, p0), p0, p1));
}
}
}
@@ -162,20 +149,17 @@ TEST_F(JSBuiltinReducerTest, MathImul) {
Node* control = graph()->start();
Node* context = UndefinedConstant();
Node* frame_state = graph()->start();
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- TRACED_FOREACH(Type*, t0, kIntegral32Types) {
- TRACED_FOREACH(Type*, t1, kIntegral32Types) {
- Node* p0 = Parameter(t0, 0);
- Node* p1 = Parameter(t1, 1);
- Node* call =
- graph()->NewNode(javascript()->CallFunction(4, language_mode),
- function, UndefinedConstant(), p0, p1, context,
- frame_state, frame_state, effect, control);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsInt32Mul(p0, p1));
- }
+ TRACED_FOREACH(Type*, t0, kIntegral32Types) {
+ TRACED_FOREACH(Type*, t1, kIntegral32Types) {
+ Node* p0 = Parameter(t0, 0);
+ Node* p1 = Parameter(t1, 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Mul(p0, p1));
}
}
}
@@ -192,18 +176,15 @@ TEST_F(JSBuiltinReducerTest, MathFround) {
Node* control = graph()->start();
Node* context = UndefinedConstant();
Node* frame_state = graph()->start();
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, language_mode),
- function, UndefinedConstant(), p0, context,
- frame_state, frame_state, effect, control);
- Reduction r = Reduce(call);
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsTruncateFloat64ToFloat32(p0));
- }
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTruncateFloat64ToFloat32(p0));
}
}
diff --git a/deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc b/deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc
deleted file mode 100644
index a44bd0278d..0000000000
--- a/deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/js-context-relaxation.h"
-#include "src/compiler/js-graph.h"
-#include "test/unittests/compiler/graph-unittest.h"
-#include "test/unittests/compiler/node-test-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class JSContextRelaxationTest : public GraphTest {
- public:
- JSContextRelaxationTest() : GraphTest(3), javascript_(zone()) {}
- ~JSContextRelaxationTest() override {}
-
- protected:
- Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
- MachineOperatorBuilder::kNoFlags) {
- MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
- flags);
- JSGraph jsgraph(isolate(), graph(), common(), javascript(), nullptr,
- &machine);
- // TODO(titzer): mock the GraphReducer here for better unit testing.
- GraphReducer graph_reducer(zone(), graph());
- JSContextRelaxation reducer;
- return reducer.Reduce(node);
- }
-
- Node* EmptyFrameState() {
- MachineOperatorBuilder machine(zone());
- JSGraph jsgraph(isolate(), graph(), common(), javascript(), nullptr,
- &machine);
- return jsgraph.EmptyFrameState();
- }
-
- Node* ShallowFrameStateChain(Node* outer_context,
- ContextCallingMode context_calling_mode) {
- const FrameStateFunctionInfo* const frame_state_function_info =
- common()->CreateFrameStateFunctionInfo(
- FrameStateType::kJavaScriptFunction, 3, 0,
- Handle<SharedFunctionInfo>(), context_calling_mode);
- const Operator* op = common()->FrameState(BailoutId::None(),
- OutputFrameStateCombine::Ignore(),
- frame_state_function_info);
- return graph()->NewNode(op, graph()->start(), graph()->start(),
- graph()->start(), outer_context, graph()->start(),
- graph()->start());
- }
-
- Node* DeepFrameStateChain(Node* outer_context,
- ContextCallingMode context_calling_mode) {
- const FrameStateFunctionInfo* const frame_state_function_info =
- common()->CreateFrameStateFunctionInfo(
- FrameStateType::kJavaScriptFunction, 3, 0,
- Handle<SharedFunctionInfo>(), context_calling_mode);
- const Operator* op = common()->FrameState(BailoutId::None(),
- OutputFrameStateCombine::Ignore(),
- frame_state_function_info);
- Node* shallow_frame_state =
- ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
- return graph()->NewNode(op, graph()->start(), graph()->start(),
- graph()->start(), graph()->start(),
- graph()->start(), shallow_frame_state);
- }
-
- JSOperatorBuilder* javascript() { return &javascript_; }
-
- private:
- JSOperatorBuilder javascript_;
-};
-
-
-TEST_F(JSContextRelaxationTest,
- RelaxJSCallFunctionShallowFrameStateChainNoCrossCtx) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const context = Parameter(2);
- Node* const outer_context = Parameter(3);
- Node* const frame_state =
- ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Node* node = graph()->NewNode(
- javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
- context, frame_state, frame_state, effect, control);
- Reduction const r = Reduce(node);
- EXPECT_TRUE(r.Changed());
- EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
-}
-
-TEST_F(JSContextRelaxationTest,
- RelaxJSCallFunctionShallowFrameStateChainCrossCtx) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const context = Parameter(2);
- Node* const outer_context = Parameter(3);
- Node* const frame_state =
- ShallowFrameStateChain(outer_context, CALL_CHANGES_NATIVE_CONTEXT);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Node* node = graph()->NewNode(
- javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
- context, frame_state, frame_state, effect, control);
- Reduction const r = Reduce(node);
- EXPECT_FALSE(r.Changed());
- EXPECT_EQ(context, NodeProperties::GetContextInput(node));
-}
-
-TEST_F(JSContextRelaxationTest,
- RelaxJSCallFunctionDeepFrameStateChainNoCrossCtx) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const context = Parameter(2);
- Node* const outer_context = Parameter(3);
- Node* const frame_state =
- DeepFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Node* node = graph()->NewNode(
- javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
- context, frame_state, frame_state, effect, control);
- Reduction const r = Reduce(node);
- EXPECT_TRUE(r.Changed());
- EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
-}
-
-TEST_F(JSContextRelaxationTest,
- RelaxJSCallFunctionDeepFrameStateChainCrossCtx) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const context = Parameter(2);
- Node* const outer_context = Parameter(3);
- Node* const frame_state =
- DeepFrameStateChain(outer_context, CALL_CHANGES_NATIVE_CONTEXT);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Node* node = graph()->NewNode(
- javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
- context, frame_state, frame_state, effect, control);
- Reduction const r = Reduce(node);
- EXPECT_FALSE(r.Changed());
- EXPECT_EQ(context, NodeProperties::GetContextInput(node));
-}
-
-TEST_F(JSContextRelaxationTest,
- RelaxJSCallFunctionDeepContextChainFullRelaxForCatch) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const context = Parameter(2);
- Node* const outer_context = Parameter(3);
- const Operator* op = javascript()->CreateCatchContext(Handle<String>());
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Node* nested_context = graph()->NewNode(
- op, graph()->start(), graph()->start(), outer_context, effect, control);
- Node* const frame_state_2 =
- ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
- Node* node = graph()->NewNode(
- javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
- context, frame_state_2, frame_state_2, effect, control);
- Reduction const r = Reduce(node);
- EXPECT_TRUE(r.Changed());
- EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
-}
-
-
-TEST_F(JSContextRelaxationTest,
- RelaxJSCallFunctionDeepContextChainFullRelaxForWith) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const context = Parameter(2);
- Node* const outer_context = Parameter(3);
- const Operator* op = javascript()->CreateWithContext();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Node* nested_context = graph()->NewNode(
- op, graph()->start(), graph()->start(), outer_context, effect, control);
- Node* const frame_state_2 =
- ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
- Node* node = graph()->NewNode(
- javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
- context, frame_state_2, frame_state_2, effect, control);
- Reduction const r = Reduce(node);
- EXPECT_TRUE(r.Changed());
- EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
-}
-
-
-TEST_F(JSContextRelaxationTest,
- RelaxJSCallFunctionDeepContextChainFullRelaxForBlock) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const context = Parameter(2);
- Node* const outer_context = Parameter(3);
- Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::null();
- const Operator* op = javascript()->CreateBlockContext(scope_info);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Node* nested_context =
- graph()->NewNode(op, graph()->start(), outer_context, effect, control);
- Node* const frame_state_2 =
- ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
- Node* node = graph()->NewNode(
- javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
- context, frame_state_2, frame_state_2, effect, control);
- Reduction const r = Reduce(node);
- EXPECT_TRUE(r.Changed());
- EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
-}
-
-
-TEST_F(JSContextRelaxationTest,
- RelaxJSCallFunctionDeepContextChainPartialRelaxForScript) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const context = Parameter(2);
- Node* const outer_context = Parameter(3);
- Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::null();
- const Operator* op = javascript()->CreateScriptContext(scope_info);
- Node* const frame_state_1 =
- ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Node* nested_context = graph()->NewNode(op, graph()->start(), outer_context,
- frame_state_1, effect, control);
- Node* const frame_state_2 =
- ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
- Node* node = graph()->NewNode(
- javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
- context, frame_state_2, frame_state_2, effect, control);
- Reduction const r = Reduce(node);
- EXPECT_TRUE(r.Changed());
- EXPECT_EQ(nested_context, NodeProperties::GetContextInput(node));
-}
-
-
-TEST_F(JSContextRelaxationTest,
- RelaxJSCallFunctionDeepContextChainPartialRelaxForModule) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const context = Parameter(2);
- Node* const outer_context = Parameter(3);
- const Operator* op = javascript()->CreateModuleContext();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Node* nested_context = graph()->NewNode(
- op, graph()->start(), graph()->start(), outer_context, effect, control);
- Node* const frame_state_2 =
- ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
- Node* node = graph()->NewNode(
- javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
- context, frame_state_2, frame_state_2, effect, control);
- Reduction const r = Reduce(node);
- EXPECT_TRUE(r.Changed());
- EXPECT_EQ(nested_context, NodeProperties::GetContextInput(node));
-}
-
-
-TEST_F(JSContextRelaxationTest,
- RelaxJSCallFunctionDeepContextChainPartialNoRelax) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const context = Parameter(2);
- Node* const outer_context = Parameter(3);
- const Operator* op = javascript()->CreateFunctionContext(0);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Node* nested_context =
- graph()->NewNode(op, graph()->start(), outer_context, effect, control);
- Node* const frame_state_2 =
- ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
- Node* node = graph()->NewNode(
- javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
- context, frame_state_2, frame_state_2, effect, control);
- Reduction const r = Reduce(node);
- EXPECT_FALSE(r.Changed());
- EXPECT_EQ(context, NodeProperties::GetContextInput(node));
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
new file mode 100644
index 0000000000..837c5742d9
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -0,0 +1,236 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-create-lowering.h"
+#include "src/code-factory.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "src/isolate-inl.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::_;
+using testing::BitEq;
+using testing::IsNaN;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSCreateLoweringTest : public TypedGraphTest {
+ public:
+ JSCreateLoweringTest()
+ : TypedGraphTest(3), javascript_(zone()), deps_(isolate(), zone()) {}
+ ~JSCreateLoweringTest() override {}
+
+ protected:
+ Reduction Reduce(Node* node) {
+ MachineOperatorBuilder machine(zone());
+ SimplifiedOperatorBuilder simplified(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
+ &machine);
+ // TODO(titzer): mock the GraphReducer here for better unit testing.
+ GraphReducer graph_reducer(zone(), graph());
+ JSCreateLowering reducer(&graph_reducer, &deps_, &jsgraph,
+ MaybeHandle<LiteralsArray>(), zone());
+ return reducer.Reduce(node);
+ }
+
+ Node* FrameState(Handle<SharedFunctionInfo> shared, Node* outer_frame_state) {
+ Node* state_values = graph()->NewNode(common()->StateValues(0));
+ return graph()->NewNode(
+ common()->FrameState(
+ BailoutId::None(), OutputFrameStateCombine::Ignore(),
+ common()->CreateFrameStateFunctionInfo(
+ FrameStateType::kJavaScriptFunction, 1, 0, shared)),
+ state_values, state_values, state_values, NumberConstant(0),
+ UndefinedConstant(), outer_frame_state);
+ }
+
+ JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+ JSOperatorBuilder javascript_;
+ CompilationDependencies deps_;
+};
+
+TEST_F(JSCreateLoweringTest, JSCreate) {
+ Handle<JSFunction> function = isolate()->object_function();
+ Node* const target = Parameter(Type::Constant(function, graph()->zone()));
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->Create(), target, target,
+ context, EmptyFrameState(), effect));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFinishRegion(
+ IsAllocate(IsNumberConstant(function->initial_map()->instance_size()),
+ IsBeginRegion(effect), _),
+ _));
+}
+
+// -----------------------------------------------------------------------------
+// JSCreateArguments
+
+TEST_F(JSCreateLoweringTest, JSCreateArgumentsViaStub) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Node* const frame_state = FrameState(shared, graph()->start());
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->CreateArguments(CreateArgumentsType::kUnmappedArguments),
+ closure, context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsCall(_, IsHeapConstant(
+ CodeFactory::FastNewStrictArguments(isolate()).code()),
+ closure, context, frame_state, effect, control));
+}
+
+TEST_F(JSCreateLoweringTest, JSCreateArgumentsRestParameterViaStub) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Node* const frame_state = FrameState(shared, graph()->start());
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->CreateArguments(CreateArgumentsType::kRestParameter),
+ closure, context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsCall(_, IsHeapConstant(
+ CodeFactory::FastNewRestParameter(isolate()).code()),
+ closure, context, frame_state, effect, control));
+}
+
+TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedMapped) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Node* const frame_state_outer = FrameState(shared, graph()->start());
+ Node* const frame_state_inner = FrameState(shared, frame_state_outer);
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->CreateArguments(CreateArgumentsType::kMappedArguments),
+ closure, context, frame_state_inner, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFinishRegion(
+ IsAllocate(IsNumberConstant(JSSloppyArgumentsObject::kSize),
+ _, control),
+ _));
+}
+
+TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedUnmapped) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Node* const frame_state_outer = FrameState(shared, graph()->start());
+ Node* const frame_state_inner = FrameState(shared, frame_state_outer);
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->CreateArguments(CreateArgumentsType::kUnmappedArguments),
+ closure, context, frame_state_inner, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFinishRegion(
+ IsAllocate(IsNumberConstant(JSStrictArgumentsObject::kSize),
+ _, control),
+ _));
+}
+
+TEST_F(JSCreateLoweringTest, JSCreateArgumentsInlinedRestArray) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Node* const frame_state_outer = FrameState(shared, graph()->start());
+ Node* const frame_state_inner = FrameState(shared, frame_state_outer);
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->CreateArguments(CreateArgumentsType::kRestParameter),
+ closure, context, frame_state_inner, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFinishRegion(
+ IsAllocate(IsNumberConstant(JSArray::kSize), _, control), _));
+}
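
A note on why the ViaStub and Inlined variants above expect different replacements: the tests with a single frame state model an outermost function, where the lowering calls a stub, while the tests chaining an inner frame state to an outer one model an inlined function, where the arguments object can be allocated inline. A trivial sketch of that decision (illustrative pseudologic inferred from the tests, not the reducer's actual code):

    // Illustrative: inlined frames (those with an outer frame state) get
    // an inline allocation; outermost frames fall back to a stub call.
    enum class ArgumentsLowering { kStubCall, kInlineAllocation };

    ArgumentsLowering ChooseLowering(bool has_outer_frame_state) {
      return has_outer_frame_state ? ArgumentsLowering::kInlineAllocation
                                   : ArgumentsLowering::kStubCall;
    }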
+
+// -----------------------------------------------------------------------------
+// JSCreateFunctionContext
+
+TEST_F(JSCreateLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r =
+ Reduce(graph()->NewNode(javascript()->CreateFunctionContext(8), closure,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
+ 8 + Context::MIN_CONTEXT_SLOTS)),
+ IsBeginRegion(_), control),
+ _));
+}
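
The expected allocation size above, Context::SizeFor(8 + Context::MIN_CONTEXT_SLOTS), is essentially a fixed header plus one tagged slot per context entry. A hypothetical stand-in with example constants (the real values live in the V8 sources; every constant here is an assumption for illustration):

    #include <cstddef>

    constexpr size_t kTaggedSize = 4;       // assumed: 32-bit tagged slots
    constexpr size_t kHeaderSize = 8;       // assumed fixed-array header
    constexpr size_t kMinContextSlots = 4;  // assumed MIN_CONTEXT_SLOTS

    constexpr size_t ContextSizeFor(size_t slot_count) {
      return kHeaderSize + slot_count * kTaggedSize;
    }

    // CreateFunctionContext(8) then inlines an allocation of
    // ContextSizeFor(8 + kMinContextSlots) bytes wrapped in a
    // BeginRegion/FinishRegion effect pair, as the matcher asserts.
    static_assert(ContextSizeFor(8 + kMinContextSlots) == 56,
                  "example arithmetic only");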
+
+// -----------------------------------------------------------------------------
+// JSCreateWithContext
+
+TEST_F(JSCreateLoweringTest, JSCreateWithContext) {
+ Node* const object = Parameter(Type::Receiver());
+ Node* const closure = Parameter(Type::Function());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->CreateWithContext(), object,
+ closure, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
+ Context::MIN_CONTEXT_SLOTS)),
+ IsBeginRegion(_), control),
+ _));
+}
+
+// -----------------------------------------------------------------------------
+// JSCreateCatchContext
+
+TEST_F(JSCreateLoweringTest, JSCreateCatchContext) {
+ Handle<String> name = factory()->length_string();
+ Node* const exception = Parameter(Type::Receiver());
+ Node* const closure = Parameter(Type::Function());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->CreateCatchContext(name), exception,
+ closure, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
+ Context::MIN_CONTEXT_SLOTS + 1)),
+ IsBeginRegion(_), control),
+ _));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index f38f8eaac7..919c1b2237 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -7,7 +7,6 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/js-intrinsic-lowering.h"
#include "src/compiler/js-operator.h"
-#include "src/types-inl.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "testing/gmock-support.h"
@@ -24,9 +23,9 @@ namespace v8 {
namespace internal {
namespace compiler {
-class JSIntrinsicLoweringTest : public TypedGraphTest {
+class JSIntrinsicLoweringTest : public GraphTest {
public:
- JSIntrinsicLoweringTest() : TypedGraphTest(3), javascript_(zone()) {}
+ JSIntrinsicLoweringTest() : GraphTest(3), javascript_(zone()) {}
~JSIntrinsicLoweringTest() override {}
protected:
@@ -162,37 +161,6 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsArray) {
// -----------------------------------------------------------------------------
-// %_IsDate
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineIsDate) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineIsDate, 1),
- input, context, effect, control));
- ASSERT_TRUE(r.Changed());
-
- Node* phi = r.replacement();
- Capture<Node*> branch, if_false;
- EXPECT_THAT(
- phi,
- IsPhi(
- MachineRepresentation::kTagged, IsFalseConstant(),
- IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
- IsLoadField(AccessBuilder::ForMap(), input,
- effect, CaptureEq(&if_false)),
- effect, _),
- IsInt32Constant(JS_DATE_TYPE)),
- IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
- IsBranch(IsObjectIsSmi(input), control))),
- AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
-}
-
-
-// -----------------------------------------------------------------------------
// %_IsTypedArray
@@ -224,38 +192,6 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsTypedArray) {
// -----------------------------------------------------------------------------
-// %_IsFunction
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineIsFunction) {
- Node* const input = Parameter(Type::Any());
- Node* const context = Parameter(Type::Any());
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineIsFunction, 1),
- input, context, effect, control));
- ASSERT_TRUE(r.Changed());
-
- Node* phi = r.replacement();
- Capture<Node*> branch, if_false;
- EXPECT_THAT(
- phi,
- IsPhi(
- MachineRepresentation::kTagged, IsFalseConstant(),
- IsUint32LessThanOrEqual(
- IsInt32Constant(FIRST_FUNCTION_TYPE),
- IsLoadField(AccessBuilder::ForMapInstanceType(),
- IsLoadField(AccessBuilder::ForMap(), input, effect,
- CaptureEq(&if_false)),
- effect, _)),
- IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
- IsBranch(IsObjectIsSmi(input), control))),
- AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
-}
-
-
-// -----------------------------------------------------------------------------
// %_IsRegExp
@@ -290,75 +226,16 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsRegExp) {
// %_IsJSReceiver
-TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiverWithAny) {
- Node* const input = Parameter(Type::Any());
- Node* const context = Parameter(Type::Any());
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1), input,
- context, effect, control));
- ASSERT_TRUE(r.Changed());
-
- Node* phi = r.replacement();
- Capture<Node *> branch, if_false;
- EXPECT_THAT(
- phi,
- IsPhi(
- MachineRepresentation::kTagged, IsFalseConstant(),
- IsUint32LessThanOrEqual(
- IsInt32Constant(FIRST_JS_RECEIVER_TYPE),
- IsLoadField(AccessBuilder::ForMapInstanceType(),
- IsLoadField(AccessBuilder::ForMap(), input, effect,
- CaptureEq(&if_false)),
- effect, _)),
- IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
- IsBranch(IsObjectIsSmi(input), control))),
- AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
-}
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiverWithReceiver) {
- Node* const input = Parameter(Type::Receiver());
- Node* const context = Parameter(Type::Any());
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1), input,
- context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsTrueConstant());
-}
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiverWithUndefined) {
- Node* const input = Parameter(Type::Undefined());
- Node* const context = Parameter(Type::Any());
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1), input,
- context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFalseConstant());
-}
-
-
-// -----------------------------------------------------------------------------
-// %_JSValueGetValue
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineJSValueGetValue) {
+TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiver) {
Node* const input = Parameter(0);
Node* const context = Parameter(1);
Node* const effect = graph()->start();
Node* const control = graph()->start();
Reduction const r = Reduce(graph()->NewNode(
- javascript()->CallRuntime(Runtime::kInlineJSValueGetValue, 1), input,
+ javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1), input,
context, effect, control));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsLoadField(AccessBuilder::ForValue(), input, effect, control));
+ EXPECT_THAT(r.replacement(), IsObjectIsReceiver(input));
}
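
The rewritten InlineIsJSReceiver test reflects the simplification in this commit: the intrinsic now lowers to a single ObjectIsReceiver simplified operator instead of the hand-built Smi-check/instance-type graph that the deleted matchers encoded. For reference, that deleted graph computed roughly the following predicate (standalone sketch; the type constant is a stand-in):

    #include <cstdint>

    // A value is a JS receiver iff it is a heap object (not a Smi) whose
    // map's instance type is >= FIRST_JS_RECEIVER_TYPE.
    constexpr uint16_t kFirstJSReceiverType = 0x400;  // assumed stand-in

    bool IsJSReceiver(bool is_smi, uint16_t instance_type) {
      if (is_smi) return false;  // Smis are numbers, never receivers
      return instance_type >= kFirstJSReceiverType;
    }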
diff --git a/deps/v8/test/unittests/compiler/js-operator-unittest.cc b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
index e0db771458..15b1427871 100644
--- a/deps/v8/test/unittests/compiler/js-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
@@ -12,34 +12,8 @@ namespace v8 {
namespace internal {
namespace compiler {
-namespace {
-
-const LanguageMode kLanguageModes[] = {SLOPPY, STRICT, STRONG};
-
-
-#if GTEST_HAS_COMBINE
-
-template <typename T>
-class JSOperatorTestWithLanguageModeAndParam
- : public TestWithZone,
- public ::testing::WithParamInterface<::testing::tuple<LanguageMode, T>> {
- protected:
- LanguageMode language_mode() const {
- return ::testing::get<0>(B::GetParam());
- }
- const T& GetParam() const { return ::testing::get<1>(B::GetParam()); }
-
- private:
- typedef ::testing::WithParamInterface<::testing::tuple<LanguageMode, T>> B;
-};
-
-#endif // GTEST_HAS_COMBINE
-
-} // namespace
-
-
// -----------------------------------------------------------------------------
-// Shared operators without language mode.
+// Shared operators.
namespace {
@@ -56,7 +30,6 @@ struct SharedOperator {
int control_output_count;
};
-
const SharedOperator kSharedOperators[] = {
#define SHARED(Name, properties, value_input_count, frame_state_input_count, \
effect_input_count, control_input_count, value_output_count, \
@@ -71,6 +44,10 @@ const SharedOperator kSharedOperators[] = {
SHARED(NotEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
SHARED(StrictEqual, Operator::kNoThrow, 2, 0, 1, 1, 1, 1, 0),
SHARED(StrictNotEqual, Operator::kNoThrow, 2, 0, 1, 1, 1, 1, 0),
+ SHARED(LessThan, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(GreaterThan, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(LessThanOrEqual, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(GreaterThanOrEqual, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
SHARED(ToNumber, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(ToString, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(ToName, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
@@ -148,123 +125,6 @@ TEST_P(JSSharedOperatorTest, Properties) {
INSTANTIATE_TEST_CASE_P(JSOperatorTest, JSSharedOperatorTest,
::testing::ValuesIn(kSharedOperators));
-
-// -----------------------------------------------------------------------------
-// Shared operators with language mode.
-
-
-#if GTEST_HAS_COMBINE
-
-namespace {
-
-struct SharedOperatorWithLanguageMode {
- const Operator* (JSOperatorBuilder::*constructor)(LanguageMode);
- IrOpcode::Value opcode;
- Operator::Properties properties;
- int value_input_count;
- int frame_state_input_count;
- int effect_input_count;
- int control_input_count;
- int value_output_count;
- int effect_output_count;
- int control_output_count;
-};
-
-
-const SharedOperatorWithLanguageMode kSharedOperatorsWithLanguageMode[] = {
-#define SHARED(Name, properties, value_input_count, frame_state_input_count, \
- effect_input_count, control_input_count, value_output_count, \
- effect_output_count, control_output_count) \
- { \
- &JSOperatorBuilder::Name, IrOpcode::kJS##Name, properties, \
- value_input_count, frame_state_input_count, effect_input_count, \
- control_input_count, value_output_count, effect_output_count, \
- control_output_count \
- }
- SHARED(LessThan, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(GreaterThan, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(LessThanOrEqual, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(GreaterThanOrEqual, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
-#undef SHARED
-};
-
-
-std::ostream& operator<<(std::ostream& os,
- const SharedOperatorWithLanguageMode& sop) {
- return os << IrOpcode::Mnemonic(sop.opcode);
-}
-
-} // namespace
-
-
-class JSSharedOperatorWithLanguageModeTest
- : public JSOperatorTestWithLanguageModeAndParam<
- SharedOperatorWithLanguageMode> {};
-
-
-TEST_P(JSSharedOperatorWithLanguageModeTest, InstancesAreGloballyShared) {
- const SharedOperatorWithLanguageMode& sop = GetParam();
- JSOperatorBuilder javascript1(zone());
- JSOperatorBuilder javascript2(zone());
- EXPECT_EQ((javascript1.*sop.constructor)(language_mode()),
- (javascript2.*sop.constructor)(language_mode()));
-}
-
-
-TEST_P(JSSharedOperatorWithLanguageModeTest, NumberOfInputsAndOutputs) {
- JSOperatorBuilder javascript(zone());
- const SharedOperatorWithLanguageMode& sop = GetParam();
- const Operator* op = (javascript.*sop.constructor)(language_mode());
-
- const int context_input_count = 1;
- EXPECT_EQ(sop.value_input_count, op->ValueInputCount());
- EXPECT_EQ(context_input_count, OperatorProperties::GetContextInputCount(op));
- EXPECT_EQ(sop.frame_state_input_count,
- OperatorProperties::GetFrameStateInputCount(op));
- EXPECT_EQ(sop.effect_input_count, op->EffectInputCount());
- EXPECT_EQ(sop.control_input_count, op->ControlInputCount());
- EXPECT_EQ(sop.value_input_count + context_input_count +
- sop.frame_state_input_count + sop.effect_input_count +
- sop.control_input_count,
- OperatorProperties::GetTotalInputCount(op));
-
- EXPECT_EQ(sop.value_output_count, op->ValueOutputCount());
- EXPECT_EQ(sop.effect_output_count, op->EffectOutputCount());
- EXPECT_EQ(sop.control_output_count, op->ControlOutputCount());
-}
-
-
-TEST_P(JSSharedOperatorWithLanguageModeTest, OpcodeIsCorrect) {
- JSOperatorBuilder javascript(zone());
- const SharedOperatorWithLanguageMode& sop = GetParam();
- const Operator* op = (javascript.*sop.constructor)(language_mode());
- EXPECT_EQ(sop.opcode, op->opcode());
-}
-
-
-TEST_P(JSSharedOperatorWithLanguageModeTest, Parameter) {
- JSOperatorBuilder javascript(zone());
- const SharedOperatorWithLanguageMode& sop = GetParam();
- const Operator* op = (javascript.*sop.constructor)(language_mode());
- EXPECT_EQ(language_mode(), OpParameter<LanguageMode>(op));
-}
-
-
-TEST_P(JSSharedOperatorWithLanguageModeTest, Properties) {
- JSOperatorBuilder javascript(zone());
- const SharedOperatorWithLanguageMode& sop = GetParam();
- const Operator* op = (javascript.*sop.constructor)(language_mode());
- EXPECT_EQ(sop.properties, op->properties());
-}
-
-
-INSTANTIATE_TEST_CASE_P(
- JSOperatorTest, JSSharedOperatorWithLanguageModeTest,
- ::testing::Combine(::testing::ValuesIn(kLanguageModes),
- ::testing::ValuesIn(kSharedOperatorsWithLanguageMode)));
-
-#endif // GTEST_HAS_COMBINE
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 6fc89bb0ea..e37d4a2913 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -92,18 +92,6 @@ class JSTypedLoweringTest : public TypedGraphTest {
return reducer.Reduce(node);
}
- Node* FrameState(Handle<SharedFunctionInfo> shared, Node* outer_frame_state) {
- Node* state_values = graph()->NewNode(common()->StateValues(0));
- return graph()->NewNode(
- common()->FrameState(BailoutId::None(),
- OutputFrameStateCombine::Ignore(),
- common()->CreateFrameStateFunctionInfo(
- FrameStateType::kJavaScriptFunction, 1, 0,
- shared, CALL_MAINTAINS_NATIVE_CONTEXT)),
- state_values, state_values, state_values, NumberConstant(0),
- UndefinedConstant(), outer_frame_state);
- }
-
Handle<JSArrayBuffer> NewArrayBuffer(void* bytes, size_t byte_length) {
Handle<JSArrayBuffer> buffer = factory()->NewJSArrayBuffer();
JSArrayBuffer::Setup(buffer, isolate(), true, bytes, byte_length);
@@ -435,15 +423,12 @@ TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndConstant) {
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- Reduction r = Reduce(
- graph()->NewNode(javascript()->ShiftLeft(language_mode, hints), lhs,
- NumberConstant(rhs), context, EmptyFrameState(),
- EmptyFrameState(), effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsNumberShiftLeft(lhs, IsNumberConstant(BitEq(rhs))));
- }
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ShiftLeft(hints), lhs, NumberConstant(rhs), context,
+ EmptyFrameState(), EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberShiftLeft(lhs, IsNumberConstant(BitEq(rhs))));
}
}
@@ -455,13 +440,11 @@ TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndUnsigned32) {
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftLeft(language_mode, hints), lhs, rhs, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberShiftLeft(lhs, rhs));
- }
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftLeft(hints), lhs,
+ rhs, context, EmptyFrameState(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberShiftLeft(lhs, rhs));
}
@@ -476,15 +459,12 @@ TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndConstant) {
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- Reduction r = Reduce(
- graph()->NewNode(javascript()->ShiftRight(language_mode, hints), lhs,
- NumberConstant(rhs), context, EmptyFrameState(),
- EmptyFrameState(), effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsNumberShiftRight(lhs, IsNumberConstant(BitEq(rhs))));
- }
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ShiftRight(hints), lhs, NumberConstant(rhs), context,
+ EmptyFrameState(), EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberShiftRight(lhs, IsNumberConstant(BitEq(rhs))));
}
}
@@ -496,13 +476,11 @@ TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndUnsigned32) {
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftRight(language_mode, hints), lhs, rhs, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberShiftRight(lhs, rhs));
- }
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRight(hints), lhs,
+ rhs, context, EmptyFrameState(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberShiftRight(lhs, rhs));
}
@@ -518,15 +496,12 @@ TEST_F(JSTypedLoweringTest,
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftRightLogical(language_mode, hints), lhs,
- NumberConstant(rhs), context, EmptyFrameState(), EmptyFrameState(),
- effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsNumberShiftRightLogical(lhs, IsNumberConstant(BitEq(rhs))));
- }
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ShiftRightLogical(hints), lhs, NumberConstant(rhs),
+ context, EmptyFrameState(), EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberShiftRightLogical(lhs, IsNumberConstant(BitEq(rhs))));
}
}
@@ -538,13 +513,11 @@ TEST_F(JSTypedLoweringTest, JSShiftRightLogicalWithUnsigned32AndUnsigned32) {
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- Reduction r = Reduce(graph()->NewNode(
- javascript()->ShiftRightLogical(language_mode, hints), lhs, rhs,
- context, EmptyFrameState(), EmptyFrameState(), effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberShiftRightLogical(lhs, rhs));
- }
+ Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRightLogical(hints),
+ lhs, rhs, context, EmptyFrameState(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberShiftRightLogical(lhs, rhs));
}
@@ -627,37 +600,34 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArray) {
NewArrayBuffer(backing_store, sizeof(backing_store));
VectorSlotPair feedback;
TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- Handle<JSTypedArray> array =
- factory()->NewJSTypedArray(type, buffer, 0, kLength);
- int const element_size = static_cast<int>(array->element_size());
-
- Node* key = Parameter(
- Type::Range(kMinInt / element_size, kMaxInt / element_size, zone()));
- Node* base = HeapConstant(array);
- Node* vector = UndefinedConstant();
- Node* context = UndefinedConstant();
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Reduction r = Reduce(
- graph()->NewNode(javascript()->LoadProperty(language_mode, feedback),
- base, key, vector, context, EmptyFrameState(),
- EmptyFrameState(), effect, control));
+ Handle<JSTypedArray> array =
+ factory()->NewJSTypedArray(type, buffer, 0, kLength);
+ int const element_size = static_cast<int>(array->element_size());
+
+ Node* key = Parameter(
+ Type::Range(kMinInt / element_size, kMaxInt / element_size, zone()));
+ Node* base = HeapConstant(array);
+ Node* vector = UndefinedConstant();
+ Node* context = UndefinedConstant();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->LoadProperty(feedback), base, key, vector, context,
+ EmptyFrameState(), EmptyFrameState(), effect, control));
- Matcher<Node*> offset_matcher =
- element_size == 1
- ? key
- : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
+ Matcher<Node*> offset_matcher =
+ element_size == 1
+ ? key
+ : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsLoadBuffer(BufferAccess(type),
- IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
- offset_matcher,
- IsNumberConstant(array->byte_length()->Number()), effect,
- control));
- }
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsLoadBuffer(BufferAccess(type),
+ IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+ offset_matcher,
+ IsNumberConstant(array->byte_length()->Number()), effect,
+ control));
}
}
@@ -669,32 +639,29 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArrayWithSafeKey) {
NewArrayBuffer(backing_store, sizeof(backing_store));
VectorSlotPair feedback;
TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- Handle<JSTypedArray> array =
- factory()->NewJSTypedArray(type, buffer, 0, kLength);
- ElementAccess access = AccessBuilder::ForTypedArrayElement(type, true);
-
- int min = random_number_generator()->NextInt(static_cast<int>(kLength));
- int max = random_number_generator()->NextInt(static_cast<int>(kLength));
- if (min > max) std::swap(min, max);
- Node* key = Parameter(Type::Range(min, max, zone()));
- Node* base = HeapConstant(array);
- Node* vector = UndefinedConstant();
- Node* context = UndefinedConstant();
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Reduction r = Reduce(
- graph()->NewNode(javascript()->LoadProperty(language_mode, feedback),
- base, key, vector, context, EmptyFrameState(),
- EmptyFrameState(), effect, control));
+ Handle<JSTypedArray> array =
+ factory()->NewJSTypedArray(type, buffer, 0, kLength);
+ ElementAccess access = AccessBuilder::ForTypedArrayElement(type, true);
+
+ int min = random_number_generator()->NextInt(static_cast<int>(kLength));
+ int max = random_number_generator()->NextInt(static_cast<int>(kLength));
+ if (min > max) std::swap(min, max);
+ Node* key = Parameter(Type::Range(min, max, zone()));
+ Node* base = HeapConstant(array);
+ Node* vector = UndefinedConstant();
+ Node* context = UndefinedConstant();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->LoadProperty(feedback), base, key, vector, context,
+ EmptyFrameState(), EmptyFrameState(), effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsLoadElement(access,
- IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
- key, effect, control));
- }
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsLoadElement(access,
+ IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+ key, effect, control));
}
}
@@ -847,15 +814,12 @@ TEST_F(JSTypedLoweringTest, JSLoadNamedStringLength) {
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->LoadNamed(language_mode, name, feedback),
- receiver, vector, context, EmptyFrameState(),
- EmptyFrameState(), effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsLoadField(AccessBuilder::ForStringLength(),
- receiver, effect, control));
- }
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->LoadNamed(name, feedback), receiver, vector, context,
+ EmptyFrameState(), EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsLoadField(AccessBuilder::ForStringLength(),
+ receiver, effect, control));
}
@@ -869,14 +833,11 @@ TEST_F(JSTypedLoweringTest, JSLoadNamedFunctionPrototype) {
Node* const context = Parameter(Type::Internal(), 2);
Node* const effect = graph()->start();
Node* const control = graph()->start();
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->LoadNamed(language_mode, name, feedback),
- receiver, vector, context, EmptyFrameState(),
- EmptyFrameState(), effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsHeapConstant(function_prototype));
- }
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->LoadNamed(name, feedback), receiver, vector, context,
+ EmptyFrameState(), EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsHeapConstant(function_prototype));
}
@@ -886,7 +847,6 @@ TEST_F(JSTypedLoweringTest, JSLoadNamedFunctionPrototype) {
TEST_F(JSTypedLoweringTest, JSAddWithString) {
BinaryOperationHints const hints = BinaryOperationHints::Any();
- TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
Node* lhs = Parameter(Type::String(), 0);
Node* rhs = Parameter(Type::String(), 1);
Node* context = Parameter(Type::Any(), 2);
@@ -894,300 +854,15 @@ TEST_F(JSTypedLoweringTest, JSAddWithString) {
Node* frame_state1 = EmptyFrameState();
Node* effect = graph()->start();
Node* control = graph()->start();
- Reduction r = Reduce(
- graph()->NewNode(javascript()->Add(language_mode, hints), lhs, rhs,
- context, frame_state0, frame_state1, effect, control));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->Add(hints), lhs, rhs, context,
+ frame_state0, frame_state1, effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsCall(_, IsHeapConstant(CodeFactory::StringAdd(
isolate(), STRING_ADD_CHECK_NONE,
NOT_TENURED).code()),
lhs, rhs, context, frame_state0, effect, control));
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// JSCreate
-
-
-TEST_F(JSTypedLoweringTest, JSCreate) {
- Handle<JSFunction> function = isolate()->object_function();
- Node* const target = Parameter(Type::Constant(function, graph()->zone()));
- Node* const context = Parameter(Type::Any());
- Node* const effect = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->Create(), target, target,
- context, EmptyFrameState(), effect));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsFinishRegion(
- IsAllocate(IsNumberConstant(function->initial_map()->instance_size()),
- IsBeginRegion(effect), _),
- _));
-}
-
-
-// -----------------------------------------------------------------------------
-// JSCreateArguments
-
-
-TEST_F(JSTypedLoweringTest, JSCreateArgumentsViaStub) {
- Node* const closure = Parameter(Type::Any());
- Node* const context = UndefinedConstant();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
- Node* const frame_state = FrameState(shared, graph()->start());
- Reduction r = Reduce(
- graph()->NewNode(javascript()->CreateArguments(
- CreateArgumentsParameters::kMappedArguments, 0),
- closure, context, frame_state, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsCall(_, IsHeapConstant(CodeFactory::ArgumentsAccess(
- isolate(), false, false)
- .code()),
- closure, IsNumberConstant(0), _, effect, control));
-}
-
-
-TEST_F(JSTypedLoweringTest, JSCreateArgumentsRestArrayViaStub) {
- Node* const closure = Parameter(Type::Any());
- Node* const context = UndefinedConstant();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
- Node* const frame_state = FrameState(shared, graph()->start());
- Reduction r = Reduce(graph()->NewNode(
- javascript()->CreateArguments(CreateArgumentsParameters::kRestArray, 0),
- closure, context, frame_state, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsCall(_,
- IsHeapConstant(CodeFactory::RestArgumentsAccess(isolate()).code()),
- IsNumberConstant(0), _, IsNumberConstant(0), _, effect, control));
-}
-
-
-TEST_F(JSTypedLoweringTest, JSCreateArgumentsInlinedMapped) {
- Node* const closure = Parameter(Type::Any());
- Node* const context = UndefinedConstant();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
- Node* const frame_state_outer = FrameState(shared, graph()->start());
- Node* const frame_state_inner = FrameState(shared, frame_state_outer);
- Reduction r = Reduce(
- graph()->NewNode(javascript()->CreateArguments(
- CreateArgumentsParameters::kMappedArguments, 0),
- closure, context, frame_state_inner, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFinishRegion(
- IsAllocate(IsNumberConstant(Heap::kSloppyArgumentsObjectSize),
- _, control),
- _));
-}
-
-
-TEST_F(JSTypedLoweringTest, JSCreateArgumentsInlinedUnmapped) {
- Node* const closure = Parameter(Type::Any());
- Node* const context = UndefinedConstant();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
- Node* const frame_state_outer = FrameState(shared, graph()->start());
- Node* const frame_state_inner = FrameState(shared, frame_state_outer);
- Reduction r = Reduce(
- graph()->NewNode(javascript()->CreateArguments(
- CreateArgumentsParameters::kUnmappedArguments, 0),
- closure, context, frame_state_inner, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFinishRegion(
- IsAllocate(IsNumberConstant(Heap::kStrictArgumentsObjectSize),
- _, control),
- _));
-}
-
-
-TEST_F(JSTypedLoweringTest, JSCreateArgumentsInlinedRestArray) {
- Node* const closure = Parameter(Type::Any());
- Node* const context = UndefinedConstant();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
- Node* const frame_state_outer = FrameState(shared, graph()->start());
- Node* const frame_state_inner = FrameState(shared, frame_state_outer);
- Reduction r = Reduce(graph()->NewNode(
- javascript()->CreateArguments(CreateArgumentsParameters::kRestArray, 0),
- closure, context, frame_state_inner, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFinishRegion(
- IsAllocate(IsNumberConstant(JSArray::kSize), _, control), _));
-}
-
-
-// -----------------------------------------------------------------------------
-// JSCreateClosure
-
-
-TEST_F(JSTypedLoweringTest, JSCreateClosure) {
- Node* const context = UndefinedConstant();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
- Reduction r =
- Reduce(graph()->NewNode(javascript()->CreateClosure(shared, NOT_TENURED),
- context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsCall(_, IsHeapConstant(CodeFactory::FastNewClosure(
- isolate(), shared->language_mode(),
- shared->kind()).code()),
- IsHeapConstant(shared), effect, control));
-}
-
-
-// -----------------------------------------------------------------------------
-// JSCreateLiteralArray
-
-
-TEST_F(JSTypedLoweringTest, JSCreateLiteralArray) {
- Handle<FixedArray> const constant_elements = factory()->NewFixedArray(12);
- int const literal_flags = ArrayLiteral::kShallowElements;
- int const literal_index = 1;
- Node* const closure = Parameter(0);
- Node* const context = Parameter(1);
- Node* const frame_state = EmptyFrameState();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->CreateLiteralArray(
- constant_elements, literal_flags, literal_index),
- closure, context, frame_state, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsCall(_, IsHeapConstant(
- CodeFactory::FastCloneShallowArray(isolate()).code()),
- closure, IsNumberConstant(literal_index),
- IsHeapConstant(constant_elements), context, frame_state, effect,
- control));
-}
-
-
-// -----------------------------------------------------------------------------
-// JSCreateLiteralObject
-
-
-TEST_F(JSTypedLoweringTest, JSCreateLiteralObject) {
- Handle<FixedArray> const constant_properties =
- factory()->NewFixedArray(6 * 2);
- int const literal_flags = ObjectLiteral::kShallowProperties;
- int const literal_index = 1;
- Node* const closure = Parameter(0);
- Node* const context = Parameter(1);
- Node* const frame_state = EmptyFrameState();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(
- graph()->NewNode(javascript()->CreateLiteralObject(
- constant_properties, literal_flags, literal_index),
- closure, context, frame_state, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsCall(_, IsHeapConstant(
- CodeFactory::FastCloneShallowObject(isolate(), 6).code()),
- closure, IsNumberConstant(literal_index),
- IsHeapConstant(constant_properties), _, context, frame_state,
- effect, control));
-}
-
-
-// -----------------------------------------------------------------------------
-// JSCreateFunctionContext
-
-
-TEST_F(JSTypedLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
- Node* const closure = Parameter(Type::Any());
- Node* const context = Parameter(Type::Any());
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r =
- Reduce(graph()->NewNode(javascript()->CreateFunctionContext(8), closure,
- context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
- 8 + Context::MIN_CONTEXT_SLOTS)),
- IsBeginRegion(_), control),
- _));
-}
-
-
-TEST_F(JSTypedLoweringTest, JSCreateFunctionContextViaStub) {
- Node* const closure = Parameter(Type::Any());
- Node* const context = Parameter(Type::Any());
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r =
- Reduce(graph()->NewNode(javascript()->CreateFunctionContext(32), closure,
- context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsCall(_, IsHeapConstant(
- CodeFactory::FastNewContext(isolate(), 32).code()),
- closure, context, effect, control));
-}
-
-
-// -----------------------------------------------------------------------------
-// JSCreateWithContext
-
-
-TEST_F(JSTypedLoweringTest, JSCreateWithContext) {
- Node* const object = Parameter(Type::Receiver());
- Node* const closure = Parameter(Type::Function());
- Node* const context = Parameter(Type::Any());
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction r =
- Reduce(graph()->NewNode(javascript()->CreateWithContext(), object,
- closure, context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
- Context::MIN_CONTEXT_SLOTS)),
- IsBeginRegion(_), control),
- _));
-}
-
-
-// -----------------------------------------------------------------------------
-// JSCreateCatchContext
-
-
-TEST_F(JSTypedLoweringTest, JSCreateCatchContext) {
- Handle<String> name = factory()->length_string();
- Node* const exception = Parameter(Type::Receiver());
- Node* const closure = Parameter(Type::Function());
- Node* const context = Parameter(Type::Any());
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction r =
- Reduce(graph()->NewNode(javascript()->CreateCatchContext(name), exception,
- closure, context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
- Context::MIN_CONTEXT_SLOTS + 1)),
- IsBeginRegion(_), control),
- _));
}
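Note: the hunks above all track one API change: LanguageMode is gone from the JSOperatorBuilder methods (LoadProperty, LoadNamed, Add), so each test drops its TRACED_FOREACH(LanguageMode, ...) wrapper and re-indents the body. A minimal before/after sketch of the operator construction:

    // Before: JS operators were parameterized by language mode.
    javascript()->LoadNamed(language_mode, name, feedback)
    // After: only the property name and feedback slot remain.
    javascript()->LoadNamed(name, feedback)

The JSCreate* tests deleted at the end of the hunk go away wholesale, most likely tracking the move of JSCreate lowering out of JSTypedLowering; the destination is not visible in this part of the diff.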
diff --git a/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc b/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
index b77830aa5e..efc823dba6 100644
--- a/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
@@ -61,7 +61,7 @@ class LivenessAnalysisTest : public GraphTest {
const FrameStateFunctionInfo* state_info =
common()->CreateFrameStateFunctionInfo(
FrameStateType::kJavaScriptFunction, 0, locals_count_,
- Handle<SharedFunctionInfo>(), CALL_MAINTAINS_NATIVE_CONTEXT);
+ Handle<SharedFunctionInfo>());
const Operator* op = common()->FrameState(
BailoutId(ast_num), OutputFrameStateCombine::Ignore(), state_info);
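Note: CreateFrameStateFunctionInfo loses its trailing CALL_MAINTAINS_NATIVE_CONTEXT argument; the context-calling-mode distinction apparently no longer lives in frame-state metadata. The call as the test now makes it:

    const FrameStateFunctionInfo* state_info =
        common()->CreateFrameStateFunctionInfo(
            FrameStateType::kJavaScriptFunction, 0, locals_count_,
            Handle<SharedFunctionInfo>());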
diff --git a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
index 413c58b6fe..5ccd0c6727 100644
--- a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/move-optimizer.h"
+#include "src/compiler/pipeline.h"
#include "test/unittests/compiler/instruction-sequence-unittest.h"
namespace v8 {
@@ -227,8 +228,8 @@ TEST_F(MoveOptimizerTest, GapsCanMoveOverInstruction) {
ctant_def->GetParallelMove(Instruction::GapPosition::END);
ParallelMove* last_start =
last->GetParallelMove(Instruction::GapPosition::START);
- CHECK(inst1_start == nullptr || inst1_start->size() == 0);
- CHECK(inst1_end == nullptr || inst1_end->size() == 0);
+ CHECK(inst1_start == nullptr || NonRedundantSize(inst1_start) == 0);
+ CHECK(inst1_end == nullptr || NonRedundantSize(inst1_end) == 0);
CHECK(last_start->size() == 2);
int redundants = 0;
int assignment = 0;
@@ -246,6 +247,98 @@ TEST_F(MoveOptimizerTest, GapsCanMoveOverInstruction) {
}
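+// The move (0 -> 1) appears in the gap of both predecessors, so the
+// optimizer can sink a single copy of it into the join block; the
+// per-branch moves (2 -> 3) and (4 -> 5) are not shared and must stay put.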
+TEST_F(MoveOptimizerTest, SubsetMovesMerge) {
+ StartBlock();
+ EndBlock(Branch(Imm(), 1, 2));
+
+ StartBlock();
+ EndBlock(Jump(2));
+ Instruction* last_move_b1 = LastInstruction();
+ AddMove(last_move_b1, Reg(0), Reg(1));
+ AddMove(last_move_b1, Reg(2), Reg(3));
+
+ StartBlock();
+ EndBlock(Jump(1));
+ Instruction* last_move_b2 = LastInstruction();
+ AddMove(last_move_b2, Reg(0), Reg(1));
+ AddMove(last_move_b2, Reg(4), Reg(5));
+
+ StartBlock();
+ EndBlock(Last());
+
+ Instruction* last = LastInstruction();
+
+ Optimize();
+
+ ParallelMove* last_move = last->parallel_moves()[0];
+ CHECK_EQ(1, NonRedundantSize(last_move));
+ CHECK(Contains(last_move, Reg(0), Reg(1)));
+
+ ParallelMove* b1_move = last_move_b1->parallel_moves()[0];
+ CHECK_EQ(1, NonRedundantSize(b1_move));
+ CHECK(Contains(b1_move, Reg(2), Reg(3)));
+
+ ParallelMove* b2_move = last_move_b2->parallel_moves()[0];
+ CHECK_EQ(1, NonRedundantSize(b2_move));
+ CHECK(Contains(b2_move, Reg(4), Reg(5)));
+}
+
+
+TEST_F(MoveOptimizerTest, GapConflictSubsetMovesDoNotMerge) {
+ StartBlock();
+ EndBlock(Branch(Imm(), 1, 2));
+
+ StartBlock();
+ EndBlock(Jump(2));
+ Instruction* last_move_b1 = LastInstruction();
+ AddMove(last_move_b1, Reg(0), Reg(1));
+ AddMove(last_move_b1, Reg(2), Reg(0));
+ AddMove(last_move_b1, Reg(4), Reg(5));
+
+ StartBlock();
+ EndBlock(Jump(1));
+ Instruction* last_move_b2 = LastInstruction();
+ AddMove(last_move_b2, Reg(0), Reg(1));
+ AddMove(last_move_b2, Reg(4), Reg(5));
+
+ StartBlock();
+ EndBlock(Last());
+
+ Instruction* last = LastInstruction();
+
+ Optimize();
+
+ ParallelMove* last_move = last->parallel_moves()[0];
+ CHECK_EQ(1, NonRedundantSize(last_move));
+ CHECK(Contains(last_move, Reg(4), Reg(5)));
+
+ ParallelMove* b1_move = last_move_b1->parallel_moves()[0];
+ CHECK_EQ(2, NonRedundantSize(b1_move));
+ CHECK(Contains(b1_move, Reg(0), Reg(1)));
+ CHECK(Contains(b1_move, Reg(2), Reg(0)));
+
+ ParallelMove* b2_move = last_move_b2->parallel_moves()[0];
+ CHECK_EQ(1, NonRedundantSize(b2_move));
+  CHECK(Contains(b2_move, Reg(0), Reg(1)));
+}
+
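+// A gap move is dead if its destination is redefined before being read:
+// here EmitOI outputs into Reg(1) right after the (0 -> 1) move, so the
+// optimizer should drop the move, leaving both gaps with no live moves.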
+TEST_F(MoveOptimizerTest, ClobberedDestinationsAreEliminated) {
+ StartBlock();
+ EmitNop();
+ Instruction* first_instr = LastInstruction();
+ AddMove(first_instr, Reg(0), Reg(1));
+ EmitOI(Reg(1), 0, nullptr);
+ Instruction* last_instr = LastInstruction();
+ EndBlock();
+ Optimize();
+
+ ParallelMove* first_move = first_instr->parallel_moves()[0];
+ CHECK_EQ(0, NonRedundantSize(first_move));
+
+ ParallelMove* last_move = last_instr->parallel_moves()[0];
+ CHECK_EQ(0, NonRedundantSize(last_move));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
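Note: NonRedundantSize() and Contains() used in the new tests are fixture helpers defined earlier in this file, outside the hunks shown; their assumed shape, for reading the assertions:

    // Counts the moves in a ParallelMove, ignoring redundant (no-op) entries.
    int NonRedundantSize(ParallelMove* moves);
    // True if 'moves' contains a move from operand 'src' to operand 'dest'.
    bool Contains(ParallelMove* moves, TestOperand src, TestOperand dest);

Nothing in the hunks shown uses the newly included src/compiler/pipeline.h directly; it presumably serves code elsewhere in the file.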
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 54168ee70b..ee4cf5446e 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -323,13 +323,30 @@ class IsReturnMatcher final : public NodeMatcher {
const Matcher<Node*>& control_matcher)
: NodeMatcher(IrOpcode::kReturn),
value_matcher_(value_matcher),
+ value2_matcher_(_),
effect_matcher_(effect_matcher),
- control_matcher_(control_matcher) {}
+ control_matcher_(control_matcher),
+ has_second_return_value_(false) {}
+
+ IsReturnMatcher(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kReturn),
+ value_matcher_(value_matcher),
+ value2_matcher_(value2_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher),
+ has_second_return_value_(true) {}
void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose value (";
value_matcher_.DescribeTo(os);
+ if (has_second_return_value_) {
+ *os << ") and second value (";
+ value2_matcher_.DescribeTo(os);
+ }
*os << ") and effect (";
effect_matcher_.DescribeTo(os);
*os << ") and control (";
@@ -341,6 +358,9 @@ class IsReturnMatcher final : public NodeMatcher {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"value", value_matcher_, listener) &&
+ (!has_second_return_value_ ||
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "value2", value2_matcher_, listener)) &&
PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
effect_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetControlInput(node),
@@ -349,8 +369,10 @@ class IsReturnMatcher final : public NodeMatcher {
private:
const Matcher<Node*> value_matcher_;
+ const Matcher<Node*> value2_matcher_;
const Matcher<Node*> effect_matcher_;
const Matcher<Node*> control_matcher_;
+ bool has_second_return_value_;
};
@@ -1467,7 +1489,6 @@ Matcher<Node*> IsDead() {
return MakeMatcher(new NodeMatcher(IrOpcode::kDead));
}
-
Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher) {
return MakeMatcher(new IsControl1Matcher(IrOpcode::kEnd, control0_matcher));
}
@@ -1577,6 +1598,13 @@ Matcher<Node*> IsReturn(const Matcher<Node*>& value_matcher,
new IsReturnMatcher(value_matcher, effect_matcher, control_matcher));
}
+Matcher<Node*> IsReturn2(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsReturnMatcher(value_matcher, value2_matcher,
+ effect_matcher, control_matcher));
+}
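+// IsReturn2 matches a Return node that carries two value inputs, via the
+// two-value IsReturnMatcher constructor added above.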
Matcher<Node*> IsTerminate(const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
@@ -1675,6 +1703,15 @@ Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
return MakeMatcher(new IsProjectionMatcher(index_matcher, base_matcher));
}
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ return MakeMatcher(new IsCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher,
@@ -2106,6 +2143,7 @@ IS_UNOP_MATCHER(Float64ExtractLowWord32)
IS_UNOP_MATCHER(Float64ExtractHighWord32)
IS_UNOP_MATCHER(NumberToInt32)
IS_UNOP_MATCHER(NumberToUint32)
+IS_UNOP_MATCHER(ObjectIsReceiver)
IS_UNOP_MATCHER(ObjectIsSmi)
IS_UNOP_MATCHER(Word32Clz)
#undef IS_UNOP_MATCHER
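Note: the one-value IsCall overload and IsReturn2 give tests lighter-weight matchers for the new call/return shapes. A hypothetical assertion (node and matcher arguments illustrative):

    EXPECT_THAT(node, IsReturn2(IsInt32Constant(0), IsInt32Constant(1),
                                graph()->start(), graph()->start()));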
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 8592f30566..03f2a3b88f 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -17,11 +17,8 @@ class ExternalReference;
template <typename T>
class Handle;
class HeapObject;
-template <class>
-class TypeImpl;
+class Type;
enum TypeofMode : int;
-struct ZoneTypeConfig;
-typedef TypeImpl<ZoneTypeConfig> Type;
namespace compiler {
@@ -70,6 +67,10 @@ Matcher<Node*> IsFinishRegion(const Matcher<Node*>& value_matcher,
Matcher<Node*> IsReturn(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsReturn2(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsTerminate(const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsExternalConstant(
@@ -246,6 +247,7 @@ Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsObjectIsReceiver(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsObjectIsSmi(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
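Note: Type is no longer a typedef over a template instantiation, so the header's forward declaration shrinks:

    // Before
    template <class> class TypeImpl;
    typedef TypeImpl<ZoneTypeConfig> Type;
    // After
    class Type;

The same flattening shows up below in simplified-operator-unittest.cc (types-inl.h becomes types.h) and typer-unittest.cc (Type::RangeType becomes RangeType).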
diff --git a/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
new file mode 100644
index 0000000000..713ee6e742
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/scheduler-rpo-unittest.cc
@@ -0,0 +1,533 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::AnyOf;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SchedulerRPOTest : public TestWithZone {
+ public:
+ SchedulerRPOTest() {}
+
+ void CheckRPONumbers(BasicBlockVector* order, size_t expected,
+ bool loops_allowed) {
+ CHECK(expected == order->size());
+ for (int i = 0; i < static_cast<int>(order->size()); i++) {
+ CHECK(order->at(i)->rpo_number() == i);
+ if (!loops_allowed) {
+ CHECK(!order->at(i)->loop_end());
+ CHECK(!order->at(i)->loop_header());
+ }
+ }
+ }
+
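+  // Verifies that the loop's blocks occupy the contiguous RPO interval
+  // [header->rpo_number(), header->loop_end()->rpo_number()) and that each
+  // block in 'blocks' lies in that interval and belongs to this loop.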
+ void CheckLoop(BasicBlockVector* order, BasicBlock** blocks, int body_size) {
+ BasicBlock* header = blocks[0];
+ BasicBlock* end = header->loop_end();
+ CHECK(end);
+ CHECK_GT(end->rpo_number(), 0);
+ CHECK_EQ(body_size, end->rpo_number() - header->rpo_number());
+ for (int i = 0; i < body_size; i++) {
+ CHECK_GE(blocks[i]->rpo_number(), header->rpo_number());
+ CHECK_LT(blocks[i]->rpo_number(), end->rpo_number());
+ CHECK(header->LoopContains(blocks[i]));
+ CHECK(header->IsLoopHeader() || blocks[i]->loop_header() == header);
+ }
+ if (header->rpo_number() > 0) {
+ CHECK_NE(order->at(header->rpo_number() - 1)->loop_header(), header);
+ }
+ if (end->rpo_number() < static_cast<int>(order->size())) {
+ CHECK_NE(order->at(end->rpo_number())->loop_header(), header);
+ }
+ }
+
+ struct TestLoop {
+ int count;
+ BasicBlock** nodes;
+ BasicBlock* header() { return nodes[0]; }
+ BasicBlock* last() { return nodes[count - 1]; }
+ ~TestLoop() { delete[] nodes; }
+ };
+
+ TestLoop* CreateLoop(Schedule* schedule, int count) {
+ TestLoop* loop = new TestLoop();
+ loop->count = count;
+ loop->nodes = new BasicBlock*[count];
+ for (int i = 0; i < count; i++) {
+ loop->nodes[i] = schedule->NewBasicBlock();
+ if (i > 0) {
+ schedule->AddSuccessorForTesting(loop->nodes[i - 1], loop->nodes[i]);
+ }
+ }
+ schedule->AddSuccessorForTesting(loop->nodes[count - 1], loop->nodes[0]);
+ return loop;
+ }
+};
+
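+// CreateLoop() returns a caller-owned TestLoop; the tests below either hold
+// it in a base::SmartPointer or (as in LoopOutloops1) delete it explicitly.
+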
+TEST_F(SchedulerRPOTest, Degenerate1) {
+ Schedule schedule(zone());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 1, false);
+ EXPECT_EQ(schedule.start(), order->at(0));
+}
+
+TEST_F(SchedulerRPOTest, Degenerate2) {
+ Schedule schedule(zone());
+
+ schedule.AddGoto(schedule.start(), schedule.end());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 2, false);
+ EXPECT_EQ(schedule.start(), order->at(0));
+ EXPECT_EQ(schedule.end(), order->at(1));
+}
+
+TEST_F(SchedulerRPOTest, Line) {
+ for (int i = 0; i < 10; i++) {
+ Schedule schedule(zone());
+
+ BasicBlock* last = schedule.start();
+ for (int j = 0; j < i; j++) {
+ BasicBlock* block = schedule.NewBasicBlock();
+ block->set_deferred(i & 1);
+ schedule.AddGoto(last, block);
+ last = block;
+ }
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 1 + i, false);
+
+ for (size_t i = 0; i < schedule.BasicBlockCount(); i++) {
+ BasicBlock* block = schedule.GetBlockById(BasicBlock::Id::FromSize(i));
+ if (block->rpo_number() >= 0 && block->SuccessorCount() == 1) {
+ EXPECT_EQ(block->rpo_number() + 1, block->SuccessorAt(0)->rpo_number());
+ }
+ }
+ }
+}
+
+TEST_F(SchedulerRPOTest, SelfLoop) {
+ Schedule schedule(zone());
+ schedule.AddSuccessorForTesting(schedule.start(), schedule.start());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 1, true);
+ BasicBlock* loop[] = {schedule.start()};
+ CheckLoop(order, loop, 1);
+}
+
+TEST_F(SchedulerRPOTest, EntryLoop) {
+ Schedule schedule(zone());
+ BasicBlock* body = schedule.NewBasicBlock();
+ schedule.AddSuccessorForTesting(schedule.start(), body);
+ schedule.AddSuccessorForTesting(body, schedule.start());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 2, true);
+ BasicBlock* loop[] = {schedule.start(), body};
+ CheckLoop(order, loop, 2);
+}
+
+TEST_F(SchedulerRPOTest, EndLoop) {
+ Schedule schedule(zone());
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
+ schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 3, true);
+ CheckLoop(order, loop1->nodes, loop1->count);
+}
+
+TEST_F(SchedulerRPOTest, EndLoopNested) {
+ Schedule schedule(zone());
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
+ schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), schedule.start());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 3, true);
+ CheckLoop(order, loop1->nodes, loop1->count);
+}
+
+TEST_F(SchedulerRPOTest, Diamond) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(A, C);
+ schedule.AddSuccessorForTesting(B, D);
+ schedule.AddSuccessorForTesting(C, D);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 4, false);
+
+ EXPECT_EQ(0, A->rpo_number());
+ EXPECT_THAT(B->rpo_number(), AnyOf(1, 2));
+ EXPECT_THAT(C->rpo_number(), AnyOf(1, 2));
+ EXPECT_EQ(3, D->rpo_number());
+}
+
+TEST_F(SchedulerRPOTest, Loop1) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, B);
+ schedule.AddSuccessorForTesting(C, D);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 4, true);
+ BasicBlock* loop[] = {B, C};
+ CheckLoop(order, loop, 2);
+}
+
+TEST_F(SchedulerRPOTest, Loop2) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, B);
+ schedule.AddSuccessorForTesting(B, D);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 4, true);
+ BasicBlock* loop[] = {B, C};
+ CheckLoop(order, loop, 2);
+}
+
+TEST_F(SchedulerRPOTest, LoopN) {
+ for (int i = 0; i < 11; i++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.NewBasicBlock();
+ BasicBlock* F = schedule.NewBasicBlock();
+ BasicBlock* G = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, D);
+ schedule.AddSuccessorForTesting(D, E);
+ schedule.AddSuccessorForTesting(E, F);
+ schedule.AddSuccessorForTesting(F, B);
+ schedule.AddSuccessorForTesting(B, G);
+
+ // Throw in extra backedges from time to time.
+ if (i == 1) schedule.AddSuccessorForTesting(B, B);
+ if (i == 2) schedule.AddSuccessorForTesting(C, B);
+ if (i == 3) schedule.AddSuccessorForTesting(D, B);
+ if (i == 4) schedule.AddSuccessorForTesting(E, B);
+ if (i == 5) schedule.AddSuccessorForTesting(F, B);
+
+ // Throw in extra loop exits from time to time.
+ if (i == 6) schedule.AddSuccessorForTesting(B, G);
+ if (i == 7) schedule.AddSuccessorForTesting(C, G);
+ if (i == 8) schedule.AddSuccessorForTesting(D, G);
+ if (i == 9) schedule.AddSuccessorForTesting(E, G);
+ if (i == 10) schedule.AddSuccessorForTesting(F, G);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 7, true);
+ BasicBlock* loop[] = {B, C, D, E, F};
+ CheckLoop(order, loop, 5);
+ }
+}
+
+TEST_F(SchedulerRPOTest, LoopNest1) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.NewBasicBlock();
+ BasicBlock* F = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, D);
+ schedule.AddSuccessorForTesting(D, C);
+ schedule.AddSuccessorForTesting(D, E);
+ schedule.AddSuccessorForTesting(E, B);
+ schedule.AddSuccessorForTesting(E, F);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 6, true);
+ BasicBlock* loop1[] = {B, C, D, E};
+ CheckLoop(order, loop1, 4);
+
+ BasicBlock* loop2[] = {C, D};
+ CheckLoop(order, loop2, 2);
+}
+
+TEST_F(SchedulerRPOTest, LoopNest2) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.NewBasicBlock();
+ BasicBlock* F = schedule.NewBasicBlock();
+ BasicBlock* G = schedule.NewBasicBlock();
+ BasicBlock* H = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, D);
+ schedule.AddSuccessorForTesting(D, E);
+ schedule.AddSuccessorForTesting(E, F);
+ schedule.AddSuccessorForTesting(F, G);
+ schedule.AddSuccessorForTesting(G, H);
+
+ schedule.AddSuccessorForTesting(E, D);
+ schedule.AddSuccessorForTesting(F, C);
+ schedule.AddSuccessorForTesting(G, B);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 8, true);
+ BasicBlock* loop1[] = {B, C, D, E, F, G};
+ CheckLoop(order, loop1, 6);
+
+ BasicBlock* loop2[] = {C, D, E, F};
+ CheckLoop(order, loop2, 4);
+
+ BasicBlock* loop3[] = {D, E};
+ CheckLoop(order, loop3, 2);
+}
+
+TEST_F(SchedulerRPOTest, LoopFollow1) {
+ Schedule schedule(zone());
+
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+ base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->header(), loop2->header());
+ schedule.AddSuccessorForTesting(loop2->last(), E);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+
+ EXPECT_EQ(schedule.BasicBlockCount(), order->size());
+ CheckLoop(order, loop1->nodes, loop1->count);
+ CheckLoop(order, loop2->nodes, loop2->count);
+}
+
+TEST_F(SchedulerRPOTest, LoopFollow2) {
+ Schedule schedule(zone());
+
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+ base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* S = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->header(), S);
+ schedule.AddSuccessorForTesting(S, loop2->header());
+ schedule.AddSuccessorForTesting(loop2->last(), E);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+
+ EXPECT_EQ(schedule.BasicBlockCount(), order->size());
+ CheckLoop(order, loop1->nodes, loop1->count);
+ CheckLoop(order, loop2->nodes, loop2->count);
+}
+
+TEST_F(SchedulerRPOTest, LoopFollowN) {
+ for (int size = 1; size < 5; size++) {
+ for (int exit = 0; exit < size; exit++) {
+ Schedule schedule(zone());
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, size));
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->nodes[exit], loop2->header());
+ schedule.AddSuccessorForTesting(loop2->nodes[exit], E);
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+
+ EXPECT_EQ(schedule.BasicBlockCount(), order->size());
+ CheckLoop(order, loop1->nodes, loop1->count);
+ CheckLoop(order, loop2->nodes, loop2->count);
+ }
+ }
+}
+
+TEST_F(SchedulerRPOTest, NestedLoopFollow1) {
+ Schedule schedule(zone());
+
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+ base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->header(), loop2->header());
+ schedule.AddSuccessorForTesting(loop2->last(), C);
+ schedule.AddSuccessorForTesting(C, E);
+ schedule.AddSuccessorForTesting(C, B);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+
+ EXPECT_EQ(schedule.BasicBlockCount(), order->size());
+ CheckLoop(order, loop1->nodes, loop1->count);
+ CheckLoop(order, loop2->nodes, loop2->count);
+
+ BasicBlock* loop3[] = {B, loop1->nodes[0], loop2->nodes[0], C};
+ CheckLoop(order, loop3, 4);
+}
+
+TEST_F(SchedulerRPOTest, LoopBackedges1) {
+ int size = 8;
+ for (int i = 0; i < size; i++) {
+ for (int j = 0; j < size; j++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
+
+ schedule.AddSuccessorForTesting(loop1->nodes[i], loop1->header());
+ schedule.AddSuccessorForTesting(loop1->nodes[j], E);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+ CheckLoop(order, loop1->nodes, loop1->count);
+ }
+ }
+}
+
+TEST_F(SchedulerRPOTest, LoopOutedges1) {
+ int size = 8;
+ for (int i = 0; i < size; i++) {
+ for (int j = 0; j < size; j++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.end();
+
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
+
+ schedule.AddSuccessorForTesting(loop1->nodes[i], loop1->header());
+ schedule.AddSuccessorForTesting(loop1->nodes[j], D);
+ schedule.AddSuccessorForTesting(D, E);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+ CheckLoop(order, loop1->nodes, loop1->count);
+ }
+ }
+}
+
+TEST_F(SchedulerRPOTest, LoopOutedges2) {
+ int size = 8;
+ for (int i = 0; i < size; i++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
+
+ for (int j = 0; j < size; j++) {
+ BasicBlock* O = schedule.NewBasicBlock();
+ schedule.AddSuccessorForTesting(loop1->nodes[j], O);
+ schedule.AddSuccessorForTesting(O, E);
+ }
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+ CheckLoop(order, loop1->nodes, loop1->count);
+ }
+}
+
+TEST_F(SchedulerRPOTest, LoopOutloops1) {
+ int size = 8;
+ for (int i = 0; i < size; i++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
+
+ TestLoop** loopN = new TestLoop*[size];
+ for (int j = 0; j < size; j++) {
+ loopN[j] = CreateLoop(&schedule, 2);
+ schedule.AddSuccessorForTesting(loop1->nodes[j], loopN[j]->header());
+ schedule.AddSuccessorForTesting(loopN[j]->last(), E);
+ }
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+ CheckLoop(order, loop1->nodes, loop1->count);
+
+ for (int j = 0; j < size; j++) {
+ CheckLoop(order, loopN[j]->nodes, loopN[j]->count);
+ delete loopN[j];
+ }
+ delete[] loopN;
+ }
+}
+
+TEST_F(SchedulerRPOTest, LoopMultibackedge) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.NewBasicBlock();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(B, D);
+ schedule.AddSuccessorForTesting(B, E);
+ schedule.AddSuccessorForTesting(C, B);
+ schedule.AddSuccessorForTesting(D, B);
+ schedule.AddSuccessorForTesting(E, B);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 5, true);
+
+ BasicBlock* loop1[] = {B, C, D, E};
+ CheckLoop(order, loop1, 4);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
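Note: this new file receives the RPO tests verbatim from scheduler-unittest.cc (their deletion follows below), resolving the old "TODO(titzer): pull RPO tests out to their own file". The fixture is also slimmed down: computing the special RPO needs only a Zone, so:

    // Old (scheduler-unittest.cc)
    class SchedulerRPOTest : public SchedulerTest { ... };
    // New (scheduler-rpo-unittest.cc)
    class SchedulerRPOTest : public TestWithZone { ... };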
diff --git a/deps/v8/test/unittests/compiler/scheduler-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
index 523c8ce9d4..6cf07345a2 100644
--- a/deps/v8/test/unittests/compiler/scheduler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
@@ -71,67 +71,6 @@ class SchedulerTest : public TestWithIsolateAndZone {
};
-class SchedulerRPOTest : public SchedulerTest {
- public:
- SchedulerRPOTest() {}
-
- // TODO(titzer): pull RPO tests out to their own file.
- void CheckRPONumbers(BasicBlockVector* order, size_t expected,
- bool loops_allowed) {
- CHECK(expected == order->size());
- for (int i = 0; i < static_cast<int>(order->size()); i++) {
- CHECK(order->at(i)->rpo_number() == i);
- if (!loops_allowed) {
- CHECK(!order->at(i)->loop_end());
- CHECK(!order->at(i)->loop_header());
- }
- }
- }
-
- void CheckLoop(BasicBlockVector* order, BasicBlock** blocks, int body_size) {
- BasicBlock* header = blocks[0];
- BasicBlock* end = header->loop_end();
- CHECK(end);
- CHECK_GT(end->rpo_number(), 0);
- CHECK_EQ(body_size, end->rpo_number() - header->rpo_number());
- for (int i = 0; i < body_size; i++) {
- CHECK_GE(blocks[i]->rpo_number(), header->rpo_number());
- CHECK_LT(blocks[i]->rpo_number(), end->rpo_number());
- CHECK(header->LoopContains(blocks[i]));
- CHECK(header->IsLoopHeader() || blocks[i]->loop_header() == header);
- }
- if (header->rpo_number() > 0) {
- CHECK_NE(order->at(header->rpo_number() - 1)->loop_header(), header);
- }
- if (end->rpo_number() < static_cast<int>(order->size())) {
- CHECK_NE(order->at(end->rpo_number())->loop_header(), header);
- }
- }
-
- struct TestLoop {
- int count;
- BasicBlock** nodes;
- BasicBlock* header() { return nodes[0]; }
- BasicBlock* last() { return nodes[count - 1]; }
- ~TestLoop() { delete[] nodes; }
- };
-
- TestLoop* CreateLoop(Schedule* schedule, int count) {
- TestLoop* loop = new TestLoop();
- loop->count = count;
- loop->nodes = new BasicBlock* [count];
- for (int i = 0; i < count; i++) {
- loop->nodes[i] = schedule->NewBasicBlock();
- if (i > 0) {
- schedule->AddSuccessorForTesting(loop->nodes[i - 1], loop->nodes[i]);
- }
- }
- schedule->AddSuccessorForTesting(loop->nodes[count - 1], loop->nodes[0]);
- return loop;
- }
-};
-
-
namespace {
const Operator kHeapConstant(IrOpcode::kHeapConstant, Operator::kPure,
@@ -146,491 +85,6 @@ const Operator kMockTailCall(IrOpcode::kTailCall, Operator::kNoProperties,
} // namespace
-// -----------------------------------------------------------------------------
-// Special reverse-post-order block ordering.
-
-
-TEST_F(SchedulerRPOTest, Degenerate1) {
- Schedule schedule(zone());
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, 1, false);
- EXPECT_EQ(schedule.start(), order->at(0));
-}
-
-
-TEST_F(SchedulerRPOTest, Degenerate2) {
- Schedule schedule(zone());
-
- schedule.AddGoto(schedule.start(), schedule.end());
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, 2, false);
- EXPECT_EQ(schedule.start(), order->at(0));
- EXPECT_EQ(schedule.end(), order->at(1));
-}
-
-
-TEST_F(SchedulerRPOTest, Line) {
- for (int i = 0; i < 10; i++) {
- Schedule schedule(zone());
-
- BasicBlock* last = schedule.start();
- for (int j = 0; j < i; j++) {
- BasicBlock* block = schedule.NewBasicBlock();
- block->set_deferred(i & 1);
- schedule.AddGoto(last, block);
- last = block;
- }
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, 1 + i, false);
-
- for (size_t i = 0; i < schedule.BasicBlockCount(); i++) {
- BasicBlock* block = schedule.GetBlockById(BasicBlock::Id::FromSize(i));
- if (block->rpo_number() >= 0 && block->SuccessorCount() == 1) {
- EXPECT_EQ(block->rpo_number() + 1, block->SuccessorAt(0)->rpo_number());
- }
- }
- }
-}
-
-
-TEST_F(SchedulerRPOTest, SelfLoop) {
- Schedule schedule(zone());
- schedule.AddSuccessorForTesting(schedule.start(), schedule.start());
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, 1, true);
- BasicBlock* loop[] = {schedule.start()};
- CheckLoop(order, loop, 1);
-}
-
-
-TEST_F(SchedulerRPOTest, EntryLoop) {
- Schedule schedule(zone());
- BasicBlock* body = schedule.NewBasicBlock();
- schedule.AddSuccessorForTesting(schedule.start(), body);
- schedule.AddSuccessorForTesting(body, schedule.start());
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, 2, true);
- BasicBlock* loop[] = {schedule.start(), body};
- CheckLoop(order, loop, 2);
-}
-
-
-TEST_F(SchedulerRPOTest, EndLoop) {
- Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
- schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, 3, true);
- CheckLoop(order, loop1->nodes, loop1->count);
-}
-
-
-TEST_F(SchedulerRPOTest, EndLoopNested) {
- Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
- schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
- schedule.AddSuccessorForTesting(loop1->last(), schedule.start());
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, 3, true);
- CheckLoop(order, loop1->nodes, loop1->count);
-}
-
-
-TEST_F(SchedulerRPOTest, Diamond) {
- Schedule schedule(zone());
-
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.end();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(A, C);
- schedule.AddSuccessorForTesting(B, D);
- schedule.AddSuccessorForTesting(C, D);
-
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, 4, false);
-
- EXPECT_EQ(0, A->rpo_number());
- EXPECT_THAT(B->rpo_number(), AnyOf(1, 2));
- EXPECT_THAT(C->rpo_number(), AnyOf(1, 2));
- EXPECT_EQ(3, D->rpo_number());
-}
-
-
-TEST_F(SchedulerRPOTest, Loop1) {
- Schedule schedule(zone());
-
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.end();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(B, C);
- schedule.AddSuccessorForTesting(C, B);
- schedule.AddSuccessorForTesting(C, D);
-
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, 4, true);
- BasicBlock* loop[] = {B, C};
- CheckLoop(order, loop, 2);
-}
-
-
-TEST_F(SchedulerRPOTest, Loop2) {
- Schedule schedule(zone());
-
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.end();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(B, C);
- schedule.AddSuccessorForTesting(C, B);
- schedule.AddSuccessorForTesting(B, D);
-
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, 4, true);
- BasicBlock* loop[] = {B, C};
- CheckLoop(order, loop, 2);
-}
-
-
-TEST_F(SchedulerRPOTest, LoopN) {
- for (int i = 0; i < 11; i++) {
- Schedule schedule(zone());
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.NewBasicBlock();
- BasicBlock* E = schedule.NewBasicBlock();
- BasicBlock* F = schedule.NewBasicBlock();
- BasicBlock* G = schedule.end();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(B, C);
- schedule.AddSuccessorForTesting(C, D);
- schedule.AddSuccessorForTesting(D, E);
- schedule.AddSuccessorForTesting(E, F);
- schedule.AddSuccessorForTesting(F, B);
- schedule.AddSuccessorForTesting(B, G);
-
- // Throw in extra backedges from time to time.
- if (i == 1) schedule.AddSuccessorForTesting(B, B);
- if (i == 2) schedule.AddSuccessorForTesting(C, B);
- if (i == 3) schedule.AddSuccessorForTesting(D, B);
- if (i == 4) schedule.AddSuccessorForTesting(E, B);
- if (i == 5) schedule.AddSuccessorForTesting(F, B);
-
- // Throw in extra loop exits from time to time.
- if (i == 6) schedule.AddSuccessorForTesting(B, G);
- if (i == 7) schedule.AddSuccessorForTesting(C, G);
- if (i == 8) schedule.AddSuccessorForTesting(D, G);
- if (i == 9) schedule.AddSuccessorForTesting(E, G);
- if (i == 10) schedule.AddSuccessorForTesting(F, G);
-
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, 7, true);
- BasicBlock* loop[] = {B, C, D, E, F};
- CheckLoop(order, loop, 5);
- }
-}
-
-
-TEST_F(SchedulerRPOTest, LoopNest1) {
- Schedule schedule(zone());
-
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.NewBasicBlock();
- BasicBlock* E = schedule.NewBasicBlock();
- BasicBlock* F = schedule.end();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(B, C);
- schedule.AddSuccessorForTesting(C, D);
- schedule.AddSuccessorForTesting(D, C);
- schedule.AddSuccessorForTesting(D, E);
- schedule.AddSuccessorForTesting(E, B);
- schedule.AddSuccessorForTesting(E, F);
-
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, 6, true);
- BasicBlock* loop1[] = {B, C, D, E};
- CheckLoop(order, loop1, 4);
-
- BasicBlock* loop2[] = {C, D};
- CheckLoop(order, loop2, 2);
-}
-
-
-TEST_F(SchedulerRPOTest, LoopNest2) {
- Schedule schedule(zone());
-
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.NewBasicBlock();
- BasicBlock* E = schedule.NewBasicBlock();
- BasicBlock* F = schedule.NewBasicBlock();
- BasicBlock* G = schedule.NewBasicBlock();
- BasicBlock* H = schedule.end();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(B, C);
- schedule.AddSuccessorForTesting(C, D);
- schedule.AddSuccessorForTesting(D, E);
- schedule.AddSuccessorForTesting(E, F);
- schedule.AddSuccessorForTesting(F, G);
- schedule.AddSuccessorForTesting(G, H);
-
- schedule.AddSuccessorForTesting(E, D);
- schedule.AddSuccessorForTesting(F, C);
- schedule.AddSuccessorForTesting(G, B);
-
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, 8, true);
- BasicBlock* loop1[] = {B, C, D, E, F, G};
- CheckLoop(order, loop1, 6);
-
- BasicBlock* loop2[] = {C, D, E, F};
- CheckLoop(order, loop2, 4);
-
- BasicBlock* loop3[] = {D, E};
- CheckLoop(order, loop3, 2);
-}
-
-
-TEST_F(SchedulerRPOTest, LoopFollow1) {
- Schedule schedule(zone());
-
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
-
- BasicBlock* A = schedule.start();
- BasicBlock* E = schedule.end();
-
- schedule.AddSuccessorForTesting(A, loop1->header());
- schedule.AddSuccessorForTesting(loop1->header(), loop2->header());
- schedule.AddSuccessorForTesting(loop2->last(), E);
-
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
-
- EXPECT_EQ(schedule.BasicBlockCount(), order->size());
- CheckLoop(order, loop1->nodes, loop1->count);
- CheckLoop(order, loop2->nodes, loop2->count);
-}
-
-
-TEST_F(SchedulerRPOTest, LoopFollow2) {
- Schedule schedule(zone());
-
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
-
- BasicBlock* A = schedule.start();
- BasicBlock* S = schedule.NewBasicBlock();
- BasicBlock* E = schedule.end();
-
- schedule.AddSuccessorForTesting(A, loop1->header());
- schedule.AddSuccessorForTesting(loop1->header(), S);
- schedule.AddSuccessorForTesting(S, loop2->header());
- schedule.AddSuccessorForTesting(loop2->last(), E);
-
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
-
- EXPECT_EQ(schedule.BasicBlockCount(), order->size());
- CheckLoop(order, loop1->nodes, loop1->count);
- CheckLoop(order, loop2->nodes, loop2->count);
-}
-
-
-TEST_F(SchedulerRPOTest, LoopFollowN) {
- for (int size = 1; size < 5; size++) {
- for (int exit = 0; exit < size; exit++) {
- Schedule schedule(zone());
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, size));
- BasicBlock* A = schedule.start();
- BasicBlock* E = schedule.end();
-
- schedule.AddSuccessorForTesting(A, loop1->header());
- schedule.AddSuccessorForTesting(loop1->nodes[exit], loop2->header());
- schedule.AddSuccessorForTesting(loop2->nodes[exit], E);
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
-
- EXPECT_EQ(schedule.BasicBlockCount(), order->size());
- CheckLoop(order, loop1->nodes, loop1->count);
- CheckLoop(order, loop2->nodes, loop2->count);
- }
- }
-}
-
-
-TEST_F(SchedulerRPOTest, NestedLoopFollow1) {
- Schedule schedule(zone());
-
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
- base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
-
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* E = schedule.end();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(B, loop1->header());
- schedule.AddSuccessorForTesting(loop1->header(), loop2->header());
- schedule.AddSuccessorForTesting(loop2->last(), C);
- schedule.AddSuccessorForTesting(C, E);
- schedule.AddSuccessorForTesting(C, B);
-
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
-
- EXPECT_EQ(schedule.BasicBlockCount(), order->size());
- CheckLoop(order, loop1->nodes, loop1->count);
- CheckLoop(order, loop2->nodes, loop2->count);
-
- BasicBlock* loop3[] = {B, loop1->nodes[0], loop2->nodes[0], C};
- CheckLoop(order, loop3, 4);
-}
-
-
-TEST_F(SchedulerRPOTest, LoopBackedges1) {
- int size = 8;
- for (int i = 0; i < size; i++) {
- for (int j = 0; j < size; j++) {
- Schedule schedule(zone());
- BasicBlock* A = schedule.start();
- BasicBlock* E = schedule.end();
-
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- schedule.AddSuccessorForTesting(A, loop1->header());
- schedule.AddSuccessorForTesting(loop1->last(), E);
-
- schedule.AddSuccessorForTesting(loop1->nodes[i], loop1->header());
- schedule.AddSuccessorForTesting(loop1->nodes[j], E);
-
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- CheckLoop(order, loop1->nodes, loop1->count);
- }
- }
-}
-
-
-TEST_F(SchedulerRPOTest, LoopOutedges1) {
- int size = 8;
- for (int i = 0; i < size; i++) {
- for (int j = 0; j < size; j++) {
- Schedule schedule(zone());
- BasicBlock* A = schedule.start();
- BasicBlock* D = schedule.NewBasicBlock();
- BasicBlock* E = schedule.end();
-
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- schedule.AddSuccessorForTesting(A, loop1->header());
- schedule.AddSuccessorForTesting(loop1->last(), E);
-
- schedule.AddSuccessorForTesting(loop1->nodes[i], loop1->header());
- schedule.AddSuccessorForTesting(loop1->nodes[j], D);
- schedule.AddSuccessorForTesting(D, E);
-
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- CheckLoop(order, loop1->nodes, loop1->count);
- }
- }
-}
-
-
-TEST_F(SchedulerRPOTest, LoopOutedges2) {
- int size = 8;
- for (int i = 0; i < size; i++) {
- Schedule schedule(zone());
- BasicBlock* A = schedule.start();
- BasicBlock* E = schedule.end();
-
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- schedule.AddSuccessorForTesting(A, loop1->header());
- schedule.AddSuccessorForTesting(loop1->last(), E);
-
- for (int j = 0; j < size; j++) {
- BasicBlock* O = schedule.NewBasicBlock();
- schedule.AddSuccessorForTesting(loop1->nodes[j], O);
- schedule.AddSuccessorForTesting(O, E);
- }
-
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- CheckLoop(order, loop1->nodes, loop1->count);
- }
-}
-
-
-TEST_F(SchedulerRPOTest, LoopOutloops1) {
- int size = 8;
- for (int i = 0; i < size; i++) {
- Schedule schedule(zone());
- BasicBlock* A = schedule.start();
- BasicBlock* E = schedule.end();
- base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
- schedule.AddSuccessorForTesting(A, loop1->header());
- schedule.AddSuccessorForTesting(loop1->last(), E);
-
- TestLoop** loopN = new TestLoop* [size];
- for (int j = 0; j < size; j++) {
- loopN[j] = CreateLoop(&schedule, 2);
- schedule.AddSuccessorForTesting(loop1->nodes[j], loopN[j]->header());
- schedule.AddSuccessorForTesting(loopN[j]->last(), E);
- }
-
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- CheckLoop(order, loop1->nodes, loop1->count);
-
- for (int j = 0; j < size; j++) {
- CheckLoop(order, loopN[j]->nodes, loopN[j]->count);
- delete loopN[j];
- }
- delete[] loopN;
- }
-}
-
-
-TEST_F(SchedulerRPOTest, LoopMultibackedge) {
- Schedule schedule(zone());
-
- BasicBlock* A = schedule.start();
- BasicBlock* B = schedule.NewBasicBlock();
- BasicBlock* C = schedule.NewBasicBlock();
- BasicBlock* D = schedule.NewBasicBlock();
- BasicBlock* E = schedule.NewBasicBlock();
-
- schedule.AddSuccessorForTesting(A, B);
- schedule.AddSuccessorForTesting(B, C);
- schedule.AddSuccessorForTesting(B, D);
- schedule.AddSuccessorForTesting(B, E);
- schedule.AddSuccessorForTesting(C, B);
- schedule.AddSuccessorForTesting(D, B);
- schedule.AddSuccessorForTesting(E, B);
-
- BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CheckRPONumbers(order, 5, true);
-
- BasicBlock* loop1[] = {B, C, D, E};
- CheckLoop(order, loop1, 4);
-}
-
-
-// -----------------------------------------------------------------------------
-// Graph end-to-end scheduling.
-
-
TEST_F(SchedulerTest, BuildScheduleEmpty) {
graph()->SetStart(graph()->NewNode(common()->Start(0)));
graph()->SetEnd(graph()->NewNode(common()->End(1), graph()->start()));
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
index 871189ad79..bd8509ff97 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
@@ -6,7 +6,7 @@
#include "src/compiler/operator.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/types-inl.h"
+#include "src/types.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -65,6 +65,8 @@ const PureOperator kPureOperators[] = {
PURE(ChangeFloat64ToTagged, Operator::kNoProperties, 1),
PURE(ChangeBoolToBit, Operator::kNoProperties, 1),
PURE(ChangeBitToBool, Operator::kNoProperties, 1),
+ PURE(ObjectIsNumber, Operator::kNoProperties, 1),
+ PURE(ObjectIsReceiver, Operator::kNoProperties, 1),
PURE(ObjectIsSmi, Operator::kNoProperties, 1)
#undef PURE
};
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index 6e4d4d589f..9d664a6d3a 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -49,7 +49,7 @@ class TyperTest : public TypedGraphTest {
}
}
- Types<Type, Type*, Zone> types_;
+ Types types_;
JSOperatorBuilder javascript_;
BinaryOperationHints const hints_ = BinaryOperationHints::Any();
Node* context_node_;
@@ -115,7 +115,7 @@ class TyperTest : public TypedGraphTest {
return result;
}
- double RandomInt(Type::RangeType* range) {
+ double RandomInt(RangeType* range) {
return RandomInt(range->Min(), range->Max());
}
@@ -149,12 +149,12 @@ class TyperTest : public TypedGraphTest {
void TestBinaryArithOp(const Operator* op, BinaryFunction opfun) {
TestBinaryArithOpCloseToZero(op, opfun, 8);
for (int i = 0; i < 100; ++i) {
- Type::RangeType* r1 = RandomRange()->AsRange();
- Type::RangeType* r2 = RandomRange()->AsRange();
+ Type* r1 = RandomRange();
+ Type* r2 = RandomRange();
Type* expected_type = TypeBinaryOp(op, r1, r2);
for (int i = 0; i < 10; i++) {
- double x1 = RandomInt(r1);
- double x2 = RandomInt(r2);
+ double x1 = RandomInt(r1->AsRange());
+ double x2 = RandomInt(r2->AsRange());
double result_value = opfun(x1, x2);
Type* result_type = Type::Constant(
isolate()->factory()->NewNumber(result_value), zone());
@@ -166,12 +166,12 @@ class TyperTest : public TypedGraphTest {
template <class BinaryFunction>
void TestBinaryCompareOp(const Operator* op, BinaryFunction opfun) {
for (int i = 0; i < 100; ++i) {
- Type::RangeType* r1 = RandomRange()->AsRange();
- Type::RangeType* r2 = RandomRange()->AsRange();
+ Type* r1 = RandomRange();
+ Type* r2 = RandomRange();
Type* expected_type = TypeBinaryOp(op, r1, r2);
for (int i = 0; i < 10; i++) {
- double x1 = RandomInt(r1);
- double x2 = RandomInt(r2);
+ double x1 = RandomInt(r1->AsRange());
+ double x2 = RandomInt(r2->AsRange());
bool result_value = opfun(x1, x2);
Type* result_type =
Type::Constant(result_value ? isolate()->factory()->true_value()
@@ -185,12 +185,12 @@ class TyperTest : public TypedGraphTest {
template <class BinaryFunction>
void TestBinaryBitOp(const Operator* op, BinaryFunction opfun) {
for (int i = 0; i < 100; ++i) {
- Type::RangeType* r1 = RandomRange(true)->AsRange();
- Type::RangeType* r2 = RandomRange(true)->AsRange();
+ Type* r1 = RandomRange(true);
+ Type* r2 = RandomRange(true);
Type* expected_type = TypeBinaryOp(op, r1, r2);
for (int i = 0; i < 10; i++) {
- int32_t x1 = static_cast<int32_t>(RandomInt(r1));
- int32_t x2 = static_cast<int32_t>(RandomInt(r2));
+ int32_t x1 = static_cast<int32_t>(RandomInt(r1->AsRange()));
+ int32_t x2 = static_cast<int32_t>(RandomInt(r2->AsRange()));
double result_value = opfun(x1, x2);
Type* result_type = Type::Constant(
isolate()->factory()->NewNumber(result_value), zone());
@@ -240,109 +240,72 @@ int32_t bit_xor(int32_t x, int32_t y) { return x ^ y; }
TEST_F(TyperTest, TypeJSAdd) {
- TestBinaryArithOp(javascript_.Add(LanguageMode::SLOPPY, hints_),
- std::plus<double>());
- TestBinaryArithOp(javascript_.Add(LanguageMode::STRONG, hints_),
- std::plus<double>());
+ TestBinaryArithOp(javascript_.Add(hints_), std::plus<double>());
}
TEST_F(TyperTest, TypeJSSubtract) {
- TestBinaryArithOp(javascript_.Subtract(LanguageMode::SLOPPY, hints_),
- std::minus<double>());
- TestBinaryArithOp(javascript_.Subtract(LanguageMode::STRONG, hints_),
- std::minus<double>());
+ TestBinaryArithOp(javascript_.Subtract(hints_), std::minus<double>());
}
TEST_F(TyperTest, TypeJSMultiply) {
- TestBinaryArithOp(javascript_.Multiply(LanguageMode::SLOPPY, hints_),
- std::multiplies<double>());
- TestBinaryArithOp(javascript_.Multiply(LanguageMode::STRONG, hints_),
- std::multiplies<double>());
+ TestBinaryArithOp(javascript_.Multiply(hints_), std::multiplies<double>());
}
TEST_F(TyperTest, TypeJSDivide) {
- TestBinaryArithOp(javascript_.Divide(LanguageMode::SLOPPY, hints_),
- std::divides<double>());
- TestBinaryArithOp(javascript_.Divide(LanguageMode::STRONG, hints_),
- std::divides<double>());
+ TestBinaryArithOp(javascript_.Divide(hints_), std::divides<double>());
}
TEST_F(TyperTest, TypeJSModulus) {
- TestBinaryArithOp(javascript_.Modulus(LanguageMode::SLOPPY, hints_), modulo);
- TestBinaryArithOp(javascript_.Modulus(LanguageMode::STRONG, hints_), modulo);
+ TestBinaryArithOp(javascript_.Modulus(hints_), modulo);
}
TEST_F(TyperTest, TypeJSBitwiseOr) {
- TestBinaryBitOp(javascript_.BitwiseOr(LanguageMode::SLOPPY, hints_), bit_or);
- TestBinaryBitOp(javascript_.BitwiseOr(LanguageMode::STRONG, hints_), bit_or);
+ TestBinaryBitOp(javascript_.BitwiseOr(hints_), bit_or);
}
TEST_F(TyperTest, TypeJSBitwiseAnd) {
- TestBinaryBitOp(javascript_.BitwiseAnd(LanguageMode::SLOPPY, hints_),
- bit_and);
- TestBinaryBitOp(javascript_.BitwiseAnd(LanguageMode::STRONG, hints_),
- bit_and);
+ TestBinaryBitOp(javascript_.BitwiseAnd(hints_), bit_and);
}
TEST_F(TyperTest, TypeJSBitwiseXor) {
- TestBinaryBitOp(javascript_.BitwiseXor(LanguageMode::SLOPPY, hints_),
- bit_xor);
- TestBinaryBitOp(javascript_.BitwiseXor(LanguageMode::STRONG, hints_),
- bit_xor);
+ TestBinaryBitOp(javascript_.BitwiseXor(hints_), bit_xor);
}
TEST_F(TyperTest, TypeJSShiftLeft) {
- TestBinaryBitOp(javascript_.ShiftLeft(LanguageMode::SLOPPY, hints_),
- shift_left);
- TestBinaryBitOp(javascript_.ShiftLeft(LanguageMode::STRONG, hints_),
- shift_left);
+ TestBinaryBitOp(javascript_.ShiftLeft(hints_), shift_left);
}
TEST_F(TyperTest, TypeJSShiftRight) {
- TestBinaryBitOp(javascript_.ShiftRight(LanguageMode::SLOPPY, hints_),
- shift_right);
- TestBinaryBitOp(javascript_.ShiftRight(LanguageMode::STRONG, hints_),
- shift_right);
+ TestBinaryBitOp(javascript_.ShiftRight(hints_), shift_right);
}
TEST_F(TyperTest, TypeJSLessThan) {
- TestBinaryCompareOp(javascript_.LessThan(LanguageMode::SLOPPY),
- std::less<double>());
- TestBinaryCompareOp(javascript_.LessThan(LanguageMode::STRONG),
- std::less<double>());
+ TestBinaryCompareOp(javascript_.LessThan(), std::less<double>());
}
TEST_F(TyperTest, TypeJSLessThanOrEqual) {
- TestBinaryCompareOp(javascript_.LessThanOrEqual(LanguageMode::SLOPPY),
- std::less_equal<double>());
- TestBinaryCompareOp(javascript_.LessThanOrEqual(LanguageMode::STRONG),
- std::less_equal<double>());
+ TestBinaryCompareOp(javascript_.LessThanOrEqual(), std::less_equal<double>());
}
TEST_F(TyperTest, TypeJSGreaterThan) {
- TestBinaryCompareOp(javascript_.GreaterThan(LanguageMode::SLOPPY),
- std::greater<double>());
- TestBinaryCompareOp(javascript_.GreaterThan(LanguageMode::STRONG),
- std::greater<double>());
+ TestBinaryCompareOp(javascript_.GreaterThan(), std::greater<double>());
}
TEST_F(TyperTest, TypeJSGreaterThanOrEqual) {
- TestBinaryCompareOp(javascript_.GreaterThanOrEqual(LanguageMode::SLOPPY),
- std::greater_equal<double>());
- TestBinaryCompareOp(javascript_.GreaterThanOrEqual(LanguageMode::STRONG),
+ TestBinaryCompareOp(javascript_.GreaterThanOrEqual(),
std::greater_equal<double>());
}
@@ -381,27 +344,15 @@ TEST_BINARY_MONOTONICITY(Equal)
TEST_BINARY_MONOTONICITY(NotEqual)
TEST_BINARY_MONOTONICITY(StrictEqual)
TEST_BINARY_MONOTONICITY(StrictNotEqual)
-#undef TEST_BINARY_MONOTONICITY
-
-
-#define TEST_BINARY_MONOTONICITY(name) \
- TEST_F(TyperTest, Monotonicity_##name) { \
- TestBinaryMonotonicity(javascript_.name(LanguageMode::SLOPPY)); \
- TestBinaryMonotonicity(javascript_.name(LanguageMode::STRONG)); \
- }
TEST_BINARY_MONOTONICITY(LessThan)
TEST_BINARY_MONOTONICITY(GreaterThan)
TEST_BINARY_MONOTONICITY(LessThanOrEqual)
TEST_BINARY_MONOTONICITY(GreaterThanOrEqual)
#undef TEST_BINARY_MONOTONICITY
-
-#define TEST_BINARY_MONOTONICITY(name) \
- TEST_F(TyperTest, Monotonicity_##name) { \
- TestBinaryMonotonicity( \
- javascript_.name(LanguageMode::SLOPPY, BinaryOperationHints::Any())); \
- TestBinaryMonotonicity( \
- javascript_.name(LanguageMode::STRONG, BinaryOperationHints::Any())); \
+#define TEST_BINARY_MONOTONICITY(name) \
+ TEST_F(TyperTest, Monotonicity_##name) { \
+ TestBinaryMonotonicity(javascript_.name(BinaryOperationHints::Any())); \
}
TEST_BINARY_MONOTONICITY(BitwiseOr)
TEST_BINARY_MONOTONICITY(BitwiseXor)
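
Note: the typer hunks above replace the nested Type::RangeType name with a top-level RangeType reached through AsRange(). A minimal sketch of the migrated usage, assuming the Type::Range factory from src/types.h (not shown in this diff):

    // Sketch only: build a numeric range type and inspect its bounds.
    Type* t = Type::Range(1.0, 7.0, zone());
    RangeType* r = t->AsRange();   // was Type::RangeType* before this change
    double lo = r->Min();          // 1.0
    double hi = r->Max();          // 7.0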
diff --git a/deps/v8/test/unittests/heap/memory-reducer-unittest.cc b/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
index 1088f0127d..4787bc66d2 100644
--- a/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
+++ b/deps/v8/test/unittests/heap/memory-reducer-unittest.cc
@@ -74,10 +74,9 @@ MemoryReducer::Event TimerEventPendingGC(double time_ms) {
return TimerEvent(time_ms, true, false);
}
-
-MemoryReducer::Event ContextDisposedEvent(double time_ms) {
+MemoryReducer::Event PossibleGarbageEvent(double time_ms) {
MemoryReducer::Event event;
- event.type = MemoryReducer::kContextDisposed;
+ event.type = MemoryReducer::kPossibleGarbage;
event.time_ms = time_ms;
return event;
}
@@ -114,7 +113,7 @@ TEST(MemoryReducer, FromDoneToWait) {
EXPECT_EQ(0, state1.started_gcs);
EXPECT_EQ(2, state1.last_gc_time_ms);
- state1 = MemoryReducer::Step(state0, ContextDisposedEvent(0));
+ state1 = MemoryReducer::Step(state0, PossibleGarbageEvent(0));
EXPECT_EQ(MemoryReducer::kWait, state1.action);
EXPECT_EQ(MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
EXPECT_EQ(0, state1.started_gcs);
@@ -127,7 +126,7 @@ TEST(MemoryReducer, FromWaitToWait) {
MemoryReducer::State state0(WaitState(2, 1000.0)), state1(DoneState());
- state1 = MemoryReducer::Step(state0, ContextDisposedEvent(2000));
+ state1 = MemoryReducer::Step(state0, PossibleGarbageEvent(2000));
EXPECT_EQ(MemoryReducer::kWait, state1.action);
EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
EXPECT_EQ(state0.started_gcs, state1.started_gcs);
@@ -250,7 +249,7 @@ TEST(MemoryReducer, FromRunToRun) {
EXPECT_EQ(state0.started_gcs, state1.started_gcs);
EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
- state1 = MemoryReducer::Step(state0, ContextDisposedEvent(2000));
+ state1 = MemoryReducer::Step(state0, PossibleGarbageEvent(2000));
EXPECT_EQ(MemoryReducer::kRun, state1.action);
EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
EXPECT_EQ(state0.started_gcs, state1.started_gcs);
diff --git a/deps/v8/test/unittests/heap/slot-set-unittest.cc b/deps/v8/test/unittests/heap/slot-set-unittest.cc
new file mode 100644
index 0000000000..376188915a
--- /dev/null
+++ b/deps/v8/test/unittests/heap/slot-set-unittest.cc
@@ -0,0 +1,143 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/globals.h"
+#include "src/heap/slot-set.h"
+#include "src/heap/spaces.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+TEST(SlotSet, InsertAndLookup1) {
+ SlotSet set;
+ set.SetPageStart(0);
+ for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ EXPECT_FALSE(set.Lookup(i));
+ }
+ for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ set.Insert(i);
+ }
+ for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ EXPECT_TRUE(set.Lookup(i));
+ }
+}
+
+TEST(SlotSet, InsertAndLookup2) {
+ SlotSet set;
+ set.SetPageStart(0);
+ for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ if (i % 7 == 0) {
+ set.Insert(i);
+ }
+ }
+ for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ if (i % 7 == 0) {
+ EXPECT_TRUE(set.Lookup(i));
+ } else {
+ EXPECT_FALSE(set.Lookup(i));
+ }
+ }
+}
+
+TEST(SlotSet, Iterate) {
+ SlotSet set;
+ set.SetPageStart(0);
+ for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ if (i % 7 == 0) {
+ set.Insert(i);
+ }
+ }
+
+ set.Iterate([](Address slot_address) {
+ uintptr_t intaddr = reinterpret_cast<uintptr_t>(slot_address);
+ if (intaddr % 3 == 0) {
+ return SlotSet::KEEP_SLOT;
+ } else {
+ return SlotSet::REMOVE_SLOT;
+ }
+ });
+
+ for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ if (i % 21 == 0) {
+ EXPECT_TRUE(set.Lookup(i));
+ } else {
+ EXPECT_FALSE(set.Lookup(i));
+ }
+ }
+}
+
+TEST(SlotSet, Remove) {
+ SlotSet set;
+ set.SetPageStart(0);
+ for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ if (i % 7 == 0) {
+ set.Insert(i);
+ }
+ }
+
+ for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ if (i % 3 != 0) {
+ set.Remove(i);
+ }
+ }
+
+ for (int i = 0; i < Page::kPageSize; i += kPointerSize) {
+ if (i % 21 == 0) {
+ EXPECT_TRUE(set.Lookup(i));
+ } else {
+ EXPECT_FALSE(set.Lookup(i));
+ }
+ }
+}
+
+void CheckRemoveRangeOn(uint32_t start, uint32_t end) {
+ SlotSet set;
+ set.SetPageStart(0);
+ uint32_t first = start == 0 ? 0 : start - kPointerSize;
+ uint32_t last = end == Page::kPageSize ? end - kPointerSize : end;
+ for (uint32_t i = first; i <= last; i += kPointerSize) {
+ set.Insert(i);
+ }
+ set.RemoveRange(start, end);
+ if (first != start) {
+ EXPECT_TRUE(set.Lookup(first));
+ }
+ if (last == end) {
+ EXPECT_TRUE(set.Lookup(last));
+ }
+ for (uint32_t i = start; i < end; i += kPointerSize) {
+ EXPECT_FALSE(set.Lookup(i));
+ }
+}
+
+TEST(SlotSet, RemoveRange) {
+ CheckRemoveRangeOn(0, Page::kPageSize);
+ CheckRemoveRangeOn(1 * kPointerSize, 1023 * kPointerSize);
+ for (uint32_t start = 0; start <= 32; start++) {
+ CheckRemoveRangeOn(start * kPointerSize, (start + 1) * kPointerSize);
+ CheckRemoveRangeOn(start * kPointerSize, (start + 2) * kPointerSize);
+ const uint32_t kEnds[] = {32, 64, 100, 128, 1024, 1500, 2048};
+ for (size_t i = 0; i < sizeof(kEnds) / sizeof(uint32_t); i++) {
+ for (int k = -3; k <= 3; k++) {
+ uint32_t end = (kEnds[i] + k);
+ if (start < end) {
+ CheckRemoveRangeOn(start * kPointerSize, end * kPointerSize);
+ }
+ }
+ }
+ }
+ SlotSet set;
+ set.SetPageStart(0);
+ set.Insert(Page::kPageSize / 2);
+ set.RemoveRange(0, Page::kPageSize);
+ for (uint32_t i = 0; i < Page::kPageSize; i += kPointerSize) {
+ EXPECT_FALSE(set.Lookup(i));
+ }
+}
+
+} // namespace internal
+} // namespace v8
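
Taken together, the new tests pin down the SlotSet contract. A condensed usage sketch, using only the names exercised above (offsets are page-relative and pointer-aligned):

    SlotSet set;
    set.SetPageStart(0);                          // offsets below are relative to 0
    set.Insert(2 * kPointerSize);                 // record a slot
    bool present = set.Lookup(2 * kPointerSize);  // -> true
    set.RemoveRange(0, Page::kPageSize);          // removes the half-open range [start, end)
    // Iterate lets the callback decide, per slot, whether to keep it:
    set.Iterate([](Address slot) {
      return SlotSet::KEEP_SLOT;                  // or SlotSet::REMOVE_SLOT
    });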
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 2140aa83c7..839215f743 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -21,14 +21,16 @@ class BytecodeArrayBuilderTest : public TestWithIsolateAndZone {
TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
- BytecodeArrayBuilder builder(isolate(), zone());
+ BytecodeArrayBuilder builder(isolate(), zone(), 0, 1, 131);
- builder.set_locals_count(200);
- builder.set_context_count(1);
- builder.set_parameter_count(0);
- CHECK_EQ(builder.locals_count(), 200);
+ CHECK_EQ(builder.locals_count(), 131);
CHECK_EQ(builder.context_count(), 1);
- CHECK_EQ(builder.fixed_register_count(), 201);
+ CHECK_EQ(builder.fixed_register_count(), 132);
+
+ // Emit argument creation operations.
+ builder.CreateArguments(CreateArgumentsType::kMappedArguments)
+ .CreateArguments(CreateArgumentsType::kUnmappedArguments)
+ .CreateArguments(CreateArgumentsType::kRestParameter);
// Emit constant loads.
builder.LoadLiteral(Smi::FromInt(0))
@@ -40,32 +42,23 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.LoadTrue()
.LoadFalse();
- // Emit accumulator transfers. Stores followed by loads to the same register
- // are not generated. Hence, a dummy instruction in between.
Register reg(0);
+ Register other(reg.index() + 1);
+ Register wide(128);
+
builder.LoadAccumulatorWithRegister(reg)
.LoadNull()
.StoreAccumulatorInRegister(reg);
// Emit register-register transfer.
- Register other(1);
builder.MoveRegister(reg, other);
-
- // Emit register-register exchanges.
- Register wide(150);
- builder.ExchangeRegisters(reg, wide);
- builder.ExchangeRegisters(wide, reg);
- Register wider(151);
- builder.ExchangeRegisters(wide, wider);
+ builder.MoveRegister(reg, wide);
// Emit global load / store operations.
Factory* factory = isolate()->factory();
Handle<String> name = factory->NewStringFromStaticChars("var_name");
- builder.LoadGlobal(name, 1, LanguageMode::SLOPPY,
- TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(name, 1, LanguageMode::STRICT, TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(name, 1, LanguageMode::SLOPPY, TypeofMode::INSIDE_TYPEOF)
- .LoadGlobal(name, 1, LanguageMode::STRICT, TypeofMode::INSIDE_TYPEOF)
+ builder.LoadGlobal(name, 1, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(name, 1, TypeofMode::INSIDE_TYPEOF)
.StoreGlobal(name, 1, LanguageMode::SLOPPY)
.StoreGlobal(name, 1, LanguageMode::STRICT);
@@ -76,12 +69,10 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.StoreContextSlot(reg, 1);
// Emit load / store property operations.
- builder.LoadNamedProperty(reg, name, 0, LanguageMode::SLOPPY)
- .LoadKeyedProperty(reg, 0, LanguageMode::SLOPPY)
+ builder.LoadNamedProperty(reg, name, 0)
+ .LoadKeyedProperty(reg, 0)
.StoreNamedProperty(reg, name, 0, LanguageMode::SLOPPY)
.StoreKeyedProperty(reg, reg, 0, LanguageMode::SLOPPY)
- .LoadNamedProperty(reg, name, 0, LanguageMode::STRICT)
- .LoadKeyedProperty(reg, 0, LanguageMode::STRICT)
.StoreNamedProperty(reg, name, 0, LanguageMode::STRICT)
.StoreKeyedProperty(reg, reg, 0, LanguageMode::STRICT);
@@ -97,65 +88,64 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
false);
builder.CreateClosure(shared_info, NOT_TENURED);
- // Emit argument creation operations.
- builder.CreateArguments(CreateArgumentsType::kMappedArguments)
- .CreateArguments(CreateArgumentsType::kUnmappedArguments);
-
// Emit literal creation operations.
builder.CreateRegExpLiteral(factory->NewStringFromStaticChars("a"), 0, 0)
.CreateArrayLiteral(factory->NewFixedArray(1), 0, 0)
.CreateObjectLiteral(factory->NewFixedArray(1), 0, 0);
// Call operations.
- builder.Call(reg, reg, 0, 0)
- .Call(reg, reg, 0, 1024)
+ builder.Call(reg, other, 1, 0)
+ .Call(reg, wide, 1, 0)
+ .TailCall(reg, other, 1, 0)
+ .TailCall(reg, wide, 1, 0)
.CallRuntime(Runtime::kIsArray, reg, 1)
- .CallRuntimeForPair(Runtime::kLoadLookupSlot, reg, 1, reg)
- .CallJSRuntime(Context::SPREAD_ITERABLE_INDEX, reg, 1);
+ .CallRuntime(Runtime::kIsArray, wide, 1)
+ .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, reg, 1, other)
+ .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, wide, 1, other)
+ .CallJSRuntime(Context::SPREAD_ITERABLE_INDEX, reg, 1)
+ .CallJSRuntime(Context::SPREAD_ITERABLE_INDEX, wide, 1);
// Emit binary operator invocations.
- builder.BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
- .BinaryOperation(Token::Value::SUB, reg, Strength::WEAK)
- .BinaryOperation(Token::Value::MUL, reg, Strength::WEAK)
- .BinaryOperation(Token::Value::DIV, reg, Strength::WEAK)
- .BinaryOperation(Token::Value::MOD, reg, Strength::WEAK);
+ builder.BinaryOperation(Token::Value::ADD, reg)
+ .BinaryOperation(Token::Value::SUB, reg)
+ .BinaryOperation(Token::Value::MUL, reg)
+ .BinaryOperation(Token::Value::DIV, reg)
+ .BinaryOperation(Token::Value::MOD, reg);
// Emit bitwise operator invocations
- builder.BinaryOperation(Token::Value::BIT_OR, reg, Strength::WEAK)
- .BinaryOperation(Token::Value::BIT_XOR, reg, Strength::WEAK)
- .BinaryOperation(Token::Value::BIT_AND, reg, Strength::WEAK);
+ builder.BinaryOperation(Token::Value::BIT_OR, reg)
+ .BinaryOperation(Token::Value::BIT_XOR, reg)
+ .BinaryOperation(Token::Value::BIT_AND, reg);
// Emit shift operator invocations
- builder.BinaryOperation(Token::Value::SHL, reg, Strength::WEAK)
- .BinaryOperation(Token::Value::SAR, reg, Strength::WEAK)
- .BinaryOperation(Token::Value::SHR, reg, Strength::WEAK);
+ builder.BinaryOperation(Token::Value::SHL, reg)
+ .BinaryOperation(Token::Value::SAR, reg)
+ .BinaryOperation(Token::Value::SHR, reg);
// Emit count operator invocations
- builder.CountOperation(Token::Value::ADD, Strength::WEAK)
- .CountOperation(Token::Value::SUB, Strength::WEAK);
+ builder.CountOperation(Token::Value::ADD).CountOperation(Token::Value::SUB);
// Emit unary operator invocations.
builder.LogicalNot().TypeOf();
// Emit delete
- builder.Delete(reg, LanguageMode::SLOPPY)
- .Delete(reg, LanguageMode::STRICT)
- .DeleteLookupSlot();
+ builder.Delete(reg, LanguageMode::SLOPPY).Delete(reg, LanguageMode::STRICT);
// Emit new.
builder.New(reg, reg, 0);
+ builder.New(wide, wide, 0);
// Emit test operator invocations.
- builder.CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
- .CompareOperation(Token::Value::NE, reg, Strength::WEAK)
- .CompareOperation(Token::Value::EQ_STRICT, reg, Strength::WEAK)
- .CompareOperation(Token::Value::NE_STRICT, reg, Strength::WEAK)
- .CompareOperation(Token::Value::LT, reg, Strength::WEAK)
- .CompareOperation(Token::Value::GT, reg, Strength::WEAK)
- .CompareOperation(Token::Value::LTE, reg, Strength::WEAK)
- .CompareOperation(Token::Value::GTE, reg, Strength::WEAK)
- .CompareOperation(Token::Value::INSTANCEOF, reg, Strength::WEAK)
- .CompareOperation(Token::Value::IN, reg, Strength::WEAK);
+ builder.CompareOperation(Token::Value::EQ, reg)
+ .CompareOperation(Token::Value::NE, reg)
+ .CompareOperation(Token::Value::EQ_STRICT, reg)
+ .CompareOperation(Token::Value::NE_STRICT, reg)
+ .CompareOperation(Token::Value::LT, reg)
+ .CompareOperation(Token::Value::GT, reg)
+ .CompareOperation(Token::Value::LTE, reg)
+ .CompareOperation(Token::Value::GTE, reg)
+ .CompareOperation(Token::Value::INSTANCEOF, reg)
+ .CompareOperation(Token::Value::IN, reg);
// Emit cast operator invocations.
builder.CastAccumulatorToNumber()
@@ -168,50 +158,58 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
// Short jumps with Imm8 operands
builder.Jump(&start)
.JumpIfNull(&start)
- .JumpIfUndefined(&start);
+ .JumpIfUndefined(&start)
+ .JumpIfNotHole(&start);
+
// Perform an operation that returns boolean value to
// generate JumpIfTrue/False
- builder.CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ builder.CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&start)
- .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&start);
// Perform an operation that returns a non-boolean operation to
// generate JumpIfToBooleanTrue/False.
- builder.BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ builder.BinaryOperation(Token::Value::ADD, reg)
.JumpIfTrue(&start)
- .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::ADD, reg)
.JumpIfFalse(&start);
// Insert dummy ops to force longer jumps
for (int i = 0; i < 128; i++) {
builder.LoadTrue();
}
// Longer jumps requiring Constant operand
- builder.Jump(&start)
- .JumpIfNull(&start)
- .JumpIfUndefined(&start);
+ builder.Jump(&start).JumpIfNull(&start).JumpIfUndefined(&start).JumpIfNotHole(
+ &start);
// Perform an operation that returns boolean value to
// generate JumpIfTrue/False
- builder.CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ builder.CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&start)
- .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&start);
// Perform an operation that returns a non-boolean operation to
// generate JumpIfToBooleanTrue/False.
- builder.BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ builder.BinaryOperation(Token::Value::ADD, reg)
.JumpIfTrue(&start)
- .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::ADD, reg)
.JumpIfFalse(&start);
- // Emit throw in it's own basic block so that the rest of the code isn't
- // omitted due to being dead.
+ // Emit stack check bytecode.
+ builder.StackCheck();
+
+ // Emit throw and re-throw in its own basic block so that the rest of the
+ // code isn't omitted due to being dead.
BytecodeLabel after_throw;
- builder.Jump(&after_throw)
- .Throw()
- .Bind(&after_throw);
+ builder.Jump(&after_throw).Throw().Bind(&after_throw);
+ BytecodeLabel after_rethrow;
+ builder.Jump(&after_rethrow).ReThrow().Bind(&after_rethrow);
- builder.ForInPrepare(reg, reg, reg)
+ builder.ForInPrepare(reg)
.ForInDone(reg, reg)
- .ForInNext(reg, reg, reg, reg)
+ .ForInNext(reg, reg, reg)
+ .ForInStep(reg);
+ builder.ForInPrepare(wide)
+ .ForInDone(reg, other)
+ .ForInNext(wide, wide, wide)
.ForInStep(reg);
// Wide constant pool loads
@@ -223,28 +221,21 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
Handle<String> wide_name = factory->NewStringFromStaticChars("var_wide_name");
// Emit wide global load / store operations.
- builder.LoadGlobal(name, 1024, LanguageMode::SLOPPY,
- TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(wide_name, 1, LanguageMode::STRICT,
- TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(name, 1024, LanguageMode::SLOPPY, TypeofMode::INSIDE_TYPEOF)
- .LoadGlobal(wide_name, 1, LanguageMode::STRICT, TypeofMode::INSIDE_TYPEOF)
+ builder.LoadGlobal(name, 1024, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(name, 1024, TypeofMode::INSIDE_TYPEOF)
.StoreGlobal(name, 1024, LanguageMode::SLOPPY)
.StoreGlobal(wide_name, 1, LanguageMode::STRICT);
// Emit wide load / store property operations.
- builder.LoadNamedProperty(reg, wide_name, 0, LanguageMode::SLOPPY)
- .LoadKeyedProperty(reg, 2056, LanguageMode::SLOPPY)
+ builder.LoadNamedProperty(reg, wide_name, 0)
+ .LoadKeyedProperty(reg, 2056)
.StoreNamedProperty(reg, wide_name, 0, LanguageMode::SLOPPY)
.StoreKeyedProperty(reg, reg, 2056, LanguageMode::SLOPPY)
- .LoadNamedProperty(reg, wide_name, 0, LanguageMode::STRICT)
- .LoadKeyedProperty(reg, 2056, LanguageMode::STRICT)
.StoreNamedProperty(reg, wide_name, 0, LanguageMode::STRICT)
.StoreKeyedProperty(reg, reg, 2056, LanguageMode::STRICT);
// Emit wide context operations.
- builder.LoadContextSlot(reg, 1024)
- .StoreContextSlot(reg, 1024);
+ builder.LoadContextSlot(reg, 1024).StoreContextSlot(reg, 1024);
// Emit wide load / store lookup slots.
builder.LoadLookupSlot(wide_name, TypeofMode::NOT_INSIDE_TYPEOF)
@@ -265,26 +256,31 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CreateObjectLiteral(factory->NewFixedArray(2), 0, 0);
// Longer jumps requiring ConstantWide operand
- builder.Jump(&start).JumpIfNull(&start).JumpIfUndefined(&start);
+ builder.Jump(&start).JumpIfNull(&start).JumpIfUndefined(&start).JumpIfNotHole(
+ &start);
// Perform an operation that returns boolean value to
// generate JumpIfTrue/False
- builder.CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ builder.CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&start)
- .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&start);
// Perform an operation that returns a non-boolean operation to
// generate JumpIfToBooleanTrue/False.
- builder.BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ builder.BinaryOperation(Token::Value::ADD, reg)
.JumpIfTrue(&start)
- .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::ADD, reg)
.JumpIfFalse(&start);
+ builder.Debugger();
+
builder.Return();
// Generate BytecodeArray.
Handle<BytecodeArray> the_array = builder.ToBytecodeArray();
CHECK_EQ(the_array->frame_size(),
- builder.fixed_register_count() * kPointerSize);
+ (builder.fixed_and_temporary_register_count() +
+ builder.translation_register_count()) *
+ kPointerSize);
// Build scorecard of bytecodes encountered in the BytecodeArray.
std::vector<int> scorecard(Bytecodes::ToByte(Bytecode::kLast) + 1);
@@ -301,9 +297,11 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
CHECK_EQ(final_bytecode, Bytecode::kReturn);
CHECK_EQ(scorecard[Bytecodes::ToByte(final_bytecode)], 1);
-#define CHECK_BYTECODE_PRESENT(Name, ...) \
- /* Check Bytecode is marked in scorecard */ \
- CHECK_GE(scorecard[Bytecodes::ToByte(Bytecode::k##Name)], 1);
+#define CHECK_BYTECODE_PRESENT(Name, ...) \
+ /* Check Bytecode is marked in scorecard, unless it's a debug break */ \
+ if (!Bytecodes::IsDebugBreak(Bytecode::k##Name)) { \
+ CHECK_GE(scorecard[Bytecodes::ToByte(Bytecode::k##Name)], 1); \
+ }
BYTECODE_LIST(CHECK_BYTECODE_PRESENT)
#undef CHECK_BYTECODE_PRESENT
}
@@ -313,12 +311,9 @@ TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
for (int locals = 0; locals < 5; locals++) {
for (int contexts = 0; contexts < 4; contexts++) {
for (int temps = 0; temps < 3; temps++) {
- BytecodeArrayBuilder builder(isolate(), zone());
- builder.set_parameter_count(0);
- builder.set_locals_count(locals);
- builder.set_context_count(contexts);
-
- BytecodeRegisterAllocator temporaries(&builder);
+ BytecodeArrayBuilder builder(isolate(), zone(), 0, contexts, locals);
+ BytecodeRegisterAllocator temporaries(
+ zone(), builder.temporary_register_allocator());
for (int i = 0; i < temps; i++) {
builder.StoreAccumulatorInRegister(temporaries.NewRegister());
}
@@ -349,11 +344,7 @@ TEST_F(BytecodeArrayBuilderTest, RegisterValues) {
TEST_F(BytecodeArrayBuilderTest, Parameters) {
- BytecodeArrayBuilder builder(isolate(), zone());
- builder.set_parameter_count(10);
- builder.set_locals_count(0);
- builder.set_context_count(0);
-
+ BytecodeArrayBuilder builder(isolate(), zone(), 10, 0, 0);
Register param0(builder.Parameter(0));
Register param9(builder.Parameter(9));
CHECK_EQ(param9.index() - param0.index(), 9);
@@ -361,12 +352,9 @@ TEST_F(BytecodeArrayBuilderTest, Parameters) {
TEST_F(BytecodeArrayBuilderTest, RegisterType) {
- BytecodeArrayBuilder builder(isolate(), zone());
- builder.set_parameter_count(10);
- builder.set_locals_count(3);
- builder.set_context_count(0);
-
- BytecodeRegisterAllocator register_allocator(&builder);
+ BytecodeArrayBuilder builder(isolate(), zone(), 10, 0, 3);
+ BytecodeRegisterAllocator register_allocator(
+ zone(), builder.temporary_register_allocator());
Register temp0 = register_allocator.NewRegister();
Register param0(builder.Parameter(0));
Register param9(builder.Parameter(9));
@@ -387,11 +375,7 @@ TEST_F(BytecodeArrayBuilderTest, RegisterType) {
TEST_F(BytecodeArrayBuilderTest, Constants) {
- BytecodeArrayBuilder builder(isolate(), zone());
- builder.set_parameter_count(0);
- builder.set_locals_count(0);
- builder.set_context_count(0);
-
+ BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
Factory* factory = isolate()->factory();
Handle<HeapObject> heap_num_1 = factory->NewHeapNumber(3.14);
Handle<HeapObject> heap_num_2 = factory->NewHeapNumber(5.2);
@@ -402,7 +386,8 @@ TEST_F(BytecodeArrayBuilderTest, Constants) {
.LoadLiteral(large_smi)
.LoadLiteral(heap_num_1)
.LoadLiteral(heap_num_1)
- .LoadLiteral(heap_num_2_copy);
+ .LoadLiteral(heap_num_2_copy)
+ .Return();
Handle<BytecodeArray> array = builder.ToBytecodeArray();
// Should only have one entry for each identical constant.
@@ -413,23 +398,19 @@ TEST_F(BytecodeArrayBuilderTest, Constants) {
TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
static const int kFarJumpDistance = 256;
- BytecodeArrayBuilder builder(isolate(), zone());
- builder.set_parameter_count(0);
- builder.set_locals_count(1);
- builder.set_context_count(0);
-
+ BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 1);
Register reg(0);
BytecodeLabel far0, far1, far2, far3, far4;
BytecodeLabel near0, near1, near2, near3, near4;
builder.Jump(&near0)
- .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&near1)
- .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&near2)
- .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::ADD, reg)
.JumpIfTrue(&near3)
- .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::ADD, reg)
.JumpIfFalse(&near4)
.Bind(&near0)
.Bind(&near1)
@@ -437,13 +418,13 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
.Bind(&near3)
.Bind(&near4)
.Jump(&far0)
- .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&far1)
- .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&far2)
- .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::ADD, reg)
.JumpIfTrue(&far3)
- .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::ADD, reg)
.JumpIfFalse(&far4);
for (int i = 0; i < kFarJumpDistance - 18; i++) {
builder.LoadUndefined();
@@ -529,38 +510,31 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
- BytecodeArrayBuilder builder(isolate(), zone());
- builder.set_parameter_count(0);
- builder.set_locals_count(1);
- builder.set_context_count(0);
+ BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 1);
Register reg(0);
BytecodeLabel label0, label1, label2, label3, label4;
builder.Bind(&label0)
.Jump(&label0)
.Bind(&label1)
- .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&label1)
.Bind(&label2)
- .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&label2)
.Bind(&label3)
- .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::ADD, reg)
.JumpIfTrue(&label3)
.Bind(&label4)
- .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::ADD, reg)
.JumpIfFalse(&label4);
for (int i = 0; i < 63; i++) {
builder.Jump(&label4);
}
- builder.BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
- .JumpIfFalse(&label4);
- builder.BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
- .JumpIfTrue(&label3);
- builder.CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
- .JumpIfFalse(&label2);
- builder.CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
- .JumpIfTrue(&label1);
+ builder.BinaryOperation(Token::Value::ADD, reg).JumpIfFalse(&label4);
+ builder.BinaryOperation(Token::Value::ADD, reg).JumpIfTrue(&label3);
+ builder.CompareOperation(Token::Value::EQ, reg).JumpIfFalse(&label2);
+ builder.CompareOperation(Token::Value::EQ, reg).JumpIfTrue(&label1);
builder.Jump(&label0);
builder.Return();
@@ -625,10 +599,7 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
- BytecodeArrayBuilder builder(isolate(), zone());
- builder.set_parameter_count(0);
- builder.set_locals_count(0);
- builder.set_context_count(0);
+ BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
// Labels can only have 1 forward reference, but
// can be referred to multiple times once bound.
@@ -656,16 +627,11 @@ TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
static const int kRepeats = 3;
- BytecodeArrayBuilder builder(isolate(), zone());
- builder.set_parameter_count(0);
- builder.set_locals_count(0);
- builder.set_context_count(0);
-
+ BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
for (int i = 0; i < kRepeats; i++) {
BytecodeLabel label;
builder.Jump(&label).Bind(&label).Jump(&label).Jump(&label);
}
-
builder.Return();
Handle<BytecodeArray> array = builder.ToBytecodeArray();
@@ -686,7 +652,6 @@ TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
CHECK(iterator.done());
}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
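
These hunks replace the set_parameter_count/set_locals_count/set_context_count setters with constructor arguments. Judging from the call sites above (Parameters, RegisterType, FrameSizesLookGood), the order is (isolate, zone, parameter_count, context_count, locals_count); a sketch with that inferred order:

    // Equivalent of the old setter-based setup (argument order inferred
    // from the tests above, not from a header in this diff).
    BytecodeArrayBuilder builder(isolate(), zone(),
                                 10,   // parameter_count
                                 0,    // context_count
                                 3);   // locals_count
    Register param0(builder.Parameter(0));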
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index cd9f120cad..f2dcd7107c 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -22,11 +22,7 @@ class BytecodeArrayIteratorTest : public TestWithIsolateAndZone {
TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
// Use a builder to create an array containing multiple bytecodes
// with 0, 1 and 2 operands.
- BytecodeArrayBuilder builder(isolate(), zone());
- builder.set_parameter_count(3);
- builder.set_locals_count(2);
- builder.set_context_count(0);
-
+ BytecodeArrayBuilder builder(isolate(), zone(), 3, 2, 0);
Factory* factory = isolate()->factory();
Handle<HeapObject> heap_num_0 = factory->NewHeapNumber(2.718);
Handle<HeapObject> heap_num_1 = factory->NewHeapNumber(2147483647);
@@ -46,9 +42,10 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
.LoadLiteral(smi_0)
.LoadLiteral(smi_1)
.LoadAccumulatorWithRegister(reg_0)
- .LoadNamedProperty(reg_1, name, feedback_slot, LanguageMode::SLOPPY)
+ .LoadNamedProperty(reg_1, name, feedback_slot)
.StoreAccumulatorInRegister(reg_2)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0, 1)
+ .Debugger()
.Return();
// Test iterator sees the expected output from the builder.
@@ -82,7 +79,7 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
CHECK(!iterator.done());
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLoadICSloppy);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLoadIC);
CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
CHECK_EQ(iterator.GetIndexOperand(1), name_index);
CHECK_EQ(iterator.GetIndexOperand(2), feedback_slot);
@@ -98,7 +95,11 @@ TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
CHECK_EQ(static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0)),
Runtime::kLoadIC_Miss);
CHECK_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
- CHECK_EQ(iterator.GetCountOperand(2), 1);
+ CHECK_EQ(iterator.GetRegisterCountOperand(2), 1);
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
CHECK(!iterator.done());
iterator.Advance();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
index 0620322162..ec29935b2f 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
@@ -12,51 +12,219 @@ namespace v8 {
namespace internal {
namespace interpreter {
+class TemporaryRegisterAllocatorTest : public TestWithIsolateAndZone {
+ public:
+ TemporaryRegisterAllocatorTest() : allocator_(zone(), 0) {}
+ ~TemporaryRegisterAllocatorTest() override {}
+ TemporaryRegisterAllocator* allocator() { return &allocator_; }
+
+ private:
+ TemporaryRegisterAllocator allocator_;
+};
+
+TEST_F(TemporaryRegisterAllocatorTest, FirstAllocation) {
+ CHECK_EQ(allocator()->allocation_count(), 0);
+ int reg0_index = allocator()->BorrowTemporaryRegister();
+ CHECK_EQ(reg0_index, 0);
+ CHECK_EQ(allocator()->allocation_count(), 1);
+ CHECK(allocator()->RegisterIsLive(Register(reg0_index)));
+ allocator()->ReturnTemporaryRegister(reg0_index);
+ CHECK(!allocator()->RegisterIsLive(Register(reg0_index)));
+ CHECK_EQ(allocator()->allocation_count(), 1);
+ CHECK(allocator()->first_temporary_register() == Register(0));
+ CHECK(allocator()->last_temporary_register() == Register(0));
+}
+
+TEST_F(TemporaryRegisterAllocatorTest, SimpleAllocations) {
+ for (int i = 0; i < 13; i++) {
+ int reg_index = allocator()->BorrowTemporaryRegister();
+ CHECK_EQ(reg_index, i);
+ CHECK_EQ(allocator()->allocation_count(), i + 1);
+ }
+ for (int i = 0; i < 13; i++) {
+ CHECK(allocator()->RegisterIsLive(Register(i)));
+ allocator()->ReturnTemporaryRegister(i);
+ CHECK(!allocator()->RegisterIsLive(Register(i)));
+ int reg_index = allocator()->BorrowTemporaryRegister();
+ CHECK_EQ(reg_index, i);
+ CHECK_EQ(allocator()->allocation_count(), 13);
+ }
+ for (int i = 0; i < 13; i++) {
+ CHECK(allocator()->RegisterIsLive(Register(i)));
+ allocator()->ReturnTemporaryRegister(i);
+ CHECK(!allocator()->RegisterIsLive(Register(i)));
+ }
+}
+
+TEST_F(TemporaryRegisterAllocatorTest, SimpleRangeAllocation) {
+ static const int kRunLength = 7;
+ int start = allocator()->PrepareForConsecutiveTemporaryRegisters(kRunLength);
+ CHECK(!allocator()->RegisterIsLive(Register(start)));
+ for (int i = 0; i < kRunLength; i++) {
+ CHECK(!allocator()->RegisterIsLive(Register(start + i)));
+ allocator()->BorrowConsecutiveTemporaryRegister(start + i);
+ CHECK(allocator()->RegisterIsLive(Register(start + i)));
+ }
+}
+
+TEST_F(TemporaryRegisterAllocatorTest, RangeAllocationAbuttingFree) {
+ static const int kFreeCount = 3;
+ static const int kRunLength = 6;
+
+ for (int i = 0; i < kFreeCount; i++) {
+ int to_free = allocator()->BorrowTemporaryRegister();
+ CHECK_EQ(to_free, i);
+ }
+ for (int i = 0; i < kFreeCount; i++) {
+ allocator()->ReturnTemporaryRegister(i);
+ }
+
+ int start = allocator()->PrepareForConsecutiveTemporaryRegisters(kRunLength);
+ CHECK(!allocator()->RegisterIsLive(Register(start)));
+ for (int i = 0; i < kRunLength; i++) {
+ CHECK(!allocator()->RegisterIsLive(Register(start + i)));
+ allocator()->BorrowConsecutiveTemporaryRegister(start + i);
+ CHECK(allocator()->RegisterIsLive(Register(start + i)));
+ }
+}
+
+TEST_F(TemporaryRegisterAllocatorTest, RangeAllocationAbuttingHole) {
+ static const int kPreAllocatedCount = 7;
+ static const int kPreAllocatedFreeCount = 6;
+ static const int kRunLength = 8;
+
+ for (int i = 0; i < kPreAllocatedCount; i++) {
+ int to_free = allocator()->BorrowTemporaryRegister();
+ CHECK_EQ(to_free, i);
+ }
+ for (int i = 0; i < kPreAllocatedFreeCount; i++) {
+ allocator()->ReturnTemporaryRegister(i);
+ }
+ int start = allocator()->PrepareForConsecutiveTemporaryRegisters(kRunLength);
+ CHECK(!allocator()->RegisterIsLive(Register(start)));
+ CHECK_EQ(start, kPreAllocatedCount);
+ for (int i = 0; i < kRunLength; i++) {
+ CHECK(!allocator()->RegisterIsLive(Register(start + i)));
+ allocator()->BorrowConsecutiveTemporaryRegister(start + i);
+ CHECK(allocator()->RegisterIsLive(Register(start + i)));
+ }
+}
+
+TEST_F(TemporaryRegisterAllocatorTest, RangeAllocationAvailableInTemporaries) {
+ static const int kNotRunLength = 13;
+ static const int kRunLength = 8;
+
+ // Allocate big batch
+ for (int i = 0; i < kNotRunLength * 2 + kRunLength; i++) {
+ int allocated = allocator()->BorrowTemporaryRegister();
+ CHECK_EQ(allocated, i);
+ }
+ // Free every other register on either side of the target run.
+ for (int i = 0; i < kNotRunLength; i++) {
+ if ((i & 1) == 1) {
+ allocator()->ReturnTemporaryRegister(i);
+ allocator()->ReturnTemporaryRegister(kNotRunLength + kRunLength + i);
+ }
+ }
+ // Free all registers for target.
+ for (int i = kNotRunLength; i < kNotRunLength + kRunLength; i++) {
+ allocator()->ReturnTemporaryRegister(i);
+ }
+
+ int start = allocator()->PrepareForConsecutiveTemporaryRegisters(kRunLength);
+ CHECK_EQ(start, kNotRunLength);
+ for (int i = 0; i < kRunLength; i++) {
+ CHECK(!allocator()->RegisterIsLive(Register(start + i)));
+ allocator()->BorrowConsecutiveTemporaryRegister(start + i);
+ CHECK(allocator()->RegisterIsLive(Register(start + i)));
+ }
+}
+
+TEST_F(TemporaryRegisterAllocatorTest, RangeAvoidsTranslationBoundary) {
+ int boundary = RegisterTranslator::DistanceToTranslationWindow(Register(0));
+ int limit = boundary + 64;
+
+ for (int run_length = 2; run_length < 32; run_length += 7) {
+ ZoneVector<int> run_starts(zone());
+ for (int start = 0; start < limit; start += run_length) {
+ int run_start =
+ allocator()->PrepareForConsecutiveTemporaryRegisters(run_length);
+ run_starts.push_back(run_start);
+ for (int i = 0; i < run_length; i++) {
+ allocator()->BorrowConsecutiveTemporaryRegister(run_start + i);
+ }
+ CHECK(run_start >= boundary || run_start + run_length <= boundary);
+ }
+ for (size_t batch = 0; batch < run_starts.size(); batch++) {
+ for (int i = run_starts[batch]; i < run_starts[batch] + run_length; i++) {
+ allocator()->ReturnTemporaryRegister(i);
+ }
+ }
+ }
+}
+
+TEST_F(TemporaryRegisterAllocatorTest, NotInRange) {
+ for (int i = 0; i < 10; i++) {
+ int reg = allocator()->BorrowTemporaryRegisterNotInRange(2, 5);
+ CHECK(reg == i || (reg > 2 && reg == i + 4));
+ }
+ for (int i = 0; i < 10; i++) {
+ if (i < 2) {
+ allocator()->ReturnTemporaryRegister(i);
+ } else {
+ allocator()->ReturnTemporaryRegister(i + 4);
+ }
+ }
+ int reg0 = allocator()->BorrowTemporaryRegisterNotInRange(0, 3);
+ CHECK_EQ(reg0, 4);
+ int reg1 = allocator()->BorrowTemporaryRegisterNotInRange(3, 10);
+ CHECK_EQ(reg1, 2);
+ int reg2 = allocator()->BorrowTemporaryRegisterNotInRange(2, 6);
+ CHECK_EQ(reg2, 1);
+ allocator()->ReturnTemporaryRegister(reg0);
+ allocator()->ReturnTemporaryRegister(reg1);
+ allocator()->ReturnTemporaryRegister(reg2);
+}
+
class BytecodeRegisterAllocatorTest : public TestWithIsolateAndZone {
public:
BytecodeRegisterAllocatorTest() {}
~BytecodeRegisterAllocatorTest() override {}
};
-
TEST_F(BytecodeRegisterAllocatorTest, TemporariesRecycled) {
- BytecodeArrayBuilder builder(isolate(), zone());
- builder.set_parameter_count(0);
- builder.set_locals_count(0);
- builder.set_context_count(0);
+ BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
int first;
{
- BytecodeRegisterAllocator temporaries(&builder);
- first = temporaries.NewRegister().index();
- temporaries.NewRegister();
- temporaries.NewRegister();
- temporaries.NewRegister();
+ BytecodeRegisterAllocator allocator(zone(),
+ builder.temporary_register_allocator());
+ first = allocator.NewRegister().index();
+ allocator.NewRegister();
+ allocator.NewRegister();
+ allocator.NewRegister();
}
int second;
{
- BytecodeRegisterAllocator temporaries(&builder);
- second = temporaries.NewRegister().index();
+ BytecodeRegisterAllocator allocator(zone(),
+ builder.temporary_register_allocator());
+ second = allocator.NewRegister().index();
}
CHECK_EQ(first, second);
}
-
TEST_F(BytecodeRegisterAllocatorTest, ConsecutiveRegisters) {
- BytecodeArrayBuilder builder(isolate(), zone());
- builder.set_parameter_count(0);
- builder.set_locals_count(0);
- builder.set_context_count(0);
-
- BytecodeRegisterAllocator temporaries(&builder);
- temporaries.PrepareForConsecutiveAllocations(4);
- Register reg0 = temporaries.NextConsecutiveRegister();
- Register other = temporaries.NewRegister();
- Register reg1 = temporaries.NextConsecutiveRegister();
- Register reg2 = temporaries.NextConsecutiveRegister();
- Register reg3 = temporaries.NextConsecutiveRegister();
+ BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
+ BytecodeRegisterAllocator allocator(zone(),
+ builder.temporary_register_allocator());
+ allocator.PrepareForConsecutiveAllocations(4);
+ Register reg0 = allocator.NextConsecutiveRegister();
+ Register other = allocator.NewRegister();
+ Register reg1 = allocator.NextConsecutiveRegister();
+ Register reg2 = allocator.NextConsecutiveRegister();
+ Register reg3 = allocator.NextConsecutiveRegister();
USE(other);
CHECK(Register::AreContiguous(reg0, reg1, reg2, reg3));
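
The allocator now borrows from a TemporaryRegisterAllocator owned by the builder rather than from the builder itself, and scoped allocations return to the pool on destruction, which is what TemporariesRecycled checks. A condensed sketch of that pattern:

    BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
    {
      BytecodeRegisterAllocator allocator(
          zone(), builder.temporary_register_allocator());
      Register r = allocator.NewRegister();  // borrowed temporary
    }  // scope exit returns r; the next NewRegister() reuses its index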
diff --git a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
index 812ee46c9c..212e02996b 100644
--- a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
@@ -9,19 +9,39 @@
#include "src/interpreter/bytecodes.h"
#include "test/unittests/test-utils.h"
-
namespace v8 {
namespace internal {
namespace interpreter {
TEST(OperandConversion, Registers) {
- for (int i = 0; i < 128; i++) {
- uint8_t operand_value = Register(i).ToOperand();
- Register r = Register::FromOperand(operand_value);
- CHECK_EQ(i, r.index());
+ int register_count = Register::MaxRegisterIndex() + 1;
+ int step = register_count / 7;
+ for (int i = 0; i < register_count; i += step) {
+ if (i <= kMaxInt8) {
+ uint8_t operand0 = Register(i).ToOperand();
+ Register reg0 = Register::FromOperand(operand0);
+ CHECK_EQ(i, reg0.index());
+ }
+
+ uint16_t operand1 = Register(i).ToWideOperand();
+ Register reg1 = Register::FromWideOperand(operand1);
+ CHECK_EQ(i, reg1.index());
+
+ uint32_t operand2 = Register(i).ToRawOperand();
+ Register reg2 = Register::FromRawOperand(operand2);
+ CHECK_EQ(i, reg2.index());
}
-}
+ for (int i = 0; i <= kMaxUInt8; i++) {
+ uint8_t operand = static_cast<uint8_t>(i);
+ Register reg = Register::FromOperand(operand);
+ if (i > 0 && i < -kMinInt8) {
+ CHECK(reg.is_parameter());
+ } else {
+ CHECK(!reg.is_parameter());
+ }
+ }
+}
TEST(OperandConversion, Parameters) {
int parameter_counts[] = {7, 13, 99};
@@ -38,26 +58,115 @@ TEST(OperandConversion, Parameters) {
}
}
-
TEST(OperandConversion, RegistersParametersNoOverlap) {
- std::vector<uint8_t> operand_count(256);
+ int register_count = Register::MaxRegisterIndex() + 1;
+ int parameter_count = Register::MaxParameterIndex() + 1;
+ int32_t register_space_size = base::bits::RoundUpToPowerOfTwo32(
+ static_cast<uint32_t>(register_count + parameter_count));
+ uint32_t range = static_cast<uint32_t>(register_space_size);
+ std::vector<uint8_t> operand_count(range);
- for (int i = 0; i <= kMaxInt8; i++) {
+ for (int i = 0; i < register_count; i += 1) {
Register r = Register(i);
- uint8_t operand = r.ToOperand();
+ uint32_t operand = r.ToWideOperand();
+ CHECK_LT(operand, operand_count.size());
operand_count[operand] += 1;
CHECK_EQ(operand_count[operand], 1);
}
- int parameter_count = Register::MaxParameterIndex() + 1;
- for (int i = 0; i < parameter_count; i++) {
+ for (int i = 0; i < parameter_count; i += 1) {
Register r = Register::FromParameterIndex(i, parameter_count);
- uint8_t operand = r.ToOperand();
+ uint32_t operand = r.ToWideOperand();
+ CHECK_LT(operand, operand_count.size());
operand_count[operand] += 1;
CHECK_EQ(operand_count[operand], 1);
}
}
+TEST(Bytecodes, HasAnyRegisterOperands) {
+ CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kAdd), 1);
+ CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kCall), 2);
+ CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kCallRuntime), 1);
+ CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kCallRuntimeWide), 1);
+ CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kCallRuntimeForPair),
+ 2);
+ CHECK_EQ(
+ Bytecodes::NumberOfRegisterOperands(Bytecode::kCallRuntimeForPairWide),
+ 2);
+ CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kDeletePropertyStrict),
+ 1);
+ CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kForInPrepare), 1);
+ CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kForInPrepareWide), 1);
+ CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kInc), 0);
+ CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kJumpIfTrue), 0);
+ CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kNew), 2);
+ CHECK_EQ(Bytecodes::NumberOfRegisterOperands(Bytecode::kToName), 0);
+}
+
+TEST(Bytecodes, RegisterOperandBitmaps) {
+ CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kAdd), 1);
+ CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kCallRuntimeForPair),
+ 10);
+ CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kStar), 1);
+ CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kMov), 3);
+ CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kTestIn), 1);
+ CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kForInPrepare), 1);
+ CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kForInDone), 3);
+ CHECK_EQ(Bytecodes::GetRegisterOperandBitmap(Bytecode::kForInNext), 7);
+}
+
+TEST(Bytecodes, RegisterOperands) {
+ CHECK(Bytecodes::IsRegisterOperandType(OperandType::kReg8));
+ CHECK(Bytecodes::IsRegisterInputOperandType(OperandType::kReg8));
+ CHECK(!Bytecodes::IsRegisterOutputOperandType(OperandType::kReg8));
+ CHECK(!Bytecodes::IsRegisterInputOperandType(OperandType::kRegOut8));
+ CHECK(Bytecodes::IsRegisterOutputOperandType(OperandType::kRegOut8));
+
+#define IS_REGISTER_OPERAND_TYPE(Name, _) \
+ CHECK(Bytecodes::IsRegisterOperandType(OperandType::k##Name));
+ REGISTER_OPERAND_TYPE_LIST(IS_REGISTER_OPERAND_TYPE)
+#undef IS_REGISTER_OPERAND_TYPE
+
+#define IS_NOT_REGISTER_OPERAND_TYPE(Name, _) \
+ CHECK(!Bytecodes::IsRegisterOperandType(OperandType::k##Name));
+ NON_REGISTER_OPERAND_TYPE_LIST(IS_NOT_REGISTER_OPERAND_TYPE)
+#undef IS_NOT_REGISTER_OPERAND_TYPE
+
+#define IS_REGISTER_INPUT_OPERAND_TYPE(Name, _) \
+ CHECK(Bytecodes::IsRegisterInputOperandType(OperandType::k##Name));
+ REGISTER_INPUT_OPERAND_TYPE_LIST(IS_REGISTER_INPUT_OPERAND_TYPE)
+#undef IS_REGISTER_INPUT_OPERAND_TYPE
+
+#define IS_NOT_REGISTER_INPUT_OPERAND_TYPE(Name, _) \
+ CHECK(!Bytecodes::IsRegisterInputOperandType(OperandType::k##Name));
+ NON_REGISTER_OPERAND_TYPE_LIST(IS_NOT_REGISTER_INPUT_OPERAND_TYPE)
+ REGISTER_OUTPUT_OPERAND_TYPE_LIST(IS_NOT_REGISTER_INPUT_OPERAND_TYPE)
+#undef IS_NOT_REGISTER_INPUT_OPERAND_TYPE
+
+#define IS_REGISTER_OUTPUT_OPERAND_TYPE(Name, _) \
+ CHECK(Bytecodes::IsRegisterOutputOperandType(OperandType::k##Name));
+ REGISTER_OUTPUT_OPERAND_TYPE_LIST(IS_REGISTER_OUTPUT_OPERAND_TYPE)
+#undef IS_REGISTER_OUTPUT_OPERAND_TYPE
+
+#define IS_NOT_REGISTER_OUTPUT_OPERAND_TYPE(Name, _) \
+ CHECK(!Bytecodes::IsRegisterOutputOperandType(OperandType::k##Name));
+ NON_REGISTER_OPERAND_TYPE_LIST(IS_NOT_REGISTER_OUTPUT_OPERAND_TYPE)
+ REGISTER_INPUT_OPERAND_TYPE_LIST(IS_NOT_REGISTER_OUTPUT_OPERAND_TYPE)
+#undef IS_NOT_REGISTER_OUTPUT_OPERAND_TYPE
+}
+
+TEST(Bytecodes, DebugBreak) {
+ for (uint32_t i = 0; i < Bytecodes::ToByte(Bytecode::kLast); i++) {
+ Bytecode bytecode = Bytecodes::FromByte(i);
+ Bytecode debugbreak = Bytecodes::GetDebugBreak(bytecode);
+ if (!Bytecodes::IsDebugBreak(debugbreak)) {
+ PrintF("Bytecode %s has no matching debug break with length %d\n",
+ Bytecodes::ToString(bytecode), Bytecodes::Size(bytecode));
+ CHECK(false);
+ }
+ }
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
index ea5d1bb8c3..b3ec5ff668 100644
--- a/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -33,13 +33,11 @@ STATIC_CONST_MEMBER_DEFINITION const size_t
TEST_F(ConstantArrayBuilderTest, AllocateAllEntries) {
ConstantArrayBuilder builder(isolate(), zone());
for (size_t i = 0; i < kMaxCapacity; i++) {
- Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
- builder.Insert(object);
- CHECK_EQ(builder.size(), i + 1);
- CHECK(builder.At(i)->SameValue(*object));
+ builder.Insert(handle(Smi::FromInt(static_cast<int>(i)), isolate()));
}
+ CHECK_EQ(builder.size(), kMaxCapacity);
for (size_t i = 0; i < kMaxCapacity; i++) {
- CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), static_cast<double>(i));
+ CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), i);
}
}
@@ -158,8 +156,7 @@ TEST_F(ConstantArrayBuilderTest, ToFixedArray) {
builder.Insert(object);
CHECK(builder.At(i)->SameValue(*object));
}
- Handle<FixedArray> constant_array =
- builder.ToFixedArray(isolate()->factory());
+ Handle<FixedArray> constant_array = builder.ToFixedArray();
CHECK_EQ(constant_array->length(), kNumberOfElements);
for (size_t i = 0; i < kNumberOfElements; i++) {
CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
diff --git a/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index f57ca05b3f..3375a6b817 100644
--- a/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/unittests/compiler/interpreter-assembler-unittest.h"
+#include "test/unittests/interpreter/interpreter-assembler-unittest.h"
#include "src/code-factory.h"
#include "src/compiler/graph.h"
@@ -16,7 +16,10 @@ using ::testing::_;
namespace v8 {
namespace internal {
-namespace compiler {
+
+using namespace compiler;
+
+namespace interpreter {
const interpreter::Bytecode kBytecodes[] = {
#define DEFINE_BYTECODE(Name, ...) interpreter::Bytecode::k##Name,
@@ -24,55 +27,47 @@ const interpreter::Bytecode kBytecodes[] = {
#undef DEFINE_BYTECODE
};
-
Matcher<Node*> IsIntPtrConstant(const intptr_t value) {
return kPointerSize == 8 ? IsInt64Constant(static_cast<int64_t>(value))
: IsInt32Constant(static_cast<int32_t>(value));
}
-
Matcher<Node*> IsIntPtrAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsInt64Add(lhs_matcher, rhs_matcher)
: IsInt32Add(lhs_matcher, rhs_matcher);
}
-
Matcher<Node*> IsIntPtrSub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsInt64Sub(lhs_matcher, rhs_matcher)
: IsInt32Sub(lhs_matcher, rhs_matcher);
}
-
Matcher<Node*> IsWordShl(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsWord64Shl(lhs_matcher, rhs_matcher)
: IsWord32Shl(lhs_matcher, rhs_matcher);
}
-
Matcher<Node*> IsWordSar(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsWord64Sar(lhs_matcher, rhs_matcher)
: IsWord32Sar(lhs_matcher, rhs_matcher);
}
-
Matcher<Node*> IsWordOr(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsWord64Or(lhs_matcher, rhs_matcher)
: IsWord32Or(lhs_matcher, rhs_matcher);
}
-
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher) {
return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher, _, _);
}
-
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
@@ -81,52 +76,57 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
value_matcher, _, _);
}
-
Matcher<Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperand(
int offset) {
return IsLoad(
MachineType::Uint8(),
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
- IsInt32Constant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsIntPtrAdd(
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsInt32Constant(offset)));
}
-
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
IsBytecodeOperandSignExtended(int offset) {
Matcher<Node*> load_matcher = IsLoad(
MachineType::Int8(),
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
- IsInt32Constant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsIntPtrAdd(
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsInt32Constant(offset)));
if (kPointerSize == 8) {
load_matcher = IsChangeInt32ToInt64(load_matcher);
}
return load_matcher;
}
-
Matcher<Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperandShort(
int offset) {
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
MachineType::Uint16(),
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
- IsInt32Constant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsIntPtrAdd(
+ IsParameter(
+ InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsInt32Constant(offset)));
} else {
Matcher<Node*> first_byte = IsLoad(
MachineType::Uint8(),
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
- IsInt32Constant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsIntPtrAdd(
+ IsParameter(
+ InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsInt32Constant(offset)));
Matcher<Node*> second_byte = IsLoad(
MachineType::Uint8(),
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
- IsInt32Constant(offset + 1)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsIntPtrAdd(
+ IsParameter(
+ InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsInt32Constant(offset + 1)));
#if V8_TARGET_LITTLE_ENDIAN
return IsWordOr(IsWordShl(second_byte, IsInt32Constant(kBitsPerByte)),
first_byte);
@@ -139,16 +139,17 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperandShort(
}
}
-
Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
IsBytecodeOperandShortSignExtended(int offset) {
Matcher<Node*> load_matcher;
if (TargetSupportsUnalignedAccess()) {
load_matcher = IsLoad(
MachineType::Int16(),
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
- IsInt32Constant(offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsIntPtrAdd(
+ IsParameter(
+ InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsInt32Constant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
int hi_byte_offset = offset + 1;
@@ -162,15 +163,19 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
#endif
Matcher<Node*> hi_byte = IsLoad(
MachineType::Int8(),
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
- IsInt32Constant(hi_byte_offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsIntPtrAdd(
+ IsParameter(
+ InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsInt32Constant(hi_byte_offset)));
hi_byte = IsWord32Shl(hi_byte, IsInt32Constant(kBitsPerByte));
Matcher<Node*> lo_byte = IsLoad(
MachineType::Uint8(),
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
- IsInt32Constant(lo_byte_offset)));
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsIntPtrAdd(
+ IsParameter(
+ InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsInt32Constant(lo_byte_offset)));
load_matcher = IsWord32Or(hi_byte, lo_byte);
}
@@ -180,7 +185,6 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
return load_matcher;
}
-
TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -191,35 +195,37 @@ TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
EXPECT_EQ(1, end->InputCount());
Node* tail_call_node = end->InputAt(0);
- Matcher<Node*> next_bytecode_offset_matcher =
- IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
- IsInt32Constant(interpreter::Bytecodes::Size(bytecode)));
- Matcher<Node*> target_bytecode_matcher =
- m.IsLoad(MachineType::Uint8(),
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- next_bytecode_offset_matcher);
- Matcher<Node*> code_target_matcher =
- m.IsLoad(MachineType::Pointer(),
- IsParameter(Linkage::kInterpreterDispatchTableParameter),
- IsWord32Shl(target_bytecode_matcher,
- IsInt32Constant(kPointerSizeLog2)));
-
- EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
- EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
+ Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsInt32Constant(interpreter::Bytecodes::Size(bytecode)));
+ Matcher<Node*> target_bytecode_matcher = m.IsLoad(
+ MachineType::Uint8(),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ next_bytecode_offset_matcher);
+ Matcher<Node*> code_target_matcher = m.IsLoad(
+ MachineType::Pointer(),
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
+ IsWord32Shl(target_bytecode_matcher,
+ IsInt32Constant(kPointerSizeLog2)));
+
EXPECT_THAT(
tail_call_node,
- IsTailCall(m.call_descriptor(), code_target_matcher,
- IsParameter(Linkage::kInterpreterAccumulatorParameter),
- IsParameter(Linkage::kInterpreterRegisterFileParameter),
- next_bytecode_offset_matcher,
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- IsParameter(Linkage::kInterpreterDispatchTableParameter),
- IsParameter(Linkage::kInterpreterContextParameter), _, _));
+ IsTailCall(
+ _, code_target_matcher,
+ IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
+ IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
+ next_bytecode_offset_matcher,
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
+ IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
+ _));
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
+  // If debug code is enabled, we emit extra code in Jump.
+ if (FLAG_debug_code) return;
+
int jump_offsets[] = {-9710, -77, 0, +3, +97109};
TRACED_FOREACH(int, jump_offset, jump_offsets) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
@@ -230,38 +236,39 @@ TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
EXPECT_EQ(1, end->InputCount());
Node* tail_call_node = end->InputAt(0);
- Matcher<Node*> next_bytecode_offset_matcher =
- IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
- IsInt32Constant(jump_offset));
+ Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsInt32Constant(jump_offset));
Matcher<Node*> target_bytecode_matcher =
- m.IsLoad(MachineType::Uint8(),
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- next_bytecode_offset_matcher);
- Matcher<Node*> code_target_matcher =
- m.IsLoad(MachineType::Pointer(),
- IsParameter(Linkage::kInterpreterDispatchTableParameter),
- IsWord32Shl(target_bytecode_matcher,
- IsInt32Constant(kPointerSizeLog2)));
-
- EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
- EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
+ m.IsLoad(MachineType::Uint8(), _, next_bytecode_offset_matcher);
+ Matcher<Node*> code_target_matcher = m.IsLoad(
+ MachineType::Pointer(),
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
+ IsWord32Shl(target_bytecode_matcher,
+ IsInt32Constant(kPointerSizeLog2)));
+
EXPECT_THAT(
tail_call_node,
- IsTailCall(m.call_descriptor(), code_target_matcher,
- IsParameter(Linkage::kInterpreterAccumulatorParameter),
- IsParameter(Linkage::kInterpreterRegisterFileParameter),
- next_bytecode_offset_matcher,
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- IsParameter(Linkage::kInterpreterDispatchTableParameter),
- IsParameter(Linkage::kInterpreterContextParameter), _, _));
+ IsTailCall(
+ _, code_target_matcher,
+ IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
+ IsParameter(
+ InterpreterDispatchDescriptor::kRegisterFileParameter),
+ next_bytecode_offset_matcher, _,
+ IsParameter(
+ InterpreterDispatchDescriptor::kDispatchTableParameter),
+ IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
+ _));
}
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
static const int kJumpIfTrueOffset = 73;
+  // If debug code is enabled, we emit extra code in Jump.
+ if (FLAG_debug_code) return;
+
MachineOperatorBuilder machine(zone());
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
@@ -276,61 +283,64 @@ TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
int jump_offsets[] = {kJumpIfTrueOffset,
interpreter::Bytecodes::Size(bytecode)};
for (int i = 0; i < static_cast<int>(arraysize(jump_offsets)); i++) {
- Matcher<Node*> next_bytecode_offset_matcher =
- IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
- IsInt32Constant(jump_offsets[i]));
+ Matcher<Node*> next_bytecode_offset_matcher = IsIntPtrAdd(
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ IsInt32Constant(jump_offsets[i]));
Matcher<Node*> target_bytecode_matcher =
- m.IsLoad(MachineType::Uint8(),
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- next_bytecode_offset_matcher);
- Matcher<Node*> code_target_matcher =
- m.IsLoad(MachineType::Pointer(),
- IsParameter(Linkage::kInterpreterDispatchTableParameter),
- IsWord32Shl(target_bytecode_matcher,
- IsInt32Constant(kPointerSizeLog2)));
+ m.IsLoad(MachineType::Uint8(), _, next_bytecode_offset_matcher);
+ Matcher<Node*> code_target_matcher = m.IsLoad(
+ MachineType::Pointer(),
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
+ IsWord32Shl(target_bytecode_matcher,
+ IsInt32Constant(kPointerSizeLog2)));
EXPECT_THAT(
end->InputAt(i),
- IsTailCall(m.call_descriptor(), code_target_matcher,
- IsParameter(Linkage::kInterpreterAccumulatorParameter),
- IsParameter(Linkage::kInterpreterRegisterFileParameter),
- next_bytecode_offset_matcher,
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- IsParameter(Linkage::kInterpreterDispatchTableParameter),
- IsParameter(Linkage::kInterpreterContextParameter), _, _));
+ IsTailCall(
+ _, code_target_matcher,
+ IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
+ IsParameter(
+ InterpreterDispatchDescriptor::kRegisterFileParameter),
+ next_bytecode_offset_matcher, _,
+ IsParameter(
+ InterpreterDispatchDescriptor::kDispatchTableParameter),
+ IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
+ _));
}
// TODO(oth): test control flow paths.
}
}
+TARGET_TEST_F(InterpreterAssemblerTest, InterpreterReturn) {
+  // If debug code is enabled, we emit extra code in InterpreterReturn.
+ if (FLAG_debug_code) return;
-TARGET_TEST_F(InterpreterAssemblerTest, Return) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
- m.Return();
+ m.InterpreterReturn();
Graph* graph = m.graph();
Node* end = graph->end();
EXPECT_EQ(1, end->InputCount());
Node* tail_call_node = end->InputAt(0);
- EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
- EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
Handle<HeapObject> exit_trampoline =
isolate()->builtins()->InterpreterExitTrampoline();
EXPECT_THAT(
tail_call_node,
- IsTailCall(m.call_descriptor(), IsHeapConstant(exit_trampoline),
- IsParameter(Linkage::kInterpreterAccumulatorParameter),
- IsParameter(Linkage::kInterpreterRegisterFileParameter),
- IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
- IsParameter(Linkage::kInterpreterDispatchTableParameter),
- IsParameter(Linkage::kInterpreterContextParameter), _, _));
+ IsTailCall(
+ _, IsHeapConstant(exit_trampoline),
+ IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter),
+ IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
+ IsParameter(
+ InterpreterDispatchDescriptor::kBytecodeOffsetParameter),
+ _,
+ IsParameter(InterpreterDispatchDescriptor::kDispatchTableParameter),
+ IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
+ _));
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -338,7 +348,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
for (int i = 0; i < number_of_operands; i++) {
int offset = interpreter::Bytecodes::GetOperandOffset(bytecode, i);
switch (interpreter::Bytecodes::GetOperandType(bytecode, i)) {
- case interpreter::OperandType::kCount8:
+ case interpreter::OperandType::kRegCount8:
EXPECT_THAT(m.BytecodeOperandCount(i), m.IsBytecodeOperand(offset));
break;
case interpreter::OperandType::kIdx8:
@@ -350,11 +360,14 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
break;
case interpreter::OperandType::kMaybeReg8:
case interpreter::OperandType::kReg8:
+ case interpreter::OperandType::kRegOut8:
+ case interpreter::OperandType::kRegOutPair8:
+ case interpreter::OperandType::kRegOutTriple8:
case interpreter::OperandType::kRegPair8:
EXPECT_THAT(m.BytecodeOperandReg(i),
m.IsBytecodeOperandSignExtended(offset));
break;
- case interpreter::OperandType::kCount16:
+ case interpreter::OperandType::kRegCount16:
EXPECT_THAT(m.BytecodeOperandCount(i),
m.IsBytecodeOperandShort(offset));
break;
@@ -362,7 +375,12 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
EXPECT_THAT(m.BytecodeOperandIdx(i),
m.IsBytecodeOperandShort(offset));
break;
+ case interpreter::OperandType::kMaybeReg16:
case interpreter::OperandType::kReg16:
+ case interpreter::OperandType::kRegOut16:
+ case interpreter::OperandType::kRegOutPair16:
+ case interpreter::OperandType::kRegOutTriple16:
+ case interpreter::OperandType::kRegPair16:
EXPECT_THAT(m.BytecodeOperandReg(i),
m.IsBytecodeOperandShortSignExtended(offset));
break;
@@ -374,15 +392,15 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
// Should be incoming accumulator if not set.
- EXPECT_THAT(m.GetAccumulator(),
- IsParameter(Linkage::kInterpreterAccumulatorParameter));
+ EXPECT_THAT(
+ m.GetAccumulator(),
+ IsParameter(InterpreterDispatchDescriptor::kAccumulatorParameter));
- // Should be set by SedtAccumulator.
+ // Should be set by SetAccumulator.
Node* accumulator_value_1 = m.Int32Constant(0xdeadbeef);
m.SetAccumulator(accumulator_value_1);
EXPECT_THAT(m.GetAccumulator(), accumulator_value_1);
@@ -399,11 +417,18 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
Node* tail_call_node = end->InputAt(0);
EXPECT_THAT(tail_call_node,
- IsTailCall(m.call_descriptor(), _, accumulator_value_2, _, _, _,
- _, _, _));
+ IsTailCall(_, _, accumulator_value_2, _, _, _, _, _, _));
}
}
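+
+// The context is threaded through the assembler: GetContext returns whatever
+// was most recently passed to SetContext.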
+TARGET_TEST_F(InterpreterAssemblerTest, GetSetContext) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* context_node = m.Int32Constant(100);
+ m.SetContext(context_node);
+ EXPECT_THAT(m.GetContext(), context_node);
+ }
+}
TARGET_TEST_F(InterpreterAssemblerTest, RegisterLocation) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
@@ -413,12 +438,11 @@ TARGET_TEST_F(InterpreterAssemblerTest, RegisterLocation) {
EXPECT_THAT(
reg_location_node,
IsIntPtrAdd(
- IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -426,13 +450,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
Node* load_reg_node = m.LoadRegister(reg_index_node);
EXPECT_THAT(
load_reg_node,
- m.IsLoad(MachineType::AnyTagged(),
- IsParameter(Linkage::kInterpreterRegisterFileParameter),
- IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
+ m.IsLoad(
+ MachineType::AnyTagged(),
+ IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
+ IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -441,15 +465,15 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
Node* store_reg_node = m.StoreRegister(store_value, reg_index_node);
EXPECT_THAT(
store_reg_node,
- m.IsStore(StoreRepresentation(MachineRepresentation::kTagged,
- kNoWriteBarrier),
- IsParameter(Linkage::kInterpreterRegisterFileParameter),
- IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2)),
- store_value));
+ m.IsStore(
+ StoreRepresentation(MachineRepresentation::kTagged,
+ kNoWriteBarrier),
+ IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
+ IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2)),
+ store_value));
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -461,7 +485,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, IntPtrAdd) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -472,7 +495,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, IntPtrAdd) {
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, IntPtrSub) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -483,7 +505,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, IntPtrSub) {
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, WordShl) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -493,7 +514,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, WordShl) {
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -501,7 +521,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
Node* load_constant = m.LoadConstantPoolEntry(index);
Matcher<Node*> constant_pool_matcher = m.IsLoad(
MachineType::AnyTagged(),
- IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsParameter(InterpreterDispatchDescriptor::kBytecodeArrayParameter),
IsIntPtrConstant(BytecodeArray::kConstantPoolOffset - kHeapObjectTag));
EXPECT_THAT(
load_constant,
@@ -512,7 +532,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, LoadFixedArrayElement) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -529,7 +548,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFixedArrayElement) {
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -542,7 +560,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, LoadContextSlot) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -558,7 +575,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadContextSlot) {
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, StoreContextSlot) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -577,21 +593,22 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreContextSlot) {
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime2) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
Node* arg1 = m.Int32Constant(2);
Node* arg2 = m.Int32Constant(3);
- Node* call_runtime = m.CallRuntime(Runtime::kAdd, arg1, arg2);
+ Node* context =
+ m.Parameter(InterpreterDispatchDescriptor::kContextParameter);
+ Node* call_runtime = m.CallRuntime(Runtime::kAdd, context, arg1, arg2);
EXPECT_THAT(
call_runtime,
IsCall(_, _, arg1, arg2, _, IsInt32Constant(2),
- IsParameter(Linkage::kInterpreterContextParameter), _, _));
+ IsParameter(InterpreterDispatchDescriptor::kContextParameter), _,
+ _));
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
const int kResultSizes[] = {1, 2};
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
@@ -602,6 +619,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
Node* function_id = m.Int32Constant(0);
Node* first_arg = m.Int32Constant(1);
Node* arg_count = m.Int32Constant(2);
+ Node* context =
+ m.Parameter(InterpreterDispatchDescriptor::kContextParameter);
Matcher<Node*> function_table = IsExternalConstant(
ExternalReference::runtime_function_table_address(isolate()));
@@ -612,63 +631,53 @@ TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
m.IsLoad(MachineType::Pointer(), function,
IsInt32Constant(offsetof(Runtime::Function, entry)));
- Node* call_runtime =
- m.CallRuntime(function_id, first_arg, arg_count, result_size);
+ Node* call_runtime = m.CallRuntimeN(function_id, context, first_arg,
+ arg_count, result_size);
EXPECT_THAT(
call_runtime,
IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
function_entry,
- IsParameter(Linkage::kInterpreterContextParameter), _, _));
+ IsParameter(InterpreterDispatchDescriptor::kContextParameter),
+ _, _));
}
}
}
-
-TARGET_TEST_F(InterpreterAssemblerTest, CallIC) {
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerForTest m(this, bytecode);
- LoadWithVectorDescriptor descriptor(isolate());
- Node* target = m.Int32Constant(1);
- Node* arg1 = m.Int32Constant(2);
- Node* arg2 = m.Int32Constant(3);
- Node* arg3 = m.Int32Constant(4);
- Node* arg4 = m.Int32Constant(5);
- Node* call_ic = m.CallIC(descriptor, target, arg1, arg2, arg3, arg4);
- EXPECT_THAT(
- call_ic,
- IsCall(_, target, arg1, arg2, arg3, arg4,
- IsParameter(Linkage::kInterpreterContextParameter), _, _));
- }
-}
-
-
TARGET_TEST_F(InterpreterAssemblerTest, CallJS) {
- TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
- InterpreterAssemblerForTest m(this, bytecode);
- Callable builtin = CodeFactory::InterpreterPushArgsAndCall(isolate());
- Node* function = m.Int32Constant(0);
- Node* first_arg = m.Int32Constant(1);
- Node* arg_count = m.Int32Constant(2);
- Node* call_js = m.CallJS(function, first_arg, arg_count);
- EXPECT_THAT(
- call_js,
- IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
- function, IsParameter(Linkage::kInterpreterContextParameter), _,
- _));
+ TailCallMode tail_call_modes[] = {TailCallMode::kDisallow,
+ TailCallMode::kAllow};
+ TRACED_FOREACH(TailCallMode, tail_call_mode, tail_call_modes) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Callable builtin =
+ CodeFactory::InterpreterPushArgsAndCall(isolate(), tail_call_mode);
+ Node* function = m.Int32Constant(0);
+ Node* first_arg = m.Int32Constant(1);
+ Node* arg_count = m.Int32Constant(2);
+ Node* context =
+ m.Parameter(InterpreterDispatchDescriptor::kContextParameter);
+ Node* call_js =
+ m.CallJS(function, context, first_arg, arg_count, tail_call_mode);
+ EXPECT_THAT(
+ call_js,
+ IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
+ function,
+ IsParameter(InterpreterDispatchDescriptor::kContextParameter),
+ _, _));
+ }
}
}
-
TARGET_TEST_F(InterpreterAssemblerTest, LoadTypeFeedbackVector) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
Node* feedback_vector = m.LoadTypeFeedbackVector();
- Matcher<Node*> load_function_matcher =
- m.IsLoad(MachineType::AnyTagged(),
- IsParameter(Linkage::kInterpreterRegisterFileParameter),
- IsIntPtrConstant(
- InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ Matcher<Node*> load_function_matcher = m.IsLoad(
+ MachineType::AnyTagged(),
+ IsParameter(InterpreterDispatchDescriptor::kRegisterFileParameter),
+ IsIntPtrConstant(
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
Matcher<Node*> load_shared_function_info_matcher =
m.IsLoad(MachineType::AnyTagged(), load_function_matcher,
IsIntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
@@ -682,6 +691,6 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadTypeFeedbackVector) {
}
}
-} // namespace compiler
+} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
new file mode 100644
index 0000000000..321c72490b
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -0,0 +1,57 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_INTERPRETER_INTERPRETER_ASSEMBLER_UNITTEST_H_
+#define V8_UNITTESTS_INTERPRETER_INTERPRETER_ASSEMBLER_UNITTEST_H_
+
+#include "src/compiler/machine-operator.h"
+#include "src/interpreter/interpreter-assembler.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock-support.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+using ::testing::Matcher;
+
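+// Fixture that wraps InterpreterAssembler so each test can build a graph for
+// a single bytecode and match its loads, stores, and operand decoding with
+// gmock matchers.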
+class InterpreterAssemblerTest : public TestWithIsolateAndZone {
+ public:
+ InterpreterAssemblerTest() {}
+ ~InterpreterAssemblerTest() override {}
+
+ class InterpreterAssemblerForTest final : public InterpreterAssembler {
+ public:
+ InterpreterAssemblerForTest(InterpreterAssemblerTest* test,
+ Bytecode bytecode)
+ : InterpreterAssembler(test->isolate(), test->zone(), bytecode) {}
+ ~InterpreterAssemblerForTest() override {}
+
+ Matcher<compiler::Node*> IsLoad(
+ const Matcher<compiler::LoadRepresentation>& rep_matcher,
+ const Matcher<compiler::Node*>& base_matcher,
+ const Matcher<compiler::Node*>& index_matcher);
+ Matcher<compiler::Node*> IsStore(
+ const Matcher<compiler::StoreRepresentation>& rep_matcher,
+ const Matcher<compiler::Node*>& base_matcher,
+ const Matcher<compiler::Node*>& index_matcher,
+ const Matcher<compiler::Node*>& value_matcher);
+
+ Matcher<compiler::Node*> IsBytecodeOperand(int offset);
+ Matcher<compiler::Node*> IsBytecodeOperandSignExtended(int offset);
+ Matcher<compiler::Node*> IsBytecodeOperandShort(int offset);
+ Matcher<compiler::Node*> IsBytecodeOperandShortSignExtended(int offset);
+
+ using InterpreterAssembler::graph;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InterpreterAssemblerForTest);
+ };
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_INTERPRETER_INTERPRETER_ASSEMBLER_UNITTEST_H_
diff --git a/deps/v8/test/unittests/interpreter/register-translator-unittest.cc b/deps/v8/test/unittests/interpreter/register-translator-unittest.cc
new file mode 100644
index 0000000000..e9f65a6af0
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/register-translator-unittest.cc
@@ -0,0 +1,260 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stack>
+
+#include "src/v8.h"
+
+#include "src/interpreter/register-translator.h"
+#include "src/isolate.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class RegisterTranslatorTest : public TestWithIsolateAndZone,
+ private RegisterMover {
+ public:
+ RegisterTranslatorTest() : translator_(this), move_count_(0) {
+ window_start_ =
+ RegisterTranslator::DistanceToTranslationWindow(Register(0));
+ window_width_ =
+ Register::MaxRegisterIndexForByteOperand() - window_start_ + 1;
+ }
+
+ ~RegisterTranslatorTest() override {}
+
+ bool PopMoveAndMatch(Register from, Register to) {
+ if (!moves_.empty()) {
+ CHECK(from.is_valid() && to.is_valid());
+ const std::pair<Register, Register> top = moves_.top();
+ moves_.pop();
+ return top.first == from && top.second == to;
+ } else {
+ return false;
+ }
+ }
+
+ int move_count() const { return move_count_; }
+ RegisterTranslator* translator() { return &translator_; }
+
+ int window_start() const { return window_start_; }
+ int window_width() const { return window_width_; }
+ int window_limit() const { return window_start_ + window_width_; }
+
+ protected:
+ static const char* const kBadOperandRegex;
+
+ private:
+ void MoveRegisterUntranslated(Register from, Register to) override {
+ moves_.push(std::make_pair(from, to));
+ move_count_++;
+ }
+
+ RegisterTranslator translator_;
+ std::stack<std::pair<Register, Register>> moves_;
+ int move_count_;
+ int window_start_;
+ int window_width_;
+};
+
+const char* const RegisterTranslatorTest::kBadOperandRegex =
+ ".*OperandType::kReg8 \\|\\| .*OperandType::kRegOut8\\) && "
+ "RegisterIsMovableToWindow.*";
+
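+// The tests below exercise the register translation window. window_start()
+// and window_width() are computed in the fixture constructor from the
+// distance to the translation window and the maximum byte-operand register
+// index.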
+TEST_F(RegisterTranslatorTest, TestFrameSizeAdjustmentsForTranslationWindow) {
+ EXPECT_EQ(0, RegisterTranslator::RegisterCountAdjustment(0, 0));
+ EXPECT_EQ(0, RegisterTranslator::RegisterCountAdjustment(10, 10));
+ EXPECT_EQ(window_width(),
+ RegisterTranslator::RegisterCountAdjustment(173, 0));
+ EXPECT_EQ(window_width(),
+ RegisterTranslator::RegisterCountAdjustment(173, 137));
+ EXPECT_EQ(window_width(),
+ RegisterTranslator::RegisterCountAdjustment(173, 137));
+ // TODO(oth): Add a kMaxParameters8 that derives this info from the frame.
+ int param_limit = FLAG_enable_embedded_constant_pool ? 119 : 120;
+ EXPECT_EQ(0, RegisterTranslator::RegisterCountAdjustment(0, param_limit));
+ EXPECT_EQ(window_limit(),
+ RegisterTranslator::RegisterCountAdjustment(0, 128));
+ EXPECT_EQ(window_limit(),
+ RegisterTranslator::RegisterCountAdjustment(0, 129));
+ EXPECT_EQ(window_limit() - 32,
+ RegisterTranslator::RegisterCountAdjustment(32, 129));
+}
+
+TEST_F(RegisterTranslatorTest, TestInTranslationWindow) {
+ EXPECT_GE(window_start(), 0);
+ EXPECT_FALSE(
+ RegisterTranslator::InTranslationWindow(Register(window_start() - 1)));
+ EXPECT_TRUE(RegisterTranslator::InTranslationWindow(
+ Register(Register::MaxRegisterIndexForByteOperand())));
+ EXPECT_FALSE(RegisterTranslator::InTranslationWindow(
+ Register(Register::MaxRegisterIndexForByteOperand() + 1)));
+ for (int index = window_start(); index < window_limit(); index += 1) {
+ EXPECT_TRUE(RegisterTranslator::InTranslationWindow(Register(index)));
+ }
+}
+
+TEST_F(RegisterTranslatorTest, FitsInReg8Operand) {
+ EXPECT_GT(window_start(), 0);
+ EXPECT_TRUE(RegisterTranslator::FitsInReg8Operand(
+ Register::FromParameterIndex(0, 3)));
+ EXPECT_TRUE(RegisterTranslator::FitsInReg8Operand(
+ Register::FromParameterIndex(2, 3)));
+ EXPECT_TRUE(RegisterTranslator::FitsInReg8Operand(Register(0)));
+ EXPECT_TRUE(
+ RegisterTranslator::FitsInReg8Operand(Register(window_start() - 1)));
+ EXPECT_FALSE(RegisterTranslator::FitsInReg8Operand(Register(kMaxInt8)));
+ EXPECT_FALSE(RegisterTranslator::FitsInReg8Operand(Register(kMaxInt8 + 1)));
+ for (int index = window_start(); index < window_limit(); index += 1) {
+ EXPECT_FALSE(RegisterTranslator::FitsInReg8Operand(Register(index)));
+ }
+}
+
+TEST_F(RegisterTranslatorTest, FitsInReg16Operand) {
+ EXPECT_GT(window_start(), 0);
+ EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(
+ Register::FromParameterIndex(0, 3)));
+ EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(
+ Register::FromParameterIndex(2, 3)));
+ EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(
+ Register::FromParameterIndex(0, 999)));
+ EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(
+ Register::FromParameterIndex(0, Register::MaxParameterIndex() + 1)));
+ EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(Register(0)));
+ EXPECT_TRUE(
+ RegisterTranslator::FitsInReg16Operand(Register(window_start() - 1)));
+ EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(Register(kMaxInt8 + 1)));
+ EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(Register(kMaxInt8 + 2)));
+ for (int index = 0; index <= kMaxInt16 - window_width(); index += 1) {
+ EXPECT_TRUE(RegisterTranslator::FitsInReg16Operand(Register(index)));
+ }
+ for (int index = Register::MaxRegisterIndex() - window_width() + 1;
+ index < Register::MaxRegisterIndex() + 2; index += 1) {
+ EXPECT_FALSE(RegisterTranslator::FitsInReg16Operand(Register(index)));
+ }
+}
+
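+// Registers below the translation window, and parameter registers, pass
+// through untranslated, so no moves are recorded.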
+TEST_F(RegisterTranslatorTest, NoTranslationRequired) {
+ Register window_reg(window_start());
+ Register local_reg(57);
+ uint32_t operands[] = {local_reg.ToRawOperand()};
+ translator()->TranslateInputRegisters(Bytecode::kLdar, operands, 1);
+ translator()->TranslateOutputRegisters();
+ EXPECT_EQ(0, move_count());
+
+ Register param_reg = Register::FromParameterIndex(129, 130);
+ operands[0] = param_reg.ToRawOperand();
+ translator()->TranslateInputRegisters(Bytecode::kAdd, operands, 1);
+ translator()->TranslateOutputRegisters();
+ EXPECT_EQ(0, move_count());
+}
+
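+// Registers at or above the window start are moved into the window before
+// use; output operands (e.g. for Star) are moved back out afterwards.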
+TEST_F(RegisterTranslatorTest, TranslationRequired) {
+ Register window_reg(window_start());
+ Register local_reg(137);
+ Register local_reg_translated(local_reg.index() + window_width());
+
+ uint32_t operands[] = {local_reg.ToRawOperand()};
+ translator()->TranslateInputRegisters(Bytecode::kLdar, operands, 1);
+ EXPECT_EQ(1, move_count());
+ EXPECT_TRUE(PopMoveAndMatch(local_reg_translated, window_reg));
+ translator()->TranslateOutputRegisters();
+ EXPECT_EQ(1, move_count());
+ EXPECT_FALSE(PopMoveAndMatch(window_reg, local_reg_translated));
+
+ operands[0] = local_reg.ToRawOperand();
+ translator()->TranslateInputRegisters(Bytecode::kStar, operands, 1);
+ EXPECT_EQ(1, move_count());
+ EXPECT_FALSE(PopMoveAndMatch(local_reg_translated, window_reg));
+ translator()->TranslateOutputRegisters();
+ EXPECT_EQ(2, move_count());
+ EXPECT_TRUE(PopMoveAndMatch(window_reg, local_reg_translated));
+
+ Register param_reg = Register::FromParameterIndex(0, 130);
+  operands[0] = param_reg.ToRawOperand();
+ translator()->TranslateInputRegisters(Bytecode::kLdar, operands, 1);
+ EXPECT_EQ(3, move_count());
+ EXPECT_TRUE(PopMoveAndMatch(param_reg, window_reg));
+ translator()->TranslateOutputRegisters();
+ EXPECT_EQ(3, move_count());
+ EXPECT_FALSE(PopMoveAndMatch(window_reg, param_reg));
+
+  operands[0] = param_reg.ToRawOperand();
+ translator()->TranslateInputRegisters(Bytecode::kStar, operands, 1);
+ EXPECT_EQ(3, move_count());
+ EXPECT_FALSE(PopMoveAndMatch(local_reg_translated, window_reg));
+ translator()->TranslateOutputRegisters();
+ EXPECT_EQ(4, move_count());
+ EXPECT_TRUE(PopMoveAndMatch(window_reg, param_reg));
+}
+
+TEST_F(RegisterTranslatorTest, RangeTranslation) {
+ Register window0(window_start());
+ Register window1(window_start() + 1);
+ Register window2(window_start() + 2);
+ uint32_t operands[3];
+
+ // Bytecode::kNew with valid range operand.
+ Register constructor0(0);
+ Register args0(1);
+ operands[0] = constructor0.ToRawOperand();
+ operands[1] = args0.ToRawOperand();
+ operands[2] = 1;
+ translator()->TranslateInputRegisters(Bytecode::kNew, operands, 3);
+ translator()->TranslateOutputRegisters();
+ EXPECT_EQ(0, move_count());
+
+ // Bytecode::kNewWide with valid range operand.
+ Register constructor1(128);
+ Register constructor1_translated(constructor1.index() + window_width());
+ Register args1(129);
+ Register args1_translated(args1.index() + window_width());
+ operands[0] = constructor1.ToRawOperand();
+ operands[1] = args1.ToRawOperand();
+ operands[2] = 3;
+ translator()->TranslateInputRegisters(Bytecode::kNewWide, operands, 3);
+ translator()->TranslateOutputRegisters();
+ EXPECT_EQ(0, move_count());
+}
+
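+// The BadRange tests depend on debug-mode CHECK failures:
+// ASSERT_DEATH_IF_SUPPORTED matches the abort message against
+// kBadOperandRegex defined above.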
+TEST_F(RegisterTranslatorTest, BadRange0) {
+ // Bytecode::kNew with invalid range operand (kMaybeReg8).
+ Register constructor1(128);
+ Register args1(129);
+ uint32_t operands[] = {constructor1.ToRawOperand(), args1.ToRawOperand(), 3};
+ ASSERT_DEATH_IF_SUPPORTED(
+ translator()->TranslateInputRegisters(Bytecode::kNew, operands, 3),
+ kBadOperandRegex);
+}
+
+TEST_F(RegisterTranslatorTest, BadRange1) {
+  // Bytecode::kForInPrepare with invalid range operand (kRegTriple8).
+ Register for_in_state(160);
+ Register for_in_state_translated(for_in_state.index() + window_width());
+ uint32_t operands[] = {for_in_state.ToRawOperand()};
+ ASSERT_DEATH_IF_SUPPORTED(translator()->TranslateInputRegisters(
+ Bytecode::kForInPrepare, operands, 1),
+ kBadOperandRegex);
+}
+
+TEST_F(RegisterTranslatorTest, BadRange2) {
+  // Bytecode::kForInNext with invalid range operand (kRegPair8).
+ Register receiver(192);
+ Register receiver_translated(receiver.index() + window_width());
+ Register index(193);
+ Register index_translated(index.index() + window_width());
+ Register cache_info_pair(194);
+ Register cache_info_pair_translated(cache_info_pair.index() + window_width());
+ uint32_t operands[] = {receiver.ToRawOperand(), index.ToRawOperand(),
+ cache_info_pair.ToRawOperand()};
+ ASSERT_DEATH_IF_SUPPORTED(
+ translator()->TranslateInputRegisters(Bytecode::kForInNext, operands, 3),
+ kBadOperandRegex);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index 5339da35fd..638fd847bf 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -60,10 +60,9 @@
'compiler/instruction-selector-unittest.h',
'compiler/instruction-sequence-unittest.cc',
'compiler/instruction-sequence-unittest.h',
- 'compiler/interpreter-assembler-unittest.cc',
- 'compiler/interpreter-assembler-unittest.h',
+ 'compiler/int64-lowering-unittest.cc',
'compiler/js-builtin-reducer-unittest.cc',
- 'compiler/js-context-relaxation-unittest.cc',
+ 'compiler/js-create-lowering-unittest.cc',
'compiler/js-intrinsic-lowering-unittest.cc',
'compiler/js-operator-unittest.cc',
'compiler/js-typed-lowering-unittest.cc',
@@ -86,6 +85,7 @@
'compiler/schedule-unittest.cc',
'compiler/select-lowering-unittest.cc',
'compiler/scheduler-unittest.cc',
+ 'compiler/scheduler-rpo-unittest.cc',
'compiler/simplified-operator-reducer-unittest.cc',
'compiler/simplified-operator-unittest.cc',
'compiler/state-values-utils-unittest.cc',
@@ -99,6 +99,9 @@
'interpreter/bytecode-array-iterator-unittest.cc',
'interpreter/bytecode-register-allocator-unittest.cc',
'interpreter/constant-array-builder-unittest.cc',
+ 'interpreter/interpreter-assembler-unittest.cc',
+ 'interpreter/interpreter-assembler-unittest.h',
+ 'interpreter/register-translator-unittest.cc',
'libplatform/default-platform-unittest.cc',
'libplatform/task-queue-unittest.cc',
'libplatform/worker-thread-unittest.cc',
@@ -107,6 +110,7 @@
'heap/memory-reducer-unittest.cc',
'heap/heap-unittest.cc',
'heap/scavenge-job-unittest.cc',
+ 'heap/slot-set-unittest.cc',
'locked-queue-unittest.cc',
'run-all-unittests.cc',
'runtime/runtime-interpreter-unittest.cc',
@@ -114,6 +118,7 @@
'test-utils.cc',
'wasm/ast-decoder-unittest.cc',
'wasm/encoder-unittest.cc',
+ 'wasm/loop-assignment-analysis-unittest.cc',
'wasm/module-decoder-unittest.cc',
'wasm/wasm-macro-gen-unittest.cc',
],
diff --git a/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc b/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
index 923c554604..672158714a 100644
--- a/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/ast-decoder-unittest.cc
@@ -35,6 +35,8 @@ static const WasmOpcode kInt32BinopOpcodes[] = {
kExprI32Shl, kExprI32ShrU, kExprI32ShrS, kExprI32Eq, kExprI32LtS,
kExprI32LeS, kExprI32LtU, kExprI32LeU};
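+// A minimal helper macro (used by the rewritten tests below): it emits a
+// br_if whose condition is a constant zero, so only the typing of the branch
+// value varies across the call sites.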
+#define WASM_BRV_IF_ZERO(depth, val) \
+ kExprBrIf, static_cast<byte>(depth), val, WASM_ZERO
#define EXPECT_VERIFIES(env, x) Verify(kSuccess, env, x, x + arraysize(x))
@@ -87,10 +89,10 @@ class WasmDecoderTest : public TestWithZone {
static void init_env(FunctionEnv* env, FunctionSig* sig) {
env->module = nullptr;
env->sig = sig;
- env->local_int32_count = 0;
- env->local_int64_count = 0;
- env->local_float32_count = 0;
- env->local_float64_count = 0;
+ env->local_i32_count = 0;
+ env->local_i64_count = 0;
+ env->local_f32_count = 0;
+ env->local_f64_count = 0;
env->SumLocals();
}
@@ -179,9 +181,9 @@ static FunctionEnv CreateInt32FunctionEnv(FunctionSig* sig, int count) {
FunctionEnv env;
env.module = nullptr;
env.sig = sig;
- env.local_int32_count = count;
- env.local_float64_count = 0;
- env.local_float32_count = 0;
+ env.local_i32_count = count;
+ env.local_f64_count = 0;
+ env.local_f32_count = 0;
env.total_locals = static_cast<unsigned>(count + sig->parameter_count());
return env;
}
@@ -251,9 +253,6 @@ TEST_F(WasmDecoderTest, Int64Const) {
}
-// TODO(tizer): Fix on arm and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
-
TEST_F(WasmDecoderTest, Float32Const) {
byte code[] = {kExprF32Const, 0, 0, 0, 0};
float* ptr = reinterpret_cast<float*>(code + 1);
@@ -273,8 +272,6 @@ TEST_F(WasmDecoderTest, Float64Const) {
}
}
-#endif
-
TEST_F(WasmDecoderTest, Int32Const_off_end) {
byte code[] = {kExprI32Const, 0xaa, 0xbb, 0xcc, 0x44};
@@ -338,7 +335,7 @@ TEST_F(WasmDecoderTest, GetLocal_off_end) {
TEST_F(WasmDecoderTest, GetLocal_varint) {
- env_i_i.local_int32_count = 1000000000;
+ env_i_i.local_i32_count = 1000000000;
env_i_i.total_locals += 1000000000;
{
@@ -532,16 +529,11 @@ TEST_F(WasmDecoderTest, ExprBlock1b) {
}
-// TODO(tizer): Fix on arm and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
-
TEST_F(WasmDecoderTest, ExprBlock1c) {
static const byte code[] = {kExprBlock, 1, kExprF32Const, 0, 0, 0, 0};
EXPECT_VERIFIES(&env_f_ff, code);
}
-#endif
-
TEST_F(WasmDecoderTest, IfEmpty) {
static const byte code[] = {kExprIf, kExprGetLocal, 0, kExprNop};
@@ -704,9 +696,6 @@ TEST_F(WasmDecoderTest, ReturnVoid2) {
}
-// TODO(tizer): Fix on arm and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
-
TEST_F(WasmDecoderTest, ReturnVoid3) {
EXPECT_VERIFIES_INLINE(&env_v_v, kExprI8Const, 0);
EXPECT_VERIFIES_INLINE(&env_v_v, kExprI32Const, 0, 0, 0, 0);
@@ -717,8 +706,6 @@ TEST_F(WasmDecoderTest, ReturnVoid3) {
EXPECT_VERIFIES_INLINE(&env_v_i, kExprGetLocal, 0);
}
-#endif
-
TEST_F(WasmDecoderTest, Unreachable1) {
EXPECT_VERIFIES_INLINE(&env_v_v, kExprUnreachable);
@@ -881,9 +868,6 @@ TEST_F(WasmDecoderTest, MacrosStmt) {
}
-// TODO(tizer): Fix on arm and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
-
TEST_F(WasmDecoderTest, MacrosBreak) {
EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BREAK(0)));
@@ -895,8 +879,6 @@ TEST_F(WasmDecoderTest, MacrosBreak) {
WASM_LOOP(1, WASM_BREAKV(0, WASM_F64(0.0))));
}
-#endif
-
TEST_F(WasmDecoderTest, MacrosContinue) {
EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_CONTINUE(0)));
@@ -1204,14 +1186,13 @@ namespace {
class TestModuleEnv : public ModuleEnv {
public:
TestModuleEnv() {
- mem_start = 0;
- mem_end = 0;
+ instance = nullptr;
module = &mod;
linker = nullptr;
- function_code = nullptr;
mod.globals = new std::vector<WasmGlobal>;
mod.signatures = new std::vector<FunctionSig*>;
mod.functions = new std::vector<WasmFunction>;
+ mod.import_table = new std::vector<WasmImport>;
}
byte AddGlobal(MachineType mem_type) {
mod.globals->push_back({0, mem_type, 0, false});
@@ -1228,6 +1209,11 @@ class TestModuleEnv : public ModuleEnv {
CHECK(mod.functions->size() <= 127);
return static_cast<byte>(mod.functions->size() - 1);
}
+ byte AddImport(FunctionSig* sig) {
+ mod.import_table->push_back({sig, 0, 0});
+ CHECK(mod.import_table->size() <= 127);
+ return static_cast<byte>(mod.import_table->size() - 1);
+ }
private:
WasmModule mod;
@@ -1265,9 +1251,6 @@ TEST_F(WasmDecoderTest, CallsWithTooFewArguments) {
}
-// TODO(tizer): Fix on arm and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
-
TEST_F(WasmDecoderTest, CallsWithSpilloverArgs) {
static LocalType a_i_ff[] = {kAstI32, kAstF32, kAstF32};
FunctionSig sig_i_ff(1, 2, a_i_ff);
@@ -1331,8 +1314,6 @@ TEST_F(WasmDecoderTest, CallsWithMismatchedSigs3) {
EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(1, WASM_F32(17.6)));
}
-#endif
-
TEST_F(WasmDecoderTest, SimpleIndirectCalls) {
FunctionEnv* env = &env_i_i;
@@ -1389,6 +1370,39 @@ TEST_F(WasmDecoderTest, IndirectCallsWithMismatchedSigs3) {
EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_F32(17.6)));
}
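+
+// Import calls are checked against the signatures registered with AddImport;
+// matching argument counts and types verify, mismatches fail.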
+TEST_F(WasmDecoderTest, SimpleImportCalls) {
+ FunctionEnv* env = &env_i_i;
+ TestModuleEnv module_env;
+ env->module = &module_env;
+
+ byte f0 = module_env.AddImport(sigs.i_v());
+ byte f1 = module_env.AddImport(sigs.i_i());
+ byte f2 = module_env.AddImport(sigs.i_ii());
+
+ EXPECT_VERIFIES_INLINE(env, WASM_CALL_IMPORT0(f0));
+ EXPECT_VERIFIES_INLINE(env, WASM_CALL_IMPORT(f1, WASM_I8(22)));
+ EXPECT_VERIFIES_INLINE(env, WASM_CALL_IMPORT(f2, WASM_I8(32), WASM_I8(72)));
+}
+
+TEST_F(WasmDecoderTest, ImportCallsWithMismatchedSigs3) {
+ FunctionEnv* env = &env_i_i;
+ TestModuleEnv module_env;
+ env->module = &module_env;
+
+ byte f0 = module_env.AddImport(sigs.i_f());
+
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT0(f0));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT(f0, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT(f0, WASM_I64(27)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT(f0, WASM_F64(37.2)));
+
+ byte f1 = module_env.AddImport(sigs.i_d());
+
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT0(f1));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT(f1, WASM_I8(16)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT(f1, WASM_I64(16)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_IMPORT(f1, WASM_F32(17.6)));
+}
TEST_F(WasmDecoderTest, Int32Globals) {
FunctionEnv* env = &env_i_i;
@@ -1575,28 +1589,22 @@ TEST_F(WasmDecoderTest, BreakNesting3) {
}
-// TODO(tizer): Fix on arm and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
-
TEST_F(WasmDecoderTest, BreaksWithMultipleTypes) {
EXPECT_FAILURE_INLINE(
- &env_i_i,
- WASM_BLOCK(2, WASM_BRV_IF(0, WASM_ZERO, WASM_I8(7)), WASM_F32(7.7)));
- EXPECT_FAILURE_INLINE(&env_i_i,
- WASM_BLOCK(2, WASM_BRV_IF(0, WASM_ZERO, WASM_I8(7)),
- WASM_BRV_IF(0, WASM_ZERO, WASM_F32(7.7))));
+ &env_i_i, WASM_BLOCK(2, WASM_BRV_IF_ZERO(0, WASM_I8(7)), WASM_F32(7.7)));
+
EXPECT_FAILURE_INLINE(&env_i_i,
- WASM_BLOCK(3, WASM_BRV_IF(0, WASM_ZERO, WASM_I8(8)),
- WASM_BRV_IF(0, WASM_ZERO, WASM_I8(0)),
- WASM_BRV_IF(0, WASM_ZERO, WASM_F32(7.7))));
+ WASM_BLOCK(2, WASM_BRV_IF_ZERO(0, WASM_I8(7)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
EXPECT_FAILURE_INLINE(&env_i_i,
- WASM_BLOCK(3, WASM_BRV_IF(0, WASM_ZERO, WASM_I8(9)),
- WASM_BRV_IF(0, WASM_ZERO, WASM_F32(7.7)),
- WASM_BRV_IF(0, WASM_ZERO, WASM_I8(11))));
+ WASM_BLOCK(3, WASM_BRV_IF_ZERO(0, WASM_I8(8)),
+ WASM_BRV_IF_ZERO(0, WASM_I8(0)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7))));
+ EXPECT_FAILURE_INLINE(&env_i_i, WASM_BLOCK(3, WASM_BRV_IF_ZERO(0, WASM_I8(9)),
+ WASM_BRV_IF_ZERO(0, WASM_F32(7.7)),
+ WASM_BRV_IF_ZERO(0, WASM_I8(11))));
}
-#endif
-
TEST_F(WasmDecoderTest, BreakNesting_6_levels) {
for (int mask = 0; mask < 64; mask++) {
@@ -1630,9 +1638,6 @@ TEST_F(WasmDecoderTest, BreakNesting_6_levels) {
}
-// TODO(tizer): Fix on arm and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
-
TEST_F(WasmDecoderTest, ExprBreak_TypeCheck) {
FunctionEnv* envs[] = {&env_i_i, &env_l_l, &env_f_ff, &env_d_dd};
for (size_t i = 0; i < arraysize(envs); i++) {
@@ -1655,17 +1660,14 @@ TEST_F(WasmDecoderTest, ExprBreak_TypeCheck) {
WASM_F64(1.2)));
}
-#endif
-
TEST_F(WasmDecoderTest, ExprBreak_TypeCheckAll) {
byte code1[] = {WASM_BLOCK(2,
WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
WASM_GET_LOCAL(1))};
- byte code2[] = {WASM_BLOCK(
- 2, WASM_IF(WASM_ZERO, WASM_BRV_IF(0, WASM_ZERO, WASM_GET_LOCAL(0))),
- WASM_GET_LOCAL(1))};
-
+ byte code2[] = {
+ WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(1))};
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
@@ -1715,37 +1717,42 @@ TEST_F(WasmDecoderTest, ExprBr_Unify) {
}
}
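+
+// br_if takes a branch value and an i32 condition: the first test varies the
+// condition type, the second requires the value type to match the block's
+// result type.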
+TEST_F(WasmDecoderTest, ExprBrIf_cond_type) {
+ FunctionEnv env;
+ byte code[] = {
+ WASM_BLOCK(1, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)))};
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ LocalType types[] = {kLocalTypes[i], kLocalTypes[j]};
+ FunctionSig sig(0, 2, types);
+ init_env(&env, &sig);
-TEST_F(WasmDecoderTest, ExprBrIf_type) {
- EXPECT_VERIFIES_INLINE(
- &env_i_i,
- WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)),
- WASM_GET_LOCAL(0)));
- EXPECT_FAILURE_INLINE(
- &env_d_dd,
- WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)),
- WASM_GET_LOCAL(0)));
+ if (types[1] == kAstI32) {
+ EXPECT_VERIFIES(&env, code);
+ } else {
+ EXPECT_FAILURE(&env, code);
+ }
+ }
+ }
+}
+TEST_F(WasmDecoderTest, ExprBrIf_val_type) {
FunctionEnv env;
+ byte code[] = {
+ WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(1), WASM_GET_LOCAL(2)),
+ WASM_GET_LOCAL(0))};
for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
- LocalType type = kLocalTypes[i];
- LocalType storage[] = {kAstI32, kAstI32, type};
- FunctionSig sig(1, 2, storage);
- init_env(&env, &sig); // (i32, X) -> i32
-
- byte code1[] = {
- WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
- WASM_GET_LOCAL(0))};
-
- byte code2[] = {
- WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(1), WASM_GET_LOCAL(0)),
- WASM_GET_LOCAL(0))};
- if (type == kAstI32) {
- EXPECT_VERIFIES(&env, code1);
- EXPECT_VERIFIES(&env, code2);
- } else {
- EXPECT_FAILURE(&env, code1);
- EXPECT_FAILURE(&env, code2);
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ LocalType types[] = {kLocalTypes[i], kLocalTypes[i], kLocalTypes[j],
+ kAstI32};
+ FunctionSig sig(1, 3, types);
+ init_env(&env, &sig);
+
+ if (i == j) {
+ EXPECT_VERIFIES(&env, code);
+ } else {
+ EXPECT_FAILURE(&env, code);
+ }
}
}
}
@@ -1761,13 +1768,10 @@ TEST_F(WasmDecoderTest, ExprBrIf_Unify) {
FunctionSig sig(1, 2, storage);
init_env(&env, &sig); // (i32, X) -> i32
- byte code1[] = {
- WASM_BLOCK(2, WASM_BRV_IF(0, WASM_ZERO, WASM_GET_LOCAL(which)),
- WASM_GET_LOCAL(which ^ 1))};
- byte code2[] = {
- WASM_LOOP(2, WASM_BRV_IF(1, WASM_ZERO, WASM_GET_LOCAL(which)),
- WASM_GET_LOCAL(which ^ 1))};
-
+ byte code1[] = {WASM_BLOCK(2, WASM_BRV_IF_ZERO(0, WASM_GET_LOCAL(which)),
+ WASM_GET_LOCAL(which ^ 1))};
+ byte code2[] = {WASM_LOOP(2, WASM_BRV_IF_ZERO(1, WASM_GET_LOCAL(which)),
+ WASM_GET_LOCAL(which ^ 1))};
if (type == kAstI32) {
EXPECT_VERIFIES(&env, code1);
@@ -1800,6 +1804,12 @@ TEST_F(WasmDecoderTest, TableSwitch0c) {
EXPECT_VERIFIES(&env_v_v, code);
}
+TEST_F(WasmDecoderTest, TableSwitch0d) {
+ static byte code[] = {
+ WASM_BLOCK(1, WASM_TABLESWITCH_OP(0, 2, WASM_CASE_BR(0), WASM_CASE_BR(1)),
+ WASM_I8(67))};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
TEST_F(WasmDecoderTest, TableSwitch1) {
static byte code[] = {WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
@@ -1831,9 +1841,6 @@ TEST_F(WasmDecoderTest, TableSwitch2) {
}
-// TODO(tizer): Fix on arm and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
-
TEST_F(WasmDecoderTest, TableSwitch1b) {
EXPECT_VERIFIES_INLINE(&env_i_i, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_ZERO));
@@ -1845,29 +1852,25 @@ TEST_F(WasmDecoderTest, TableSwitch1b) {
WASM_TABLESWITCH_BODY(WASM_ZERO, WASM_F64(0.0)));
}
-#endif
-
-
-TEST_F(WasmDecoderTest, TableSwitch_br) {
- EXPECT_VERIFIES_INLINE(&env_i_i, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(0)),
- WASM_GET_LOCAL(0));
+TEST_F(WasmDecoderTest, TableSwitch_br1) {
for (int depth = 0; depth < 2; depth++) {
- EXPECT_VERIFIES_INLINE(
- &env_i_i, WASM_BLOCK(1, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(depth)),
- WASM_GET_LOCAL(0)));
+ byte code[] = {WASM_BLOCK(1, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(depth)),
+ WASM_GET_LOCAL(0))};
+ EXPECT_VERIFIES(&env_v_i, code);
+ EXPECT_FAILURE(&env_i_i, code);
}
}
TEST_F(WasmDecoderTest, TableSwitch_invalid_br) {
for (int depth = 1; depth < 4; depth++) {
- EXPECT_FAILURE_INLINE(&env_i_i,
+ EXPECT_FAILURE_INLINE(&env_v_i,
WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(depth)),
WASM_GET_LOCAL(0));
EXPECT_FAILURE_INLINE(
- &env_i_i,
- WASM_BLOCK(1, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(depth + 1)),
- WASM_GET_LOCAL(0)));
+ &env_v_i,
+ WASM_TABLESWITCH_OP(0, 2, WASM_CASE_BR(depth), WASM_CASE_BR(depth)),
+ WASM_GET_LOCAL(0));
}
}
@@ -1880,17 +1883,12 @@ TEST_F(WasmDecoderTest, TableSwitch_invalid_case_ref) {
}
-// TODO(tizer): Fix on arm and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
-
TEST_F(WasmDecoderTest, TableSwitch1_br) {
EXPECT_VERIFIES_INLINE(
&env_i_i, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_ZERO)));
}
-#endif
-
TEST_F(WasmDecoderTest, TableSwitch2_br) {
EXPECT_VERIFIES_INLINE(
@@ -1914,9 +1912,6 @@ TEST_F(WasmDecoderTest, TableSwitch2x2) {
}
-// TODO(tizer): Fix on arm and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
-
TEST_F(WasmDecoderTest, ExprBreakNesting1) {
EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(1, WASM_BRV(0, WASM_ZERO)));
EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(1, WASM_BR(0)));
@@ -1934,18 +1929,56 @@ TEST_F(WasmDecoderTest, ExprBreakNesting1) {
EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BR(1)));
}
-#endif
-
TEST_F(WasmDecoderTest, Select) {
EXPECT_VERIFIES_INLINE(
+ &env_i_i, WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(&env_f_ff,
+ WASM_SELECT(WASM_F32(0.0), WASM_F32(0.0), WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(&env_d_dd,
+ WASM_SELECT(WASM_F64(0.0), WASM_F64(0.0), WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(&env_l_l,
+ WASM_SELECT(WASM_I64(0), WASM_I64(0), WASM_ZERO));
+}
+
+TEST_F(WasmDecoderTest, Select_fail1) {
+ EXPECT_FAILURE_INLINE(&env_i_i, WASM_SELECT(WASM_F32(0.0), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(&env_i_i, WASM_SELECT(WASM_GET_LOCAL(0), WASM_F32(0.0),
+ WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(
&env_i_i,
- WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+ WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_F32(0.0)));
}
+TEST_F(WasmDecoderTest, Select_fail2) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalType type = kLocalTypes[i];
+ if (type == kAstI32) continue;
+
+ LocalType types[] = {type, kAstI32, type};
+ FunctionSig sig(1, 2, types);
+ FunctionEnv env;
+ init_env(&env, &sig);
+
+ EXPECT_VERIFIES_INLINE(
+ &env,
+ WASM_SELECT(WASM_GET_LOCAL(1), WASM_GET_LOCAL(1), WASM_GET_LOCAL(0)));
+
+ EXPECT_FAILURE_INLINE(
+ &env,
+ WASM_SELECT(WASM_GET_LOCAL(1), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+
+ EXPECT_FAILURE_INLINE(
+ &env,
+ WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(1), WASM_GET_LOCAL(0)));
+
+ EXPECT_FAILURE_INLINE(
+ &env,
+ WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)));
+ }
+}
-// TODO(tizer): Fix on arm and reenable.
-#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
TEST_F(WasmDecoderTest, Select_TypeCheck) {
EXPECT_FAILURE_INLINE(&env_i_i, WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0),
@@ -1958,22 +1991,18 @@ TEST_F(WasmDecoderTest, Select_TypeCheck) {
&env_i_i, WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0), WASM_I64(0)));
}
-#endif
-
class WasmOpcodeLengthTest : public TestWithZone {
public:
WasmOpcodeLengthTest() : TestWithZone() {}
};
-
-#define EXPECT_LENGTH(expected, opcode) \
- { \
- static const byte code[] = {opcode, 0, 0, 0, 0, 0, 0, 0, 0}; \
- EXPECT_EQ(expected, OpcodeLength(code)); \
+#define EXPECT_LENGTH(expected, opcode) \
+ { \
+ static const byte code[] = {opcode, 0, 0, 0, 0, 0, 0, 0, 0}; \
+ EXPECT_EQ(expected, OpcodeLength(code, code + sizeof(code))); \
}
-
TEST_F(WasmOpcodeLengthTest, Statements) {
EXPECT_LENGTH(1, kExprNop);
EXPECT_LENGTH(2, kExprBlock);
@@ -1997,6 +2026,7 @@ TEST_F(WasmOpcodeLengthTest, MiscExpressions) {
EXPECT_LENGTH(2, kExprLoadGlobal);
EXPECT_LENGTH(2, kExprStoreGlobal);
EXPECT_LENGTH(2, kExprCallFunction);
+ EXPECT_LENGTH(2, kExprCallImport);
EXPECT_LENGTH(2, kExprCallIndirect);
EXPECT_LENGTH(1, kExprIf);
EXPECT_LENGTH(1, kExprIfElse);
@@ -2014,11 +2044,11 @@ TEST_F(WasmOpcodeLengthTest, VariableLength) {
byte size5[] = {kExprLoadGlobal, 1 | 0x80, 2 | 0x80, 3 | 0x80, 4};
byte size6[] = {kExprLoadGlobal, 1 | 0x80, 2 | 0x80, 3 | 0x80, 4 | 0x80, 5};
- EXPECT_EQ(2, OpcodeLength(size2));
- EXPECT_EQ(3, OpcodeLength(size3));
- EXPECT_EQ(4, OpcodeLength(size4));
- EXPECT_EQ(5, OpcodeLength(size5));
- EXPECT_EQ(6, OpcodeLength(size6));
+ EXPECT_EQ(2, OpcodeLength(size2, size2 + sizeof(size2)));
+ EXPECT_EQ(3, OpcodeLength(size3, size3 + sizeof(size3)));
+ EXPECT_EQ(4, OpcodeLength(size4, size4 + sizeof(size4)));
+ EXPECT_EQ(5, OpcodeLength(size5, size5 + sizeof(size5)));
+ EXPECT_EQ(6, OpcodeLength(size6, size6 + sizeof(size6)));
}
@@ -2183,14 +2213,12 @@ class WasmOpcodeArityTest : public TestWithZone {
WasmOpcodeArityTest() : TestWithZone() {}
};
-
-#define EXPECT_ARITY(expected, ...) \
- { \
- static const byte code[] = {__VA_ARGS__}; \
- EXPECT_EQ(expected, OpcodeArity(&env, code)); \
+#define EXPECT_ARITY(expected, ...) \
+ { \
+ static const byte code[] = {__VA_ARGS__}; \
+ EXPECT_EQ(expected, OpcodeArity(&env, code, code + sizeof(code))); \
}
-
TEST_F(WasmOpcodeArityTest, Control) {
FunctionEnv env;
EXPECT_ARITY(0, kExprNop);
@@ -2249,12 +2277,16 @@ TEST_F(WasmOpcodeArityTest, Calls) {
module.AddSignature(sigs.f_ff());
module.AddSignature(sigs.i_d());
+ module.AddImport(sigs.f_ff());
+ module.AddImport(sigs.i_d());
+
{
FunctionEnv env;
WasmDecoderTest::init_env(&env, sigs.i_ii());
env.module = &module;
EXPECT_ARITY(2, kExprCallFunction, 0);
+ EXPECT_ARITY(2, kExprCallImport, 0);
EXPECT_ARITY(3, kExprCallIndirect, 0);
EXPECT_ARITY(1, kExprBr);
EXPECT_ARITY(2, kExprBrIf);
@@ -2266,6 +2298,7 @@ TEST_F(WasmOpcodeArityTest, Calls) {
env.module = &module;
EXPECT_ARITY(1, kExprCallFunction, 1);
+ EXPECT_ARITY(1, kExprCallImport, 1);
EXPECT_ARITY(2, kExprCallIndirect, 1);
EXPECT_ARITY(1, kExprBr);
EXPECT_ARITY(2, kExprBrIf);
diff --git a/deps/v8/test/unittests/wasm/encoder-unittest.cc b/deps/v8/test/unittests/wasm/encoder-unittest.cc
index 156cf6b1e5..e09e71aeb8 100644
--- a/deps/v8/test/unittests/wasm/encoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/encoder-unittest.cc
@@ -56,28 +56,28 @@ TEST_F(EncoderTest, Function_Builder_Variable_Indexing) {
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
uint16_t f_index = builder->AddFunction();
WasmFunctionBuilder* function = builder->FunctionAt(f_index);
- uint16_t local_float32 = function->AddLocal(kAstF32);
+ uint16_t local_f32 = function->AddLocal(kAstF32);
uint16_t param_float32 = function->AddParam(kAstF32);
- uint16_t local_int32 = function->AddLocal(kAstI32);
- uint16_t local_float64 = function->AddLocal(kAstF64);
- uint16_t local_int64 = function->AddLocal(kAstI64);
+ uint16_t local_i32 = function->AddLocal(kAstI32);
+ uint16_t local_f64 = function->AddLocal(kAstF64);
+ uint16_t local_i64 = function->AddLocal(kAstI64);
uint16_t param_int32 = function->AddParam(kAstI32);
- uint16_t local_int32_2 = function->AddLocal(kAstI32);
+ uint16_t local_i32_2 = function->AddLocal(kAstI32);
byte code[] = {kExprGetLocal, static_cast<uint8_t>(param_float32)};
uint32_t local_indices[] = {1};
function->EmitCode(code, sizeof(code), local_indices, 1);
code[1] = static_cast<uint8_t>(param_int32);
function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_int32);
+ code[1] = static_cast<uint8_t>(local_i32);
function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_int32_2);
+ code[1] = static_cast<uint8_t>(local_i32_2);
function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_int64);
+ code[1] = static_cast<uint8_t>(local_i64);
function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_float32);
+ code[1] = static_cast<uint8_t>(local_f32);
function->EmitCode(code, sizeof(code), local_indices, 1);
- code[1] = static_cast<uint8_t>(local_float64);
+ code[1] = static_cast<uint8_t>(local_f64);
function->EmitCode(code, sizeof(code), local_indices, 1);
WasmFunctionEncoder* f = function->Build(&zone, builder);
diff --git a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
new file mode 100644
index 0000000000..958621970c
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
@@ -0,0 +1,211 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/v8.h"
+
+#include "test/cctest/wasm/test-signatures.h"
+
+#include "src/bit-vector.h"
+#include "src/objects.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/wasm-macro-gen.h"
+#include "src/wasm/wasm-module.h"
+
+#define WASM_SET_ZERO(i) WASM_SET_LOCAL(i, WASM_ZERO)
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmLoopAssignmentAnalyzerTest : public TestWithZone {
+ public:
+ WasmLoopAssignmentAnalyzerTest() : TestWithZone(), sigs() {
+ init_env(&env, sigs.v_v());
+ }
+
+ TestSignatures sigs;
+ FunctionEnv env;
+
+ static void init_env(FunctionEnv* env, FunctionSig* sig) {
+ env->module = nullptr;
+ env->sig = sig;
+ env->local_i32_count = 0;
+ env->local_i64_count = 0;
+ env->local_f32_count = 0;
+ env->local_f64_count = 0;
+ env->SumLocals();
+ }
+
+ BitVector* Analyze(const byte* start, const byte* end) {
+ return AnalyzeLoopAssignmentForTesting(zone(), &env, start, end);
+ }
+};
+
+
+TEST_F(WasmLoopAssignmentAnalyzerTest, Empty0) {
+ byte code[] = { 0 };
+ BitVector* assigned = Analyze(code, code);
+ CHECK_NULL(assigned);
+}
+
+
+TEST_F(WasmLoopAssignmentAnalyzerTest, Empty1) {
+ byte code[] = {kExprLoop, 0};
+ for (int i = 0; i < 5; i++) {
+ BitVector* assigned = Analyze(code, code + arraysize(code));
+ for (int j = 0; j < assigned->length(); j++) {
+ CHECK_EQ(false, assigned->Contains(j));
+ }
+ env.AddLocals(kAstI32, 1);
+ }
+}
+
+
+TEST_F(WasmLoopAssignmentAnalyzerTest, One) {
+ env.AddLocals(kAstI32, 5);
+ for (int i = 0; i < 5; i++) {
+ byte code[] = {WASM_LOOP(1, WASM_SET_ZERO(i))};
+ BitVector* assigned = Analyze(code, code + arraysize(code));
+ for (int j = 0; j < assigned->length(); j++) {
+ CHECK_EQ(j == i, assigned->Contains(j));
+ }
+ }
+}
+
+
+TEST_F(WasmLoopAssignmentAnalyzerTest, OneBeyond) {
+ env.AddLocals(kAstI32, 5);
+ for (int i = 0; i < 5; i++) {
+ byte code[] = {WASM_LOOP(1, WASM_SET_ZERO(i)), WASM_SET_ZERO(1)};
+ BitVector* assigned = Analyze(code, code + arraysize(code));
+ for (int j = 0; j < assigned->length(); j++) {
+ CHECK_EQ(j == i, assigned->Contains(j));
+ }
+ }
+}
+
+
+TEST_F(WasmLoopAssignmentAnalyzerTest, Two) {
+ env.AddLocals(kAstI32, 5);
+ for (int i = 0; i < 5; i++) {
+ for (int j = 0; j < 5; j++) {
+ byte code[] = {WASM_LOOP(2, WASM_SET_ZERO(i), WASM_SET_ZERO(j))};
+ BitVector* assigned = Analyze(code, code + arraysize(code));
+ for (int k = 0; k < assigned->length(); k++) {
+ bool expected = k == i || k == j;
+ CHECK_EQ(expected, assigned->Contains(k));
+ }
+ }
+ }
+}
+
+
+TEST_F(WasmLoopAssignmentAnalyzerTest, NestedIf) {
+ env.AddLocals(kAstI32, 5);
+ for (int i = 0; i < 5; i++) {
+ byte code[] = {WASM_LOOP(
+ 1, WASM_IF_ELSE(WASM_SET_ZERO(0), WASM_SET_ZERO(i), WASM_SET_ZERO(1)))};
+ BitVector* assigned = Analyze(code, code + arraysize(code));
+ for (int j = 0; j < assigned->length(); j++) {
+ bool expected = i == j || j == 0 || j == 1;
+ CHECK_EQ(expected, assigned->Contains(j));
+ }
+ }
+}
+
+
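+// Returns byte 'which' of the unsigned LEB128 encoding of 'val', setting the
+// continuation bit when higher-order bits remain.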
+static byte LEBByte(uint32_t val, byte which) {
+ byte b = (val >> (which * 7)) & 0x7F;
+ if (val >> ((which + 1) * 7)) b |= 0x80;
+ return b;
+}
+
+
+TEST_F(WasmLoopAssignmentAnalyzerTest, BigLocal) {
+ env.AddLocals(kAstI32, 65000);
+ for (int i = 13; i < 65000; i = static_cast<int>(i * 1.5)) {
+ byte code[] = {kExprLoop,
+ 1,
+ kExprSetLocal,
+ LEBByte(i, 0),
+ LEBByte(i, 1),
+ LEBByte(i, 2),
+ 11,
+ 12,
+ 13};
+
+ BitVector* assigned = Analyze(code, code + arraysize(code));
+ for (int j = 0; j < assigned->length(); j++) {
+ bool expected = i == j;
+ CHECK_EQ(expected, assigned->Contains(j));
+ }
+ }
+}
+
+
+TEST_F(WasmLoopAssignmentAnalyzerTest, Break) {
+ env.AddLocals(kAstI32, 3);
+ byte code[] = {
+ WASM_LOOP(1, WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(1, WASM_SET_ZERO(1)))),
+ WASM_SET_ZERO(0)};
+
+ BitVector* assigned = Analyze(code, code + arraysize(code));
+ for (int j = 0; j < assigned->length(); j++) {
+ bool expected = j == 1;
+ CHECK_EQ(expected, assigned->Contains(j));
+ }
+}
+
+
+TEST_F(WasmLoopAssignmentAnalyzerTest, Loop1) {
+ env.AddLocals(kAstI32, 5);
+ byte code[] = {
+ WASM_LOOP(1, WASM_IF(WASM_GET_LOCAL(0),
+ WASM_BRV(0, WASM_SET_LOCAL(
+ 3, WASM_I32_SUB(WASM_GET_LOCAL(0),
+ WASM_I8(1)))))),
+ WASM_GET_LOCAL(0)};
+
+ BitVector* assigned = Analyze(code, code + arraysize(code));
+ for (int j = 0; j < assigned->length(); j++) {
+ bool expected = j == 3;
+ CHECK_EQ(expected, assigned->Contains(j));
+ }
+}
+
+
+TEST_F(WasmLoopAssignmentAnalyzerTest, Loop2) {
+ env.AddLocals(kAstI32, 3);
+ const byte kIter = 0;
+ env.AddLocals(kAstF32, 3);
+ const byte kSum = 3;
+
+ byte code[] = {WASM_BLOCK(
+ 3,
+ WASM_WHILE(
+ WASM_GET_LOCAL(kIter),
+ WASM_BLOCK(2, WASM_SET_LOCAL(
+ kSum, WASM_F32_ADD(
+ WASM_GET_LOCAL(kSum),
+ WASM_LOAD_MEM(MachineType::Float32(),
+ WASM_GET_LOCAL(kIter)))),
+ WASM_SET_LOCAL(kIter, WASM_I32_SUB(WASM_GET_LOCAL(kIter),
+ WASM_I8(4))))),
+ WASM_STORE_MEM(MachineType::Float32(), WASM_ZERO, WASM_GET_LOCAL(kSum)),
+ WASM_GET_LOCAL(kIter))};
+
+ BitVector* assigned = Analyze(code + 2, code + arraysize(code));
+ for (int j = 0; j < assigned->length(); j++) {
+ bool expected = j == kIter || j == kSum;
+ CHECK_EQ(expected, assigned->Contains(j));
+ }
+}
+
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 0738b5909b..467ffcc232 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -45,6 +45,15 @@ struct LocalTypePair {
{kLocalF64, kAstF64}};
+// TODO(titzer): use these macros everywhere below.
+#define U32_LE(v) \
+ static_cast<byte>(v), static_cast<byte>((v) >> 8), \
+ static_cast<byte>((v) >> 16), static_cast<byte>((v) >> 24)
+
+
+#define U16_LE(v) static_cast<byte>(v), static_cast<byte>((v) >> 8)
+
+
TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
static const byte data[1]{kDeclEnd};
{
@@ -61,7 +70,7 @@ TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
TEST_F(WasmModuleVerifyTest, OneGlobal) {
- const byte data[] = {
+ static const byte data[] = {
kDeclGlobals,
1,
0,
@@ -100,7 +109,7 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
- const byte data[] = {
+ static const byte data[] = {
kDeclGlobals, 0, // declare 0 globals
};
ModuleResult result = DecodeModule(data, data + arraysize(data));
@@ -125,7 +134,7 @@ static void AppendUint32v(std::vector<byte>& buffer, uint32_t val) {
TEST_F(WasmModuleVerifyTest, NGlobals) {
- const byte data[] = {
+ static const byte data[] = {
0, 0, 0, 0, // name offset
kMemI32, // memory type
0, // exported
@@ -146,7 +155,7 @@ TEST_F(WasmModuleVerifyTest, NGlobals) {
TEST_F(WasmModuleVerifyTest, GlobalWithInvalidNameOffset) {
- const byte data[] = {
+ static const byte data[] = {
kDeclGlobals,
1, // declare one global
0,
@@ -162,7 +171,7 @@ TEST_F(WasmModuleVerifyTest, GlobalWithInvalidNameOffset) {
TEST_F(WasmModuleVerifyTest, GlobalWithInvalidMemoryType) {
- const byte data[] = {
+ static const byte data[] = {
kDeclGlobals,
1, // declare one global
0,
@@ -178,7 +187,7 @@ TEST_F(WasmModuleVerifyTest, GlobalWithInvalidMemoryType) {
TEST_F(WasmModuleVerifyTest, TwoGlobals) {
- const byte data[] = {
+ static const byte data[] = {
kDeclGlobals,
2,
0,
@@ -333,10 +342,10 @@ TEST_F(WasmModuleVerifyTest, OneEmptyVoidVoidFunction) {
EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
- EXPECT_EQ(523, function->local_int32_count);
- EXPECT_EQ(1037, function->local_int64_count);
- EXPECT_EQ(1551, function->local_float32_count);
- EXPECT_EQ(2065, function->local_float64_count);
+ EXPECT_EQ(523, function->local_i32_count);
+ EXPECT_EQ(1037, function->local_i64_count);
+ EXPECT_EQ(1551, function->local_f32_count);
+ EXPECT_EQ(2065, function->local_f64_count);
EXPECT_TRUE(function->exported);
EXPECT_FALSE(function->external);
@@ -373,10 +382,10 @@ TEST_F(WasmModuleVerifyTest, OneFunctionImported) {
EXPECT_EQ(0, function->code_start_offset);
EXPECT_EQ(0, function->code_end_offset);
- EXPECT_EQ(0, function->local_int32_count);
- EXPECT_EQ(0, function->local_int64_count);
- EXPECT_EQ(0, function->local_float32_count);
- EXPECT_EQ(0, function->local_float64_count);
+ EXPECT_EQ(0, function->local_i32_count);
+ EXPECT_EQ(0, function->local_i64_count);
+ EXPECT_EQ(0, function->local_f32_count);
+ EXPECT_EQ(0, function->local_f64_count);
EXPECT_FALSE(function->exported);
EXPECT_TRUE(function->external);
@@ -410,10 +419,10 @@ TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody) {
EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
- EXPECT_EQ(0, function->local_int32_count);
- EXPECT_EQ(0, function->local_int64_count);
- EXPECT_EQ(0, function->local_float32_count);
- EXPECT_EQ(0, function->local_float64_count);
+ EXPECT_EQ(0, function->local_i32_count);
+ EXPECT_EQ(0, function->local_i64_count);
+ EXPECT_EQ(0, function->local_f32_count);
+ EXPECT_EQ(0, function->local_f64_count);
EXPECT_FALSE(function->exported);
EXPECT_FALSE(function->external);
@@ -450,10 +459,10 @@ TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody_WithLocals) {
EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
- EXPECT_EQ(513, function->local_int32_count);
- EXPECT_EQ(1027, function->local_int64_count);
- EXPECT_EQ(1541, function->local_float32_count);
- EXPECT_EQ(2055, function->local_float64_count);
+ EXPECT_EQ(513, function->local_i32_count);
+ EXPECT_EQ(1027, function->local_i64_count);
+ EXPECT_EQ(1541, function->local_f32_count);
+ EXPECT_EQ(2055, function->local_f64_count);
EXPECT_FALSE(function->exported);
EXPECT_FALSE(function->external);
@@ -463,10 +472,13 @@ TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody_WithLocals) {
TEST_F(WasmModuleVerifyTest, OneGlobalOneFunctionWithNopBodyOneDataSegment) {
- static const byte kCodeStartOffset = 2 + kDeclGlobalSize + 4 + 2 + 17;
+ static const byte kDeclMemorySize = 4;
+ static const byte kCodeStartOffset =
+ 2 + kDeclMemorySize + kDeclGlobalSize + 4 + 2 + 17;
static const byte kCodeEndOffset = kCodeStartOffset + 3;
static const byte data[] = {
+ kDeclMemory, 28, 28, 1,
// global#0 --------------------------------------------------
kDeclGlobals, 1, 0, 0, 0, 0, // name offset
kMemU8, // memory type
@@ -531,24 +543,17 @@ TEST_F(WasmModuleVerifyTest, OneGlobalOneFunctionWithNopBodyOneDataSegment) {
TEST_F(WasmModuleVerifyTest, OneDataSegment) {
const byte data[] = {
- kDeclDataSegments,
- 1,
- 0xaa,
- 0xbb,
- 0x09,
+ kDeclMemory, 28, 28, 1, kDeclDataSegments, 1, 0xaa, 0xbb, 0x09,
0, // dest addr
- 11,
- 0,
- 0,
+ 11, 0, 0,
0, // source offset
- 3,
- 0,
- 0,
+ 3, 0, 0,
0, // source size
1, // init
};
{
+ EXPECT_VERIFIES(data);
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
EXPECT_EQ(0, result.val->globals->size());
@@ -565,7 +570,7 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
if (result.val) delete result.val;
}
- for (size_t size = 1; size < arraysize(data); size++) {
+ for (size_t size = 5; size < arraysize(data); size++) {
// Should fall off end of module bytes.
ModuleResult result = DecodeModule(data, data + size);
EXPECT_FALSE(result.ok());
@@ -576,32 +581,18 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
const byte data[] = {
- kDeclDataSegments,
- 2,
- 0xee,
- 0xff,
- 0x07,
+ kDeclMemory, 28, 28, 1, kDeclDataSegments, 2, 0xee, 0xff, 0x07,
0, // dest addr
- 9,
- 0,
- 0,
+ 9, 0, 0,
0, // #0: source offset
- 4,
- 0,
- 0,
+ 4, 0, 0,
0, // source size
0, // init
- 0xcc,
- 0xdd,
- 0x06,
+ 0xcc, 0xdd, 0x06,
0, // #1: dest addr
- 6,
- 0,
- 0,
+ 6, 0, 0,
0, // source offset
- 10,
- 0,
- 0,
+ 10, 0, 0,
0, // source size
1, // init
};
@@ -629,7 +620,7 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
if (result.val) delete result.val;
}
- for (size_t size = 1; size < arraysize(data); size++) {
+ for (size_t size = 5; size < arraysize(data); size++) {
// Should fall off end of module bytes.
ModuleResult result = DecodeModule(data, data + size);
EXPECT_FALSE(result.ok());
@@ -638,6 +629,71 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
}
+TEST_F(WasmModuleVerifyTest, DataSegmentWithInvalidSource) {
+ const int dest_addr = 0x100;
+ const byte mem_size_log2 = 15;
+ const int kDataSize = 19;
+
+ for (int source_offset = 0; source_offset < 5 + kDataSize; source_offset++) {
+ for (int source_size = -1; source_size < 5 + kDataSize; source_size += 3) {
+ byte data[] = {
+ kDeclMemory,
+ mem_size_log2,
+ mem_size_log2,
+ 1,
+ kDeclDataSegments,
+ 1,
+ U32_LE(dest_addr),
+ U32_LE(source_offset),
+ U32_LE(source_size),
+ 1, // init
+ };
+
+ STATIC_ASSERT(kDataSize == arraysize(data));
+
+ if (source_offset < kDataSize && source_size >= 0 &&
+ (source_offset + source_size) <= kDataSize) {
+ EXPECT_VERIFIES(data);
+ } else {
+ EXPECT_FAILURE(data);
+ }
+ }
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, DataSegmentWithInvalidDest) {
+ const int source_size = 3;
+ const int source_offset = 11;
+
+ for (byte mem_size_log2 = 12; mem_size_log2 < 20; mem_size_log2++) {
+ int mem_size = 1 << mem_size_log2;
+
+ for (int dest_addr = mem_size - source_size;
+ dest_addr < mem_size + source_size; dest_addr++) {
+ byte data[] = {
+ kDeclMemory,
+ mem_size_log2,
+ mem_size_log2,
+ 1,
+ kDeclDataSegments,
+ 1,
+ U32_LE(dest_addr),
+ U32_LE(source_offset),
+ U32_LE(source_size),
+ 1, // init
+ };
+
+ if (dest_addr <= (mem_size - source_size)) {
+ EXPECT_VERIFIES(data);
+ } else {
+ EXPECT_FAILURE(data);
+ }
+ }
+ }
+}
+
+
// To make below tests for indirect calls much shorter.
#define FUNCTION(sig_index, external) \
kDeclFunctionImport, static_cast<byte>(sig_index), \
@@ -848,7 +904,7 @@ class WasmFunctionVerifyTest : public TestWithZone {};
TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
- byte data[] = {
+ static const byte data[] = {
0, kLocalVoid, // signature
3, 0, // local int32 count
4, 0, // local int64 count
@@ -868,10 +924,10 @@ TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
EXPECT_EQ(0, function->name_offset);
EXPECT_EQ(arraysize(data) - 1, function->code_start_offset);
EXPECT_EQ(arraysize(data), function->code_end_offset);
- EXPECT_EQ(3, function->local_int32_count);
- EXPECT_EQ(4, function->local_int64_count);
- EXPECT_EQ(5, function->local_float32_count);
- EXPECT_EQ(6, function->local_float64_count);
+ EXPECT_EQ(3, function->local_i32_count);
+ EXPECT_EQ(4, function->local_i64_count);
+ EXPECT_EQ(5, function->local_f32_count);
+ EXPECT_EQ(6, function->local_f64_count);
EXPECT_FALSE(function->external);
EXPECT_FALSE(function->exported);
}
@@ -889,7 +945,7 @@ TEST_F(WasmModuleVerifyTest, WLLSectionNoLen) {
TEST_F(WasmModuleVerifyTest, WLLSectionEmpty) {
- const byte data[] = {
+ static const byte data[] = {
kDeclWLL, 0, // empty section
};
ModuleResult result = DecodeModule(data, data + arraysize(data));
@@ -899,7 +955,7 @@ TEST_F(WasmModuleVerifyTest, WLLSectionEmpty) {
TEST_F(WasmModuleVerifyTest, WLLSectionOne) {
- const byte data[] = {
+ static const byte data[] = {
kDeclWLL,
1, // LEB128 1
0, // one byte section
@@ -911,10 +967,10 @@ TEST_F(WasmModuleVerifyTest, WLLSectionOne) {
TEST_F(WasmModuleVerifyTest, WLLSectionTen) {
- const byte data[] = {
+ static const byte data[] = {
kDeclWLL,
- 10, // LEB128 10
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // 10 byte section
+ 10, // LEB128 10
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // 10 byte section
};
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_TRUE(result.ok());
@@ -923,20 +979,19 @@ TEST_F(WasmModuleVerifyTest, WLLSectionTen) {
TEST_F(WasmModuleVerifyTest, WLLSectionOverflow) {
- const byte data[] = {
+ static const byte data[] = {
kDeclWLL,
- 11, // LEB128 11
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // 10 byte section
+ 11, // LEB128 11
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // 10 byte section
};
EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, WLLSectionUnderflow) {
- const byte data[] = {
- kDeclWLL,
- 0xff, 0xff, 0xff, 0xff, 0x0f, // LEB128 0xffffffff
- 1, 2, 3, 4, // 4 byte section
+ static const byte data[] = {
+ kDeclWLL, 0xff, 0xff, 0xff, 0xff, 0x0f, // LEB128 0xffffffff
+ 1, 2, 3, 4, // 4 byte section
};
EXPECT_FAILURE(data);
}
@@ -944,14 +999,92 @@ TEST_F(WasmModuleVerifyTest, WLLSectionUnderflow) {
TEST_F(WasmModuleVerifyTest, WLLSectionLoop) {
// Would infinite loop decoding if wrapping and allowed.
- const byte data[] = {
- kDeclWLL,
- 0xfa, 0xff, 0xff, 0xff, 0x0f, // LEB128 0xfffffffa
- 1, 2, 3, 4, // 4 byte section
+ static const byte data[] = {
+ kDeclWLL, 0xfa, 0xff, 0xff, 0xff, 0x0f, // LEB128 0xfffffffa
+ 1, 2, 3, 4, // 4 byte section
};
EXPECT_FAILURE(data);
}
+TEST_F(WasmModuleVerifyTest, ImportTable_empty) {
+ static const byte data[] = {kDeclSignatures, 0, kDeclImportTable, 0};
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ImportTable_nosigs) {
+ static const byte data[] = {kDeclImportTable, 0};
+ EXPECT_FAILURE(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ImportTable_invalid_sig) {
+ static const byte data[] = {
+ kDeclSignatures,
+ 0,
+ kDeclImportTable,
+ 1,
+ 0,
+ 0, // sig index
+ 1,
+ 0,
+ 0,
+ 0, // module name
+ 1,
+ 0,
+ 0,
+ 0 // function name
+ };
+ EXPECT_FAILURE(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ImportTable_one_sig) {
+ static const byte data[] = {
+ kDeclSignatures,
+ 1,
+ 0,
+ static_cast<byte>(kAstStmt),
+ kDeclImportTable,
+ 1,
+ 0,
+ 0, // sig index
+ 1,
+ 0,
+ 0,
+ 0, // module name
+ 1,
+ 0,
+ 0,
+ 0 // function name
+ };
+ EXPECT_VERIFIES(data);
+}
+
+TEST_F(WasmModuleVerifyTest, ImportTable_off_end) {
+ static const byte data[] = {
+ kDeclSignatures,
+ 1,
+ 0,
+ static_cast<byte>(kAstStmt),
+ kDeclImportTable,
+ 1,
+ 0,
+ 0, // sig index
+ 1,
+ 0,
+ 0,
+ 0, // module name
+ 1,
+ 0,
+ 0,
+ 0 // function name
+ };
+
+ for (size_t length = 5; length < sizeof(data); length++) {
+ ModuleResult result = DecodeModule(data, data + length);
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
index c5bb5eca00..f3f604b3ed 100644
--- a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
@@ -131,15 +131,25 @@ TEST_F(WasmMacroGenTest, Expressions) {
EXPECT_SIZE(6, WASM_LOOP(3, WASM_NOP, WASM_NOP, WASM_ZERO));
}
-
-TEST_F(WasmMacroGenTest, FunctionCalls) {
+TEST_F(WasmMacroGenTest, CallFunction) {
EXPECT_SIZE(2, WASM_CALL_FUNCTION0(0));
EXPECT_SIZE(2, WASM_CALL_FUNCTION0(1));
EXPECT_SIZE(2, WASM_CALL_FUNCTION0(11));
EXPECT_SIZE(4, WASM_CALL_FUNCTION(0, WASM_ZERO));
EXPECT_SIZE(6, WASM_CALL_FUNCTION(1, WASM_ZERO, WASM_ZERO));
+}
+
+TEST_F(WasmMacroGenTest, CallImport) {
+ EXPECT_SIZE(2, WASM_CALL_IMPORT0(0));
+ EXPECT_SIZE(2, WASM_CALL_IMPORT0(1));
+ EXPECT_SIZE(2, WASM_CALL_IMPORT0(11));
+
+ EXPECT_SIZE(4, WASM_CALL_IMPORT(0, WASM_ZERO));
+ EXPECT_SIZE(6, WASM_CALL_IMPORT(1, WASM_ZERO, WASM_ZERO));
+}
+TEST_F(WasmMacroGenTest, CallIndirect) {
EXPECT_SIZE(4, WASM_CALL_INDIRECT0(0, WASM_ZERO));
EXPECT_SIZE(4, WASM_CALL_INDIRECT0(1, WASM_ZERO));
EXPECT_SIZE(4, WASM_CALL_INDIRECT0(11, WASM_ZERO));
diff --git a/deps/v8/test/webkit/fast/regex/toString-expected.txt b/deps/v8/test/webkit/fast/regex/toString-expected.txt
index 154411242e..08852f9543 100644
--- a/deps/v8/test/webkit/fast/regex/toString-expected.txt
+++ b/deps/v8/test/webkit/fast/regex/toString-expected.txt
@@ -58,3 +58,4 @@ PASS successfullyParsed is true
TEST COMPLETE
+
diff --git a/deps/v8/test/webkit/function-declarations-in-switch-statement-expected.txt b/deps/v8/test/webkit/function-declarations-in-switch-statement-expected.txt
index 602b8b9f87..938e028933 100644
--- a/deps/v8/test/webkit/function-declarations-in-switch-statement-expected.txt
+++ b/deps/v8/test/webkit/function-declarations-in-switch-statement-expected.txt
@@ -26,7 +26,7 @@ PASS 20 is 20
WARN: shouldBe() expects string arguments
PASS 20 is 20
WARN: shouldBe() expects string arguments
-PASS 20 is 20
+FAIL -1 should be 20. Was -1.
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index 971cf4691f..fa527427bc 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -35,9 +35,8 @@
'dfg-inline-arguments-reset-changetype': [PASS, FAIL],
# TODO(turbofan): We run out of stack earlier on 64-bit for now.
'fast/js/deep-recursion-test': [PASS, NO_VARIANTS],
- # This test leads to a SyntaxError from conflicting let declarations
- # in ES2015
- 'function-declarations-in-switch-statement': [FAIL],
+ # Irregexp interpreter overflows stack. We should just not crash.
+ 'fast/js/regexp-stack-overflow': [PASS, FAIL],
}], # ALWAYS
['mode == debug', {
# Too slow in debug mode.
diff --git a/deps/v8/tools/android-sync.sh b/deps/v8/tools/android-sync.sh
index 4acb1cc5a0..6d9500fc52 100755
--- a/deps/v8/tools/android-sync.sh
+++ b/deps/v8/tools/android-sync.sh
@@ -88,6 +88,8 @@ function sync_dir {
echo -n "sync to $ANDROID_V8/$OUTDIR/$ARCH_MODE"
sync_file "$OUTDIR/$ARCH_MODE/cctest"
sync_file "$OUTDIR/$ARCH_MODE/d8"
+sync_file "$OUTDIR/$ARCH_MODE/natives_blob.bin"
+sync_file "$OUTDIR/$ARCH_MODE/snapshot_blob.bin"
sync_file "$OUTDIR/$ARCH_MODE/unittests"
echo ""
echo -n "sync to $ANDROID_V8/tools"
diff --git a/deps/v8/tools/eval_gc_time.sh b/deps/v8/tools/eval_gc_time.sh
index 21cd93d0ac..92246d3866 100755
--- a/deps/v8/tools/eval_gc_time.sh
+++ b/deps/v8/tools/eval_gc_time.sh
@@ -23,7 +23,7 @@ case $1 in
print_usage_and_die
esac
-case $2 in
+case $2 in
max|avg)
RANK_MODE=$2
;;
@@ -104,3 +104,4 @@ case $OP in
*)
;;
esac
+
diff --git a/deps/v8/tools/fuzz-harness.sh b/deps/v8/tools/fuzz-harness.sh
index 31023de3ab..c874d01845 100755
--- a/deps/v8/tools/fuzz-harness.sh
+++ b/deps/v8/tools/fuzz-harness.sh
@@ -36,6 +36,7 @@ JSFUNFUZZ_URL="https://bugzilla.mozilla.org/attachment.cgi?id=310631"
JSFUNFUZZ_MD5="d0e497201c5cd7bffbb1cdc1574f4e32"
v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
+jsfunfuzz_dir="$v8_root/tools/jsfunfuzz"
if [ -n "$1" ]; then
d8="${v8_root}/$1"
@@ -48,24 +49,28 @@ if [ ! -f "$d8" ]; then
exit 1
fi
-jsfunfuzz_file="$v8_root/tools/jsfunfuzz.zip"
-if [ ! -f "$jsfunfuzz_file" ]; then
- echo "Downloading $jsfunfuzz_file ..."
- wget -q -O "$jsfunfuzz_file" $JSFUNFUZZ_URL || exit 1
-fi
+# Deprecated download method. A prepatched archive is downloaded as a hook
+# if jsfunfuzz=1 is specified as a gyp flag. Requires google.com authentication
+# for google storage.
+if [ "$3" == "--download" ]; then
-jsfunfuzz_sum=$(md5sum "$jsfunfuzz_file" | awk '{ print $1 }')
-if [ $jsfunfuzz_sum != $JSFUNFUZZ_MD5 ]; then
- echo "Failed to verify checksum!"
- exit 1
-fi
+ jsfunfuzz_file="$v8_root/tools/jsfunfuzz.zip"
+ if [ ! -f "$jsfunfuzz_file" ]; then
+ echo "Downloading $jsfunfuzz_file ..."
+ wget -q -O "$jsfunfuzz_file" $JSFUNFUZZ_URL || exit 1
+ fi
-jsfunfuzz_dir="$v8_root/tools/jsfunfuzz"
-if [ ! -d "$jsfunfuzz_dir" ]; then
- echo "Unpacking into $jsfunfuzz_dir ..."
- unzip "$jsfunfuzz_file" -d "$jsfunfuzz_dir" || exit 1
- echo "Patching runner ..."
- cat << EOF | patch -s -p0 -d "$v8_root"
+ jsfunfuzz_sum=$(md5sum "$jsfunfuzz_file" | awk '{ print $1 }')
+ if [ $jsfunfuzz_sum != $JSFUNFUZZ_MD5 ]; then
+ echo "Failed to verify checksum!"
+ exit 1
+ fi
+
+ if [ ! -d "$jsfunfuzz_dir" ]; then
+ echo "Unpacking into $jsfunfuzz_dir ..."
+ unzip "$jsfunfuzz_file" -d "$jsfunfuzz_dir" || exit 1
+ echo "Patching runner ..."
+ cat << EOF | patch -s -p0 -d "$v8_root"
--- tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py~
+++ tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py
@@ -125,7 +125,7 @@
@@ -78,6 +83,8 @@ if [ ! -d "$jsfunfuzz_dir" ]; then
logfilename = "w%d" % iteration
one_timed_run(logfilename)
EOF
+ fi
+
fi
flags='--debug-code --expose-gc --verify-gc'
@@ -85,7 +92,12 @@ python -u "$jsfunfuzz_dir/jsfunfuzz/multi_timed_run.py" 300 \
"$d8" $flags "$jsfunfuzz_dir/jsfunfuzz/jsfunfuzz.js"
exit_code=$(cat w* | grep " looking good" -c)
exit_code=$((100-exit_code))
-archive=fuzz-results-$(date +%Y%m%d%H%M%S).tar.bz2
+
+if [ -n "$2" ]; then
+ archive="$2"
+else
+ archive=fuzz-results-$(date +%Y%m%d%H%M%S).tar.bz2
+fi
echo "Creating archive $archive"
tar -cjf $archive err-* w*
rm -f err-* w*
diff --git a/deps/v8/tools/gcmole/download_gcmole_tools.py b/deps/v8/tools/gcmole/download_gcmole_tools.py
new file mode 100755
index 0000000000..7183d28f34
--- /dev/null
+++ b/deps/v8/tools/gcmole/download_gcmole_tools.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+import subprocess
+
+GCMOLE_PATH = os.path.dirname(os.path.abspath(__file__))
+SHA1_PATH = os.path.join(GCMOLE_PATH, 'gcmole-tools.tar.gz.sha1')
+
+if re.search(r'\bgcmole=1', os.environ.get('GYP_DEFINES', '')):
+ subprocess.check_call([
+ 'download_from_google_storage',
+ '-b', 'chrome-v8-gcmole',
+ '-u', '--no_resume',
+ '-s', SHA1_PATH,
+ '--platform=linux*'
+ ])
+else:
+ print 'Skipping gcmole download as gcmole is not set in gyp flags.'
diff --git a/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1 b/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
new file mode 100644
index 0000000000..67d758f754
--- /dev/null
+++ b/deps/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
@@ -0,0 +1 @@
+b10748117f8f53d05dda0a77424b8794e645e330
diff --git a/deps/v8/tools/gcmole/run-gcmole.isolate b/deps/v8/tools/gcmole/run-gcmole.isolate
new file mode 100644
index 0000000000..df6e9a267f
--- /dev/null
+++ b/deps/v8/tools/gcmole/run-gcmole.isolate
@@ -0,0 +1,34 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ 'run-gcmole.py',
+ ],
+ 'files': [
+ 'gccause.lua',
+ 'gcmole.lua',
+ 'parallel.py',
+ 'run-gcmole.py',
+ # The following contains all relevant source and gyp files.
+ '../gyp/v8.gyp',
+ '../../base/',
+ '../../include/',
+ '../../src/',
+ '../../test/cctest/',
+ '../../third_party/icu/source/',
+ ],
+ },
+ 'conditions': [
+ ['gcmole==1', {
+ 'variables': {
+ 'files': [
+ # This assumes gcmole tools have been fetched by a hook
+ # into v8/tools/gcmole/gcmole_tools.
+ 'gcmole-tools/',
+ ],
+ },
+ }],
+ ],
+}
diff --git a/deps/v8/tools/gcmole/run-gcmole.py b/deps/v8/tools/gcmole/run-gcmole.py
new file mode 100755
index 0000000000..a1e4f24ab1
--- /dev/null
+++ b/deps/v8/tools/gcmole/run-gcmole.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+import sys
+
+GCMOLE_PATH = os.path.dirname(os.path.abspath(__file__))
+CLANG_BIN = os.path.join(GCMOLE_PATH, 'gcmole-tools', 'bin')
+CLANG_PLUGINS = os.path.join(GCMOLE_PATH, 'gcmole-tools')
+LUA = os.path.join(GCMOLE_PATH, 'gcmole-tools', 'lua52')
+DRIVER = os.path.join(GCMOLE_PATH, 'gcmole.lua')
+BASE_PATH = os.path.dirname(os.path.dirname(GCMOLE_PATH))
+
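+# The single command-line argument is forwarded to the gcmole.lua driver.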
+assert len(sys.argv) == 2
+
+sys.exit(subprocess.call(
+ [LUA, DRIVER, sys.argv[1]],
+ env={'CLANG_BIN': CLANG_BIN, 'CLANG_PLUGINS': CLANG_PLUGINS},
+ cwd=BASE_PATH,
+))
diff --git a/deps/v8/tools/gcmole/run_gcmole.gyp b/deps/v8/tools/gcmole/run_gcmole.gyp
new file mode 100644
index 0000000000..9d13f7606a
--- /dev/null
+++ b/deps/v8/tools/gcmole/run_gcmole.gyp
@@ -0,0 +1,23 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'run_gcmole_run',
+ 'type': 'none',
+ 'includes': [
+ '../../build/features.gypi',
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'run-gcmole.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 516f8e7490..15eafedfce 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -82,7 +82,7 @@ consts_misc = [
{ 'name': 'OddballTrue', 'value': 'Oddball::kTrue' },
{ 'name': 'OddballTheHole', 'value': 'Oddball::kTheHole' },
{ 'name': 'OddballNull', 'value': 'Oddball::kNull' },
- { 'name': 'OddballArgumentMarker', 'value': 'Oddball::kArgumentMarker' },
+ { 'name': 'OddballArgumentsMarker', 'value': 'Oddball::kArgumentsMarker' },
{ 'name': 'OddballUndefined', 'value': 'Oddball::kUndefined' },
{ 'name': 'OddballUninitialized', 'value': 'Oddball::kUninitialized' },
{ 'name': 'OddballOther', 'value': 'Oddball::kOther' },
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index ca5fb0902b..66f579d33d 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -120,18 +120,30 @@
}],
['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==0', {
'dependencies': ['v8_base', 'v8_external_snapshot'],
- 'inputs': [ '<(PRODUCT_DIR)/snapshot_blob.bin', ],
+ 'inputs': ['<(PRODUCT_DIR)/snapshot_blob.bin'],
+ 'conditions': [
+ ['v8_separate_ignition_snapshot==1', {
+ 'inputs': ['<(PRODUCT_DIR)/snapshot_blob_ignition.bin'],
+ }],
+ ]
}],
['v8_use_snapshot=="true" and v8_use_external_startup_data==1 and want_separate_host_toolset==1', {
'dependencies': ['v8_base', 'v8_external_snapshot'],
'target_conditions': [
['_toolset=="host"', {
- 'inputs': [
- '<(PRODUCT_DIR)/snapshot_blob_host.bin',
- ],
+ 'inputs': ['<(PRODUCT_DIR)/snapshot_blob_host.bin'],
}, {
- 'inputs': [
- '<(PRODUCT_DIR)/snapshot_blob.bin',
+ 'inputs': ['<(PRODUCT_DIR)/snapshot_blob.bin'],
+ }],
+ ],
+ 'conditions': [
+ ['v8_separate_ignition_snapshot==1', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'inputs': ['<(PRODUCT_DIR)/snapshot_blob_ignition_host.bin'],
+ }, {
+ 'inputs': ['<(PRODUCT_DIR)/snapshot_blob_ignition.bin'],
+ }],
],
}],
],
@@ -283,6 +295,65 @@
],
},
}],
+ # Extra snapshot blob for ignition.
+ ['v8_separate_ignition_snapshot==1', {
+ # This is concatenated to the other actions list of
+ # v8_external_snapshot.
+ 'actions': [
+ {
+ 'action_name': 'run_mksnapshot (ignition)',
+ 'inputs': ['<(mksnapshot_exec)'],
+ 'variables': {
+ # TODO: Extract common mksnapshot_flags to a separate
+ # variable.
+ 'mksnapshot_flags_ignition': [
+ '--ignition',
+ '--log-snapshot-positions',
+ '--logfile', '<(INTERMEDIATE_DIR)/snapshot_ignition.log',
+ ],
+ 'conditions': [
+ ['v8_random_seed!=0', {
+ 'mksnapshot_flags_ignition': ['--random-seed', '<(v8_random_seed)'],
+ }],
+ ['v8_vector_stores!=0', {
+ 'mksnapshot_flags_ignition': ['--vector-stores'],
+ }],
+ ],
+ },
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'outputs': ['<(PRODUCT_DIR)/snapshot_blob_ignition_host.bin'],
+ 'action': [
+ '<(mksnapshot_exec)',
+ '<@(mksnapshot_flags_ignition)',
+ '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_ignition_host.bin',
+ '<(embed_script)',
+ ],
+ }, {
+ 'outputs': ['<(PRODUCT_DIR)/snapshot_blob_ignition.bin'],
+ 'action': [
+ '<(mksnapshot_exec)',
+ '<@(mksnapshot_flags_ignition)',
+ '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_ignition.bin',
+ '<(embed_script)',
+ ],
+ }],
+ ],
+ }, {
+ 'outputs': ['<(PRODUCT_DIR)/snapshot_blob_ignition.bin'],
+ 'action': [
+ '<(mksnapshot_exec)',
+ '<@(mksnapshot_flags_ignition)',
+ '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_ignition.bin',
+ '<(embed_script)',
+ ],
+ }],
+ ],
+ },
+ ],
+ }],
],
'dependencies': [
'v8_base',
@@ -297,9 +368,7 @@
'actions': [
{
'action_name': 'run_mksnapshot (external)',
- 'inputs': [
- '<(mksnapshot_exec)',
- ],
+ 'inputs': ['<(mksnapshot_exec)'],
'variables': {
'mksnapshot_flags': [
'--log-snapshot-positions',
@@ -318,9 +387,7 @@
['want_separate_host_toolset==1', {
'target_conditions': [
['_toolset=="host"', {
- 'outputs': [
- '<(PRODUCT_DIR)/snapshot_blob_host.bin',
- ],
+ 'outputs': ['<(PRODUCT_DIR)/snapshot_blob_host.bin'],
'action': [
'<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
@@ -328,9 +395,7 @@
'<(embed_script)',
],
}, {
- 'outputs': [
- '<(PRODUCT_DIR)/snapshot_blob.bin',
- ],
+ 'outputs': ['<(PRODUCT_DIR)/snapshot_blob.bin'],
'action': [
'<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
@@ -340,9 +405,7 @@
}],
],
}, {
- 'outputs': [
- '<(PRODUCT_DIR)/snapshot_blob.bin',
- ],
+ 'outputs': ['<(PRODUCT_DIR)/snapshot_blob.bin'],
'action': [
'<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
@@ -541,16 +604,16 @@
'../../src/compiler/instruction-scheduler.h',
'../../src/compiler/instruction.cc',
'../../src/compiler/instruction.h',
- '../../src/compiler/interpreter-assembler.cc',
- '../../src/compiler/interpreter-assembler.h',
+ '../../src/compiler/int64-lowering.cc',
+ '../../src/compiler/int64-lowering.h',
'../../src/compiler/js-builtin-reducer.cc',
'../../src/compiler/js-builtin-reducer.h',
'../../src/compiler/js-call-reducer.cc',
'../../src/compiler/js-call-reducer.h',
- '../../src/compiler/js-context-relaxation.cc',
- '../../src/compiler/js-context-relaxation.h',
'../../src/compiler/js-context-specialization.cc',
'../../src/compiler/js-context-specialization.h',
+ '../../src/compiler/js-create-lowering.cc',
+ '../../src/compiler/js-create-lowering.h',
'../../src/compiler/js-frame-specialization.cc',
'../../src/compiler/js-frame-specialization.h',
'../../src/compiler/js-generic-lowering.cc',
@@ -778,6 +841,8 @@
'../../src/fast-dtoa.h',
'../../src/field-index.h',
'../../src/field-index-inl.h',
+ '../../src/field-type.cc',
+ '../../src/field-type.h',
'../../src/fixed-dtoa.cc',
'../../src/fixed-dtoa.h',
'../../src/flag-definitions.h',
@@ -799,6 +864,7 @@
'../../src/handles.cc',
'../../src/handles.h',
'../../src/hashmap.h',
+ '../../src/heap-symbols.h',
'../../src/heap/array-buffer-tracker.cc',
'../../src/heap/array-buffer-tracker.h',
'../../src/heap/memory-reducer.cc',
@@ -823,11 +889,14 @@
'../../src/heap/objects-visiting-inl.h',
'../../src/heap/objects-visiting.cc',
'../../src/heap/objects-visiting.h',
+ '../../src/heap/remembered-set.cc',
+ '../../src/heap/remembered-set.h',
'../../src/heap/scavenge-job.h',
'../../src/heap/scavenge-job.cc',
'../../src/heap/scavenger-inl.h',
'../../src/heap/scavenger.cc',
'../../src/heap/scavenger.h',
+ '../../src/heap/slot-set.h',
'../../src/heap/slots-buffer.cc',
'../../src/heap/slots-buffer.h',
'../../src/heap/spaces-inl.h',
@@ -872,11 +941,20 @@
'../../src/interpreter/constant-array-builder.h',
'../../src/interpreter/control-flow-builders.cc',
'../../src/interpreter/control-flow-builders.h',
+ '../../src/interpreter/handler-table-builder.cc',
+ '../../src/interpreter/handler-table-builder.h',
'../../src/interpreter/interpreter.cc',
'../../src/interpreter/interpreter.h',
+ '../../src/interpreter/interpreter-assembler.cc',
+ '../../src/interpreter/interpreter-assembler.h',
+ '../../src/interpreter/register-translator.cc',
+ '../../src/interpreter/register-translator.h',
+ '../../src/interpreter/source-position-table.cc',
+ '../../src/interpreter/source-position-table.h',
'../../src/isolate-inl.h',
'../../src/isolate.cc',
'../../src/isolate.h',
+ '../../src/json-parser.h',
'../../src/json-stringifier.h',
'../../src/key-accumulator.h',
'../../src/key-accumulator.cc',
@@ -914,7 +992,6 @@
'../../src/parsing/expression-classifier.h',
'../../src/parsing/func-name-inferrer.cc',
'../../src/parsing/func-name-inferrer.h',
- '../../src/parsing/json-parser.h',
'../../src/parsing/parameter-initializer-rewriter.cc',
'../../src/parsing/parameter-initializer-rewriter.h',
'../../src/parsing/parser-base.h',
@@ -953,6 +1030,8 @@
'../../src/profiler/profile-generator.h',
'../../src/profiler/sampler.cc',
'../../src/profiler/sampler.h',
+ '../../src/profiler/sampling-heap-profiler.cc',
+ '../../src/profiler/sampling-heap-profiler.h',
'../../src/profiler/strings-storage.cc',
'../../src/profiler/strings-storage.h',
'../../src/profiler/unbound-queue-inl.h',
@@ -1033,6 +1112,7 @@
'../../src/snapshot/snapshot-common.cc',
'../../src/snapshot/snapshot-source-sink.cc',
'../../src/snapshot/snapshot-source-sink.h',
+ '../../src/source-position.h',
'../../src/splay-tree.h',
'../../src/splay-tree-inl.h',
'../../src/startup-data-util.cc',
@@ -1058,7 +1138,6 @@
'../../src/type-feedback-vector.h',
'../../src/type-info.cc',
'../../src/type-info.h',
- '../../src/types-inl.h',
'../../src/types.cc',
'../../src/types.h',
'../../src/typing-asm.cc',
@@ -1072,6 +1151,7 @@
'../../src/unicode-cache.h',
'../../src/unicode-decoder.cc',
'../../src/unicode-decoder.h',
+ '../../src/utils-inl.h',
'../../src/utils.cc',
'../../src/utils.h',
'../../src/v8.cc',
@@ -1537,6 +1617,7 @@
'../../src/base/atomicops_internals_mips64_gcc.h',
'../../src/base/atomicops_internals_portable.h',
'../../src/base/atomicops_internals_ppc_gcc.h',
+ '../../src/base/atomicops_internals_s390_gcc.h',
'../../src/base/atomicops_internals_tsan.h',
'../../src/base/atomicops_internals_x86_gcc.cc',
'../../src/base/atomicops_internals_x86_gcc.h',
@@ -1929,7 +2010,6 @@
'../../src/js/generator.js',
'../../src/js/harmony-atomics.js',
'../../src/js/harmony-regexp.js',
- '../../src/js/harmony-reflect.js',
'../../src/js/harmony-object-observe.js',
'../../src/js/harmony-sharedarraybuffer.js',
'../../src/js/harmony-simd.js',
diff --git a/deps/v8/tools/ic-explorer.html b/deps/v8/tools/ic-explorer.html
new file mode 100644
index 0000000000..43b486a50c
--- /dev/null
+++ b/deps/v8/tools/ic-explorer.html
@@ -0,0 +1,338 @@
+<html>
+ <head>
+<style>
+ .entry-details {
+ }
+ .entry-details TD {
+ }
+ .details {
+ width: 2em;
+ border: 1px black dotted;
+ }
+ .count {
+ text-align: right;
+ width: 5em;
+ font-family: monospace;
+ }
+ .percentage {
+ text-align: right;
+ width: 5em;
+ font-family: monospace;
+ }
+ .key {
+ padding-left: 1em;
+ }
+ .drilldown-group-title {
+ font-weight: bold;
+ padding: 0.5em 0 0.2em 0;
+ }
+</style>
+ <script>
+"use strict"
+var entries = [];
+
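+// Represents one parsed line of --trace_ic output.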
+class Entry {
+ constructor(id, line) {
+ this.id = id;
+ this.line = line;
+ var parts = line.split(" ");
+ if (parts.length < 6) return
+ this.isValid = false;
+ if (parts[0][0] !== "[") return;
+ if (parts[1] === "patching") return;
+ this.type = parts[0].substr(1);
+ this.category = "Other";
+ if (this.type.indexOf("Store") !== -1) {
+ this.category = "Store";
+ } else if (this.type.indexOf("Load") !== -1) {
+ this.category = "Load";
+ }
+ if (this.type.length == 0) return;
+ if (this.type.indexOf('BinaryOpIC(') === 0) {
+ this.type = "BinaryOpIC";
+ var split = parts[0].split('(');
+ this.state = "(" + split[1] + " => " + parts[2];
+ var offset = this.parsePositionAndFile(parts, 6);
+ if (offset == -1) return
+ if (this.file === undefined) return
+ this.file = this.file.slice(0,-1);
+ } else {
+ var offset = this.parsePositionAndFile(parts, 2);
+ if (offset == -1) return
+ this.state = parts[++offset];
+ if (this.type !== "CompareIC") {
+ // if there is no address we have a smi key
+ var address = parts[++offset];
+ if (address !== undefined && address.indexOf("0x") === 0) {
+ this.key = parts.slice(++offset).join(" ");
+ } else {
+ this.key = address;
+ }
+ }
+ }
+ this.filePosition = this.file + " " + this.position;
+ if (this.key) {
+ var isStringKey = false
+ if (this.key.indexOf("<String[") === 0) {
+ isStringKey = true;
+ this.key = "\"" + this.key.slice(this.key.indexOf(']')+3);
+ } else if (this.key.indexOf("<") === 0) {
+ this.key = this.key.slice(1);
+ }
+ if (this.key.endsWith(">]")) {
+ this.key = this.key.slice(0, -2);
+ } else if (this.key.endsWith("]")) {
+ this.key = this.key.slice(0, -1);
+ }
+ if (isStringKey) {
+ this.key = this.key + "\"";
+ }
+ }
+ this.isValid = true;
+ }
+
+ parsePositionAndFile(parts, start) {
+ // find the position of 'at' in the parts array.
+ var offset = start;
+ for (var i = start+1; i<parts.length; i++) {
+ offset++;
+ if (parts[i] == 'at') break;
+ }
+ if (parts[offset] !== 'at') return -1;
+ this.position = parts.slice(start, offset).join(' ');
+ offset += 1;
+ this.isNative = parts[offset] == "native"
+ offset += this.isNative ? 1 : 0;
+ this.file = parts[offset];
+ return offset;
+ }
+}
+
+function loadFile() {
+ var files = document.getElementById("uploadInput").files;
+
+ var file = files[0];
+ var reader = new FileReader();
+
+ reader.onload = function(evt) {
+ entries = [];
+ var end = this.result.length;
+ var current = 0;
+ var next = 0;
+ var line;
+ var i = 0;
+ var entry;
+ while (current < end) {
+ next = this.result.indexOf("\n", current);
+ if (next === -1) break;
+ i++;
+
+ line = this.result.substring(current, next);
+ current = next+1;
+ entry = new Entry(i, line);
+ if (entry.isValid) entries.push(entry);
+ }
+
+ document.getElementById("count").innerHTML = i;
+ updateTable();
+ }
+ reader.readAsText(file);
+ initGroupKeySelect();
+}
+
+
+
+var properties = ['type', 'category', 'file', 'filePosition', 'state' , 'key', 'isNative']
+
+class Group {
+ constructor(property, key, entry) {
+ this.property = property;
+ this.key = key;
+ this.count = 1;
+ this.entries = [entry];
+ this.percentage = undefined;
+ this.groups = undefined;
+ }
+
+ add(entry) {
+ this.count ++;
+ this.entries.push(entry)
+ }
+
+ createSubGroups() {
+ this.groups = {};
+ for (var i=0; i<properties.length; i++) {
+ var subProperty = properties[i];
+ if (this.property == subProperty) continue;
+ this.groups[subProperty] = groupBy(this.entries, subProperty);
+ }
+ }
+}
+
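+// Buckets 'entries' by 'property', computes each bucket's share of the total,
+// and returns the buckets sorted by descending count.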
+function groupBy(entries, property) {
+ var accumulator = {};
+ accumulator.__proto__ = null;
+ var length = entries.length;
+ for (var i = 0; i < length; i++) {
+ var entry = entries[i];
+ var key = entry[property];
+ if (accumulator[key] == undefined) {
+ accumulator[key] = new Group(property, key, entry)
+ } else {
+ var group = accumulator[key];
+ if (group.entries == undefined) console.log([group, entry]);
+ group.add(entry)
+ }
+ }
+ var result = []
+ for (var key in accumulator) {
+ var group = accumulator[key];
+ group.percentage = Math.round(group.count / length * 100 * 100) / 100;
+ result.push(group);
+ }
+ result.sort((a,b) => { return b.count - a.count });
+ return result;
+}
+
+
+
+
+function updateTable() {
+ var select = document.getElementById("group-key");
+ var key = select.options[select.selectedIndex].text;
+ console.log(key);
+ var tableBody = document.getElementById("table-body");
+ removeAllChildren(tableBody);
+ var groups = groupBy(entries, key, true);
+ display(groups, tableBody);
+}
+
+function selectedOption(node) {
+ return node.options[node.selectedIndex];
+}
+
+function removeAllChildren(node) {
+ while (node.firstChild) {
+ node.removeChild(node.firstChild);
+ }
+}
+
+function display(entries, parent) {
+ var fragment = document.createDocumentFragment();
+
+ function td(tr, content, className) {
+ var td = document.createElement("td");
+ td.innerHTML = content;
+ td.className = className
+ tr.appendChild(td);
+ return td
+ }
+ var max = Math.min(1000, entries.length)
+ for (var i = 0; i<max; i++) {
+ var entry = entries[i];
+ var tr = document.createElement("tr");
+ tr.entry = entry;
+ td(tr, '<span onclick="toggleDetails(this)">details</span>', 'details');
+ td(tr, entry.percentage +"%", 'percentage');
+ td(tr, entry.count, 'count');
+ td(tr, entry.key, 'key');
+ fragment.appendChild(tr);
+ }
+ var omitted = entries.length - max;
+ if (omitted > 0) {
+ var tr = document.createElement("tr");
+ var omittedCell = td(tr, 'Omitted ' + omitted + " entries.");
+ omittedCell.colSpan = 4;
+ fragment.appendChild(tr);
+ }
+ parent.appendChild(fragment);
+}
+
+function displayDrilldown(entry, previousSibling) {
+ var tr = document.createElement('tr');
+ tr.className = "entry-details";
+ tr.style.display = "none";
+ // indent by one td.
+ tr.appendChild(document.createElement("td"));
+ var td = document.createElement("td");
+ td.colSpan = 3;
+ for (var key in entry.groups) {
+ td.appendChild(displayDrilldownGroup(entry, key));
+ }
+ tr.appendChild(td);
+ // Append the new TR after previousSibling.
+ previousSibling.parentNode.insertBefore(tr, previousSibling.nextSibling)
+}
+
+function displayDrilldownGroup(entry, key) {
+ var max = 20;
+ var group = entry.groups[key];
+ var div = document.createElement("div")
+ div.className = 'drilldown-group-title'
+ div.innerHTML = key + ' [top ' + max + ']';
+ var table = document.createElement("table");
+ display(group.slice(0, max), table, false)
+ div.appendChild(table);
+ return div;
+}
+
+function toggleDetails(node) {
+ var tr = node.parentNode.parentNode;
+ var entry = tr.entry;
+
+ // Create subgroups in-place if they don't exist yet.
+ if (entry.groups === undefined) {
+ entry.createSubGroups();
+ displayDrilldown(entry, tr);
+ }
+ var details = tr.nextSibling;
+ var display = details.style.display;
+ if (display != "none") {
+ display = "none";
+ } else {
+ display = "table-row";
+ }
+ details.style.display = display;
+}
+
+function initGroupKeySelect() {
+ var select = document.getElementById("group-key");
+ for (var i in properties) {
+ var option = document.createElement("option");
+ option.text = properties[i];
+ select.add(option);
+ }
+}
+
+ </script>
+ </head>
+ <body>
+ <h1>
+ <span style="color: #00FF00">I</span>
+ <span style="color: #FF00FF">C</span>
+ <span style="color: #00FFFF">E</span>
+ </h1>
+ Your IC-Explorer.
+ <h2>Usage</h2>
+ Run your script with <code>--trace_ic</code> and upload the resulting trace on this page:<br/>
+ <code>/path/to/d8 --trace_ic your_script.js > trace.txt</code>
+ <h2>Data</h2>
+ <form name="fileForm">
+ <p>
+ <input id="uploadInput" type="file" name="files" onchange="loadFile();" >
+ trace entries: <span id="count">0</span>
+ </p>
+ </form>
+ <h2>Result</h2>
+ <p>
+ Group-Key:
+ <select id="group-key" onchange="updateTable()"></select>
+ </p>
+ <p>
+ <table id="table" width="100%">
+ <tbody id="table-body">
+ </tbody>
+ </table>
+ </p>
+ </body>
+</html>
diff --git a/deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py b/deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py
new file mode 100644
index 0000000000..19eff02438
--- /dev/null
+++ b/deps/v8/tools/jsfunfuzz/download_jsfunfuzz.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+import subprocess
+
+FUZZ_PATH = os.path.dirname(os.path.abspath(__file__))
+SHA1_PATH = os.path.join(FUZZ_PATH, 'jsfunfuzz.tar.gz.sha1')
+
+if re.search(r'\bjsfunfuzz=1', os.environ.get('GYP_DEFINES', '')):
+ subprocess.check_call([
+ 'download_from_google_storage',
+ '-b', 'chrome-v8-jsfunfuzz',
+ '-u', '--no_resume',
+ '-s', SHA1_PATH,
+ '--platform=linux*'
+ ])
+else:
+ print 'Skipping jsfunfuzz download as jsfunfuzz is not set in gyp flags.'
diff --git a/deps/v8/tools/jsfunfuzz/fuzz-harness.sh b/deps/v8/tools/jsfunfuzz/fuzz-harness.sh
new file mode 100755
index 0000000000..205a61b335
--- /dev/null
+++ b/deps/v8/tools/jsfunfuzz/fuzz-harness.sh
@@ -0,0 +1,84 @@
+#!/bin/bash
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# A simple harness that downloads and runs 'jsfunfuzz' against d8. This
+# takes a long time because it runs many iterations and is intended for
+# automated usage. The package containing 'jsfunfuzz' can be found as an
+# attachment to this bug:
+# https://bugzilla.mozilla.org/show_bug.cgi?id=jsfunfuzz
+
+JSFUNFUZZ_URL="https://bugzilla.mozilla.org/attachment.cgi?id=310631"
+JSFUNFUZZ_MD5="d0e497201c5cd7bffbb1cdc1574f4e32"
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../../)
+jsfunfuzz_dir="$v8_root/tools/jsfunfuzz"
+cd "$jsfunfuzz_dir"
+
+if [ -n "$1" ]; then
+ d8="${v8_root}/$1"
+else
+ d8="${v8_root}/d8"
+fi
+
+if [ ! -f "$d8" ]; then
+ echo "Failed to find d8 binary: $d8"
+ exit 1
+fi
+
+# Deprecated download method. A prepatched archive is downloaded as a hook
+# if jsfunfuzz=1 is specified as a gyp flag. Requires google.com authentication
+# for google storage.
+if [ "$3" == "--download" ]; then
+
+ jsfunfuzz_file="$v8_root/tools/jsfunfuzz.zip"
+ if [ ! -f "$jsfunfuzz_file" ]; then
+ echo "Downloading $jsfunfuzz_file ..."
+ wget -q -O "$jsfunfuzz_file" $JSFUNFUZZ_URL || exit 1
+ fi
+
+ jsfunfuzz_sum=$(md5sum "$jsfunfuzz_file" | awk '{ print $1 }')
+ if [ $jsfunfuzz_sum != $JSFUNFUZZ_MD5 ]; then
+ echo "Failed to verify checksum!"
+ exit 1
+ fi
+
+ if [ ! -d "$jsfunfuzz_dir" ]; then
+ echo "Unpacking into $jsfunfuzz_dir ..."
+ unzip "$jsfunfuzz_file" -d "$jsfunfuzz_dir" || exit 1
+ echo "Patching runner ..."
+ cat << EOF | patch -s -p0 -d "$v8_root"
+--- tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py~
++++ tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py
+@@ -125,7 +125,7 @@
+
+ def many_timed_runs():
+ iteration = 0
+- while True:
++ while iteration < 100:
+ iteration += 1
+ logfilename = "w%d" % iteration
+ one_timed_run(logfilename)
+EOF
+ fi
+
+fi
+
+flags='--debug-code --expose-gc --verify-gc'
+python -u "$jsfunfuzz_dir/jsfunfuzz/multi_timed_run.py" 300 \
+ "$d8" $flags "$jsfunfuzz_dir/jsfunfuzz/jsfunfuzz.js"
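+# The patched runner performs 100 iterations, each logging to w<n>; a clean
+# iteration prints " looking good", so the failure count is 100 minus the
+# number of clean logs.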
+exit_code=$(cat w* | grep " looking good" -c)
+exit_code=$((100-exit_code))
+
+if [ -n "$2" ]; then
+ archive="$2"
+else
+ archive=fuzz-results-$(date +%Y%m%d%H%M%S).tar.bz2
+fi
+echo "Creating archive $archive"
+tar -cjf $archive err-* w*
+rm -f err-* w*
+
+echo "Total failures: $exit_code"
+exit $exit_code
diff --git a/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp b/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp
new file mode 100644
index 0000000000..fb0e5f4949
--- /dev/null
+++ b/deps/v8/tools/jsfunfuzz/jsfunfuzz.gyp
@@ -0,0 +1,26 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'jsfunfuzz_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../../build/features.gypi',
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'jsfunfuzz.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/deps/v8/tools/jsfunfuzz/jsfunfuzz.isolate b/deps/v8/tools/jsfunfuzz/jsfunfuzz.isolate
new file mode 100644
index 0000000000..56cb4a733f
--- /dev/null
+++ b/deps/v8/tools/jsfunfuzz/jsfunfuzz.isolate
@@ -0,0 +1,18 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ 'fuzz-harness.sh',
+ ],
+ 'files': [
+ # Grab current directory. This avoids adding logic for checking the
+ # existence of the jsfunfuzz subdirectory.
+ './',
+ ],
+ },
+ 'includes': [
+ '../../src/d8.isolate',
+ ],
+}
diff --git a/deps/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1 b/deps/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1
new file mode 100644
index 0000000000..449996007d
--- /dev/null
+++ b/deps/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1
@@ -0,0 +1 @@
+d92e66273ea2a0da89456a977edd0224a8e837e9 \ No newline at end of file
diff --git a/deps/v8/tools/ll_prof.py b/deps/v8/tools/ll_prof.py
index 7dac2e05eb..e65796145e 100755
--- a/deps/v8/tools/ll_prof.py
+++ b/deps/v8/tools/ll_prof.py
@@ -173,11 +173,19 @@ class Code(object):
break
count += cnt
total_count += count
- count = 100.0 * count / self.self_ticks
- if count >= 0.01:
- print "%15.2f %x: %s" % (count, lines[i][0], lines[i][1])
+ percent = 100.0 * count / self.self_ticks
+ offset = lines[i][0]
+ if percent >= 0.01:
+ # 5 spaces for tick count
+ # 1 space following
+ # 1 for '|'
+ # 1 space following
+ # 6 for the percentage number, incl. the '.'
+ # 1 for the '%' sign
+ # => 15
+ print "%5d | %6.2f%% %x(%d): %s" % (count, percent, offset, offset, lines[i][1])
else:
- print "%s %x: %s" % (" " * 15, lines[i][0], lines[i][1])
+ print "%s %x(%d): %s" % (" " * 15, offset, offset, lines[i][1])
print
assert total_count == self.self_ticks, \
"Lost ticks (%d != %d) in %s" % (total_count, self.self_ticks, self)
diff --git a/deps/v8/tools/luci-go/linux64/isolate.sha1 b/deps/v8/tools/luci-go/linux64/isolate.sha1
index c2821fca10..41d0add796 100644
--- a/deps/v8/tools/luci-go/linux64/isolate.sha1
+++ b/deps/v8/tools/luci-go/linux64/isolate.sha1
@@ -1 +1 @@
-32a3d49a4f7279ad022f346f7d960b2d58e2a0fe \ No newline at end of file
+cf7c1fac12790056ac393774827a5720c7590bac
diff --git a/deps/v8/tools/luci-go/mac64/isolate.sha1 b/deps/v8/tools/luci-go/mac64/isolate.sha1
index fcb6c8fa9e..15744d663a 100644
--- a/deps/v8/tools/luci-go/mac64/isolate.sha1
+++ b/deps/v8/tools/luci-go/mac64/isolate.sha1
@@ -1 +1 @@
-83306c575904ec92c1af9ccc67240d26069df337 \ No newline at end of file
+4678a9332ef5a7b90b184763afee1c100981f710
diff --git a/deps/v8/tools/luci-go/win64/isolate.exe.sha1 b/deps/v8/tools/luci-go/win64/isolate.exe.sha1
index 032483cba7..7c5b7ebf6e 100644
--- a/deps/v8/tools/luci-go/win64/isolate.exe.sha1
+++ b/deps/v8/tools/luci-go/win64/isolate.exe.sha1
@@ -1 +1 @@
-da358c2666ef9b89022e0eadf363cc6e123384e2 \ No newline at end of file
+98457ff4fc79d05661fea53d2b3aff70fac90022
diff --git a/deps/v8/tools/perf/statistics-for-json.R b/deps/v8/tools/perf/statistics-for-json.R
new file mode 100644
index 0000000000..fde2cd75db
--- /dev/null
+++ b/deps/v8/tools/perf/statistics-for-json.R
@@ -0,0 +1,113 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Do statistical tests on benchmark results.
+# This script requires the libraries rjson, R.utils, ggplot2 and data.table;
+# install them prior to running.
+
+# To use the script, first get some benchmark results, for example via
+# tools/run_perf.py ../v8-perf/benchmarks/Octane2.1/Octane2.1-TF.json
+# --outdir=out/x64.release-on --outdir-no-patch=out/x64.release-off
+# --json-test-results=results-on.json
+# --json-test-results-no-patch=results-off.json
+# then run this script
+# Rscript statistics-for-json.R results-on.json results-off.json ~/SVG
+# to produce graphs (and statistical test output on stdout).
+
+
+suppressMessages(library("rjson")) # for fromJson
+suppressMessages(library("R.utils")) # for printf
+suppressMessages(library("ggplot2")) # for plotting
+suppressMessages(library("data.table")) # less broken than data.frame
+
+# Clear all variables from environment
+rm(list=ls())
+
+args <- commandArgs(TRUE)
+if (length(args) != 3) {
+  printf(paste("usage: Rscript %%this_script patched-results.json",
+               "unpatched-results.json output-directory\n"))
+} else {
+ patch <- fromJSON(file=args[1])
+ nopatch <- fromJSON(file=args[2])
+ outputPath <- args[3]
+ df <- data.table(L = numeric(), R = numeric(), E = numeric(),
+ p.value = numeric(), yL = character(),
+ p.value.sig = logical())
+
+ for (i in seq(1, length(patch$traces))) {
+ testName <- patch$traces[[i]]$graphs[[2]]
+ printf("%s\n", testName)
+
+ nopatch_res <- as.integer(nopatch$traces[[i]]$results)
+ patch_res <- as.integer(patch$traces[[i]]$results)
+ if (length(nopatch_res) > 0) {
+ patch_norm <- shapiro.test(patch_res);
+ nopatch_norm <- shapiro.test(nopatch_res);
+
+      # The Shapiro-Wilk test indicates whether data are unlikely to come
+      # from a normal distribution: the p-value is the probability of
+      # observing a sample at least this far from normality if the data
+      # really were normal, so the smaller p, the more likely the sample
+      # was not drawn from a normal distribution.
+      # See [wikipedia:Shapiro-Wilk-Test].
+ printf(" Patched scores look %s distributed (W=%.4f, p=%.4f)\n",
+ ifelse(patch_norm$p.value < 0.05, "not normally", "normally"),
+ patch_norm$statistic, patch_norm$p.value);
+ printf(" Unpatched scores look %s distributed (W=%.4f, p=%.4f)\n",
+ ifelse(nopatch_norm$p.value < 0.05, "not normally", "normally"),
+ nopatch_norm$statistic, nopatch_norm$p.value);
+
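+      # Plot score histograms: patched first, then unpatched ("-before").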
+ hist <- ggplot(data=data.frame(x=as.integer(patch_res)), aes(x)) +
+ theme_bw() +
+ geom_histogram(bins=50) +
+ ylab("Points") +
+ xlab(patch$traces[[i]]$graphs[[2]])
+ ggsave(filename=sprintf("%s/%s.svg", outputPath, testName),
+ plot=hist, width=7, height=7)
+
+ hist <- ggplot(data=data.frame(x=as.integer(nopatch_res)), aes(x)) +
+ theme_bw() +
+ geom_histogram(bins=50) +
+ ylab("Points") +
+ xlab(patch$traces[[i]]$graphs[[2]])
+ ggsave(filename=sprintf("%s/%s-before.svg", outputPath, testName),
+ plot=hist, width=7, height=7)
+
+ # The Wilcoxon rank-sum test
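+      # (Mann-Whitney U) is non-parametric and therefore needs no normality
+      # assumption when comparing patched and unpatched scores.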
+ mww <- wilcox.test(patch_res, nopatch_res, conf.int = TRUE, exact=TRUE)
+ printf(paste(" Wilcoxon U-test W=%.4f, p=%.4f,",
+ "confidence interval [%.1f, %.1f],",
+ "est. effect size %.1f \n"),
+ mww$statistic, mww$p.value,
+ mww$conf.int[1], mww$conf.int[2], mww$estimate);
+ df <-rbind(df, list(mww$conf.int[1], mww$conf.int[2],
+ unname(mww$estimate), unname(mww$p.value),
+ testName, ifelse(mww$p.value < 0.05, TRUE, FALSE)))
+ # t-test
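+      # t.test defaults to Welch's variant, which does not assume equal
+      # variances between the two samples.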
+ t <- t.test(patch_res, nopatch_res, paired=FALSE)
+ printf(paste(" Welch t-test t=%.4f, df = %.2f, p=%.4f,",
+ "confidence interval [%.1f, %.1f], mean diff %.1f \n"),
+ t$statistic, t$parameter, t$p.value,
+ t$conf.int[1], t$conf.int[2], t$estimate[1]-t$estimate[2]);
+ }
+ }
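+  # Order rows by estimated effect size before building the summary plot.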
+ df2 <- cbind(x=1:nrow(df), df[order(E),])
+ speedup <- ggplot(df2, aes(x = x, y = E, colour=p.value.sig)) +
+ geom_errorbar(aes(ymax = L, ymin = R), colour="black") +
+ geom_point(size = 4) +
+ scale_x_discrete(limits=df2$yL,
+ name=paste("Benchmark, n=", length(patch_res))) +
+ theme_bw() +
+ geom_hline(yintercept = 0) +
+ ylab("Est. Effect Size in Points") +
+ theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust=0.5)) +
+ theme(legend.position = "bottom") +
+ scale_colour_manual(name="Statistical Significance (MWW, p < 0.05)",
+ values=c("red", "green"),
+ labels=c("not significant", "significant")) +
+ theme(legend.justification=c(0,1), legend.position=c(0,1))
+ print(speedup)
+ ggsave(filename=sprintf("%s/speedup-estimates.svg", outputPath),
+ plot=speedup, width=7, height=7)
+}
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index 998656908d..23940bb686 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -354,30 +354,6 @@ class SourceProcessor(SourceFileProcessor):
if not contents.endswith('\n') or contents.endswith('\n\n'):
print "%s does not end with a single new line." % name
result = False
- # Check two empty lines between declarations.
- if name.endswith(".cc"):
- line = 0
- lines = []
- parts = contents.split('\n')
- while line < len(parts) - 2:
- if self.EndOfDeclaration(parts[line]):
- if self.StartOfDeclaration(parts[line + 1]):
- lines.append(str(line + 1))
- line += 1
- elif parts[line + 1] == "" and \
- self.StartOfDeclaration(parts[line + 2]):
- lines.append(str(line + 1))
- line += 2
- line += 1
- if len(lines) >= 1:
- linenumbers = ', '.join(lines)
- if len(lines) > 1:
- print "%s does not have two empty lines between declarations " \
- "in lines %s." % (name, linenumbers)
- else:
- print "%s does not have two empty lines between declarations " \
- "in line %s." % (name, linenumbers)
- result = False
# Sanitize flags for fuzzer.
if "mjsunit" in name:
match = FLAGS_LINE.search(contents)
diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py
index 27fd370971..fc9aeee461 100755
--- a/deps/v8/tools/release/auto_roll.py
+++ b/deps/v8/tools/release/auto_roll.py
@@ -123,7 +123,6 @@ class UpdateChromiumCheckout(Step):
cwd = self._options.chromium
self.GitCheckout("master", cwd=cwd)
self.DeleteBranch("work-branch", cwd=cwd)
- self.Command("gclient", "sync --nohooks", cwd=cwd)
self.GitPull(cwd=cwd)
# Update v8 remotes.
diff --git a/deps/v8/tools/release/common_includes.py b/deps/v8/tools/release/common_includes.py
index c2b64c38ec..c3a216c664 100644
--- a/deps/v8/tools/release/common_includes.py
+++ b/deps/v8/tools/release/common_includes.py
@@ -50,6 +50,7 @@ DAY_IN_SECONDS = 24 * 60 * 60
PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
PUSH_MSG_NEW_RE = re.compile(r"^Version \d+\.\d+\.\d+$")
VERSION_FILE = os.path.join("include", "v8-version.h")
+WATCHLISTS_FILE = "WATCHLISTS"
# V8 base directory.
V8_BASE = os.path.dirname(
@@ -381,7 +382,7 @@ class GitInterface(VCInterface):
# is the case for all automated merge and push commits - also no title is
# the prefix of another title).
commit = None
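+  # Poll with increasing wait intervals (roughly three minutes in total).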
- for wait_interval in [3, 7, 15, 35, 45, 60]:
+ for wait_interval in [5, 10, 20, 40, 60, 60]:
self.step.Git("fetch")
commit = self.step.GitLog(n=1, format="%H", grep=message, branch=remote)
if commit:
diff --git a/deps/v8/tools/release/create_release.py b/deps/v8/tools/release/create_release.py
index 3bbb50e491..7477ea1461 100755
--- a/deps/v8/tools/release/create_release.py
+++ b/deps/v8/tools/release/create_release.py
@@ -11,7 +11,6 @@ import urllib2
from common_includes import *
-
class Preparation(Step):
MESSAGE = "Preparation."
@@ -164,6 +163,7 @@ class MakeBranch(Step):
self.Git("checkout -b work-branch %s" % self["push_hash"])
self.GitCheckoutFile(CHANGELOG_FILE, self["latest_version"])
self.GitCheckoutFile(VERSION_FILE, self["latest_version"])
+ self.GitCheckoutFile(WATCHLISTS_FILE, self["latest_version"])
class AddChangeLog(Step):
@@ -183,6 +183,19 @@ class SetVersion(Step):
self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
+class EnableMergeWatchlist(Step):
+ MESSAGE = "Enable watchlist entry for merge notifications."
+
+ def RunStep(self):
+ old_watchlist_content = FileToText(os.path.join(self.default_cwd,
+ WATCHLISTS_FILE))
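+    # Uncomment the v8-merges watchlist entry so that merge notifications
+    # are enabled on branches created by this script.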
+ new_watchlist_content = re.sub("(# 'v8-merges@googlegroups\.com',)",
+ "'v8-merges@googlegroups.com',",
+ old_watchlist_content)
+ TextToFile(new_watchlist_content, os.path.join(self.default_cwd,
+ WATCHLISTS_FILE))
+
+
class CommitBranch(Step):
MESSAGE = "Commit version and changelog to new branch."
@@ -288,6 +301,7 @@ class CreateRelease(ScriptsBase):
MakeBranch,
AddChangeLog,
SetVersion,
+ EnableMergeWatchlist,
CommitBranch,
PushBranch,
TagRevision,
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index 4a3cb5b24a..4f133ac28a 100644
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -43,8 +43,6 @@ import merge_to_branch
from merge_to_branch import *
import push_to_candidates
from push_to_candidates import *
-import chromium_roll
-from chromium_roll import ChromiumRoll
import releases
from releases import Releases
from auto_tag import AutoTag
@@ -391,6 +389,20 @@ class ScriptTest(unittest.TestCase):
f.write(" // Some line...\n")
f.write("#define V8_IS_CANDIDATE_VERSION 0\n")
+ def WriteFakeWatchlistsFile(self):
+ watchlists_file = os.path.join(TEST_CONFIG["DEFAULT_CWD"], WATCHLISTS_FILE)
+ if not os.path.exists(os.path.dirname(watchlists_file)):
+ os.makedirs(os.path.dirname(watchlists_file))
+ with open(watchlists_file, "w") as f:
+
+ content = """
+ 'merges': [
+ # Only enabled on branches created with tools/release/create_release.py
+ # 'v8-merges@googlegroups.com',
+ ],
+"""
+ f.write(content)
+
def MakeStep(self):
"""Convenience wrapper."""
options = ScriptsBase(TEST_CONFIG, self, self._state).MakeOptions([])
@@ -954,6 +966,8 @@ Performance and stability improvements on all platforms."""
Cmd("git checkout -f 3.22.4 -- ChangeLog", "", cb=ResetChangeLog),
Cmd("git checkout -f 3.22.4 -- include/v8-version.h", "",
cb=self.WriteFakeVersionFile),
+ Cmd("git checkout -f 3.22.4 -- WATCHLISTS", "",
+ cb=self.WriteFakeWatchlistsFile),
Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
cb=CheckVersionCommit),
Cmd("git push origin "
@@ -985,6 +999,18 @@ Performance and stability improvements on all platforms."""
# Note: The version file is on build number 5 again in the end of this test
# since the git command that merges to master is mocked out.
+ # Check for correct content of the WATCHLISTS file
+
+ watchlists_content = FileToText(os.path.join(TEST_CONFIG["DEFAULT_CWD"],
+ WATCHLISTS_FILE))
+ expected_watchlists_content = """
+ 'merges': [
+ # Only enabled on branches created with tools/release/create_release.py
+ 'v8-merges@googlegroups.com',
+ ],
+"""
+ self.assertEqual(watchlists_content, expected_watchlists_content)
+
C_V8_22624_LOG = """V8 CL.
git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22624 123
@@ -1086,7 +1112,6 @@ deps = {
Cmd("git status -s -uno", "", cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
Cmd("git branch", "", cwd=chrome_dir),
- Cmd("gclient sync --nohooks", "syncing...", cwd=chrome_dir),
Cmd("git pull", "", cwd=chrome_dir),
Cmd("git fetch origin", ""),
Cmd("git new-branch work-branch", "", cwd=chrome_dir),
diff --git a/deps/v8/tools/run-deopt-fuzzer.gyp b/deps/v8/tools/run-deopt-fuzzer.gyp
new file mode 100644
index 0000000000..73f0aaf7a5
--- /dev/null
+++ b/deps/v8/tools/run-deopt-fuzzer.gyp
@@ -0,0 +1,26 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'run_deopt_fuzzer_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../build/features.gypi',
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'run-deopt-fuzzer.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/deps/v8/tools/run-deopt-fuzzer.isolate b/deps/v8/tools/run-deopt-fuzzer.isolate
new file mode 100644
index 0000000000..196fb5dbbc
--- /dev/null
+++ b/deps/v8/tools/run-deopt-fuzzer.isolate
@@ -0,0 +1,19 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ 'run-deopt-fuzzer.py',
+ ],
+ 'files': [
+ 'run-deopt-fuzzer.py',
+ ],
+ },
+ 'includes': [
+ 'testrunner/testrunner.isolate',
+ '../src/d8.isolate',
+ '../test/mjsunit/mjsunit.isolate',
+ '../test/webkit/webkit.isolate',
+ ],
+}
diff --git a/deps/v8/tools/run-deopt-fuzzer.py b/deps/v8/tools/run-deopt-fuzzer.py
index 70e106ec1b..e4d8f16b4f 100755
--- a/deps/v8/tools/run-deopt-fuzzer.py
+++ b/deps/v8/tools/run-deopt-fuzzer.py
@@ -48,6 +48,9 @@ from testrunner.local import verbose
from testrunner.objects import context
+# Base dir of the v8 checkout to be used as cwd.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
ARCH_GUESS = utils.DefaultArch()
DEFAULT_TESTS = ["mjsunit", "webkit"]
TIMEOUT_DEFAULT = 60
@@ -290,6 +293,9 @@ def ShardTests(tests, shard_count, shard_run):
def Main():
+  # Use the v8 root as cwd; some test cases use "load" with relative paths.
+ os.chdir(BASE_DIR)
+
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
@@ -297,9 +303,8 @@ def Main():
return 1
exit_code = 0
- workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), ".."))
- suite_paths = utils.GetSuitePaths(join(workspace, "test"))
+ suite_paths = utils.GetSuitePaths(join(BASE_DIR, "test"))
if len(args) == 0:
suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
@@ -314,7 +319,7 @@ def Main():
suites = []
for root in suite_paths:
suite = testsuite.TestSuite.LoadTestSuite(
- os.path.join(workspace, "test", root))
+ os.path.join(BASE_DIR, "test", root))
if suite:
suite.SetupWorkingDirectory()
suites.append(suite)
@@ -326,7 +331,7 @@ def Main():
for mode in options.mode:
for arch in options.arch:
try:
- code = Execute(arch, mode, args, options, suites, workspace)
+ code = Execute(arch, mode, args, options, suites, BASE_DIR)
exit_code = exit_code or code
except KeyboardInterrupt:
return 2
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index fe8091efb3..c94457fe6d 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -60,27 +60,33 @@ ARCH_GUESS = utils.DefaultArch()
# expected runtimes (suites with slow test cases first). These groups are
 # invoked in separate steps on the bots.
TEST_MAP = {
+ # This needs to stay in sync with test/bot_default.isolate.
"bot_default": [
"mjsunit",
"cctest",
"webkit",
+ "fuzzer",
"message",
"preparser",
"intl",
"unittests",
],
+ # This needs to stay in sync with test/default.isolate.
"default": [
"mjsunit",
"cctest",
+ "fuzzer",
"message",
"preparser",
"intl",
"unittests",
],
+ # This needs to stay in sync with test/ignition.isolate.
"ignition": [
"mjsunit",
"cctest",
],
+ # This needs to stay in sync with test/optimize_for_size.isolate.
"optimize_for_size": [
"mjsunit",
"cctest",
diff --git a/deps/v8/tools/run-valgrind.gyp b/deps/v8/tools/run-valgrind.gyp
new file mode 100644
index 0000000000..d06be933a9
--- /dev/null
+++ b/deps/v8/tools/run-valgrind.gyp
@@ -0,0 +1,26 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'run_valgrind_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../build/features.gypi',
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'run-valgrind.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/deps/v8/tools/run-valgrind.isolate b/deps/v8/tools/run-valgrind.isolate
new file mode 100644
index 0000000000..5947409e17
--- /dev/null
+++ b/deps/v8/tools/run-valgrind.isolate
@@ -0,0 +1,29 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ 'run-valgrind.py',
+ ],
+ 'files': [
+ 'run-valgrind.py',
+ ],
+ },
+ 'conditions': [
+ ['has_valgrind==1', {
+ 'variables': {
+ 'files': [
+        # This assumes valgrind binaries have been fetched as a custom deps
+ # into v8/third_party/valgrind. It is not clear on which target
+ # machine this will run, but grabbing both is cheap.
+ '../third_party/valgrind/linux_x86/',
+ '../third_party/valgrind/linux_x64/',
+ ],
+ },
+ }],
+ ],
+ 'includes': [
+ '../src/d8.isolate',
+ ],
+}
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index a8cc3fab71..db4245f499 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -350,9 +350,9 @@ class Node(object):
class DefaultSentinel(Node):
"""Fake parent node with all default values."""
- def __init__(self):
+ def __init__(self, binary = "d8"):
super(DefaultSentinel, self).__init__()
- self.binary = "d8"
+ self.binary = binary
self.run_count = 10
self.timeout = 60
self.path = []
@@ -543,11 +543,10 @@ def MakeGraphConfig(suite, arch, parent):
raise Exception("Invalid suite configuration.")
-def BuildGraphConfigs(suite, arch, parent=None):
+def BuildGraphConfigs(suite, arch, parent):
"""Builds a tree structure of graph objects that corresponds to the suite
configuration.
"""
- parent = parent or DefaultSentinel()
# TODO(machenbach): Implement notion of cpu type?
if arch not in suite.get("archs", SUPPORTED_ARCHS):
@@ -732,6 +731,12 @@ class AndroidPlatform(Platform): # pragma: no cover
target_dir,
skip_if_missing=True,
)
+ self._PushFile(
+ shell_dir,
+ "snapshot_blob_ignition.bin",
+ target_dir,
+ skip_if_missing=True,
+ )
def PreTests(self, node, path):
suite_dir = os.path.abspath(os.path.dirname(path))
@@ -813,6 +818,11 @@ def Main(args):
default="out")
parser.add_option("--outdir-no-patch",
help="Base directory with compile output without patch")
+ parser.add_option("--binary-override-path",
+ help="JavaScript engine binary. By default, d8 under "
+ "architecture-specific build dir. "
+ "Not supported in conjunction with outdir-no-patch.")
+
(options, args) = parser.parse_args(args)
if len(args) == 0: # pragma: no cover
@@ -843,7 +853,18 @@ def Main(args):
else:
build_config = "%s.release" % options.arch
- options.shell_dir = os.path.join(workspace, options.outdir, build_config)
+ if options.binary_override_path == None:
+ options.shell_dir = os.path.join(workspace, options.outdir, build_config)
+ default_binary_name = "d8"
+ else:
+ if not os.path.isfile(options.binary_override_path):
+ print "binary-override-path must be a file name"
+ return 1
+ if options.outdir_no_patch:
+ print "specify either binary-override-path or outdir-no-patch"
+ return 1
+ options.shell_dir = os.path.dirname(options.binary_override_path)
+ default_binary_name = os.path.basename(options.binary_override_path)
if options.outdir_no_patch:
options.shell_dir_no_patch = os.path.join(
@@ -872,7 +893,8 @@ def Main(args):
platform.PreExecution()
# Build the graph/trace tree structure.
- root = BuildGraphConfigs(suite, options.arch)
+ default_parent = DefaultSentinel(default_binary_name)
+ root = BuildGraphConfigs(suite, options.arch, default_parent)
# Callback to be called on each node on traversal.
def NodeCB(node):
diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py
index c9fe54175a..0d90ab8d0d 100644
--- a/deps/v8/tools/testrunner/local/execution.py
+++ b/deps/v8/tools/testrunner/local/execution.py
@@ -28,6 +28,7 @@
import collections
import os
+import re
import shutil
import sys
import time
@@ -38,6 +39,7 @@ from . import perfdata
from . import statusfile
from . import testsuite
from . import utils
+from ..objects import output
# Base dir of the v8 checkout.
@@ -82,7 +84,7 @@ def MakeProcessContext(context):
def GetCommand(test, context):
d8testflag = []
- shell = test.suite.shell()
+ shell = test.shell()
if shell == "d8":
d8testflag = ["--test"]
if utils.IsWindows():
@@ -134,15 +136,28 @@ class Job(object):
raise NotImplementedError()
+def SetupProblem(exception, test):
+ stderr = ">>> EXCEPTION: %s\n" % exception
+ match = re.match(r"^.*No such file or directory: '(.*)'$", str(exception))
+ if match:
+    # Extra debugging information when files are claimed missing.
+ f = match.group(1)
+ stderr += ">>> File %s exists? -> %s\n" % (f, os.path.exists(f))
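+  # Return a synthetic failure result carrying the exception text.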
+ return test.id, output.Output(1, False, "", stderr), 0
+
+
class TestJob(Job):
def __init__(self, test):
self.test = test
def Run(self, process_context):
- # Retrieve a new suite object on the worker-process side. The original
- # suite object isn't pickled.
- self.test.SetSuiteObject(process_context.suites)
- instr = _GetInstructions(self.test, process_context.context)
+ try:
+ # Retrieve a new suite object on the worker-process side. The original
+ # suite object isn't pickled.
+ self.test.SetSuiteObject(process_context.suites)
+ instr = _GetInstructions(self.test, process_context.context)
+ except Exception, e:
+ return SetupProblem(e, self.test)
start_time = time.time()
if instr.dep_command is not None:
diff --git a/deps/v8/tools/testrunner/local/pool.py b/deps/v8/tools/testrunner/local/pool.py
index 6d123fd4e5..99996ee3ce 100644
--- a/deps/v8/tools/testrunner/local/pool.py
+++ b/deps/v8/tools/testrunner/local/pool.py
@@ -109,6 +109,7 @@ class Pool():
process boundary.
"""
try:
+ internal_error = False
gen = iter(gen)
self.advance = self._advance_more
@@ -134,7 +135,9 @@ class Pool():
yield MaybeResult.create_heartbeat()
self.count -= 1
if result.exception:
- # Ignore items with unexpected exceptions.
+ # TODO(machenbach): Handle a few known types of internal errors
+ # gracefully, e.g. missing test files.
+ internal_error = True
continue
elif result.break_now:
# A keyboard interrupt happened in one of the worker processes.
@@ -144,6 +147,8 @@ class Pool():
self.advance(gen)
finally:
self.terminate()
+ if internal_error:
+ raise Exception("Internal error in a worker process.")
def _advance_more(self, gen):
while self.count < self.num_workers * self.BUFFER_FACTOR:
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index e3d1e232e8..55e0eb21ae 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -41,8 +41,7 @@ ALL_VARIANT_FLAGS = {
"turbofan": [["--turbo"]],
"turbofan_opt": [["--turbo", "--always-opt"]],
"nocrankshaft": [["--nocrankshaft"]],
- "ignition": [["--ignition", "--turbo", "--ignition-fake-try-catch",
- "--ignition-fallback-on-eval-and-catch"]],
+ "ignition": [["--ignition", "--turbo"]],
"preparser": [["--min-preparse-length=0"]],
}
@@ -52,8 +51,7 @@ FAST_VARIANT_FLAGS = {
"stress": [["--stress-opt"]],
"turbofan": [["--turbo"]],
"nocrankshaft": [["--nocrankshaft"]],
- "ignition": [["--ignition", "--turbo", "--ignition-fake-try-catch",
- "--ignition-fallback-on-eval-and-catch"]],
+ "ignition": [["--ignition", "--turbo"]],
"preparser": [["--min-preparse-length=0"]],
}
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index fa2265c070..b91f8b4b56 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -30,12 +30,13 @@ from . import output
class TestCase(object):
def __init__(self, suite, path, variant='default', flags=None,
- dependency=None):
+ dependency=None, override_shell=None):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test
self.variant = variant # name of the used testing variant
self.dependency = dependency # |path| for testcase that must be run first
+ self.override_shell = override_shell
self.outcomes = set([])
self.output = None
self.id = None # int, used to map result back to TestCase instance
@@ -44,7 +45,7 @@ class TestCase(object):
def CopyAddingFlags(self, variant, flags):
copy = TestCase(self.suite, self.path, variant, self.flags + flags,
- self.dependency)
+ self.dependency, self.override_shell)
copy.outcomes = self.outcomes
return copy
@@ -55,15 +56,16 @@ class TestCase(object):
"""
assert self.id is not None
return [self.suitename(), self.path, self.variant, self.flags,
- self.dependency, list(self.outcomes or []), self.id]
+ self.dependency, self.override_shell, list(self.outcomes or []),
+ self.id]
@staticmethod
def UnpackTask(task):
"""Creates a new TestCase object based on packed task data."""
# For the order of the fields, refer to PackTask() above.
- test = TestCase(str(task[0]), task[1], task[2], task[3], task[4])
- test.outcomes = set(task[5])
- test.id = task[6]
+ test = TestCase(str(task[0]), task[1], task[2], task[3], task[4], task[5])
+ test.outcomes = set(task[6])
+ test.id = task[7]
test.run = 1
return test
@@ -87,6 +89,11 @@ class TestCase(object):
def GetLabel(self):
return self.suitename() + "/" + self.suite.CommonTestName(self)
+ def shell(self):
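+    # A per-test shell override takes precedence over the suite default.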
+ if self.override_shell:
+ return self.override_shell
+ return self.suite.shell()
+
def __getstate__(self):
"""Representation to pickle test cases.
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index 2403f7d782..fbd4036dad 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -53,6 +53,10 @@ def main():
parser.add_argument('benchmarks', nargs='+', help='The benchmarks to run.')
parser.add_argument('--extra-flags', default='',
help='Extra flags to be passed to the executable.')
+ parser.add_argument('-r', '--revision', type=str, default=None,
+ help='Revision (use full hash!) to use for the try job; '
+ 'default: the revision will be determined by the '
+ 'try server; see its waterfall for more info')
for option in sorted(BOTS):
parser.add_argument(
option, dest='bots', action='append_const', const=BOTS[option],
@@ -85,6 +89,7 @@ def main():
cmd = ['git cl try -m internal.client.v8']
cmd += ['-b %s' % bot for bot in options.bots]
+ if options.revision: cmd += ['-r %s' % options.revision]
benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
cmd += ['-p \'testfilter=[%s]\'' % ','.join(benchmarks)]
if options.extra_flags:
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 0461bcbb66..39cfeb1707 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -55,8 +55,8 @@ INSTANCE_TYPES = {
132: "MAP_TYPE",
133: "CODE_TYPE",
131: "ODDBALL_TYPE",
- 173: "CELL_TYPE",
- 176: "PROPERTY_CELL_TYPE",
+ 171: "CELL_TYPE",
+ 174: "PROPERTY_CELL_TYPE",
129: "HEAP_NUMBER_TYPE",
134: "MUTABLE_HEAP_NUMBER_TYPE",
135: "FOREIGN_TYPE",
@@ -73,59 +73,57 @@ INSTANCE_TYPES = {
146: "FIXED_FLOAT64_ARRAY_TYPE",
147: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
149: "FILLER_TYPE",
- 150: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
- 151: "DECLARED_ACCESSOR_INFO_TYPE",
- 152: "EXECUTABLE_ACCESSOR_INFO_TYPE",
- 153: "ACCESSOR_PAIR_TYPE",
- 154: "ACCESS_CHECK_INFO_TYPE",
- 155: "INTERCEPTOR_INFO_TYPE",
- 156: "CALL_HANDLER_INFO_TYPE",
- 157: "FUNCTION_TEMPLATE_INFO_TYPE",
- 158: "OBJECT_TEMPLATE_INFO_TYPE",
- 159: "SIGNATURE_INFO_TYPE",
- 160: "TYPE_SWITCH_INFO_TYPE",
- 162: "ALLOCATION_MEMENTO_TYPE",
- 161: "ALLOCATION_SITE_TYPE",
- 163: "SCRIPT_TYPE",
- 164: "CODE_CACHE_TYPE",
- 165: "POLYMORPHIC_CODE_CACHE_TYPE",
- 166: "TYPE_FEEDBACK_INFO_TYPE",
- 167: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 168: "BOX_TYPE",
- 177: "PROTOTYPE_INFO_TYPE",
- 178: "SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE",
- 171: "FIXED_ARRAY_TYPE",
+ 150: "ACCESSOR_INFO_TYPE",
+ 151: "ACCESSOR_PAIR_TYPE",
+ 152: "ACCESS_CHECK_INFO_TYPE",
+ 153: "INTERCEPTOR_INFO_TYPE",
+ 154: "CALL_HANDLER_INFO_TYPE",
+ 155: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 156: "OBJECT_TEMPLATE_INFO_TYPE",
+ 157: "SIGNATURE_INFO_TYPE",
+ 158: "TYPE_SWITCH_INFO_TYPE",
+ 160: "ALLOCATION_MEMENTO_TYPE",
+ 159: "ALLOCATION_SITE_TYPE",
+ 161: "SCRIPT_TYPE",
+ 162: "CODE_CACHE_TYPE",
+ 163: "POLYMORPHIC_CODE_CACHE_TYPE",
+ 164: "TYPE_FEEDBACK_INFO_TYPE",
+ 165: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 166: "BOX_TYPE",
+ 175: "PROTOTYPE_INFO_TYPE",
+ 176: "SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE",
+ 169: "FIXED_ARRAY_TYPE",
148: "FIXED_DOUBLE_ARRAY_TYPE",
- 172: "SHARED_FUNCTION_INFO_TYPE",
- 174: "WEAK_CELL_TYPE",
- 175: "TRANSITION_ARRAY_TYPE",
- 181: "JS_MESSAGE_OBJECT_TYPE",
- 180: "JS_VALUE_TYPE",
- 182: "JS_DATE_TYPE",
- 183: "JS_OBJECT_TYPE",
- 184: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 185: "JS_GENERATOR_OBJECT_TYPE",
- 186: "JS_MODULE_TYPE",
- 187: "JS_GLOBAL_OBJECT_TYPE",
- 188: "JS_GLOBAL_PROXY_TYPE",
- 189: "JS_ARRAY_TYPE",
- 190: "JS_ARRAY_BUFFER_TYPE",
- 191: "JS_TYPED_ARRAY_TYPE",
- 192: "JS_DATA_VIEW_TYPE",
- 179: "JS_PROXY_TYPE",
- 193: "JS_SET_TYPE",
- 194: "JS_MAP_TYPE",
- 195: "JS_SET_ITERATOR_TYPE",
- 196: "JS_MAP_ITERATOR_TYPE",
- 197: "JS_ITERATOR_RESULT_TYPE",
- 198: "JS_WEAK_MAP_TYPE",
- 199: "JS_WEAK_SET_TYPE",
- 200: "JS_PROMISE_TYPE",
- 201: "JS_REGEXP_TYPE",
- 202: "JS_BOUND_FUNCTION_TYPE",
- 203: "JS_FUNCTION_TYPE",
- 169: "DEBUG_INFO_TYPE",
- 170: "BREAK_POINT_INFO_TYPE",
+ 170: "SHARED_FUNCTION_INFO_TYPE",
+ 172: "WEAK_CELL_TYPE",
+ 173: "TRANSITION_ARRAY_TYPE",
+ 179: "JS_MESSAGE_OBJECT_TYPE",
+ 178: "JS_VALUE_TYPE",
+ 180: "JS_DATE_TYPE",
+ 181: "JS_OBJECT_TYPE",
+ 182: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 183: "JS_GENERATOR_OBJECT_TYPE",
+ 184: "JS_MODULE_TYPE",
+ 185: "JS_GLOBAL_OBJECT_TYPE",
+ 186: "JS_GLOBAL_PROXY_TYPE",
+ 187: "JS_ARRAY_TYPE",
+ 188: "JS_ARRAY_BUFFER_TYPE",
+ 189: "JS_TYPED_ARRAY_TYPE",
+ 190: "JS_DATA_VIEW_TYPE",
+ 177: "JS_PROXY_TYPE",
+ 191: "JS_SET_TYPE",
+ 192: "JS_MAP_TYPE",
+ 193: "JS_SET_ITERATOR_TYPE",
+ 194: "JS_MAP_ITERATOR_TYPE",
+ 195: "JS_ITERATOR_RESULT_TYPE",
+ 196: "JS_WEAK_MAP_TYPE",
+ 197: "JS_WEAK_SET_TYPE",
+ 198: "JS_PROMISE_TYPE",
+ 199: "JS_REGEXP_TYPE",
+ 200: "JS_BOUND_FUNCTION_TYPE",
+ 201: "JS_FUNCTION_TYPE",
+ 167: "DEBUG_INFO_TYPE",
+ 168: "BREAK_POINT_INFO_TYPE",
}
# List of known V8 maps.
@@ -133,7 +131,7 @@ KNOWN_MAPS = {
0x08081: (136, "ByteArrayMap"),
0x080ad: (132, "MetaMap"),
0x080d9: (131, "NullMap"),
- 0x08105: (171, "FixedArrayMap"),
+ 0x08105: (169, "FixedArrayMap"),
0x08131: (4, "OneByteInternalizedStringMap"),
0x0815d: (138, "FreeSpaceMap"),
0x08189: (149, "OnePointerFillerMap"),
@@ -143,9 +141,9 @@ KNOWN_MAPS = {
0x08239: (131, "TheHoleMap"),
0x08265: (131, "BooleanMap"),
0x08291: (131, "UninitializedMap"),
- 0x082bd: (173, "CellMap"),
- 0x082e9: (176, "GlobalPropertyCellMap"),
- 0x08315: (172, "SharedFunctionInfoMap"),
+ 0x082bd: (171, "CellMap"),
+ 0x082e9: (174, "GlobalPropertyCellMap"),
+ 0x08315: (170, "SharedFunctionInfoMap"),
0x08341: (134, "MutableHeapNumberMap"),
0x0836d: (130, "Float32x4Map"),
0x08399: (130, "Int32x4Map"),
@@ -157,21 +155,21 @@ KNOWN_MAPS = {
0x084a1: (130, "Int8x16Map"),
0x084cd: (130, "Uint8x16Map"),
0x084f9: (130, "Bool8x16Map"),
- 0x08525: (171, "NativeContextMap"),
+ 0x08525: (169, "NativeContextMap"),
0x08551: (133, "CodeMap"),
- 0x0857d: (171, "ScopeInfoMap"),
- 0x085a9: (171, "FixedCOWArrayMap"),
+ 0x0857d: (169, "ScopeInfoMap"),
+ 0x085a9: (169, "FixedCOWArrayMap"),
0x085d5: (148, "FixedDoubleArrayMap"),
- 0x08601: (174, "WeakCellMap"),
- 0x0862d: (175, "TransitionArrayMap"),
+ 0x08601: (172, "WeakCellMap"),
+ 0x0862d: (173, "TransitionArrayMap"),
0x08659: (68, "OneByteStringMap"),
- 0x08685: (171, "FunctionContextMap"),
+ 0x08685: (169, "FunctionContextMap"),
0x086b1: (131, "NoInterceptorResultSentinelMap"),
0x086dd: (131, "ArgumentsMarkerMap"),
0x08709: (131, "ExceptionMap"),
0x08735: (131, "TerminationExceptionMap"),
- 0x08761: (171, "HashTableMap"),
- 0x0878d: (171, "OrderedHashTableMap"),
+ 0x08761: (169, "HashTableMap"),
+ 0x0878d: (169, "OrderedHashTableMap"),
0x087b9: (128, "SymbolMap"),
0x087e5: (64, "StringMap"),
0x08811: (69, "ConsOneByteStringMap"),
@@ -201,37 +199,37 @@ KNOWN_MAPS = {
0x08c31: (145, "FixedFloat32ArrayMap"),
0x08c5d: (146, "FixedFloat64ArrayMap"),
0x08c89: (147, "FixedUint8ClampedArrayMap"),
- 0x08cb5: (171, "SloppyArgumentsElementsMap"),
- 0x08ce1: (171, "CatchContextMap"),
- 0x08d0d: (171, "WithContextMap"),
- 0x08d39: (171, "BlockContextMap"),
- 0x08d65: (171, "ModuleContextMap"),
- 0x08d91: (171, "ScriptContextMap"),
- 0x08dbd: (171, "ScriptContextTableMap"),
- 0x08de9: (181, "JSMessageObjectMap"),
+ 0x08cb5: (169, "SloppyArgumentsElementsMap"),
+ 0x08ce1: (169, "CatchContextMap"),
+ 0x08d0d: (169, "WithContextMap"),
+ 0x08d39: (169, "BlockContextMap"),
+ 0x08d65: (169, "ModuleContextMap"),
+ 0x08d91: (169, "ScriptContextMap"),
+ 0x08dbd: (169, "ScriptContextTableMap"),
+ 0x08de9: (179, "JSMessageObjectMap"),
0x08e15: (135, "ForeignMap"),
- 0x08e41: (183, "NeanderMap"),
- 0x08e6d: (183, "ExternalMap"),
- 0x08e99: (162, "AllocationMementoMap"),
- 0x08ec5: (161, "AllocationSiteMap"),
- 0x08ef1: (165, "PolymorphicCodeCacheMap"),
- 0x08f1d: (163, "ScriptMap"),
+ 0x08e41: (181, "NeanderMap"),
+ 0x08e6d: (181, "ExternalMap"),
+ 0x08e99: (160, "AllocationMementoMap"),
+ 0x08ec5: (159, "AllocationSiteMap"),
+ 0x08ef1: (163, "PolymorphicCodeCacheMap"),
+ 0x08f1d: (161, "ScriptMap"),
0x08f75: (137, "BytecodeArrayMap"),
- 0x08fa1: (168, "BoxMap"),
- 0x08fcd: (152, "ExecutableAccessorInfoMap"),
- 0x08ff9: (153, "AccessorPairMap"),
- 0x09025: (154, "AccessCheckInfoMap"),
- 0x09051: (155, "InterceptorInfoMap"),
- 0x0907d: (156, "CallHandlerInfoMap"),
- 0x090a9: (157, "FunctionTemplateInfoMap"),
- 0x090d5: (158, "ObjectTemplateInfoMap"),
- 0x09101: (164, "CodeCacheMap"),
- 0x0912d: (166, "TypeFeedbackInfoMap"),
- 0x09159: (167, "AliasedArgumentsEntryMap"),
- 0x09185: (169, "DebugInfoMap"),
- 0x091b1: (170, "BreakPointInfoMap"),
- 0x091dd: (177, "PrototypeInfoMap"),
- 0x09209: (178, "SloppyBlockWithEvalContextExtensionMap"),
+ 0x08fa1: (166, "BoxMap"),
+ 0x08fcd: (150, "AccessorInfoMap"),
+ 0x08ff9: (151, "AccessorPairMap"),
+ 0x09025: (152, "AccessCheckInfoMap"),
+ 0x09051: (153, "InterceptorInfoMap"),
+ 0x0907d: (154, "CallHandlerInfoMap"),
+ 0x090a9: (155, "FunctionTemplateInfoMap"),
+ 0x090d5: (156, "ObjectTemplateInfoMap"),
+ 0x09101: (162, "CodeCacheMap"),
+ 0x0912d: (164, "TypeFeedbackInfoMap"),
+ 0x09159: (165, "AliasedArgumentsEntryMap"),
+ 0x09185: (167, "DebugInfoMap"),
+ 0x091b1: (168, "BreakPointInfoMap"),
+ 0x091dd: (175, "PrototypeInfoMap"),
+ 0x09209: (176, "SloppyBlockWithEvalContextExtensionMap"),
}
# List of known V8 objects.
@@ -270,28 +268,27 @@ KNOWN_OBJECTS = {
("OLD_SPACE", 0x0980d): "MinusInfinityValue",
("OLD_SPACE", 0x0981d): "MessageListeners",
("OLD_SPACE", 0x09839): "CodeStubs",
- ("OLD_SPACE", 0x10201): "DummyVector",
- ("OLD_SPACE", 0x1403d): "NonMonomorphicCache",
- ("OLD_SPACE", 0x14651): "PolymorphicCodeCache",
- ("OLD_SPACE", 0x14659): "NativesSourceCache",
- ("OLD_SPACE", 0x148f5): "ExperimentalNativesSourceCache",
- ("OLD_SPACE", 0x14929): "ExtraNativesSourceCache",
- ("OLD_SPACE", 0x14949): "ExperimentalExtraNativesSourceCache",
- ("OLD_SPACE", 0x14955): "EmptyScript",
- ("OLD_SPACE", 0x14995): "IntrinsicFunctionNames",
- ("OLD_SPACE", 0x2e73d): "UndefinedCell",
- ("OLD_SPACE", 0x2e745): "ObservationState",
- ("OLD_SPACE", 0x2e751): "ScriptList",
- ("OLD_SPACE", 0x2e8d9): "ClearedOptimizedCodeMap",
- ("OLD_SPACE", 0x2e8e5): "EmptyWeakCell",
- ("OLD_SPACE", 0x54715): "EmptySlowElementDictionary",
- ("OLD_SPACE", 0x54761): "WeakObjectToCodeTable",
- ("OLD_SPACE", 0x54875): "ArrayProtector",
- ("OLD_SPACE", 0x54885): "EmptyPropertyCell",
- ("OLD_SPACE", 0x54895): "NoScriptSharedFunctionInfos",
- ("OLD_SPACE", 0x5711d): "InterpreterTable",
- ("OLD_SPACE", 0x57325): "EmptyBytecodeArray",
- ("OLD_SPACE", 0x5a2d1): "StringTable",
- ("CODE_SPACE", 0x1a2a1): "JsEntryCode",
- ("CODE_SPACE", 0x1f081): "JsConstructEntryCode",
+ ("OLD_SPACE", 0x0feb9): "DummyVector",
+ ("OLD_SPACE", 0x13fed): "NonMonomorphicCache",
+ ("OLD_SPACE", 0x14601): "PolymorphicCodeCache",
+ ("OLD_SPACE", 0x14609): "NativesSourceCache",
+ ("OLD_SPACE", 0x1488d): "ExperimentalNativesSourceCache",
+ ("OLD_SPACE", 0x148c1): "ExtraNativesSourceCache",
+ ("OLD_SPACE", 0x148e1): "ExperimentalExtraNativesSourceCache",
+ ("OLD_SPACE", 0x148ed): "EmptyScript",
+ ("OLD_SPACE", 0x1492d): "IntrinsicFunctionNames",
+ ("OLD_SPACE", 0x2e919): "EmptyPropertiesDictionary",
+ ("OLD_SPACE", 0x2e965): "UndefinedCell",
+ ("OLD_SPACE", 0x2e96d): "ObservationState",
+ ("OLD_SPACE", 0x2e979): "ScriptList",
+ ("OLD_SPACE", 0x2eb01): "ClearedOptimizedCodeMap",
+ ("OLD_SPACE", 0x2eb0d): "EmptyWeakCell",
+ ("OLD_SPACE", 0x534d1): "EmptySlowElementDictionary",
+ ("OLD_SPACE", 0x5351d): "WeakObjectToCodeTable",
+ ("OLD_SPACE", 0x53631): "ArrayProtector",
+ ("OLD_SPACE", 0x53641): "EmptyPropertyCell",
+ ("OLD_SPACE", 0x53651): "NoScriptSharedFunctionInfos",
+ ("OLD_SPACE", 0x59cf1): "StringTable",
+ ("CODE_SPACE", 0x1a001): "JsEntryCode",
+ ("CODE_SPACE", 0x1e721): "JsConstructEntryCode",
}
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 687be113dd..d1395f5d91 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -5,4 +5,4 @@ Try to write something funny. And please don't add trailing whitespace.
A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
-The Smi looked at them when a crazy v8-autoroll account showed up.....
+The Smi looked at them when a crazy v8-autoroll account showed up..